--- a/src/Benchmarks/Quickcheck_Benchmark/Needham_Schroeder_Base.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Benchmarks/Quickcheck_Benchmark/Needham_Schroeder_Base.thy Sat Jan 05 17:24:33 2019 +0100
@@ -151,15 +151,15 @@
fun of_set compfuns (Type ("fun", [T, _])) =
case body_type (Predicate_Compile_Aux.mk_monadT compfuns T) of
Type ("Quickcheck_Exhaustive.three_valued", _) =>
- Const(@{const_name neg_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
- | Type ("Predicate.pred", _) => Const(@{const_name pred_of_set}, HOLogic.mk_setT T --> Predicate_Compile_Aux.mk_monadT compfuns T)
- | _ => Const(@{const_name pos_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ Const(\<^const_name>\<open>neg_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ | Type ("Predicate.pred", _) => Const(\<^const_name>\<open>pred_of_set\<close>, HOLogic.mk_setT T --> Predicate_Compile_Aux.mk_monadT compfuns T)
+ | _ => Const(\<^const_name>\<open>pos_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
fun member compfuns (U as Type ("fun", [T, _])) =
(absdummy T (absdummy (HOLogic.mk_setT T) (Predicate_Compile_Aux.mk_if compfuns
- (Const (@{const_name "Set.member"}, T --> HOLogic.mk_setT T --> @{typ bool}) $ Bound 1 $ Bound 0))))
+ (Const (\<^const_name>\<open>Set.member\<close>, T --> HOLogic.mk_setT T --> \<^typ>\<open>bool\<close>) $ Bound 1 $ Bound 0))))
in
- Core_Data.force_modes_and_compilations @{const_name Set.member}
+ Core_Data.force_modes_and_compilations \<^const_name>\<open>Set.member\<close>
[(oi, (of_set, false)), (ii, (member, false))]
end
\<close>
@@ -186,7 +186,7 @@
code_pred [generator_cps] synth' unfolding synth'_def by (rule synth.cases) fastforce+
-setup \<open>Predicate_Compile_Data.ignore_consts [@{const_name analz}, @{const_name knows}]\<close>
+setup \<open>Predicate_Compile_Data.ignore_consts [\<^const_name>\<open>analz\<close>, \<^const_name>\<open>knows\<close>]\<close>
declare ListMem_iff[symmetric, code_pred_inline]
declare [[quickcheck_timing]]
--- a/src/Benchmarks/Record_Benchmark/Record_Benchmark.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Benchmarks/Record_Benchmark/Record_Benchmark.thy Sat Jan 05 17:24:33 2019 +0100
@@ -356,49 +356,49 @@
lemma "(r\<lparr>A255:=x,A253:=y,A255:=z \<rparr>) = r\<lparr>A253:=y,A255:=z\<rparr>"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.upd_simproc]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.upd_simproc]) 1\<close>)
done
lemma "(\<forall>r. P (A155 r)) \<longrightarrow> (\<forall>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<forall>r. P (A155 r)) \<longrightarrow> (\<forall>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (A155 r)) \<longrightarrow> (\<exists>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (A155 r)) \<longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "\<And>r. P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply auto
done
lemma "\<And>r. P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
lemma "P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
lemma fixes r shows "P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
@@ -409,7 +409,7 @@
assume "P (A155 r)"
then have "\<exists>x. P x"
apply -
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
end
@@ -417,7 +417,7 @@
lemma "\<exists>r. A155 r = x"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.ex_sel_eq_simproc]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.ex_sel_eq_simproc]) 1\<close>)
done
print_record many_A
--- a/src/Doc/Classes/Classes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Classes/Classes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -119,7 +119,7 @@
subsection \<open>Class instantiation \label{sec:class_inst}\<close>
text \<open>
- The concrete type @{typ int} is made a @{class semigroup} instance
+ The concrete type \<^typ>\<open>int\<close> is made a \<^class>\<open>semigroup\<close> instance
by providing a suitable definition for the class parameter \<open>(\<otimes>)\<close> and a proof for the specification of @{fact assoc}. This is
accomplished by the @{command instantiation} target:
\<close>
@@ -149,11 +149,10 @@
relevant primitive proof goals; typically it is the first method
applied in an instantiation proof.
- From now on, the type-checker will consider @{typ int} as a @{class
- semigroup} automatically, i.e.\ any general results are immediately
+ From now on, the type-checker will consider \<^typ>\<open>int\<close> as a \<^class>\<open>semigroup\<close> automatically, i.e.\ any general results are immediately
available on concrete instances.
- \<^medskip> Another instance of @{class semigroup} yields the natural
+ \<^medskip> Another instance of \<^class>\<open>semigroup\<close> yields the natural
numbers:
\<close>
@@ -206,7 +205,7 @@
\<^noindent> Associativity of product semigroups is established using
the definition of \<open>(\<otimes>)\<close> on products and the hypothetical
associativity of the type components; these hypotheses are
- legitimate due to the @{class semigroup} constraints imposed on the
+ legitimate due to the \<^class>\<open>semigroup\<close> constraints imposed on the
type components by the @{command instance} proposition. Indeed,
this pattern often occurs with parametric types and type classes.
\<close>
@@ -216,7 +215,7 @@
text \<open>
We define a subclass \<open>monoidl\<close> (a semigroup with a left-hand
- neutral) by extending @{class semigroup} with one additional
+ neutral) by extending \<^class>\<open>semigroup\<close> with one additional
parameter \<open>neutral\<close> together with its characteristic property:
\<close>
@@ -388,7 +387,7 @@
qed
text \<open>
- \<^noindent> Here the \qt{@{keyword "in"} @{class group}} target
+ \<^noindent> Here the \qt{@{keyword "in"} \<^class>\<open>group\<close>} target
specification indicates that the result is recorded within that
context for later use. This local theorem is also lifted to the
global one @{fact "group.left_cancel:"} @{prop [source] "\<And>x y z ::
@@ -433,7 +432,7 @@
functors that have a canonical interpretation as type classes.
There is also the possibility of other interpretations. For
example, \<open>list\<close>s also form a monoid with \<open>append\<close> and
- @{term "[]"} as operations, but it seems inappropriate to apply to
+ \<^term>\<open>[]\<close> as operations, but it seems inappropriate to apply to
lists the same operations as for genuinely algebraic types. In such
a case, we can simply make a particular interpretation of monoids
for lists:
@@ -469,7 +468,7 @@
text \<open>
\<^noindent> This pattern is also helpful to reuse abstract
specifications on the \emph{same} type. For example, think of a
- class \<open>preorder\<close>; for type @{typ nat}, there are at least two
+ class \<open>preorder\<close>; for type \<^typ>\<open>nat\<close>, there are at least two
possible instances: the natural order or the order induced by the
divides relation. But only one of these instances can be used for
@{command instantiation}; using the locale behind the class \<open>preorder\<close>, it is still possible to utilise the same abstract
--- a/src/Doc/Classes/Setup.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Classes/Setup.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,18 +18,18 @@
fun alpha_ast_tr [] = Ast.Variable "'a"
| alpha_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
fun alpha_ofsort_ast_tr [ast] =
- Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'a", ast]
+ Ast.Appl [Ast.Constant \<^syntax_const>\<open>_ofsort\<close>, Ast.Variable "'a", ast]
| alpha_ofsort_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
fun beta_ast_tr [] = Ast.Variable "'b"
| beta_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
fun beta_ofsort_ast_tr [ast] =
- Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'b", ast]
+ Ast.Appl [Ast.Constant \<^syntax_const>\<open>_ofsort\<close>, Ast.Variable "'b", ast]
| beta_ofsort_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
in
- [(@{syntax_const "_alpha"}, K alpha_ast_tr),
- (@{syntax_const "_alpha_ofsort"}, K alpha_ofsort_ast_tr),
- (@{syntax_const "_beta"}, K beta_ast_tr),
- (@{syntax_const "_beta_ofsort"}, K beta_ofsort_ast_tr)]
+ [(\<^syntax_const>\<open>_alpha\<close>, K alpha_ast_tr),
+ (\<^syntax_const>\<open>_alpha_ofsort\<close>, K alpha_ofsort_ast_tr),
+ (\<^syntax_const>\<open>_beta\<close>, K beta_ast_tr),
+ (\<^syntax_const>\<open>_beta_ofsort\<close>, K beta_ofsort_ast_tr)]
end
\<close>
--- a/src/Doc/Codegen/Adaptation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Adaptation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -127,10 +127,9 @@
\secref{sec:adaptation_mechanisms})
\item Such parametrisations can involve references to a
- target-specific standard \<open>library\<close> (e.g. using the \<open>Haskell\<close> @{verbatim Maybe} type instead of the \<open>HOL\<close>
- @{type "option"} type); if such are used, the corresponding
- identifiers (in our example, @{verbatim Maybe}, @{verbatim
- Nothing} and @{verbatim Just}) also have to be considered \<open>reserved\<close>.
+ target-specific standard \<open>library\<close> (e.g. using the \<open>Haskell\<close> \<^verbatim>\<open>Maybe\<close> type instead of the \<open>HOL\<close>
+ \<^type>\<open>option\<close> type); if such are used, the corresponding
+ identifiers (in our example, \<^verbatim>\<open>Maybe\<close>, \<^verbatim>\<open>Nothing\<close> and \<^verbatim>\<open>Just\<close>) also have to be considered \<open>reserved\<close>.
\item Even more, the user can enrich the library of the
target-language by providing code snippets (\qt{\<open>includes\<close>}) which are prepended to any generated code (see
@@ -147,7 +146,7 @@
subsection \<open>Common adaptation applications \label{sec:common_adaptation}\<close>
text \<open>
- The @{theory Main} theory of Isabelle/HOL already provides a code
+ The \<^theory>\<open>Main\<close> theory of Isabelle/HOL already provides a code
generator setup which should be suitable for most applications.
Common extensions and modifications are available by certain
theories in \<^dir>\<open>~~/src/HOL/Library\<close>; beside being useful in
@@ -156,22 +155,21 @@
\begin{description}
- \item[@{theory "HOL.Code_Numeral"}] provides additional numeric
- types @{typ integer} and @{typ natural} isomorphic to types
- @{typ int} and @{typ nat} respectively. Type @{typ integer}
- is mapped to target-language built-in integers; @{typ natural}
- is implemented as abstract type over @{typ integer}.
+ \item[\<^theory>\<open>HOL.Code_Numeral\<close>] provides additional numeric
+ types \<^typ>\<open>integer\<close> and \<^typ>\<open>natural\<close> isomorphic to types
+ \<^typ>\<open>int\<close> and \<^typ>\<open>nat\<close> respectively. Type \<^typ>\<open>integer\<close>
+ is mapped to target-language built-in integers; \<^typ>\<open>natural\<close>
+ is implemented as abstract type over \<^typ>\<open>integer\<close>.
Useful for code setups which involve e.g.~indexing
of target-language arrays. Part of \<open>HOL-Main\<close>.
- \item[@{theory "HOL.String"}] provides an additional datatype @{typ
- String.literal} which is isomorphic to lists of 7-bit (ASCII) characters;
- @{typ String.literal}s are mapped to target-language strings.
+ \item[\<^theory>\<open>HOL.String\<close>] provides an additional datatype \<^typ>\<open>String.literal\<close> which is isomorphic to lists of 7-bit (ASCII) characters;
+ \<^typ>\<open>String.literal\<close>s are mapped to target-language strings.
- Literal values of type @{typ String.literal} can be written
+ Literal values of type \<^typ>\<open>String.literal\<close> can be written
as \<open>STR ''\<dots>''\<close> for sequences of printable characters and
\<open>STR 0x\<dots>\<close> for one single ASCII code point given
- as hexadecimal numeral; @{typ String.literal} supports concatenation
+ as hexadecimal numeral; \<^typ>\<open>String.literal\<close> supports concatenation
\<open>\<dots> + \<dots>\<close> for all standard target languages.
Note that the particular notion of \qt{string} is target-language
@@ -181,37 +179,36 @@
like verifying parsing algorithms require a dedicated
target-language specific model.
- Nevertheless @{typ String.literal}s can be analyzed; the core operations
- for this are @{term_type String.asciis_of_literal} and
- @{term_type String.literal_of_asciis} which are implemented
- in a target-language-specific way; particularly @{const String.asciis_of_literal}
+ Nevertheless \<^typ>\<open>String.literal\<close>s can be analyzed; the core operations
+ for this are \<^term_type>\<open>String.asciis_of_literal\<close> and
+ \<^term_type>\<open>String.literal_of_asciis\<close> which are implemented
+ in a target-language-specific way; particularly \<^const>\<open>String.asciis_of_literal\<close>
checks its argument at runtime to make sure that it does
not contain non-ASCII-characters, to safeguard consistency.
- On top of these, more abstract conversions like @{term_type
- String.explode} and @{term_type String.implode}
+ On top of these, more abstract conversions like \<^term_type>\<open>String.explode\<close> and \<^term_type>\<open>String.implode\<close>
are implemented.
Part of \<open>HOL-Main\<close>.
- \item[\<open>Code_Target_Int\<close>] implements type @{typ int}
- by @{typ integer} and thus by target-language built-in integers.
+ \item[\<open>Code_Target_Int\<close>] implements type \<^typ>\<open>int\<close>
+ by \<^typ>\<open>integer\<close> and thus by target-language built-in integers.
\item[\<open>Code_Binary_Nat\<close>] implements type
- @{typ nat} using a binary rather than a linear representation,
+ \<^typ>\<open>nat\<close> using a binary rather than a linear representation,
which yields a considerable speedup for computations.
- Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
+ Pattern matching with \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> is eliminated
by a preprocessor.\label{abstract_nat}
- \item[\<open>Code_Target_Nat\<close>] implements type @{typ nat}
- by @{typ integer} and thus by target-language built-in integers.
- Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
+ \item[\<open>Code_Target_Nat\<close>] implements type \<^typ>\<open>nat\<close>
+ by \<^typ>\<open>integer\<close> and thus by target-language built-in integers.
+ Pattern matching with \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> is eliminated
by a preprocessor.
\item[\<open>Code_Target_Numeral\<close>] is a convenience theory
containing both \<open>Code_Target_Nat\<close> and
\<open>Code_Target_Int\<close>.
- \item[@{theory "HOL-Library.IArray"}] provides a type @{typ "'a iarray"}
+ \item[\<^theory>\<open>HOL-Library.IArray\<close>] provides a type \<^typ>\<open>'a iarray\<close>
isomorphic to lists but implemented by (effectively immutable)
arrays \emph{in SML only}.
@@ -245,9 +242,7 @@
distinguished entities with have nothing to do with the SML-built-in
notion of \qt{bool}. This results in less readable code;
additionally, eager evaluation may cause programs to loop or break
- which would perfectly terminate when the existing SML @{verbatim
- "bool"} would be used. To map the HOL @{typ bool} on SML @{verbatim
- "bool"}, we may use \qn{custom serialisations}:
+ which would perfectly terminate when the existing SML \<^verbatim>\<open>bool\<close> would be used. To map the HOL \<^typ>\<open>bool\<close> on SML \<^verbatim>\<open>bool\<close>, we may use \qn{custom serialisations}:
\<close>
code_printing %quotett
@@ -263,7 +258,7 @@
custom serialisation starts with a target language identifier
followed by an expression, which during code serialisation is
inserted whenever the type constructor would occur. Each
- ``@{verbatim "_"}'' in a serialisation expression is treated as a
+ ``\<^verbatim>\<open>_\<close>'' in a serialisation expression is treated as a
placeholder for the constant's or the type constructor's arguments.
\<close>
@@ -300,7 +295,7 @@
text \<open>
\noindent Next, we try to map HOL pairs to SML pairs, using the
- infix ``@{verbatim "*"}'' type constructor and parentheses:
+ infix ``\<^verbatim>\<open>*\<close>'' type constructor and parentheses:
\<close>
(*<*)
code_printing %invisible
@@ -312,11 +307,11 @@
| constant Pair \<rightharpoonup> (SML) "!((_),/ (_))"
text \<open>
- \noindent The initial bang ``@{verbatim "!"}'' tells the serialiser
+ \noindent The initial bang ``\<^verbatim>\<open>!\<close>'' tells the serialiser
never to put parentheses around the whole expression (they are
already present), while the parentheses around argument place
holders tell not to put parentheses around the arguments. The slash
- ``@{verbatim "/"}'' (followed by arbitrary white space) inserts a
+ ``\<^verbatim>\<open>/\<close>'' (followed by arbitrary white space) inserts a
space which may be used as a break if necessary during pretty
printing.
@@ -326,9 +321,9 @@
serialisations are completely axiomatic.
A further noteworthy detail is that any special character in a
- custom serialisation may be quoted using ``@{verbatim "'"}''; thus,
- in ``@{verbatim "fn '_ => _"}'' the first ``@{verbatim "_"}'' is a
- proper underscore while the second ``@{verbatim "_"}'' is a
+ custom serialisation may be quoted using ``\<^verbatim>\<open>'\<close>''; thus,
+ in ``\<^verbatim>\<open>fn '_ => _\<close>'' the first ``\<^verbatim>\<open>_\<close>'' is a
+ proper underscore while the second ``\<^verbatim>\<open>_\<close>'' is a
placeholder.
\<close>
@@ -337,8 +332,8 @@
text \<open>
For convenience, the default \<open>HOL\<close> setup for \<open>Haskell\<close>
- maps the @{class equal} class to its counterpart in \<open>Haskell\<close>,
- giving custom serialisations for the class @{class equal}
+ maps the \<^class>\<open>equal\<close> class to its counterpart in \<open>Haskell\<close>,
+ giving custom serialisations for the class \<^class>\<open>equal\<close>
and its operation @{const [source] HOL.equal}.
\<close>
@@ -348,7 +343,7 @@
text \<open>
\noindent A problem now occurs whenever a type which is an instance
- of @{class equal} in \<open>HOL\<close> is mapped on a \<open>Haskell\<close>-built-in type which is also an instance of \<open>Haskell\<close>
+ of \<^class>\<open>equal\<close> in \<open>HOL\<close> is mapped on a \<open>Haskell\<close>-built-in type which is also an instance of \<open>Haskell\<close>
\<open>Eq\<close>:
\<close>
--- a/src/Doc/Codegen/Computations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Computations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -80,7 +80,7 @@
placeholder for its corresponding type in ML under code generation.
\<^item> Then the corresponding computation is an ML function of type
- @{ML_type "Proof.context -> term -> 'ml"}
+ \<^ML_type>\<open>Proof.context -> term -> 'ml\<close>
partially implementing the morphism \<open>\<Phi> :: \<tau> \<rightarrow> T\<close> for all
\<^emph>\<open>input terms\<close> consisting only of input constants and applications.
@@ -154,7 +154,7 @@
Hence the functional argument accepts the following parameters
- \<^item> A postprocessor function @{ML_type "term -> term"}.
+ \<^item> A postprocessor function \<^ML_type>\<open>term -> term\<close>.
\<^item> The resulting value as optional argument.
@@ -165,7 +165,7 @@
\<close>
ML_val %quotetypewriter \<open>
- comp_nat @{context} @{term "sum_list [Suc 0, Suc (Suc 0)] * Suc (Suc 0)"}
+ comp_nat \<^context> \<^term>\<open>sum_list [Suc 0, Suc (Suc 0)] * Suc (Suc 0)\<close>
\<close>
text \<open>
@@ -193,7 +193,7 @@
(fn post => post o HOLogic.mk_nat o int_of_nat o the);
val comp_nat_list = @{computation "nat list"}
- (fn post => post o HOLogic.mk_list @{typ nat} o
+ (fn post => post o HOLogic.mk_list \<^typ>\<open>nat\<close> o
map (HOLogic.mk_nat o int_of_nat) o the);
end
@@ -272,11 +272,11 @@
ML %quotetypewriter \<open>
local
- fun raw_dvd (b, ct) = Thm.mk_binop @{cterm "Pure.eq :: bool \<Rightarrow> bool \<Rightarrow> prop"}
- ct (if b then @{cterm True} else @{cterm False});
+ fun raw_dvd (b, ct) = Thm.mk_binop \<^cterm>\<open>Pure.eq :: bool \<Rightarrow> bool \<Rightarrow> prop\<close>
+ ct (if b then \<^cterm>\<open>True\<close> else \<^cterm>\<open>False\<close>);
val (_, dvd_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding dvd}, raw_dvd)));
+ (Thm.add_oracle (\<^binding>\<open>dvd\<close>, raw_dvd)));
in
@@ -293,7 +293,7 @@
text \<open>
\<^item> Antiquotation @{ML_antiquotation computation_conv} basically yields
- a conversion of type @{ML_type "Proof.context -> cterm -> thm"}
+ a conversion of type \<^ML_type>\<open>Proof.context -> cterm -> thm\<close>
(see further @{cite "isabelle-implementation"}).
\<^item> The antiquotation expects one functional argument to bridge the
@@ -311,8 +311,8 @@
\<close> (*<*)
(*>*) ML_val %quotetypewriter \<open>
- conv_dvd @{context} @{cterm "7 dvd ( 62437867527846782 :: int)"};
- conv_dvd @{context} @{cterm "7 dvd (-62437867527846783 :: int)"};
+ conv_dvd \<^context> \<^cterm>\<open>7 dvd ( 62437867527846782 :: int)\<close>;
+ conv_dvd \<^context> \<^cterm>\<open>7 dvd (-62437867527846783 :: int)\<close>;
\<close>
text \<open>
@@ -341,7 +341,7 @@
fun integer_of_int (@{code int_of_integer} k) = k
- val cterm_of_int = Thm.cterm_of @{context} o HOLogic.mk_numeral o integer_of_int;
+ val cterm_of_int = Thm.cterm_of \<^context> o HOLogic.mk_numeral o integer_of_int;
val divisor = Thm.dest_arg o Thm.dest_arg;
@@ -366,8 +366,8 @@
\<close>
ML_val %quotetypewriter \<open>
- conv_div @{context}
- @{cterm "46782454343499999992777742432342242323423425 div (7 :: int)"}
+ conv_div \<^context>
+ \<^cterm>\<open>46782454343499999992777742432342242323423425 div (7 :: int)\<close>
\<close>
text \<open>
@@ -388,7 +388,7 @@
text \<open>
The \<open>computation_check\<close> antiquotation is convenient if
only a positive checking of propositions is desired, because then
- the result type is fixed (@{typ prop}) and all the technical
+ the result type is fixed (\<^typ>\<open>prop\<close>) and all the technical
matter concerning postprocessing and oracles is done in the framework
once and for all:
\<close>
@@ -402,17 +402,17 @@
\<close>
text \<open>
- \noindent The HOL judgement @{term Trueprop} embeds an expression
- of type @{typ bool} into @{typ prop}.
+ \noindent The HOL judgement \<^term>\<open>Trueprop\<close> embeds an expression
+ of type \<^typ>\<open>bool\<close> into \<^typ>\<open>prop\<close>.
\<close>
ML_val %quotetypewriter \<open>
- check_nat @{context} @{cprop "less (Suc (Suc 0)) (Suc (Suc (Suc 0)))"}
+ check_nat \<^context> \<^cprop>\<open>less (Suc (Suc 0)) (Suc (Suc (Suc 0)))\<close>
\<close>
text \<open>
\noindent Note that such computations can only \<^emph>\<open>check\<close>
- for @{typ prop}s to hold but not \<^emph>\<open>decide\<close>.
+ for \<^typ>\<open>prop\<close>s to hold but not \<^emph>\<open>decide\<close>.
\<close>
@@ -436,7 +436,7 @@
naively: the compilation pattern for computations fails whenever
target-language literals are involved; since various
common code generator setups (see \secref{sec:common_adaptation})
- implement @{typ nat} and @{typ int} by target-language literals,
+ implement \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close> by target-language literals,
this problem manifests whenever numeric types are involved.
In practice, this is circumvented with a dedicated preprocessor
setup for literals (see also \secref{sec:input_constants_pitfalls}).
@@ -446,7 +446,7 @@
too much detail:
\<close>
-paragraph \<open>An example for @{typ nat}\<close>
+paragraph \<open>An example for \<^typ>\<open>nat\<close>\<close>
ML %quotetypewriter \<open>
val check_nat = @{computation_check terms:
@@ -456,10 +456,10 @@
\<close>
ML_val %quotetypewriter \<open>
- check_nat @{context} @{cprop "even (Suc 0 + 1 + 2 + 3 + 4 + 5)"}
+ check_nat \<^context> \<^cprop>\<open>even (Suc 0 + 1 + 2 + 3 + 4 + 5)\<close>
\<close>
-paragraph \<open>An example for @{typ int}\<close>
+paragraph \<open>An example for \<^typ>\<open>int\<close>\<close>
ML %quotetypewriter \<open>
val check_int = @{computation_check terms:
@@ -469,10 +469,10 @@
\<close>
ML_val %quotetypewriter \<open>
- check_int @{context} @{cprop "even ((0::int) + 1 + 2 + 3 + -1 + -2 + -3)"}
+ check_int \<^context> \<^cprop>\<open>even ((0::int) + 1 + 2 + 3 + -1 + -2 + -3)\<close>
\<close>
-paragraph \<open>An example for @{typ String.literal}\<close>
+paragraph \<open>An example for \<^typ>\<open>String.literal\<close>\<close>
definition %quote is_cap_letter :: "String.literal \<Rightarrow> bool"
where "is_cap_letter s \<longleftrightarrow> (case String.asciis_of_literal s
@@ -485,7 +485,7 @@
\<close>
ML_val %quotetypewriter \<open>
- check_literal @{context} @{cprop "is_cap_letter (STR ''Q'')"}
+ check_literal \<^context> \<^cprop>\<open>is_cap_letter (STR ''Q'')\<close>
\<close>
@@ -500,7 +500,7 @@
One option is to hardcode using code antiquotations (see \secref{sec:code_antiq}).
Another option is to use pre-existing infrastructure in HOL:
- @{ML "Reification.conv"} and @{ML "Reification.tac"}.
+ \<^ML>\<open>Reification.conv\<close> and \<^ML>\<open>Reification.tac\<close>.
A simplistic example:
\<close>
@@ -518,23 +518,23 @@
| "interp (Neg f) vs \<longleftrightarrow> \<not> interp f vs"
text \<open>
- \noindent The datatype @{type form_ord} represents formulae whose semantics is given by
- @{const interp}. Note that values are represented by variable indices (@{typ nat})
- whose concrete values are given in list @{term vs}.
+ \noindent The datatype \<^type>\<open>form_ord\<close> represents formulae whose semantics is given by
+ \<^const>\<open>interp\<close>. Note that values are represented by variable indices (\<^typ>\<open>nat\<close>)
+ whose concrete values are given in list \<^term>\<open>vs\<close>.
\<close>
ML %quotetypewriter (*<*) \<open>\<close>
lemma "thm": fixes x y z :: "'a::order" shows "x < y \<and> x < z \<equiv> interp (And (Less (Suc 0) (Suc (Suc 0))) (Less (Suc 0) 0)) [z, x, y]"
ML_prf %quotetypewriter
(*>*) \<open>val thm =
- Reification.conv @{context} @{thms interp.simps} @{cterm "x < y \<and> x < z"}\<close> (*<*)
-by (tactic \<open>ALLGOALS (resolve_tac @{context} [thm])\<close>)
+ Reification.conv \<^context> @{thms interp.simps} \<^cterm>\<open>x < y \<and> x < z\<close>\<close> (*<*)
+by (tactic \<open>ALLGOALS (resolve_tac \<^context> [thm])\<close>)
(*>*)
text \<open>
- \noindent By virtue of @{fact interp.simps}, @{ML "Reification.conv"} provides a conversion
+ \noindent By virtue of @{fact interp.simps}, \<^ML>\<open>Reification.conv\<close> provides a conversion
which, for this concrete example, yields @{thm thm [no_vars]}. Note that the argument
- to @{const interp} does not contain any free variables and can thus be evaluated
+ to \<^const>\<open>interp\<close> does not contain any free variables and can thus be evaluated
using evaluation.
A less meager example can be found in the AFP, session \<open>Regular-Sets\<close>,
--- a/src/Doc/Codegen/Evaluation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Evaluation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,8 +12,8 @@
Recalling \secref{sec:principle}, code generation turns a system of
equations into a program with the \emph{same} equational semantics.
As a consequence, this program can be used as a \emph{rewrite
- engine} for terms: rewriting a term @{term "t"} using a program to a
- term @{term "t'"} yields the theorems @{prop "t \<equiv> t'"}. This
+ engine} for terms: rewriting a term \<^term>\<open>t\<close> using a program to a
+ term \<^term>\<open>t'\<close> yields the theorems \<^prop>\<open>t \<equiv> t'\<close>. This
application of code generation in the following is referred to as
\emph{evaluation}.
\<close>
@@ -158,10 +158,10 @@
\begin{tabular}{l||c|c|c}
& \<open>simp\<close> & \<open>nbe\<close> & \<open>code\<close> \tabularnewline \hline \hline
interactive evaluation & @{command value} \<open>[simp]\<close> & @{command value} \<open>[nbe]\<close> & @{command value} \<open>[code]\<close> \tabularnewline
- plain evaluation & & & \ttsize@{ML "Code_Evaluation.dynamic_value"} \tabularnewline \hline
+ plain evaluation & & & \ttsize\<^ML>\<open>Code_Evaluation.dynamic_value\<close> \tabularnewline \hline
evaluation method & @{method code_simp} & @{method normalization} & @{method eval} \tabularnewline
- property conversion & & & \ttsize@{ML "Code_Runtime.dynamic_holds_conv"} \tabularnewline \hline
- conversion & \ttsize@{ML "Code_Simp.dynamic_conv"} & \ttsize@{ML "Nbe.dynamic_conv"}
+ property conversion & & & \ttsize\<^ML>\<open>Code_Runtime.dynamic_holds_conv\<close> \tabularnewline \hline
+ conversion & \ttsize\<^ML>\<open>Code_Simp.dynamic_conv\<close> & \ttsize\<^ML>\<open>Nbe.dynamic_conv\<close>
\end{tabular}
\<close>
@@ -181,8 +181,8 @@
text \<open>
For \<open>simp\<close> and \<open>nbe\<close> static evaluation can be achieved using
- @{ML Code_Simp.static_conv} and @{ML Nbe.static_conv}.
- Note that @{ML Nbe.static_conv} by its very nature
+ \<^ML>\<open>Code_Simp.static_conv\<close> and \<^ML>\<open>Nbe.static_conv\<close>.
+ Note that \<^ML>\<open>Nbe.static_conv\<close> by its very nature
requires an invocation of the ML compiler for every call,
which can produce significant overhead.
\<close>
--- a/src/Doc/Codegen/Foundations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Foundations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -119,7 +119,7 @@
Pre- and postprocessor can be setup to transfer between
expressions suitable for logical reasoning and expressions
suitable for execution. As example, take list membership; logically
- it is expressed as @{term "x \<in> set xs"}. But for execution
+ it is expressed as \<^term>\<open>x \<in> set xs\<close>. But for execution
the intermediate set is not desirable. Hence the following
specification:
\<close>
@@ -144,7 +144,7 @@
\emph{Function transformers} provide a very general
interface, transforming a list of function theorems to another list
of function theorems, provided that neither the heading constant nor
- its type change. The @{term "0::nat"} / @{const Suc} pattern
+ its type change. The \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> pattern
used in theory \<open>Code_Abstract_Nat\<close> (see \secref{abstract_nat})
uses this interface.
@@ -195,8 +195,8 @@
\<close>
text \<open>
- \noindent You may note that the equality test @{term "xs = []"} has
- been replaced by the predicate @{term "List.null xs"}. This is due
+ \noindent You may note that the equality test \<^term>\<open>xs = []\<close> has
+ been replaced by the predicate \<^term>\<open>List.null xs\<close>. This is due
to the default setup of the \qn{preprocessor}.
This possibility to select arbitrary code equations is the key
@@ -218,8 +218,7 @@
code_thms %quote dequeue
text \<open>
- \noindent This prints a table with the code equations for @{const
- dequeue}, including \emph{all} code equations those equations depend
+ \noindent This prints a table with the code equations for \<^const>\<open>dequeue\<close>, including \emph{all} code equations those equations depend
on recursively. These dependencies themselves can be visualized using
the @{command_def code_deps} command.
\<close>
@@ -242,7 +241,7 @@
text \<open>
\noindent During preprocessing, the membership test is rewritten,
- resulting in @{const List.member}, which itself performs an explicit
+ resulting in \<^const>\<open>List.member\<close>, which itself performs an explicit
equality check, as can be seen in the corresponding \<open>SML\<close> code:
\<close>
@@ -253,11 +252,10 @@
text \<open>
\noindent Obviously, polymorphic equality is implemented the Haskell
way using a type class. How is this achieved? HOL introduces an
- explicit class @{class equal} with a corresponding operation @{const
- HOL.equal} such that @{thm equal [no_vars]}. The preprocessing
- framework does the rest by propagating the @{class equal} constraints
+ explicit class \<^class>\<open>equal\<close> with a corresponding operation \<^const>\<open>HOL.equal\<close> such that @{thm equal [no_vars]}. The preprocessing
+ framework does the rest by propagating the \<^class>\<open>equal\<close> constraints
through all dependent code equations. For datatypes, instances of
- @{class equal} are implicitly derived when possible. For other types,
+ \<^class>\<open>equal\<close> are implicitly derived when possible. For other types,
you may instantiate \<open>equal\<close> manually like any other type class.
\<close>
@@ -281,7 +279,7 @@
text \<open>
\noindent In the corresponding code, there is no equation
- for the pattern @{term "AQueue [] []"}:
+ for the pattern \<^term>\<open>AQueue [] []\<close>:
\<close>
text %quotetypewriter \<open>
@@ -307,10 +305,9 @@
by (simp_all add: strict_dequeue'_def split: list.splits)
text \<open>
- Observe that on the right hand side of the definition of @{const
- "strict_dequeue'"}, the unspecified constant @{const empty_queue} occurs.
- An attempt to generate code for @{const strict_dequeue'} would
- make the code generator complain that @{const empty_queue} has
+ Observe that on the right hand side of the definition of \<^const>\<open>strict_dequeue'\<close>, the unspecified constant \<^const>\<open>empty_queue\<close> occurs.
+ An attempt to generate code for \<^const>\<open>strict_dequeue'\<close> would
+ make the code generator complain that \<^const>\<open>empty_queue\<close> has
no associated code equations. In most situations unimplemented
constants indeed indicated a broken program; however such
constants can also be thought of as function definitions which always fail,
@@ -339,7 +336,7 @@
declare %quote [[code abort: undefined]]
text \<open>
- \noindent -- hence @{const undefined} can always be used in such
+ \noindent -- hence \<^const>\<open>undefined\<close> can always be used in such
situations.
\<close>
--- a/src/Doc/Codegen/Further.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Further.thy Sat Jan 05 17:24:33 2019 +0100
@@ -30,7 +30,7 @@
arbitrary ML code as well.
A typical example for @{command code_reflect} can be found in the
- @{theory HOL.Predicate} theory.
+ \<^theory>\<open>HOL.Predicate\<close> theory.
\<close>
@@ -188,7 +188,7 @@
text \<open>
\noindent This amends the interpretation morphisms such that
occurrences of the foundational term @{term [source] "power.powers (\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)"}
- are folded to a newly defined constant @{const funpows}.
+ are folded to a newly defined constant \<^const>\<open>funpows\<close>.
After this setup procedure, code generation can continue as usual:
\<close>
--- a/src/Doc/Codegen/Inductive_Predicate.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Inductive_Predicate.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,7 +25,7 @@
this compiler are described in detail in
@{cite "Berghofer-Bulwahn-Haftmann:2009:TPHOL"}.
- Consider the simple predicate @{const append} given by these two
+ Consider the simple predicate \<^const>\<open>append\<close> given by these two
introduction rules:
\<close>
@@ -49,7 +49,7 @@
output. Modes are similar to types, but use the notation \<open>i\<close>
for input and \<open>o\<close> for output.
- For @{term "append"}, the compiler can infer the following modes:
+ For \<^term>\<open>append\<close>, the compiler can infer the following modes:
\begin{itemize}
\item \<open>i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool\<close>
\item \<open>i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool\<close>
@@ -203,8 +203,7 @@
predicate could be inferred that are not disambiguated by the
pattern of the set comprehension. To disambiguate the modes for the
arguments of a predicate, you can state the modes explicitly in the
- @{command "values"} command. Consider the simple predicate @{term
- "succ"}:
+ @{command "values"} command. Consider the simple predicate \<^term>\<open>succ\<close>:
\<close>
inductive %quote succ :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
@@ -243,14 +242,13 @@
(if append [Suc 0, 2] ys zs then Some ys else None)\<close>}
\item If you know that the execution returns only one value (it is
- deterministic), then you can use the combinator @{term
- "Predicate.the"}, e.g., a functional concatenation of lists is
+ deterministic), then you can use the combinator \<^term>\<open>Predicate.the\<close>, e.g., a functional concatenation of lists is
defined with
@{term [display] "functional_concat xs ys = Predicate.the (append_i_i_o xs ys)"}
Note that if the evaluation does not return a unique value, it
- raises a run-time error @{term "not_unique"}.
+ raises a run-time error \<^term>\<open>not_unique\<close>.
\end{itemize}
\<close>
--- a/src/Doc/Codegen/Introduction.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Introduction.thy Sat Jan 05 17:24:33 2019 +0100
@@ -186,8 +186,7 @@
\<close>
text \<open>
- \noindent Note the parameters with trailing underscore (@{verbatim
- "A_"}), which are the dictionary parameters.
+ \noindent Note the parameters with trailing underscore (\<^verbatim>\<open>A_\<close>), which are the dictionary parameters.
\<close>
--- a/src/Doc/Codegen/Refinement.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Refinement.thy Sat Jan 05 17:24:33 2019 +0100
@@ -56,8 +56,7 @@
by (simp_all add: fib_step_def)
text \<open>
- \noindent What remains is to implement @{const fib} by @{const
- fib_step} as follows:
+ \noindent What remains is to implement \<^const>\<open>fib\<close> by \<^const>\<open>fib_step\<close> as follows:
\<close>
lemma %quote [code]:
@@ -110,7 +109,7 @@
code_datatype %quote AQueue
text \<open>
- \noindent Here we define a \qt{constructor} @{const "AQueue"} which
+ \noindent Here we define a \qt{constructor} \<^const>\<open>AQueue\<close> which
is defined in terms of \<open>Queue\<close> and interprets its arguments
according to what the \emph{content} of an amortised queue is supposed
to be.
@@ -147,7 +146,7 @@
\noindent It is good style, although no absolute requirement, to
provide code equations for the original artefacts of the implemented
type, if possible; in our case, these are the datatype constructor
- @{const Queue} and the case combinator @{const case_queue}:
+ \<^const>\<open>Queue\<close> and the case combinator \<^const>\<open>case_queue\<close>:
\<close>
lemma %quote Queue_AQueue [code]:
@@ -168,10 +167,10 @@
text \<open>
The same techniques can also be applied to types which are not
- specified as datatypes, e.g.~type @{typ int} is originally specified
+ specified as datatypes, e.g.~type \<^typ>\<open>int\<close> is originally specified
as quotient type by means of @{command_def typedef}, but for code
generation constants allowing construction of binary numeral values
- are used as constructors for @{typ int}.
+ are used as constructors for \<^typ>\<open>int\<close>.
This approach however fails if the representation of a type demands
invariants; this issue is discussed in the next section.
@@ -183,20 +182,20 @@
text \<open>
Datatype representation involving invariants require a dedicated
setup for the type and its primitive operations. As a running
- example, we implement a type @{typ "'a dlist"} of lists consisting
+ example, we implement a type \<^typ>\<open>'a dlist\<close> of lists consisting
of distinct elements.
- The specification of @{typ "'a dlist"} itself can be found in theory
- @{theory "HOL-Library.Dlist"}.
+ The specification of \<^typ>\<open>'a dlist\<close> itself can be found in theory
+ \<^theory>\<open>HOL-Library.Dlist\<close>.
The first step is to decide on which representation the abstract
- type (in our example @{typ "'a dlist"}) should be implemented.
- Here we choose @{typ "'a list"}. Then a conversion from the concrete
+ type (in our example \<^typ>\<open>'a dlist\<close>) should be implemented.
+ Here we choose \<^typ>\<open>'a list\<close>. Then a conversion from the concrete
type to the abstract type must be specified, here:
\<close>
text %quote \<open>
- @{term_type Dlist}
+ \<^term_type>\<open>Dlist\<close>
\<close>
text \<open>
@@ -205,7 +204,7 @@
\<close>
text %quote \<open>
- @{term_type list_of_dlist}
+ \<^term_type>\<open>list_of_dlist\<close>
\<close>
text \<open>
@@ -219,19 +218,19 @@
text \<open>
\noindent Note that so far the invariant on representations
- (@{term_type distinct}) has never been mentioned explicitly:
+ (\<^term_type>\<open>distinct\<close>) has never been mentioned explicitly:
the invariant is only referred to implicitly: all values in
- set @{term "{xs. list_of_dlist (Dlist xs) = xs}"} are invariant,
- and in our example this is exactly @{term "{xs. distinct xs}"}.
+ set \<^term>\<open>{xs. list_of_dlist (Dlist xs) = xs}\<close> are invariant,
+ and in our example this is exactly \<^term>\<open>{xs. distinct xs}\<close>.
- The primitive operations on @{typ "'a dlist"} are specified
- indirectly using the projection @{const list_of_dlist}. For
- the empty \<open>dlist\<close>, @{const Dlist.empty}, we finally want
+ The primitive operations on \<^typ>\<open>'a dlist\<close> are specified
+ indirectly using the projection \<^const>\<open>list_of_dlist\<close>. For
+ the empty \<open>dlist\<close>, \<^const>\<open>Dlist.empty\<close>, we finally want
the code equation
\<close>
text %quote \<open>
- @{term "Dlist.empty = Dlist []"}
+ \<^term>\<open>Dlist.empty = Dlist []\<close>
\<close>
text \<open>
@@ -244,7 +243,7 @@
text \<open>
\noindent This equation logically encodes both the desired code
- equation and that the expression @{const Dlist} is applied to obeys
+ equation and that the expression \<^const>\<open>Dlist\<close> is applied to obeys
the implicit invariant. Equations for insertion and removal are
similar:
\<close>
@@ -270,9 +269,9 @@
for the meta theory of datatype refinement involving invariants.
Typical data structures implemented by representations involving
- invariants are available in the library, theory @{theory "HOL-Library.Mapping"}
- specifies key-value-mappings (type @{typ "('a, 'b) mapping"});
- these can be implemented by red-black-trees (theory @{theory "HOL-Library.RBT"}).
+ invariants are available in the library, theory \<^theory>\<open>HOL-Library.Mapping\<close>
+ specifies key-value-mappings (type \<^typ>\<open>('a, 'b) mapping\<close>);
+ these can be implemented by red-black-trees (theory \<^theory>\<open>HOL-Library.RBT\<close>).
\<close>
end
--- a/src/Doc/Corec/Corec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Corec/Corec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
primitive corecursion. It describes @{command corec} and related commands:\
@{command corecursive}, @{command friend_of_corec}, and @{command coinduction_upto}.
It also covers the @{method corec_unique} proof method.
-The package is not part of @{theory Main}; it is located in
+The package is not part of \<^theory>\<open>Main\<close>; it is located in
\<^file>\<open>~~/src/HOL/Library/BNF_Corec.thy\<close>.
The @{command corec} command generalizes \keyw{primcorec} in three main
@@ -149,7 +149,7 @@
\noindent
Pointwise sum meets the friendliness criterion. We register it as a friend using
the @{command friend_of_corec} command. The command requires us to give a
-specification of @{const ssum} where a constructor (@{const SCons}) occurs at
+specification of \<^const>\<open>ssum\<close> where a constructor (\<^const>\<open>SCons\<close>) occurs at
the outermost position on the right-hand side. Here, we can simply reuse the
\keyw{primcorec} specification above:
\<close>
@@ -171,7 +171,7 @@
@{thm [source] relator_eq} theorem collection before it invokes
@{method transfer_prover}.
-After registering @{const ssum} as a friend, we can use it in the corecursive
+After registering \<^const>\<open>ssum\<close> as a friend, we can use it in the corecursive
call context, either inside or outside the constructor guard:
\<close>
@@ -204,7 +204,7 @@
The parametricity subgoal is given to \<open>transfer_prover_eq\<close>
(Section~\ref{ssec:transfer-prover-eq}).
-The @{const sprod} and @{const sexp} functions provide shuffle product and
+The \<^const>\<open>sprod\<close> and \<^const>\<open>sexp\<close> functions provide shuffle product and
exponentiation on streams. We can use them to define the stream of factorial
numbers in two different ways:
\<close>
@@ -230,7 +230,7 @@
\noindent
In general, the arguments may be any bounded natural functor (BNF)
@{cite "isabelle-datatypes"}, with the restriction that the target codatatype
-(@{typ "nat stream"}) may occur only in a \emph{live} position of the BNF. For
+(\<^typ>\<open>nat stream\<close>) may occur only in a \emph{live} position of the BNF. For
this reason, the following function, on unbounded sets, cannot be registered as
a friend:
\<close>
@@ -252,7 +252,7 @@
Node (lab: 'a) (sub: "'a tree list")
text \<open>
-We first define the pointwise sum of two trees analogously to @{const ssum}:
+We first define the pointwise sum of two trees analogously to \<^const>\<open>ssum\<close>:
\<close>
corec (friend) tsum :: "('a :: plus) tree \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
@@ -261,13 +261,13 @@
text \<open>
\noindent
-Here, @{const map} is the standard map function on lists, and @{const zip}
-converts two parallel lists into a list of pairs. The @{const tsum} function is
+Here, \<^const>\<open>map\<close> is the standard map function on lists, and \<^const>\<open>zip\<close>
+converts two parallel lists into a list of pairs. The \<^const>\<open>tsum\<close> function is
primitively corecursive. Instead of @{command corec} \<open>(friend)\<close>, we could
also have used \keyw{primcorec} and @{command friend_of_corec}, as we did for
-@{const ssum}.
+\<^const>\<open>ssum\<close>.
-Once @{const tsum} is registered as friendly, we can use it in the corecursive
+Once \<^const>\<open>tsum\<close> is registered as friendly, we can use it in the corecursive
call context of another function:
\<close>
@@ -280,7 +280,7 @@
@{command corec}, @{command corecursive}, and @{command friend_of_corec}. In
particular, nesting through the function type can be expressed using
\<open>\<lambda>\<close>-abstractions and function applications rather than through composition
-(@{term "(\<circ>)"}, the map function for \<open>\<Rightarrow>\<close>). For example:
+(\<^term>\<open>(\<circ>)\<close>, the map function for \<open>\<Rightarrow>\<close>). For example:
\<close>
codatatype 'a language =
@@ -322,7 +322,7 @@
finite number of unguarded recursive calls perform this calculation before
reaching a guarded corecursive call. Intuitively, the unguarded recursive call
can be unfolded to arbitrary finite depth, ultimately yielding a purely
-corecursive definition. An example is the @{term primes} function from Di
+corecursive definition. An example is the \<^term>\<open>primes\<close> function from Di
Gianantonio and Miculan @{cite "di-gianantonio-miculan-2003"}:
\<close>
@@ -343,15 +343,15 @@
The @{command corecursive} command is a variant of @{command corec} that allows
us to specify a termination argument for any unguarded self-call.
-When called with \<open>m = 1\<close> and \<open>n = 2\<close>, the @{const primes}
+When called with \<open>m = 1\<close> and \<open>n = 2\<close>, the \<^const>\<open>primes\<close>
function computes the stream of prime numbers. The unguarded call in the
-\<open>else\<close> branch increments @{term n} until it is coprime to the first
-argument @{term m} (i.e., the greatest common divisor of @{term m} and
-@{term n} is \<open>1\<close>).
+\<open>else\<close> branch increments \<^term>\<open>n\<close> until it is coprime to the first
+argument \<^term>\<open>m\<close> (i.e., the greatest common divisor of \<^term>\<open>m\<close> and
+\<^term>\<open>n\<close> is \<open>1\<close>).
-For any positive integers @{term m} and @{term n}, the numbers @{term m} and
+For any positive integers \<^term>\<open>m\<close> and \<^term>\<open>n\<close>, the numbers \<^term>\<open>m\<close> and
\<open>m * n + 1\<close> are coprime, yielding an upper bound on the number of times
-@{term n} is increased. Hence, the function will take the \<open>else\<close> branch at
+\<^term>\<open>n\<close> is increased. Hence, the function will take the \<open>else\<close> branch at
most finitely often before taking the then branch and producing one constructor.
There is a slight complication when \<open>m = 0 \<and> n > 1\<close>: Without the first
disjunct in the \<open>if\<close> condition, the function could stall. (This corner
@@ -410,7 +410,7 @@
specifications, our package provides the more advanced proof principle of
\emph{coinduction up to congruence}---or simply \emph{coinduction up-to}.
-The structural coinduction principle for @{typ "'a stream"}, called
+The structural coinduction principle for \<^typ>\<open>'a stream\<close>, called
@{thm [source] stream.coinduct}, is as follows:
%
\begin{indentblock}
@@ -421,9 +421,9 @@
providing a relation \<open>R\<close> that relates \<open>l\<close> and \<open>r\<close> (first
premise) and that constitutes a bisimulation (second premise). Streams that are
related by a bisimulation cannot be distinguished by taking observations (via
-the selectors @{const shd} and @{const stl}); hence they must be equal.
+the selectors \<^const>\<open>shd\<close> and \<^const>\<open>stl\<close>); hence they must be equal.
-The coinduction up-to principle after registering @{const sskew} as friendly is
+The coinduction up-to principle after registering \<^const>\<open>sskew\<close> as friendly is
available as @{thm [source] sskew.coinduct} and as one of the components of
the theorem collection @{thm [source] stream.coinduct_upto}:
%
@@ -432,10 +432,10 @@
\end{indentblock}
%
This rule is almost identical to structural coinduction, except that the
-corecursive application of @{term R} is generalized to
-@{term "stream.v5.congclp R"}.
+corecursive application of \<^term>\<open>R\<close> is generalized to
+\<^term>\<open>stream.v5.congclp R\<close>.
-The @{const stream.v5.congclp} predicate is equipped with the following
+The \<^const>\<open>stream.v5.congclp\<close> predicate is equipped with the following
introduction rules:
\begin{indentblock}
@@ -471,9 +471,9 @@
The introduction rules are also available as
@{thm [source] sskew.cong_intros}.
-Notice that there is no introduction rule corresponding to @{const sexp},
-because @{const sexp} has a more restrictive result type than @{const sskew}
-(@{typ "nat stream"} vs. @{typ "('a :: {plus,times}) stream"}.
+Notice that there is no introduction rule corresponding to \<^const>\<open>sexp\<close>,
+because \<^const>\<open>sexp\<close> has a more restrictive result type than \<^const>\<open>sskew\<close>
+(\<^typ>\<open>nat stream\<close> vs. \<^typ>\<open>('a :: {plus,times}) stream\<close>).
The version numbers, here \<open>v5\<close>, distinguish the different congruence
closures generated for a given codatatype as more friends are registered. As
@@ -486,10 +486,10 @@
most situations. For this purpose, the package maintains the collection
@{thm [source] stream.coinduct_upto} of coinduction principles ordered by
increasing generality, which works well with Isabelle's philosophy of applying
-the first rule that matches. For example, after registering @{const ssum} as a
-friend, proving the equality @{term "l = r"} on @{typ "nat stream"} might
-require coinduction principle for @{term "nat stream"}, which is up to
-@{const ssum}.
+the first rule that matches. For example, after registering \<^const>\<open>ssum\<close> as a
+friend, proving the equality \<^term>\<open>l = r\<close> on \<^typ>\<open>nat stream\<close> might
+require coinduction principle for \<^term>\<open>nat stream\<close>, which is up to
+\<^const>\<open>ssum\<close>.
The collection @{thm [source] stream.coinduct_upto} is guaranteed to be complete
and up to date with respect to the type instances of definitions considered so
@@ -523,18 +523,18 @@
coinduction principles:
%
\begin{itemize}
-\item @{typ "('a, int) tllist"} up to @{const TNil}, @{const TCons}, and
- @{const square_terminal};
-\item @{typ "(nat, 'b) tllist"} up to @{const TNil}, @{const TCons}, and
- @{const square_elems};
-\item @{typ "('a, 'b) tllist"} up to @{const TNil} and @{const TCons}.
+\item \<^typ>\<open>('a, int) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>, and
+ \<^const>\<open>square_terminal\<close>;
+\item \<^typ>\<open>(nat, 'b) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>, and
+ \<^const>\<open>square_elems\<close>;
+\item \<^typ>\<open>('a, 'b) tllist\<close> up to \<^const>\<open>TNil\<close> and \<^const>\<open>TCons\<close>.
\end{itemize}
%
The following variant is missing:
%
\begin{itemize}
-\item @{typ "(nat, int) tllist"} up to @{const TNil}, @{const TCons},
- @{const square_elems}, and @{const square_terminal}.
+\item \<^typ>\<open>(nat, int) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>,
+ \<^const>\<open>square_elems\<close>, and \<^const>\<open>square_terminal\<close>.
\end{itemize}
%
To generate it without having to define a new function with @{command corec},
@@ -569,23 +569,23 @@
The @{command corec}, @{command corecursive}, and @{command friend_of_corec}
commands generate a property \<open>f.unique\<close> about the function of interest
-@{term f} that can be used to prove that any function that satisfies
-@{term f}'s corecursive specification must be equal to~@{term f}. For example:
+\<^term>\<open>f\<close> that can be used to prove that any function that satisfies
+\<^term>\<open>f\<close>'s corecursive specification must be equal to~\<^term>\<open>f\<close>. For example:
\[@{thm ssum.unique[no_vars]}\]
The uniqueness principles are not restricted to functions defined using
@{command corec} or @{command corecursive} or registered with
-@{command friend_of_corec}. Suppose @{term "t x"} is an arbitrary term
-depending on @{term x}. The @{method corec_unique} proof method, provided by our
+@{command friend_of_corec}. Suppose \<^term>\<open>t x\<close> is an arbitrary term
+depending on \<^term>\<open>x\<close>. The @{method corec_unique} proof method, provided by our
tool, transforms subgoals of the form
-\[@{term "(\<forall>x. f x = H x f) \<Longrightarrow> f x = t x"}\]
+\[\<^term>\<open>(\<forall>x. f x = H x f) \<Longrightarrow> f x = t x\<close>\]
into
-\[@{term "\<forall>x. t x = H x t"}\]
-The higher-order functional @{term H} must be such that @{term "f x = H x f"}
+\[\<^term>\<open>\<forall>x. t x = H x t\<close>\]
+The higher-order functional \<^term>\<open>H\<close> must be such that \<^term>\<open>f x = H x f\<close>
would be a valid @{command corec} specification, but without nested self-calls
or unguarded (recursive) calls. Thus, @{method corec_unique} proves uniqueness
-of @{term t} with respect to the given corecursive equation regardless of how
-@{term t} was defined. For example:
+of \<^term>\<open>t\<close> with respect to the given corecursive equation regardless of how
+\<^term>\<open>t\<close> was defined. For example:
\<close>
lemma
@@ -625,12 +625,12 @@
@{command_def "corecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
(@@{command corec} | @@{command corecursive}) target? \<newline>
@{syntax cr_options}? fix @'where' prop
;
@{syntax_def cr_options}: '(' ((@{syntax plugins} | 'friend' | 'transfer') + ',') ')'
-\<close>}
+\<close>
\medskip
@@ -677,12 +677,12 @@
@{command_def "friend_of_corec"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command friend_of_corec} target? \<newline>
@{syntax foc_options}? fix @'where' prop
;
@{syntax_def foc_options}: '(' ((@{syntax plugins} | 'transfer') + ',') ')'
-\<close>}
+\<close>
\medskip
@@ -720,9 +720,9 @@
@{command_def "coinduction_upto"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command coinduction_upto} target? name ':' type
-\<close>}
+\<close>
\medskip
@@ -774,9 +774,9 @@
\label{ssec:corec-and-corecursive-theorems}\<close>
text \<open>
-For a function @{term f} over codatatype \<open>t\<close>, the @{command corec} and
+For a function \<^term>\<open>f\<close> over codatatype \<open>t\<close>, the @{command corec} and
@{command corecursive} commands generate the following properties (listed for
-@{const sexp}, cf. Section~\ref{ssec:simple-corecursion}):
+\<^const>\<open>sexp\<close>, cf. Section~\ref{ssec:simple-corecursion}):
\begin{indentblock}
\begin{description}
@@ -799,7 +799,7 @@
\item[\<open>f.\<close>\hthm{inner_induct}\rm:] ~ \\
This property is only generated for mixed recursive--corecursive definitions.
-For @{const primes} (Section~\ref{ssec:mixed-recursion-corecursion}, it reads as
+For \<^const>\<open>primes\<close> (Section~\ref{ssec:mixed-recursion-corecursion}), it reads as
follows: \\[\jot]
@{thm primes.inner_induct[no_vars]}
@@ -892,7 +892,7 @@
text \<open>
The @{method transfer_prover_eq} proof method replaces the equality relation
-@{term "(=)"} with compound relator expressions according to
+\<^term>\<open>(=)\<close> with compound relator expressions according to
@{thm [source] relator_eq} before calling @{method transfer_prover} on the
current subgoal. It tends to work better than plain @{method transfer_prover} on
the parametricity proof obligations of @{command corecursive} and
@@ -917,7 +917,7 @@
this derivation fails if in the arguments of a higher-order constant a type variable
occurs on both sides of the function type constructor. The required naturality
theorem can then be declared with @{attribute friend_of_corec_simps}. See
-@{file "~~/src/HOL/Corec_Examples/Tests/Iterate_GPV.thy"} for an example.
+\<^file>\<open>~~/src/HOL/Corec_Examples/Tests/Iterate_GPV.thy\<close> for an example.
\<close>
--- a/src/Doc/Datatypes/Datatypes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Datatypes/Datatypes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -77,7 +77,7 @@
finitely many direct subtrees, whereas those of the second and fourth may have
infinite branching.
-The package is part of @{theory Main}. Additional functionality is provided by
+The package is part of \<^theory>\<open>Main\<close>. Additional functionality is provided by
the theory \<^file>\<open>~~/src/HOL/Library/BNF_Axiomatization.thy\<close>.
The package, like its predecessor, fully adheres to the LCF philosophy
@@ -180,10 +180,10 @@
text \<open>
\noindent
-@{const Truue}, @{const Faalse}, and @{const Perhaaps} have the type @{typ trool}.
+\<^const>\<open>Truue\<close>, \<^const>\<open>Faalse\<close>, and \<^const>\<open>Perhaaps\<close> have the type \<^typ>\<open>trool\<close>.
Polymorphic types are possible, such as the following option type, modeled after
-its homologue from the @{theory HOL.Option} theory:
+its homologue from the \<^theory>\<open>HOL.Option\<close> theory:
\<close>
(*<*)
@@ -231,7 +231,7 @@
text \<open>
\noindent
Lists were shown in the introduction. Terminated lists are a variant that
-stores a value of type @{typ 'b} at the very end:
+stores a value of type \<^typ>\<open>'b\<close> at the very end:
\<close>
datatype (*<*)(in early) (*>*)('a, 'b) tlist = TNil 'b | TCons 'a "('a, 'b) tlist"
@@ -269,7 +269,7 @@
text \<open>
\emph{Nested recursion} occurs when recursive occurrences of a type appear under
a type constructor. The introduction showed some examples of trees with nesting
-through lists. A more complex example, that reuses our @{type option} type,
+through lists. A more complex example, that reuses our \<^type>\<open>option\<close> type,
follows:
\<close>
@@ -297,7 +297,7 @@
text \<open>
\noindent
-The following definition of @{typ 'a}-branching trees is legal:
+The following definition of \<^typ>\<open>'a\<close>-branching trees is legal:
\<close>
datatype 'a ftree = FTLeaf 'a | FTNode "'a \<Rightarrow> 'a ftree"
@@ -314,9 +314,9 @@
In general, type constructors \<open>('a\<^sub>1, \<dots>, 'a\<^sub>m) t\<close>
allow recursion on a subset of their type arguments \<open>'a\<^sub>1\<close>, \ldots,
\<open>'a\<^sub>m\<close>. These type arguments are called \emph{live}; the remaining
-type arguments are called \emph{dead}. In @{typ "'a \<Rightarrow> 'b"} and
-@{typ "('a, 'b) fun_copy"}, the type variable @{typ 'a} is dead and
-@{typ 'b} is live.
+type arguments are called \emph{dead}. In \<^typ>\<open>'a \<Rightarrow> 'b\<close> and
+\<^typ>\<open>('a, 'b) fun_copy\<close>, the type variable \<^typ>\<open>'a\<close> is dead and
+\<^typ>\<open>'b\<close> is live.
Type constructors must be registered as BNFs to have live arguments. This is
done automatically for datatypes and codatatypes introduced by the
@@ -403,24 +403,24 @@
\medskip
-The discriminator @{const null} and the selectors @{const hd} and @{const tl}
+The discriminator \<^const>\<open>null\<close> and the selectors \<^const>\<open>hd\<close> and \<^const>\<open>tl\<close>
are characterized by the following conditional equations:
%
\[@{thm list.collapse(1)[of xs, no_vars]}
\qquad @{thm list.collapse(2)[of xs, no_vars]}\]
%
For two-constructor datatypes, a single discriminator constant is sufficient.
-The discriminator associated with @{const Cons} is simply
-@{term "\<lambda>xs. \<not> null xs"}.
+The discriminator associated with \<^const>\<open>Cons\<close> is simply
+\<^term>\<open>\<lambda>xs. \<not> null xs\<close>.
The \keyw{where} clause at the end of the command specifies a default value for
selectors applied to constructors on which they are not a priori specified.
In the example, it is used to ensure that the tail of the empty list is itself
(instead of being left unspecified).
-Because @{const Nil} is nullary, it is also possible to use
-@{term "\<lambda>xs. xs = Nil"} as a discriminator. This is the default behavior
-if we omit the identifier @{const null} and the associated colon. Some users
+Because \<^const>\<open>Nil\<close> is nullary, it is also possible to use
+\<^term>\<open>\<lambda>xs. xs = Nil\<close> as a discriminator. This is the default behavior
+if we omit the identifier \<^const>\<open>null\<close> and the associated colon. Some users
argue against this, because the mixture of constructors and selectors in the
characteristic theorems can lead Isabelle's automation to switch between the
constructor and the destructor view in surprising ways.
@@ -469,7 +469,7 @@
@{command_def "datatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command datatype} target? @{syntax dt_options}? @{syntax dt_spec}
;
@{syntax_def dt_options}: '(' ((@{syntax plugins} | 'discs_sels') + ',') ')'
@@ -480,7 +480,7 @@
@{syntax map_rel_pred}? (@'where' (prop + '|'))? + @'and')
;
@{syntax_def map_rel_pred}: @'for' ((('map' | 'rel' | 'pred') ':' name) +)
-\<close>}
+\<close>
\medskip
@@ -516,18 +516,18 @@
The left-hand sides of the datatype equations specify the name of the type to
define, its type parameters, and additional information:
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_name}: @{syntax tyargs}? name mixfix?
;
@{syntax_def tyargs}: typefree | '(' (('dead' | name ':')? typefree + ',') ')'
-\<close>}
+\<close>
\medskip
\noindent
The syntactic entity \synt{name} denotes an identifier, \synt{mixfix} denotes
the usual parenthesized mixfix notation, and \synt{typefree} denotes fixed type
-variable (@{typ 'a}, @{typ 'b}, \ldots) @{cite "isabelle-isar-ref"}.
+variable (\<^typ>\<open>'a\<close>, \<^typ>\<open>'b\<close>, \ldots) @{cite "isabelle-isar-ref"}.
The optional names preceding the type variables allow to override the default
names of the set functions (\<open>set\<^sub>1_t\<close>, \ldots, \<open>set\<^sub>m_t\<close>). Type
@@ -541,9 +541,9 @@
Inside a mutually recursive specification, all defined datatypes must
mention exactly the same type variables in the same order.
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_ctor}: (name ':')? name (@{syntax dt_ctor_arg} * ) mixfix?
-\<close>}
+\<close>
\medskip
@@ -555,9 +555,9 @@
\<open>\<lambda>x. x = C\<^sub>j\<close> for nullary constructors and
\<open>t.is_C\<^sub>j\<close> otherwise.
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_ctor_arg}: type | '(' name ':' type ')'
-\<close>}
+\<close>
\medskip
@@ -580,9 +580,9 @@
@{command_def "datatype_compat"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command datatype_compat} (name +)
-\<close>}
+\<close>
\medskip
@@ -595,7 +595,7 @@
text \<open>\blankline\<close>
- ML \<open>Old_Datatype_Data.get_info @{theory} @{type_name even_nat}\<close>
+ ML \<open>Old_Datatype_Data.get_info \<^theory> \<^type_name>\<open>even_nat\<close>\<close>
text \<open>
The syntactic entity \synt{name} denotes an identifier @{cite "isabelle-isar-ref"}.
@@ -712,7 +712,7 @@
text \<open>
The free constructor theorems are partitioned in three subgroups. The first
subgroup of properties is concerned with the constructors. They are listed below
-for @{typ "'a list"}:
+for \<^typ>\<open>'a list\<close>:
\begin{indentblock}
\begin{description}
@@ -803,14 +803,14 @@
@{thm list.collapse(2)[no_vars]} \\
The \<open>[simp]\<close> attribute is exceptionally omitted for datatypes equipped
with a single nullary constructor, because a property of the form
-@{prop "x = C"} is not suitable as a simplification rule.
+\<^prop>\<open>x = C\<close> is not suitable as a simplification rule.
\item[\<open>t.\<close>\hthm{distinct_disc} \<open>[dest]\<close>\rm:] ~ \\
-These properties are missing for @{typ "'a list"} because there is only one
+These properties are missing for \<^typ>\<open>'a list\<close> because there is only one
proper discriminator. If the datatype had been introduced with a second
-discriminator called @{const nonnull}, they would have read as follows: \\[\jot]
-@{prop "null list \<Longrightarrow> \<not> nonnull list"} \\
-@{prop "nonnull list \<Longrightarrow> \<not> null list"}
+discriminator called \<^const>\<open>nonnull\<close>, they would have read as follows: \\[\jot]
+\<^prop>\<open>null list \<Longrightarrow> \<not> nonnull list\<close> \\
+\<^prop>\<open>nonnull list \<Longrightarrow> \<not> null list\<close>
\item[\<open>t.\<close>\hthm{exhaust_disc} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
@{thm list.exhaust_disc[no_vars]}
@@ -851,7 +851,7 @@
text \<open>
The functorial theorems are generated for type constructors with at least
-one live type argument (e.g., @{typ "'a list"}). They are partitioned in two
+one live type argument (e.g., \<^typ>\<open>'a list\<close>). They are partitioned in two
subgroups. The first subgroup consists of properties involving the
constructors or the destructors and either a set function, the map function,
the predicator, or the relator:
@@ -867,7 +867,7 @@
%(Section~\ref{ssec:transfer}).
\item[\<open>t.\<close>\hthm{sel_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
-This property is missing for @{typ "'a list"} because there is no common
+This property is missing for \<^typ>\<open>'a list\<close> because there is no common
selector to all constructors. \\
The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
(Section~\ref{ssec:transfer}).
@@ -1182,10 +1182,10 @@
induction rule can be obtained by applying the \<open>[unfolded
all_mem_range]\<close> attribute on \<open>t.induct\<close>.
-\item \emph{The @{const size} function has a slightly different definition.}
+\item \emph{The \<^const>\<open>size\<close> function has a slightly different definition.}
The new function returns \<open>1\<close> instead of \<open>0\<close> for some nonrecursive
constructors. This departure from the old behavior made it possible to implement
-@{const size} in terms of the generic function \<open>t.size_t\<close>. Moreover,
+\<^const>\<open>size\<close> in terms of the generic function \<open>t.size_t\<close>. Moreover,
the new function considers nested occurrences of a value, in the nested
recursive case. The old behavior can be obtained by disabling the \<open>size\<close>
plugin (Section~\ref{sec:selecting-plugins}) and instantiating the
@@ -1381,7 +1381,7 @@
text \<open>
In a departure from the old datatype package, nested recursion is normally
handled via the map functions of the nesting type constructors. For example,
-recursive calls are lifted to lists using @{const map}:
+recursive calls are lifted to lists using \<^const>\<open>map\<close>:
\<close>
(*<*)
@@ -1397,7 +1397,7 @@
\noindent
The next example features recursion through the \<open>option\<close> type. Although
\<open>option\<close> is not a new-style datatype, it is registered as a BNF with the
-map function @{const map_option}:
+map function \<^const>\<open>map_option\<close>:
\<close>
primrec (*<*)(in early) (*>*)sum_btree :: "('a::{zero,plus}) btree \<Rightarrow> 'a" where
@@ -1435,7 +1435,7 @@
text \<open>
\noindent
For recursion through curried $n$-ary functions, $n$ applications of
-@{term "(\<circ>)"} are necessary. The examples below illustrate the case where
+\<^term>\<open>(\<circ>)\<close> are necessary. The examples below illustrate the case where
$n = 2$:
\<close>
@@ -1532,7 +1532,7 @@
%
% * higher-order approach, considering nesting as nesting, is more
% compositional -- e.g. we saw how we could reuse an existing polymorphic
-% at or the_default, whereas @{const ats\<^sub>f\<^sub>f} is much more specific
+% at or the_default, whereas \<^const>\<open>ats\<^sub>f\<^sub>f\<close> is much more specific
%
% * but:
% * is perhaps less intuitive, because it requires higher-order thinking
@@ -1540,7 +1540,7 @@
% mutually recursive version might be nicer
% * is somewhat indirect -- must apply a map first, then compute a result
% (cannot mix)
-% * the auxiliary functions like @{const ats\<^sub>f\<^sub>f} are sometimes useful in own right
+% * the auxiliary functions like \<^const>\<open>ats\<^sub>f\<^sub>f\<close> are sometimes useful in own right
%
% * impact on automation unclear
%
@@ -1561,14 +1561,14 @@
@{command_def "primrec"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command primrec} target? @{syntax pr_options}? fixes \<newline>
@'where' (@{syntax pr_equation} + '|')
;
@{syntax_def pr_options}: '(' ((@{syntax plugins} | 'nonexhaustive' | 'transfer') + ',') ')'
;
@{syntax_def pr_equation}: thmdecl? prop
-\<close>}
+\<close>
\medskip
@@ -1617,7 +1617,7 @@
text \<open>
The @{command primrec} command generates the following properties (listed
-for @{const tfold}):
+for \<^const>\<open>tfold\<close>):
\begin{indentblock}
\begin{description}
@@ -1816,8 +1816,8 @@
text \<open>
\noindent
-Notice that the @{const cont} selector is associated with both @{const Skip}
-and @{const Action}.
+Notice that the \<^const>\<open>cont\<close> selector is associated with both \<^const>\<open>Skip\<close>
+and \<^const>\<open>Action\<close>.
\<close>
@@ -1863,9 +1863,9 @@
@{command_def "codatatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command codatatype} target? @{syntax dt_options}? @{syntax dt_spec}
-\<close>}
+\<close>
\medskip
@@ -1927,7 +1927,7 @@
\label{sssec:coinductive-theorems}\<close>
text \<open>
-The coinductive theorems are listed below for @{typ "'a llist"}:
+The coinductive theorems are listed below for \<^typ>\<open>'a llist\<close>:
\begin{indentblock}
\begin{description}
@@ -2206,7 +2206,7 @@
\label{sssec:primcorec-nested-corecursion}\<close>
text \<open>
-The next pair of examples generalize the @{const literate} and @{const siterate}
+The next pair of examples generalize the \<^const>\<open>literate\<close> and \<^const>\<open>siterate\<close>
functions (Section~\ref{sssec:primcorec-nested-corecursion}) to possibly
infinite trees in which subnodes are organized either as a lazy list (\<open>tree\<^sub>i\<^sub>i\<close>) or as a finite set (\<open>tree\<^sub>i\<^sub>s\<close>). They rely on the map functions of
the nesting type constructors to lift the corecursive calls:
@@ -2224,9 +2224,9 @@
\noindent
Both examples follow the usual format for constructor arguments associated
with nested recursive occurrences of the datatype. Consider
-@{const iterate\<^sub>i\<^sub>i}. The term @{term "g x"} constructs an @{typ "'a llist"}
-value, which is turned into an @{typ "'a tree\<^sub>i\<^sub>i llist"} value using
-@{const lmap}.
+\<^const>\<open>iterate\<^sub>i\<^sub>i\<close>. The term \<^term>\<open>g x\<close> constructs an \<^typ>\<open>'a llist\<close>
+value, which is turned into an \<^typ>\<open>'a tree\<^sub>i\<^sub>i llist\<close> value using
+\<^const>\<open>lmap\<close>.
This format may sometimes feel artificial. The following function constructs
a tree with a single, infinite branch from a stream:
@@ -2288,7 +2288,7 @@
text \<open>
\noindent
For recursion through curried $n$-ary functions, $n$ applications of
-@{term "(\<circ>)"} are necessary. The examples below illustrate the case where
+\<^term>\<open>(\<circ>)\<close> are necessary. The examples below illustrate the case where
$n = 2$:
\<close>
@@ -2361,8 +2361,8 @@
text \<open>
The constructor view is similar to the code view, but there is one separate
conditional equation per constructor rather than a single unconditional
-equation. Examples that rely on a single constructor, such as @{const literate}
-and @{const siterate}, are identical in both styles.
+equation. Examples that rely on a single constructor, such as \<^const>\<open>literate\<close>
+and \<^const>\<open>siterate\<close>, are identical in both styles.
Here is an example where there is a difference:
\<close>
@@ -2374,15 +2374,15 @@
text \<open>
\noindent
-With the constructor view, we must distinguish between the @{const LNil} and
-the @{const LCons} case. The condition for @{const LCons} is
-left implicit, as the negation of that for @{const LNil}.
+With the constructor view, we must distinguish between the \<^const>\<open>LNil\<close> and
+the \<^const>\<open>LCons\<close> case. The condition for \<^const>\<open>LCons\<close> is
+left implicit, as the negation of that for \<^const>\<open>LNil\<close>.
For this example, the constructor view is slightly more involved than the
code equation. Recall the code view version presented in
Section~\ref{sssec:primcorec-simple-corecursion}.
% TODO: \[{thm code_view.lapp.code}\]
-The constructor view requires us to analyze the second argument (@{term ys}).
+The constructor view requires us to analyze the second argument (\<^term>\<open>ys\<close>).
The code equation generated from the constructor view also suffers from this.
% TODO: \[{thm lapp.code}\]
@@ -2407,14 +2407,14 @@
text \<open>
\noindent
-Since there is no sequentiality, we can apply the equation for @{const Choice}
-without having first to discharge @{term "n mod (4::int) \<noteq> 0"},
-@{term "n mod (4::int) \<noteq> 1"}, and
-@{term "n mod (4::int) \<noteq> 2"}.
+Since there is no sequentiality, we can apply the equation for \<^const>\<open>Choice\<close>
+without having first to discharge \<^term>\<open>n mod (4::int) \<noteq> 0\<close>,
+\<^term>\<open>n mod (4::int) \<noteq> 1\<close>, and
+\<^term>\<open>n mod (4::int) \<noteq> 2\<close>.
The price to pay for this elegance is that we must discharge exclusiveness proof
obligations, one for each pair of conditions
-@{term "(n mod (4::int) = i, n mod (4::int) = j)"}
-with @{term "i < j"}. If we prefer not to discharge any obligations, we can
+\<^term>\<open>(n mod (4::int) = i, n mod (4::int) = j)\<close>
+with \<^term>\<open>i < j\<close>. If we prefer not to discharge any obligations, we can
enable the \<open>sequential\<close> option. This pushes the problem to the users of
the generated properties.
%Here are more examples to conclude:
@@ -2455,8 +2455,8 @@
text \<open>
\noindent
-The first formula in the @{const literate} specification indicates which
-constructor to choose. For @{const siterate} and @{const every_snd}, no such
+The first formula in the \<^const>\<open>literate\<close> specification indicates which
+constructor to choose. For \<^const>\<open>siterate\<close> and \<^const>\<open>every_snd\<close>, no such
formula is necessary, since the type has only one constructor. The last two
formulas are equations specifying the value of the result for the relevant
selectors. Corecursive calls appear directly to the right of the equal sign.
@@ -2514,8 +2514,7 @@
text \<open>
\noindent
-Using the \<open>of\<close> keyword, different equations are specified for @{const
-cont} depending on which constructor is selected.
+Using the \<open>of\<close> keyword, different equations are specified for \<^const>\<open>cont\<close> depending on which constructor is selected.
Here are more examples to conclude:
\<close>
@@ -2550,14 +2549,14 @@
@{command_def "primcorecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
(@@{command primcorec} | @@{command primcorecursive}) target? \<newline>
@{syntax pcr_options}? fixes @'where' (@{syntax pcr_formula} + '|')
;
@{syntax_def pcr_options}: '(' ((@{syntax plugins} | 'sequential' | 'exhaustive' | 'transfer') + ',') ')'
;
@{syntax_def pcr_formula}: thmdecl? prop (@'of' (term * ))?
-\<close>}
+\<close>
\medskip
@@ -2610,7 +2609,7 @@
text \<open>
The @{command primcorec} and @{command primcorecursive} commands generate the
-following properties (listed for @{const literate}):
+following properties (listed for \<^const>\<open>literate\<close>):
\begin{indentblock}
\begin{description}
@@ -2640,12 +2639,12 @@
(Section~\ref{ssec:code-generator}).
\item[\<open>f.\<close>\hthm{exclude}\rm:] ~ \\
-These properties are missing for @{const literate} because no exclusiveness
+These properties are missing for \<^const>\<open>literate\<close> because no exclusiveness
proof obligations arose. In general, the properties correspond to the
discharged proof obligations.
\item[\<open>f.\<close>\hthm{exhaust}\rm:] ~ \\
-This property is missing for @{const literate} because no exhaustiveness
+This property is missing for \<^const>\<open>literate\<close> because no exhaustiveness
proof obligation arose. In general, the property correspond to the discharged
proof obligation.
@@ -2734,7 +2733,7 @@
An $n$-ary BNF is a type constructor equipped with a map function
(functorial action), $n$ set functions (natural transformations),
and an infinite cardinal bound that satisfy certain properties.
-For example, @{typ "'a llist"} is a unary BNF.
+For example, \<^typ>\<open>'a llist\<close> is a unary BNF.
Its predicator \<open>llist_all ::
('a \<Rightarrow> bool) \<Rightarrow>
'a llist \<Rightarrow> bool\<close>
@@ -2745,7 +2744,7 @@
'a llist \<Rightarrow> 'b llist \<Rightarrow> bool\<close>
extends binary predicates over elements to binary predicates over parallel
lazy lists. The cardinal bound limits the number of elements returned by the
-set function; it may not depend on the cardinality of @{typ 'a}.
+set function; it may not depend on the cardinality of \<^typ>\<open>'a\<close>.
The type constructors introduced by @{command datatype} and
@{command codatatype} are automatically registered as BNFs. In addition, a
@@ -2765,8 +2764,8 @@
command. Some of the proof obligations are best viewed with the theory
\<^file>\<open>~~/src/HOL/Library/Cardinal_Notations.thy\<close> imported.
-The type is simply a copy of the function space @{typ "'d \<Rightarrow> 'a"}, where @{typ 'a}
-is live and @{typ 'd} is dead. We introduce it together with its map function,
+The type is simply a copy of the function space \<^typ>\<open>'d \<Rightarrow> 'a\<close>, where \<^typ>\<open>'a\<close>
+is live and \<^typ>\<open>'d\<close> is dead. We introduce it together with its map function,
set function, predicator, and relator.
\<close>
@@ -2870,7 +2869,7 @@
For many typedefs, lifting the BNF structure from the raw type to the abstract
type can be done uniformly. This is the task of the @{command lift_bnf} command.
-Using @{command lift_bnf}, the above registration of @{typ "('d, 'a) fn"} as a
+Using @{command lift_bnf}, the above registration of \<^typ>\<open>('d, 'a) fn\<close> as a
BNF becomes much shorter:
\<close>
@@ -2885,7 +2884,7 @@
(*>*)
text \<open>
-For type copies (@{command typedef}s with @{term UNIV} as the representing set),
+For type copies (@{command typedef}s with \<^term>\<open>UNIV\<close> as the representing set),
the proof obligations are so simple that they can be
discharged automatically, yielding another command, @{command copy_bnf}, which
does not emit any proof obligations:
@@ -2925,7 +2924,7 @@
The @{command lift_bnf} command requires us to prove that the set of nonempty lists
is closed under the map function and the zip function. The latter only
occurs implicitly in the goal, in form of the variable
-@{term "zs :: ('a \<times> 'b) list"}.
+\<^term>\<open>zs :: ('a \<times> 'b) list\<close>.
\<close>
lift_bnf (*<*)(no_warn_wits) (*>*)'a nonempty_list
@@ -2946,8 +2945,8 @@
reasoning abstractly about an arbitrary BNF. The @{command bnf_axiomatization}
command below introduces a type \<open>('a, 'b, 'c) F\<close>, three set constants,
a map function, a predicator, a relator, and a nonemptiness witness that depends only on
-@{typ 'a}. The type \<open>'a \<Rightarrow> ('a, 'b, 'c) F\<close> of the witness can be read
-as an implication: Given a witness for @{typ 'a}, we can construct a witness for
+\<^typ>\<open>'a\<close>. The type \<open>'a \<Rightarrow> ('a, 'b, 'c) F\<close> of the witness can be read
+as an implication: Given a witness for \<^typ>\<open>'a\<close>, we can construct a witness for
\<open>('a, 'b, 'c) F\<close>. The BNF properties are postulated as axioms.
\<close>
@@ -2971,12 +2970,12 @@
@{command_def "bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command bnf} target? (name ':')? type \<newline>
'map:' term ('sets:' (term +))? 'bd:' term \<newline>
('wits:' (term +))? ('rel:' term)? \<newline>
('pred:' term)? @{syntax plugins}?
-\<close>}
+\<close>
\medskip
@@ -3004,7 +3003,7 @@
@{command_def "lift_bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command lift_bnf} target? lb_options? \<newline>
@{syntax tyargs} name wit_terms? \<newline>
('via' thm)? @{syntax map_rel_pred}?
@@ -3012,15 +3011,14 @@
@{syntax_def lb_options}: '(' ((@{syntax plugins} | 'no_warn_wits') + ',') ')'
;
@{syntax_def wit_terms}: '[' 'wits' ':' terms ']'
-\<close>}
+\<close>
\medskip
\noindent
The @{command lift_bnf} command registers as a BNF an existing type (the
\emph{abstract type}) that was defined as a subtype of a BNF (the \emph{raw
type}) using the @{command typedef} command. To achieve this, it lifts the BNF
-structure on the raw type to the abstract type following a @{term
-type_definition} theorem. The theorem is usually inferred from the type, but can
+structure on the raw type to the abstract type following a \<^term>\<open>type_definition\<close> theorem. The theorem is usually inferred from the type, but can
also be explicitly supplied by means of the optional \<open>via\<close> clause. In
addition, custom names for the set functions, the map function, the predicator, and the relator,
as well as nonemptiness witnesses can be specified.
@@ -3040,15 +3038,15 @@
@{command_def "copy_bnf"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command copy_bnf} target? ('(' @{syntax plugins} ')')? \<newline>
@{syntax tyargs} name ('via' thm)? @{syntax map_rel_pred}?
-\<close>}
+\<close>
\medskip
\noindent
The @{command copy_bnf} command performs the same lifting as @{command lift_bnf}
-for type copies (@{command typedef}s with @{term UNIV} as the representing set),
+for type copies (@{command typedef}s with \<^term>\<open>UNIV\<close> as the representing set),
without requiring the user to discharge any proof obligations or provide
nonemptiness witnesses.
\<close>
@@ -3061,13 +3059,13 @@
@{command_def "bnf_axiomatization"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command bnf_axiomatization} target? ('(' @{syntax plugins} ')')? \<newline>
@{syntax tyargs}? name @{syntax wit_types}? \<newline>
mixfix? @{syntax map_rel_pred}?
;
@{syntax_def wit_types}: '[' 'wits' ':' types ']'
-\<close>}
+\<close>
\medskip
@@ -3078,7 +3076,7 @@
The syntactic entity \synt{target} can be used to specify a local context,
\synt{name} denotes an identifier, \synt{typefree} denotes fixed type variable
-(@{typ 'a}, @{typ 'b}, \ldots), \synt{mixfix} denotes the usual parenthesized
+(\<^typ>\<open>'a\<close>, \<^typ>\<open>'b\<close>, \ldots), \synt{mixfix} denotes the usual parenthesized
mixfix notation, and \synt{types} denotes a space-separated list of types
@{cite "isabelle-isar-ref"}.
@@ -3107,9 +3105,9 @@
@{command_def "print_bnfs"} & : & \<open>local_theory \<rightarrow>\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command print_bnfs}
-\<close>}
+\<close>
\<close>
@@ -3147,13 +3145,13 @@
@{command_def "free_constructors"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command free_constructors} target? @{syntax dt_options} \<newline>
name 'for' (@{syntax fc_ctor} + '|') \<newline>
(@'where' (prop + '|'))?
;
@{syntax_def fc_ctor}: (name ':')? term (name * )
-\<close>}
+\<close>
\medskip
@@ -3188,10 +3186,10 @@
@{command_def "simps_of_case"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command simps_of_case} target? (name ':')? \<newline>
(thm + ) (@'splits' ':' (thm + ))?
-\<close>}
+\<close>
\medskip
@@ -3227,10 +3225,10 @@
@{command_def "case_of_simps"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command case_of_simps} target? (name ':')? \<newline>
(thm + )
-\<close>}
+\<close>
\medskip
@@ -3334,7 +3332,7 @@
For each datatype \<open>t\<close>, the \hthm{size} plugin generates a generic size
function \<open>t.size_t\<close> as well as a specific instance
\<open>size :: t \<Rightarrow> nat\<close> belonging to the \<open>size\<close> type class. The
-\keyw{fun} command relies on @{const size} to prove termination of recursive
+\keyw{fun} command relies on \<^const>\<open>size\<close> to prove termination of recursive
functions on datatypes.
The plugin derives the following properties:
@@ -3356,9 +3354,9 @@
@{thm list.size_gen_o_map[no_vars]}
\item[\<open>t.\<close>\hthm{size_neq}\rm:] ~ \\
-This property is missing for @{typ "'a list"}. If the @{term size} function
+This property is missing for \<^typ>\<open>'a list\<close>. If the \<^term>\<open>size\<close> function
always evaluates to a non-zero value, this theorem has the form
-@{prop "\<not> size x = 0"}.
+\<^prop>\<open>\<not> size x = 0\<close>.
\end{description}
\end{indentblock}
@@ -3371,8 +3369,8 @@
\<open>'a\<^sub>1, \<dots>, 'a\<^sub>m\<close>, by default \<open>u\<close> values are given a size of 0. This
can be improved upon by registering a custom size function of type
\<open>('a\<^sub>1 \<Rightarrow> nat) \<Rightarrow> \<dots> \<Rightarrow> ('a\<^sub>m \<Rightarrow> nat) \<Rightarrow> u \<Rightarrow> nat\<close> using
-the ML function @{ML BNF_LFP_Size.register_size} or
-@{ML BNF_LFP_Size.register_size_global}. See theory
+the ML function \<^ML>\<open>BNF_LFP_Size.register_size\<close> or
+\<^ML>\<open>BNF_LFP_Size.register_size_global\<close>. See theory
\<^file>\<open>~~/src/HOL/Library/Multiset.thy\<close> for an example.
\<close>
--- a/src/Doc/Eisbach/Manual.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Eisbach/Manual.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
The syntax diagram below refers to some syntactic categories that are
further defined in @{cite "isabelle-isar-ref"}.
- @{rail \<open>
+ \<^rail>\<open>
@@{command method} name args @'=' method
;
args: term_args? method_args? \<newline> fact_args? decl_args?
@@ -29,7 +29,7 @@
fact_args: @'uses' (name+)
;
decl_args: @'declares' (name+)
- \<close>}
+ \<close>
\<close>
@@ -68,12 +68,12 @@
text \<open>
Methods can also abstract over terms using the @{keyword_def "for"} keyword,
optionally providing type constraints. For instance, the following proof
- method \<open>intro_ex\<close> takes a term @{term y} of any type, which it uses to
- instantiate the @{term x}-variable of \<open>exI\<close> (existential introduction)
+ method \<open>intro_ex\<close> takes a term \<^term>\<open>y\<close> of any type, which it uses to
+ instantiate the \<^term>\<open>x\<close>-variable of \<open>exI\<close> (existential introduction)
before applying the result as a rule. The instantiation is performed here by
Isar's @{attribute_ref "where"} attribute. If the current subgoal is to find
- a witness for the given predicate @{term Q}, then this has the effect of
- committing to @{term y}.
+ a witness for the given predicate \<^term>\<open>Q\<close>, then this has the effect of
+ committing to \<^term>\<open>y\<close>.
\<close>
method intro_ex for Q :: "'a \<Rightarrow> bool" and y :: 'a =
@@ -81,7 +81,7 @@
text \<open>
- The term parameters @{term y} and @{term Q} can be used arbitrarily inside
+ The term parameters \<^term>\<open>y\<close> and \<^term>\<open>Q\<close> can be used arbitrarily inside
the method body, as part of attribute applications or arguments to other
methods. The expression is type-checked as far as possible when the method
is defined, however dynamic type errors can still occur when it is invoked
@@ -224,15 +224,15 @@
text \<open>
The only non-trivial part above is the final alternative \<open>(erule notE ;
solve \<open>prop_solver\<close>)\<close>. Here, in the case that all other alternatives fail,
- the method takes one of the assumptions @{term "\<not> P"} of the current goal
+ the method takes one of the assumptions \<^term>\<open>\<not> P\<close> of the current goal
and eliminates it with the rule \<open>notE\<close>, causing the goal to be proved to
- become @{term P}. The method then recursively invokes itself on the
+ become \<^term>\<open>P\<close>. The method then recursively invokes itself on the
remaining goals. The job of the recursive call is to demonstrate that there
- is a contradiction in the original assumptions (i.e.\ that @{term P} can be
+ is a contradiction in the original assumptions (i.e.\ that \<^term>\<open>P\<close> can be
derived from them). Note this recursive invocation is applied with the
@{method solve} method combinator to ensure that a contradiction will indeed
be shown. In the case where a contradiction cannot be found, backtracking
- will occur and a different assumption @{term "\<not> Q"} will be chosen for
+ will occur and a different assumption \<^term>\<open>\<not> Q\<close> will be chosen for
elimination.
Note that the recursive call to @{method prop_solver} does not have any
@@ -283,7 +283,7 @@
The syntax diagram below refers to some syntactic categories that are
further defined in @{cite "isabelle-isar-ref"}.
- @{rail \<open>
+ \<^rail>\<open>
@@{method match} kind @'in' (pattern '\<Rightarrow>' @{syntax text} + '\<bar>')
;
kind:
@@ -295,7 +295,7 @@
fact_name: @{syntax name} @{syntax attributes}? ':'
;
args: '(' (('multi' | 'cut' nat?) + ',') ')'
- \<close>}
+ \<close>
Matching allows methods to introspect the goal state, and to implement more
explicit control flow. In the basic case, a term or fact \<open>ts\<close> is given to
@@ -313,8 +313,8 @@
text \<open>
In this example we have a structured Isar proof, with the named assumption
- \<open>X\<close> and a conclusion @{term "P"}. With the match method we can find the
- local facts @{term "Q \<longrightarrow> P"} and @{term "Q"}, binding them to separately as
+ \<open>X\<close> and a conclusion \<^term>\<open>P\<close>. With the match method we can find the
+ local facts \<^term>\<open>Q \<longrightarrow> P\<close> and \<^term>\<open>Q\<close>, binding them to separately as
\<open>I\<close> and \<open>I'\<close>. We then specialize the modus-ponens rule @{thm mp [of Q P]} to
these facts to solve the goal.
\<close>
@@ -354,10 +354,9 @@
\<open>match conclusion in A \<Rightarrow> \<open>insert mp [OF I I']\<close>\<close>)
text \<open>
- In this example @{term A} is a match variable which is bound to @{term P}
+ In this example \<^term>\<open>A\<close> is a match variable which is bound to \<^term>\<open>P\<close>
upon a successful match. The inner @{method match} then matches the
- now-bound @{term A} (bound to @{term P}) against the conclusion (also @{term
- P}), finally applying the specialized rule to solve the goal.
+ now-bound \<^term>\<open>A\<close> (bound to \<^term>\<open>P\<close>) against the conclusion (also \<^term>\<open>P\<close>), finally applying the specialized rule to solve the goal.
Schematic terms like \<open>?P\<close> may also be used to specify match variables, but
the result of the match is not bound, and thus cannot be used in the inner
@@ -377,14 +376,14 @@
\<open>rule exI [where P = Q and x = y, OF U]\<close>\<close>)
text \<open>
- The first @{method match} matches the pattern @{term "\<exists>x. Q x"} against the
- current conclusion, binding the term @{term "Q"} in the inner match. Next
+ The first @{method match} matches the pattern \<^term>\<open>\<exists>x. Q x\<close> against the
+ current conclusion, binding the term \<^term>\<open>Q\<close> in the inner match. Next
the pattern \<open>Q y\<close> is matched against all premises of the current subgoal. In
- this case @{term "Q"} is fixed and @{term "y"} may be instantiated. Once a
+ this case \<^term>\<open>Q\<close> is fixed and \<^term>\<open>y\<close> may be instantiated. Once a
match is found, the local fact \<open>U\<close> is bound to the matching premise and the
- variable @{term "y"} is bound to the matching witness. The existential
- introduction rule \<open>exI:\<close>~@{thm exI} is then instantiated with @{term "y"} as
- the witness and @{term "Q"} as the predicate, with its proof obligation
+ variable \<^term>\<open>y\<close> is bound to the matching witness. The existential
+ introduction rule \<open>exI:\<close>~@{thm exI} is then instantiated with \<^term>\<open>y\<close> as
+ the witness and \<^term>\<open>Q\<close> as the predicate, with its proof obligation
solved by the local fact U (using the Isar attribute @{attribute OF}). The
following example is a trivial use of this method.
\<close>
@@ -413,11 +412,10 @@
\<open>erule allE [where x = y]\<close>)
text \<open>
- Here we take a single parameter @{term y} and specialize the universal
+ Here we take a single parameter \<^term>\<open>y\<close> and specialize the universal
elimination rule (@{thm allE}) to it, then attempt to apply this specialized
rule with @{method erule}. The method @{method erule} will attempt to unify
- with a universal quantifier in the premises that matches the type of @{term
- y}. Since @{keyword "premises"} causes a focus, however, there are no
+ with a universal quantifier in the premises that matches the type of \<^term>\<open>y\<close>. Since @{keyword "premises"} causes a focus, however, there are no
subgoal premises to be found and thus @{method my_allE_bad} will always
fail. If focusing instead left the premises in place, using methods like
@{method erule} would lead to unintended behaviour, specifically during
@@ -475,8 +473,8 @@
text \<open>
In this example, the inner @{method match} can find the focused premise
- @{term B}. In contrast, the @{method assumption} method would fail here due
- to @{term B} not being logically accessible.
+ \<^term>\<open>B\<close>. In contrast, the @{method assumption} method would fail here due
+ to \<^term>\<open>B\<close> not being logically accessible.
\<close>
lemma "A \<Longrightarrow> A \<and> (B \<longrightarrow> B)"
@@ -485,10 +483,8 @@
\<bar> H': B \<Rightarrow> \<open>rule H'\<close>\<close>)
text \<open>
- In this example, the only premise that exists in the first focus is @{term
- "A"}. Prior to the inner match, the rule \<open>impI\<close> changes the goal @{term "B \<longrightarrow>
- B"} into @{term "B \<Longrightarrow> B"}. A standard premise match would also include @{term
- A} as an original premise of the outer match. The \<open>local\<close> argument limits
+ In this example, the only premise that exists in the first focus is \<^term>\<open>A\<close>. Prior to the inner match, the rule \<open>impI\<close> changes the goal \<^term>\<open>B \<longrightarrow>
+ B\<close> into \<^term>\<open>B \<Longrightarrow> B\<close>. A standard premise match would also include \<^term>\<open>A\<close> as an original premise of the outer match. The \<open>local\<close> argument limits
the match to newly focused premises.
\<close>
@@ -558,8 +554,7 @@
text \<open>
In this example, the order of schematics in \<open>asm\<close> is actually \<open>?y ?x\<close>, but
we instantiate our matched rule in the opposite order. This is because the
- effective rule @{term I} was bound from the match, which declared the @{typ
- 'a} slot first and the @{typ 'b} slot second.
+ effective rule \<^term>\<open>I\<close> was bound from the match, which declared the \<^typ>\<open>'a\<close> slot first and the \<^typ>\<open>'b\<close> slot second.
To get the dynamic behaviour of @{attribute of} we can choose to invoke it
\<^emph>\<open>unchecked\<close>. This avoids trying to do any type inference for the provided
@@ -586,8 +581,8 @@
text \<open>
In this example, the pattern \<open>\<And>x :: 'a. ?P x \<Longrightarrow> ?Q x\<close> matches against the
- only premise, giving an appropriately typed slot for @{term y}. After the
- match, the resulting rule is instantiated to @{term y} and then declared as
+ only premise, giving an appropriately typed slot for \<^term>\<open>y\<close>. After the
+ match, the resulting rule is instantiated to \<^term>\<open>y\<close> and then declared as
an @{attribute intros} rule. This is then picked up by @{method prop_solver}
to solve the goal.
\<close>
@@ -611,7 +606,7 @@
done
text \<open>
- In the first @{method match}, without the \<open>(multi)\<close> argument, @{term I} is
+ In the first @{method match}, without the \<open>(multi)\<close> argument, \<^term>\<open>I\<close> is
only ever be bound to one of the members of \<open>asms\<close>. This backtracks over
both possibilities (see next section), however neither assumption in
isolation is sufficient to solve to goal. The use of the @{method solves}
@@ -623,7 +618,7 @@
Using for-fixed variables in patterns imposes additional constraints on the
results. In all previous examples, the choice of using \<open>?P\<close> or a for-fixed
- @{term P} only depended on whether or not @{term P} was mentioned in another
+ \<^term>\<open>P\<close> only depended on whether or not \<^term>\<open>P\<close> was mentioned in another
pattern or the inner method. When using a multi-match, however, all
for-fixed terms must agree in the results.
\<close>
@@ -653,10 +648,9 @@
text \<open>
Dummy patterns may be given as placeholders for unique schematics in
patterns. They implicitly receive all currently bound variables as
- arguments, and are coerced into the @{typ prop} type whenever possible. For
+ arguments, and are coerced into the \<^typ>\<open>prop\<close> type whenever possible. For
example, the trivial dummy pattern \<open>_\<close> will match any proposition. In
- contrast, by default the pattern \<open>?P\<close> is considered to have type @{typ
- bool}. It will not bind anything with meta-logical connectives (e.g. \<open>_ \<Longrightarrow> _\<close>
+ contrast, by default the pattern \<open>?P\<close> is considered to have type \<^typ>\<open>bool\<close>. It will not bind anything with meta-logical connectives (e.g. \<open>_ \<Longrightarrow> _\<close>
or \<open>_ &&& _\<close>).
\<close>
@@ -718,8 +712,8 @@
\<open>rule mp [OF I' I [THEN conjunct1]]\<close>)
text \<open>
- In this example, once a conjunction is found (@{term "P \<and> Q"}), all possible
- implications of @{term "P"} in the premises are considered, evaluating the
+ In this example, once a conjunction is found (\<^term>\<open>P \<and> Q\<close>), all possible
+ implications of \<^term>\<open>P\<close> in the premises are considered, evaluating the
inner @{method rule} with each consequent. No other conjunctions will be
considered, with method failure occurring once all implications of the form
\<open>P \<longrightarrow> ?U\<close> have been explored. Here the left-right processing of individual
@@ -735,8 +729,8 @@
text \<open>
In this example, the first lemma is solved by \<open>foo\<^sub>2\<close>, by first picking
- @{term "A \<longrightarrow> D"} for \<open>I'\<close>, then backtracking and ultimately succeeding after
- picking @{term "A \<longrightarrow> C"}. In the second lemma, however, @{term "C \<and> D"} is
+ \<^term>\<open>A \<longrightarrow> D\<close> for \<open>I'\<close>, then backtracking and ultimately succeeding after
+ picking \<^term>\<open>A \<longrightarrow> C\<close>. In the second lemma, however, \<^term>\<open>C \<and> D\<close> is
matched first, the second pattern in the match cannot be found and so the
method fails, falling through to @{method prop_solver}.
@@ -768,14 +762,14 @@
text \<open>
Intuitively it seems like this proof should fail to check. The first match
- result, which binds @{term I} to the first two members of \<open>asms\<close>, fails the
- second inner match due to binding @{term P} to @{term A}. Backtracking then
- attempts to bind @{term I} to the third member of \<open>asms\<close>. This passes all
+ result, which binds \<^term>\<open>I\<close> to the first two members of \<open>asms\<close>, fails the
+ second inner match due to binding \<^term>\<open>P\<close> to \<^term>\<open>A\<close>. Backtracking then
+ attempts to bind \<^term>\<open>I\<close> to the third member of \<open>asms\<close>. This passes all
inner matches, but fails when @{method rule} cannot successfully apply this
to the current goal. After this, a valid match that is produced by the
- unifier is one which binds @{term P} to simply \<open>\<lambda>a. A ?x\<close>. The first inner
- match succeeds because \<open>\<lambda>a. A ?x\<close> does not match @{term A}. The next inner
- match succeeds because @{term I} has only been bound to the first member of
+ unifier is one which binds \<^term>\<open>P\<close> to simply \<open>\<lambda>a. A ?x\<close>. The first inner
+ match succeeds because \<open>\<lambda>a. A ?x\<close> does not match \<^term>\<open>A\<close>. The next inner
+ match succeeds because \<^term>\<open>I\<close> has only been bound to the first member of
\<open>asms\<close>. This is due to @{method match} considering \<open>\<lambda>a. A ?x\<close> and \<open>\<lambda>a. A ?y\<close>
as distinct terms.
@@ -808,7 +802,7 @@
text \<open>
For the first member of \<open>asms\<close> the dummy pattern successfully matches
- against @{term "B \<Longrightarrow> C"} and so the proof is successful.
+ against \<^term>\<open>B \<Longrightarrow> C\<close> and so the proof is successful.
\<close>
lemma
@@ -820,16 +814,16 @@
text \<open>
This proof will fail to solve the goal. Our match pattern will only match
- rules which have a single premise, and conclusion @{term C}, so the first
+ rules which have a single premise, and conclusion \<^term>\<open>C\<close>, so the first
member of \<open>asms\<close> is not bound and thus the proof fails. Matching a pattern
- of the form @{term "P \<Longrightarrow> Q"} against this fact will bind @{term "P"} to
- @{term "A"} and @{term Q} to @{term "B \<Longrightarrow> C"}. Our pattern, with a concrete
- @{term "C"} in the conclusion, will fail to match this fact.
+ of the form \<^term>\<open>P \<Longrightarrow> Q\<close> against this fact will bind \<^term>\<open>P\<close> to
+ \<^term>\<open>A\<close> and \<^term>\<open>Q\<close> to \<^term>\<open>B \<Longrightarrow> C\<close>. Our pattern, with a concrete
+ \<^term>\<open>C\<close> in the conclusion, will fail to match this fact.
To express our desired match, we may \<^emph>\<open>uncurry\<close> our rules before matching
against them. This forms a meta-conjunction of all premises in a fact, so
that only one implication remains. For example the uncurried version of
- @{term "A \<Longrightarrow> B \<Longrightarrow> C"} is @{term "A &&& B \<Longrightarrow> C"}. This will now match our
+ \<^term>\<open>A \<Longrightarrow> B \<Longrightarrow> C\<close> is \<^term>\<open>A &&& B \<Longrightarrow> C\<close>. This will now match our
desired pattern \<open>_ \<Longrightarrow> C\<close>, and can be \<^emph>\<open>curried\<close> after the match to put it
back into normal form.
\<close>
@@ -861,8 +855,7 @@
In the first @{method match} we attempt to find a member of \<open>asms\<close> which
matches our goal precisely. This fails due to no such member existing. The
second match reverses the role of the fact in the match, by first giving a
- general pattern @{term P}. This bound pattern is then matched against @{term
- "A y"}. In this case, @{term P} is bound to \<open>A ?x\<close> and so it successfully
+ general pattern \<^term>\<open>P\<close>. This bound pattern is then matched against \<^term>\<open>A y\<close>. In this case, \<^term>\<open>P\<close> is bound to \<open>A ?x\<close> and so it successfully
matches.
\<close>
@@ -885,7 +878,7 @@
text \<open>
In this example the type \<open>'b\<close> is matched to \<open>'a\<close>, however statically they
are formally distinct types. The first match binds \<open>'b\<close> while the inner
- match serves to coerce @{term y} into having the type \<open>'b\<close>. This allows the
+ match serves to coerce \<^term>\<open>y\<close> into having the type \<open>'b\<close>. This allows the
rule instantiation to successfully apply.
\<close>
@@ -895,8 +888,7 @@
section \<open>Tracing methods\<close>
text \<open>
- Method tracing is supported by auxiliary print methods provided by @{theory
- "HOL-Eisbach.Eisbach_Tools"}. These include @{method print_fact}, @{method
+ Method tracing is supported by auxiliary print methods provided by \<^theory>\<open>HOL-Eisbach.Eisbach_Tools\<close>. These include @{method print_fact}, @{method
print_term} and @{method print_type}. Whenever a print method is evaluated
it leaves the goal unchanged and writes its argument as tracing output.
@@ -957,7 +949,7 @@
text \<open>
Here the new @{method splits} method transforms the goal to use only logical
- connectives: @{term "L = [] \<longrightarrow> False \<and> (\<forall>x y. L = x # y \<longrightarrow> True)"}. This goal
+ connectives: \<^term>\<open>L = [] \<longrightarrow> False \<and> (\<forall>x y. L = x # y \<longrightarrow> True)\<close>. This goal
is then in a form solvable by @{method prop_solver} when given the universal
quantifier introduction rule \<open>allI\<close>.
\<close>
--- a/src/Doc/Eisbach/Preface.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Eisbach/Preface.thy Sat Jan 05 17:24:33 2019 +0100
@@ -32,9 +32,8 @@
well as the @{method match} method, as well as discussing their integration
with existing Isar concepts such as @{command named_theorems}.
- These commands are provided by theory @{theory "HOL-Eisbach.Eisbach"}: it
- needs to be imported by all Eisbach applications. Theory theory @{theory
- "HOL-Eisbach.Eisbach_Tools"} provides additional proof methods and
+ These commands are provided by theory \<^theory>\<open>HOL-Eisbach.Eisbach\<close>: it
+ needs to be imported by all Eisbach applications. Theory \<^theory>\<open>HOL-Eisbach.Eisbach_Tools\<close> provides additional proof methods and
attributes that are occasionally useful.
\<close>
--- a/src/Doc/Functions/Functions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Functions/Functions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,8 +25,7 @@
giving its name, its type,
and a set of defining recursive equations.
If we leave out the type, the most general type will be
- inferred, which can sometimes lead to surprises: Since both @{term
- "1::nat"} and \<open>+\<close> are overloaded, we would end up
+ inferred, which can sometimes lead to surprises: Since both \<^term>\<open>1::nat\<close> and \<open>+\<close> are overloaded, we would end up
with \<open>fib :: nat \<Rightarrow> 'a::{one,plus}\<close>.
\<close>
@@ -88,13 +87,13 @@
Isabelle provides customized induction rules for recursive
functions. These rules follow the recursive structure of the
definition. Here is the rule @{thm [source] sep.induct} arising from the
- above definition of @{const sep}:
+ above definition of \<^const>\<open>sep\<close>:
@{thm [display] sep.induct}
We have a step case for list with at least two elements, and two
base cases for the zero- and the one-element list. Here is a simple
- proof about @{const sep} and @{const map}
+ proof about \<^const>\<open>sep\<close> and \<^const>\<open>map\<close>
\<close>
lemma "map f (sep x ys) = sep (f x) (map f ys)"
@@ -219,7 +218,7 @@
implicitly refers to the last function definition.
The \<open>relation\<close> method takes a relation of
- type @{typ "('a \<times> 'a) set"}, where @{typ "'a"} is the argument type of
+ type \<^typ>\<open>('a \<times> 'a) set\<close>, where \<^typ>\<open>'a\<close> is the argument type of
the function. If the function has multiple curried arguments, then
these are packed together into a tuple, as it happened in the above
example.
@@ -259,8 +258,7 @@
This corresponds to a nested
loop where one index counts up and the other down. Termination can
be proved using a lexicographic combination of two measures, namely
- the value of \<open>N\<close> and the above difference. The @{const
- "measures"} combinator generalizes \<open>measure\<close> by taking a
+ the value of \<open>N\<close> and the above difference. The \<^const>\<open>measures\<close> combinator generalizes \<open>measure\<close> by taking a
list of measure functions.
\<close>
@@ -368,7 +366,7 @@
text \<open>
To eliminate the mutual dependencies, Isabelle internally
creates a single function operating on the sum
- type @{typ "nat + nat"}. Then, @{const even} and @{const odd} are
+ type \<^typ>\<open>nat + nat\<close>. Then, \<^const>\<open>even\<close> and \<^const>\<open>odd\<close> are
defined as projections. Consequently, termination has to be proved
simultaneously for both functions, by specifying a measure on the
sum type:
@@ -390,7 +388,7 @@
generally requires simultaneous induction. The induction rule @{thm [source] "even_odd.induct"}
generated from the above definition reflects this.
- Let us prove something about @{const even} and @{const odd}:
+ Let us prove something about \<^const>\<open>even\<close> and \<^const>\<open>odd\<close>:
\<close>
lemma even_odd_mod2:
@@ -405,7 +403,7 @@
text \<open>
We get four subgoals, which correspond to the clauses in the
- definition of @{const even} and @{const odd}:
+ definition of \<^const>\<open>even\<close> and \<^const>\<open>odd\<close>:
@{subgoals[display,indent=0]}
Simplification solves the first two goals, leaving us with two
statements about the \<open>mod\<close> operation to prove:
@@ -428,7 +426,7 @@
In proofs like this, the simultaneous induction is really essential:
Even if we are just interested in one of the results, the other
one is necessary to strengthen the induction hypothesis. If we leave
- out the statement about @{const odd} and just write @{term True} instead,
+ out the statement about \<^const>\<open>odd\<close> and just write \<^term>\<open>True\<close> instead,
the same proof fails:
\<close>
@@ -471,7 +469,7 @@
@{thm[display] list_to_option.elims}
\noindent
- This lets us eliminate an assumption of the form @{prop "list_to_option xs = y"} and replace it
+ This lets us eliminate an assumption of the form \<^prop>\<open>list_to_option xs = y\<close> and replace it
with the two cases, e.g.:
\<close>
@@ -488,7 +486,7 @@
text \<open>
Sometimes it is convenient to derive specialized versions of the \<open>elim\<close> rules above and
keep them around as facts explicitly. For example, it is natural to show that if
- @{prop "list_to_option xs = Some y"}, then @{term xs} must be a singleton. The command
+ \<^prop>\<open>list_to_option xs = Some y\<close>, then \<^term>\<open>xs\<close> must be a singleton. The command
\cmd{fun\_cases} derives such facts automatically, by instantiating and simplifying the general
elimination rules given some pattern:
\<close>
@@ -511,15 +509,15 @@
Up to now, we used pattern matching only on datatypes, and the
patterns were always disjoint and complete, and if they weren't,
they were made disjoint automatically like in the definition of
- @{const "sep"} in \S\ref{patmatch}.
+ \<^const>\<open>sep\<close> in \S\ref{patmatch}.
This automatic splitting can significantly increase the number of
equations involved, and this is not always desirable. The following
example shows the problem:
Suppose we are modeling incomplete knowledge about the world by a
- three-valued datatype, which has values @{term "T"}, @{term "F"}
- and @{term "X"} for true, false and uncertain propositions, respectively.
+ three-valued datatype, which has values \<^term>\<open>T\<close>, \<^term>\<open>F\<close>
+ and \<^term>\<open>X\<close> for true, false and uncertain propositions, respectively.
\<close>
datatype P3 = T | F | X
@@ -538,7 +536,7 @@
text \<open>
This definition is useful, because the equations can directly be used
as simplification rules. But the patterns overlap: For example,
- the expression @{term "And T T"} is matched by both the first and
+ the expression \<^term>\<open>And T T\<close> is matched by both the first and
the second equation. By default, Isabelle makes the patterns disjoint by
splitting them up, producing instances:
\<close>
@@ -553,14 +551,14 @@
\begin{enumerate}
\item If the datatype has many constructors, there can be an
- explosion of equations. For @{const "And"}, we get seven instead of
+ explosion of equations. For \<^const>\<open>And\<close>, we get seven instead of
five equations, which can be tolerated, but this is just a small
example.
\item Since splitting makes the equations \qt{less general}, they
- do not always match in rewriting. While the term @{term "And x F"}
- can be simplified to @{term "F"} with the original equations, a
- (manual) case split on @{term "x"} is now necessary.
+ do not always match in rewriting. While the term \<^term>\<open>And x F\<close>
+ can be simplified to \<^term>\<open>F\<close> with the original equations, a
+ (manual) case split on \<^term>\<open>x\<close> is now necessary.
\item The splitting also concerns the induction rule @{thm [source]
"And.induct"}. Instead of five premises it now has seven, which
@@ -573,8 +571,8 @@
If we do not want the automatic splitting, we can switch it off by
leaving out the \cmd{sequential} option. However, we will have to
prove that our pattern matching is consistent\footnote{This prevents
- us from defining something like @{term "f x = True"} and @{term "f x
- = False"} simultaneously.}:
+ us from defining something like \<^term>\<open>f x = True\<close> and \<^term>\<open>f x
+ = False\<close> simultaneously.}:
\<close>
function And2 :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
@@ -592,11 +590,11 @@
@{subgoals[display,indent=0]}\vspace{-1.2em}\hspace{3cm}\vdots\vspace{1.2em}
The first subgoal expresses the completeness of the patterns. It has
- the form of an elimination rule and states that every @{term x} of
+ the form of an elimination rule and states that every \<^term>\<open>x\<close> of
the function's input type must match at least one of the patterns\footnote{Completeness could
be equivalently stated as a disjunction of existential statements:
-@{term "(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
- (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method \<open>atomize_elim\<close> to get that form instead.}. If the patterns just involve
+\<^term>\<open>(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
+ (\<exists>p. x = (F, p)) \<or> (x = (X, X))\<close>, and you can use the method \<open>atomize_elim\<close> to get that form instead.}. If the patterns just involve
datatypes, we can solve it with the \<open>pat_completeness\<close>
method:
\<close>
@@ -640,8 +638,8 @@
This kind of matching is again justified by the proof of pattern
completeness and compatibility.
The proof obligation for pattern completeness states that every natural number is
- either @{term "0::nat"}, @{term "1::nat"} or @{term "n +
- (2::nat)"}:
+ either \<^term>\<open>0::nat\<close>, \<^term>\<open>1::nat\<close> or \<^term>\<open>n +
+ (2::nat)\<close>:
@{subgoals[display,indent=0,goals_limit=1]}
@@ -746,8 +744,8 @@
section \<open>Partiality\<close>
text \<open>
- In HOL, all functions are total. A function @{term "f"} applied to
- @{term "x"} always has the value @{term "f x"}, and there is no notion
+ In HOL, all functions are total. A function \<^term>\<open>f\<close> applied to
+ \<^term>\<open>x\<close> always has the value \<^term>\<open>f x\<close>, and there is no notion
of undefinedness.
This is why we have to do termination
proofs when defining functions: The proof justifies that the
@@ -772,8 +770,8 @@
subsection \<open>Domain predicates\<close>
text \<open>
- The trick is that Isabelle has not only defined the function @{const findzero}, but also
- a predicate @{term "findzero_dom"} that characterizes the values where the function
+ The trick is that Isabelle has not only defined the function \<^const>\<open>findzero\<close>, but also
+ a predicate \<^term>\<open>findzero_dom\<close> that characterizes the values where the function
terminates: the \emph{domain} of the function. If we treat a
partial function just as a total function with an additional domain
predicate, we can derive simplification and
@@ -793,14 +791,14 @@
text \<open>
Remember that all we
are doing here is use some tricks to make a total function appear
- as if it was partial. We can still write the term @{term "findzero
- (\<lambda>x. 1) 0"} and like any other term of type @{typ nat} it is equal
+ as if it was partial. We can still write the term \<^term>\<open>findzero
+ (\<lambda>x. 1) 0\<close> and like any other term of type \<^typ>\<open>nat\<close> it is equal
to some natural number, although we might not be able to find out
which one. The function is \emph{underdefined}.
But it is defined enough to prove something interesting about it. We
- can prove that if @{term "findzero f n"}
- terminates, it indeed returns a zero of @{term f}:
+ can prove that if \<^term>\<open>findzero f n\<close>
+ terminates, it indeed returns a zero of \<^term>\<open>f\<close>:
\<close>
lemma findzero_zero: "findzero_dom (f, n) \<Longrightarrow> f (findzero f n) = 0"
@@ -815,9 +813,8 @@
@{subgoals[display,indent=0]}
\noindent The hypothesis in our lemma was used to satisfy the first premise in
- the induction rule. However, we also get @{term
- "findzero_dom (f, n)"} as a local assumption in the induction step. This
- allows unfolding @{term "findzero f n"} using the \<open>psimps\<close>
+ the induction rule. However, we also get \<^term>\<open>findzero_dom (f, n)\<close> as a local assumption in the induction step. This
+ allows unfolding \<^term>\<open>findzero f n\<close> using the \<open>psimps\<close>
rule, and the rest is trivial.
\<close>
apply (simp add: findzero.psimps)
@@ -829,7 +826,7 @@
complicated proof written in Isar. It is verbose enough to show how
partiality comes into play: From the partial induction, we get an
additional domain condition hypothesis. Observe how this condition
- is applied when calls to @{term findzero} are unfolded.
+ is applied when calls to \<^term>\<open>findzero\<close> are unfolded.
\<close>
text_raw \<open>
@@ -876,7 +873,7 @@
Now that we have proved some interesting properties about our
function, we should turn to the domain predicate and see if it is
actually true for some values. Otherwise we would have just proved
- lemmas with @{term False} as a premise.
+ lemmas with \<^term>\<open>False\<close> as a premise.
Essentially, we need some introduction rules for \<open>findzero_dom\<close>. The function package can prove such domain
introduction rules automatically. But since they are not used very
@@ -912,7 +909,7 @@
Figure \ref{findzero_term} gives a detailed Isar proof of the fact
that \<open>findzero\<close> terminates if there is a zero which is greater
- or equal to @{term n}. First we derive two useful rules which will
+ or equal to \<^term>\<open>n\<close>. First we derive two useful rules which will
solve the base case and the step case of the induction. The
induction is then straightforward, except for the unusual induction
principle.
@@ -983,28 +980,27 @@
@{abbrev[display] findzero_dom}
- The domain predicate is the \emph{accessible part} of a relation @{const
- findzero_rel}, which was also created internally by the function
- package. @{const findzero_rel} is just a normal
+ The domain predicate is the \emph{accessible part} of a relation \<^const>\<open>findzero_rel\<close>, which was also created internally by the function
+ package. \<^const>\<open>findzero_rel\<close> is just a normal
inductive predicate, so we can inspect its definition by
looking at the introduction rules @{thm [source] findzero_rel.intros}.
In our case there is just a single rule:
@{thm[display] findzero_rel.intros}
- The predicate @{const findzero_rel}
+ The predicate \<^const>\<open>findzero_rel\<close>
describes the \emph{recursion relation} of the function
definition. The recursion relation is a binary relation on
the arguments of the function that relates each argument to its
recursive calls. In general, there is one introduction rule for each
recursive call.
- The predicate @{term "Wellfounded.accp findzero_rel"} is the accessible part of
+ The predicate \<^term>\<open>Wellfounded.accp findzero_rel\<close> is the accessible part of
that relation. An argument belongs to the accessible part, if it can
be reached in a finite number of steps (cf.~its definition in \<open>Wellfounded.thy\<close>).
Since the domain predicate is just an abbreviation, you can use
- lemmas for @{const Wellfounded.accp} and @{const findzero_rel} directly. Some
+ lemmas for \<^const>\<open>Wellfounded.accp\<close> and \<^const>\<open>findzero_rel\<close> directly. Some
lemmas which are occasionally useful are @{thm [source] accpI}, @{thm [source]
accp_downward}, and of course the introduction and elimination rules
for the recursion relation @{thm [source] "findzero_rel.intros"} and @{thm
@@ -1041,7 +1037,7 @@
@{subgoals[display]}
- Of course this statement is true, since we know that @{const nz} is
+ Of course this statement is true, since we know that \<^const>\<open>nz\<close> is
the zero function. And in fact we have no problem proving this
property by induction.
\<close>
@@ -1051,7 +1047,7 @@
text \<open>
We formulate this as a partial correctness lemma with the condition
- @{term "nz_dom n"}. This allows us to prove it with the \<open>pinduct\<close> rule before we have proved termination. With this lemma,
+ \<^term>\<open>nz_dom n\<close>. This allows us to prove it with the \<open>pinduct\<close> rule before we have proved termination. With this lemma,
the termination proof works as expected:
\<close>
@@ -1111,8 +1107,7 @@
text \<open>
Higher-order recursion occurs when recursive calls
- are passed as arguments to higher-order combinators such as @{const
- map}, @{term filter} etc.
+ are passed as arguments to higher-order combinators such as \<^const>\<open>map\<close>, \<^term>\<open>filter\<close> etc.
As an example, imagine a datatype of n-ary trees:
\<close>
@@ -1122,7 +1117,7 @@
text \<open>\noindent We can define a function which swaps the left and right subtrees recursively, using the
- list functions @{const rev} and @{const map}:\<close>
+ list functions \<^const>\<open>rev\<close> and \<^const>\<open>map\<close>:\<close>
fun mirror :: "'a tree \<Rightarrow> 'a tree"
where
@@ -1139,39 +1134,37 @@
As usual, we have to give a wellfounded relation, such that the
arguments of the recursive calls get smaller. But what exactly are
the arguments of the recursive calls when mirror is given as an
- argument to @{const map}? Isabelle gives us the
+ argument to \<^const>\<open>map\<close>? Isabelle gives us the
subgoals
@{subgoals[display,indent=0]}
- So the system seems to know that @{const map} only
- applies the recursive call @{term "mirror"} to elements
- of @{term "l"}, which is essential for the termination proof.
+ So the system seems to know that \<^const>\<open>map\<close> only
+ applies the recursive call \<^term>\<open>mirror\<close> to elements
+ of \<^term>\<open>l\<close>, which is essential for the termination proof.
- This knowledge about @{const map} is encoded in so-called congruence rules,
+ This knowledge about \<^const>\<open>map\<close> is encoded in so-called congruence rules,
which are special theorems known to the \cmd{function} command. The
- rule for @{const map} is
+ rule for \<^const>\<open>map\<close> is
@{thm[display] map_cong}
- You can read this in the following way: Two applications of @{const
- map} are equal, if the list arguments are equal and the functions
+ You can read this in the following way: Two applications of \<^const>\<open>map\<close> are equal, if the list arguments are equal and the functions
coincide on the elements of the list. This means that for the value
- @{term "map f l"} we only have to know how @{term f} behaves on
- the elements of @{term l}.
+ \<^term>\<open>map f l\<close> we only have to know how \<^term>\<open>f\<close> behaves on
+ the elements of \<^term>\<open>l\<close>.
Usually, one such congruence rule is
needed for each higher-order construct that is used when defining
- new functions. In fact, even basic functions like @{const
- If} and @{const Let} are handled by this mechanism. The congruence
- rule for @{const If} states that the \<open>then\<close> branch is only
+ new functions. In fact, even basic functions like \<^const>\<open>If\<close> and \<^const>\<open>Let\<close> are handled by this mechanism. The congruence
+ rule for \<^const>\<open>If\<close> states that the \<open>then\<close> branch is only
relevant if the condition is true, and the \<open>else\<close> branch only if it
is false:
@{thm[display] if_cong}
Congruence rules can be added to the
- function package by giving them the @{term fundef_cong} attribute.
+ function package by giving them the \<^term>\<open>fundef_cong\<close> attribute.
The constructs that are predefined in Isabelle, usually
come with the respective congruence rules.
--- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4,7 +4,7 @@
begin
(*>*)
text\<open>
-\chapter{@{theory Main}}
+\chapter{\<^theory>\<open>Main\<close>}
\section{Natural numbers}
@@ -19,7 +19,7 @@
\begin{quote}
(\<open>induction n rule: less_induct\<close>)
\end{quote}
-In fact, it is not restricted to @{typ nat} but works for any wellfounded
+In fact, it is not restricted to \<^typ>\<open>nat\<close> but works for any wellfounded
order \<open><\<close>.
There are many more special induction rules. You can find all of them
@@ -29,7 +29,7 @@
\end{quote}
-\paragraph{How to convert numerals into @{const Suc} terms}~\\
+\paragraph{How to convert numerals into \<^const>\<open>Suc\<close> terms}~\\
Solution: simplify with the lemma @{thm[source] numeral_eq_Suc}.
\noindent
@@ -40,7 +40,7 @@
by (simp add: numeral_eq_Suc)
text\<open>This is a typical situation: function ``\<open>^\<close>'' is defined
-by pattern matching on @{const Suc} but is applied to a numeral.
+by pattern matching on \<^const>\<open>Suc\<close> but is applied to a numeral.
Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
One can be more specific with the lemmas @{thm [source] numeral_2_eq_2}
@@ -73,10 +73,10 @@
%Tobias Nipkow
\section{Algebraic simplification}
-On the numeric types @{typ nat}, @{typ int} and @{typ real},
+On the numeric types \<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>,
proof method \<open>simp\<close> and friends can deal with a limited amount of linear
arithmetic (no multiplication except by numerals) and method \<open>arith\<close> can
-handle full linear arithmetic (on @{typ nat}, @{typ int} including quantifiers).
+handle full linear arithmetic (on \<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close> including quantifiers).
But what to do when proper multiplication is involved?
At this point it can be helpful to simplify with the lemma list
@{thm [source] algebra_simps}. Examples:
@@ -95,10 +95,10 @@
terms are rewritten into a normal form by multiplying out,
rearranging sums and products into some canonical order.
In the above lemma the normal form will be something like
-@{term"x*y + y*y - x*z - y*z"}.
-This works for concrete types like @{typ int} as well as for classes like
-@{class comm_ring} (commutative rings). For some classes (e.g.\ @{class ring}
-and @{class comm_ring}) this yields a decision procedure for equality.
+\<^term>\<open>x*y + y*y - x*z - y*z\<close>.
+This works for concrete types like \<^typ>\<open>int\<close> as well as for classes like
+\<^class>\<open>comm_ring\<close> (commutative rings). For some classes (e.g.\ \<^class>\<open>ring\<close>
+and \<^class>\<open>comm_ring\<close>) this yields a decision procedure for equality.
Additional function and predicate symbols are not a problem either:
\<close>
@@ -107,8 +107,8 @@
by(simp add: algebra_simps)
text\<open>Here @{thm[source]algebra_simps} merely has the effect of rewriting
-@{term"y*x"} to @{term"x*y"} (or the other way around). This yields
-a problem of the form @{prop"2*t - t < t + (1::int)"} and we are back in the
+\<^term>\<open>y*x\<close> to \<^term>\<open>x*y\<close> (or the other way around). This yields
+a problem of the form \<^prop>\<open>2*t - t < t + (1::int)\<close> and we are back in the
realm of linear arithmetic.
Because @{thm[source]algebra_simps} multiplies out, terms can explode.
--- a/src/Doc/Implementation/Eq.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Eq.thy Sat Jan 05 17:24:33 2019 +0100
@@ -43,12 +43,12 @@
same reasoning schemes as theorems that can be composed like object-level
rules as explained in \secref{sec:obj-rules}.
- For example, @{ML Thm.symmetric} as Pure inference is an ML function that
+ For example, \<^ML>\<open>Thm.symmetric\<close> as Pure inference is an ML function that
maps a theorem \<open>th\<close> stating \<open>t \<equiv> u\<close> to one stating \<open>u \<equiv> t\<close>. In contrast,
@{thm [source] Pure.symmetric} as Pure theorem expresses the same reasoning
in declarative form. If used like \<open>th [THEN Pure.symmetric]\<close> in Isar source
notation, it achieves a similar effect as the ML inference function,
- although the rule attribute @{attribute THEN} or ML operator @{ML "op RS"}
+ although the rule attribute @{attribute THEN} or ML operator \<^ML>\<open>op RS\<close>
involve the full machinery of higher-order unification (modulo
\<open>\<beta>\<eta>\<close>-conversion) and lifting of \<open>\<And>/\<Longrightarrow>\<close> contexts.
\<close>
@@ -99,21 +99,21 @@
@{index_ML fold_goals_tac: "Proof.context -> thm list -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML rewrite_rule}~\<open>ctxt rules thm\<close> rewrites the whole theorem by the
+ \<^descr> \<^ML>\<open>rewrite_rule\<close>~\<open>ctxt rules thm\<close> rewrites the whole theorem by the
given rules.
- \<^descr> @{ML rewrite_goals_rule}~\<open>ctxt rules thm\<close> rewrites the outer premises of
+ \<^descr> \<^ML>\<open>rewrite_goals_rule\<close>~\<open>ctxt rules thm\<close> rewrites the outer premises of
the given theorem. Interpreting the same as a goal state
(\secref{sec:tactical-goals}) it means to rewrite all subgoals (in the same
- manner as @{ML rewrite_goals_tac}).
+ manner as \<^ML>\<open>rewrite_goals_tac\<close>).
- \<^descr> @{ML rewrite_goal_tac}~\<open>ctxt rules i\<close> rewrites subgoal \<open>i\<close> by the given
+ \<^descr> \<^ML>\<open>rewrite_goal_tac\<close>~\<open>ctxt rules i\<close> rewrites subgoal \<open>i\<close> by the given
rewrite rules.
- \<^descr> @{ML rewrite_goals_tac}~\<open>ctxt rules\<close> rewrites all subgoals by the given
+ \<^descr> \<^ML>\<open>rewrite_goals_tac\<close>~\<open>ctxt rules\<close> rewrites all subgoals by the given
rewrite rules.
- \<^descr> @{ML fold_goals_tac}~\<open>ctxt rules\<close> essentially uses @{ML rewrite_goals_tac}
+ \<^descr> \<^ML>\<open>fold_goals_tac\<close>~\<open>ctxt rules\<close> essentially uses \<^ML>\<open>rewrite_goals_tac\<close>
with the symmetric form of each member of \<open>rules\<close>, re-ordered to fold longer
expression first. This supports to idea to fold primitive definitions that
appear in expended form in the proof state.
--- a/src/Doc/Implementation/Integration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Integration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -44,20 +44,19 @@
@{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Toplevel.state} represents Isar toplevel states, which are
+ \<^descr> Type \<^ML_type>\<open>Toplevel.state\<close> represents Isar toplevel states, which are
normally manipulated through the concept of toplevel transitions only
(\secref{sec:toplevel-transition}).
- \<^descr> @{ML Toplevel.UNDEF} is raised for undefined toplevel operations. Many
- operations work only partially for certain cases, since @{ML_type
- Toplevel.state} is a sum type.
+ \<^descr> \<^ML>\<open>Toplevel.UNDEF\<close> is raised for undefined toplevel operations. Many
+ operations work only partially for certain cases, since \<^ML_type>\<open>Toplevel.state\<close> is a sum type.
+
+ \<^descr> \<^ML>\<open>Toplevel.is_toplevel\<close>~\<open>state\<close> checks for an empty toplevel state.
- \<^descr> @{ML Toplevel.is_toplevel}~\<open>state\<close> checks for an empty toplevel state.
+ \<^descr> \<^ML>\<open>Toplevel.theory_of\<close>~\<open>state\<close> selects the background theory of \<open>state\<close>,
+ it raises \<^ML>\<open>Toplevel.UNDEF\<close> for an empty toplevel state.
- \<^descr> @{ML Toplevel.theory_of}~\<open>state\<close> selects the background theory of \<open>state\<close>,
- it raises @{ML Toplevel.UNDEF} for an empty toplevel state.
-
- \<^descr> @{ML Toplevel.proof_of}~\<open>state\<close> selects the Isar proof state if available,
+ \<^descr> \<^ML>\<open>Toplevel.proof_of\<close>~\<open>state\<close> selects the Isar proof state if available,
otherwise it raises an error.
\<close>
@@ -110,23 +109,23 @@
Toplevel.transition -> Toplevel.transition"} \\
\end{mldecls}
- \<^descr> @{ML Toplevel.keep}~\<open>tr\<close> adjoins a diagnostic function.
+ \<^descr> \<^ML>\<open>Toplevel.keep\<close>~\<open>tr\<close> adjoins a diagnostic function.
- \<^descr> @{ML Toplevel.theory}~\<open>tr\<close> adjoins a theory transformer.
+ \<^descr> \<^ML>\<open>Toplevel.theory\<close>~\<open>tr\<close> adjoins a theory transformer.
- \<^descr> @{ML Toplevel.theory_to_proof}~\<open>tr\<close> adjoins a global goal function, which
+ \<^descr> \<^ML>\<open>Toplevel.theory_to_proof\<close>~\<open>tr\<close> adjoins a global goal function, which
turns a theory into a proof state. The theory may be changed before entering
the proof; the generic Isar goal setup includes an \<^verbatim>\<open>after_qed\<close> argument
that specifies how to apply the proven result to the enclosing context, when
the proof is finished.
- \<^descr> @{ML Toplevel.proof}~\<open>tr\<close> adjoins a deterministic proof command, with a
+ \<^descr> \<^ML>\<open>Toplevel.proof\<close>~\<open>tr\<close> adjoins a deterministic proof command, with a
singleton result.
- \<^descr> @{ML Toplevel.proofs}~\<open>tr\<close> adjoins a general proof command, with zero or
+ \<^descr> \<^ML>\<open>Toplevel.proofs\<close>~\<open>tr\<close> adjoins a general proof command, with zero or
more result states (represented as a lazy list).
- \<^descr> @{ML Toplevel.end_proof}~\<open>tr\<close> adjoins a concluding proof command, that
+ \<^descr> \<^ML>\<open>Toplevel.end_proof\<close>~\<open>tr\<close> adjoins a concluding proof command, that
returns the resulting theory, after applying the resulting facts to the
target context.
\<close>
@@ -157,17 +156,17 @@
@{index_ML Thy_Info.register_thy: "theory -> unit"} \\
\end{mldecls}
- \<^descr> @{ML use_thy}~\<open>A\<close> ensures that theory \<open>A\<close> is fully up-to-date wrt.\ the
+ \<^descr> \<^ML>\<open>use_thy\<close>~\<open>A\<close> ensures that theory \<open>A\<close> is fully up-to-date wrt.\ the
external file store; outdated ancestors are reloaded on demand.
- \<^descr> @{ML Thy_Info.get_theory}~\<open>A\<close> retrieves the theory value presently
+ \<^descr> \<^ML>\<open>Thy_Info.get_theory\<close>~\<open>A\<close> retrieves the theory value presently
associated with name \<open>A\<close>. Note that the result might be outdated wrt.\ the
file-system content.
- \<^descr> @{ML Thy_Info.remove_thy}~\<open>A\<close> deletes theory \<open>A\<close> and all descendants from
+ \<^descr> \<^ML>\<open>Thy_Info.remove_thy\<close>~\<open>A\<close> deletes theory \<open>A\<close> and all descendants from
the theory database.
- \<^descr> @{ML Thy_Info.register_thy}~\<open>text thy\<close> registers an existing theory value
+ \<^descr> \<^ML>\<open>Thy_Info.register_thy\<close>~\<open>text thy\<close> registers an existing theory value
with the theory loader database and updates source version information
according to the file store.
\<close>
--- a/src/Doc/Implementation/Isar.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Isar.thy Sat Jan 05 17:24:33 2019 +0100
@@ -74,7 +74,7 @@
(term * term list) list list -> Proof.context -> Proof.state"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.state} represents Isar proof states. This is a
+ \<^descr> Type \<^ML_type>\<open>Proof.state\<close> represents Isar proof states. This is a
block-structured configuration with proof context, linguistic mode, and
optional goal. The latter consists of goal context, goal facts
(``\<open>using\<close>''), and tactical goal state (see \secref{sec:tactical-goals}).
@@ -83,8 +83,7 @@
some parts of the tactical goal --- how exactly is defined by the proof
method that is applied in that situation.
- \<^descr> @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
- Proof.assert_backward} are partial identity functions that fail unless a
+ \<^descr> \<^ML>\<open>Proof.assert_forward\<close>, \<^ML>\<open>Proof.assert_chain\<close>, \<^ML>\<open>Proof.assert_backward\<close> are partial identity functions that fail unless a
certain linguistic mode is active, namely ``\<open>proof(state)\<close>'',
``\<open>proof(chain)\<close>'', ``\<open>proof(prove)\<close>'', respectively (using the terminology
of @{cite "isabelle-isar-ref"}).
@@ -92,22 +91,21 @@
It is advisable study the implementations of existing proof commands for
suitable modes to be asserted.
- \<^descr> @{ML Proof.simple_goal}~\<open>state\<close> returns the structured Isar goal (if
+ \<^descr> \<^ML>\<open>Proof.simple_goal\<close>~\<open>state\<close> returns the structured Isar goal (if
available) in the form seen by ``simple'' methods (like @{method simp} or
@{method blast}). The Isar goal facts are already inserted as premises into
- the subgoals, which are presented individually as in @{ML Proof.goal}.
+ the subgoals, which are presented individually as in \<^ML>\<open>Proof.goal\<close>.
- \<^descr> @{ML Proof.goal}~\<open>state\<close> returns the structured Isar goal (if available)
+ \<^descr> \<^ML>\<open>Proof.goal\<close>~\<open>state\<close> returns the structured Isar goal (if available)
in the form seen by regular methods (like @{method rule}). The auxiliary
internal encoding of Pure conjunctions is split into individual subgoals as
usual.
- \<^descr> @{ML Proof.raw_goal}~\<open>state\<close> returns the structured Isar goal (if
+ \<^descr> \<^ML>\<open>Proof.raw_goal\<close>~\<open>state\<close> returns the structured Isar goal (if
available) in the raw internal form seen by ``raw'' methods (like @{method
- induct}). This form is rarely appropriate for diagnostic tools; @{ML
- Proof.simple_goal} or @{ML Proof.goal} should be used in most situations.
+ induct}). This form is rarely appropriate for diagnostic tools; \<^ML>\<open>Proof.simple_goal\<close> or \<^ML>\<open>Proof.goal\<close> should be used in most situations.
- \<^descr> @{ML Proof.theorem}~\<open>before_qed after_qed statement ctxt\<close> initializes a
+ \<^descr> \<^ML>\<open>Proof.theorem\<close>~\<open>before_qed after_qed statement ctxt\<close> initializes a
toplevel Isar proof state within a given context.
The optional \<open>before_qed\<close> method is applied at the end of the proof, just
@@ -115,9 +113,8 @@
The \<open>after_qed\<close> continuation receives the extracted result in order to apply
it to the final context in a suitable way (e.g.\ storing named facts). Note
- that at this generic level the target context is specified as @{ML_type
- Proof.context}, but the usual wrapping of toplevel proofs into command
- transactions will provide a @{ML_type local_theory} here
+ that at this generic level the target context is specified as \<^ML_type>\<open>Proof.context\<close>, but the usual wrapping of toplevel proofs into command
+ transactions will provide a \<^ML_type>\<open>local_theory\<close> here
(\chref{ch:local-theory}). This affects the way how results are stored.
The \<open>statement\<close> is given as a nested list of terms, each associated with
@@ -148,7 +145,7 @@
have A and B and C
ML_val
\<open>val n = Thm.nprems_of (#goal @{Isar.goal});
- @{assert} (n = 3);\<close>
+ \<^assert> (n = 3);\<close>
sorry
end
@@ -285,30 +282,28 @@
string -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.method} represents proof methods as abstract type.
+ \<^descr> Type \<^ML_type>\<open>Proof.method\<close> represents proof methods as abstract type.
- \<^descr> @{ML CONTEXT_METHOD}~\<open>(fn facts => context_tactic)\<close> wraps \<open>context_tactic\<close>
+ \<^descr> \<^ML>\<open>CONTEXT_METHOD\<close>~\<open>(fn facts => context_tactic)\<close> wraps \<open>context_tactic\<close>
depending on goal facts as a general proof method that may change the proof
- context dynamically. A typical operation is @{ML
- Proof_Context.update_cases}, which is wrapped up as combinator @{index_ML
+ context dynamically. A typical operation is \<^ML>\<open>Proof_Context.update_cases\<close>, which is wrapped up as combinator @{index_ML
CONTEXT_CASES} for convenience.
- \<^descr> @{ML METHOD}~\<open>(fn facts => tactic)\<close> wraps \<open>tactic\<close> depending on goal facts
+ \<^descr> \<^ML>\<open>METHOD\<close>~\<open>(fn facts => tactic)\<close> wraps \<open>tactic\<close> depending on goal facts
as regular proof method; the goal context is passed via method syntax.
- \<^descr> @{ML SIMPLE_METHOD}~\<open>tactic\<close> wraps a tactic that addresses all subgoals
+ \<^descr> \<^ML>\<open>SIMPLE_METHOD\<close>~\<open>tactic\<close> wraps a tactic that addresses all subgoals
uniformly as simple proof method. Goal facts are already inserted into all
subgoals before \<open>tactic\<close> is applied.
- \<^descr> @{ML SIMPLE_METHOD'}~\<open>tactic\<close> wraps a tactic that addresses a specific
+ \<^descr> \<^ML>\<open>SIMPLE_METHOD'\<close>~\<open>tactic\<close> wraps a tactic that addresses a specific
subgoal as simple proof method that operates on subgoal 1. Goal facts are
inserted into the subgoal then the \<open>tactic\<close> is applied.
- \<^descr> @{ML Method.insert_tac}~\<open>ctxt facts i\<close> inserts \<open>facts\<close> into subgoal \<open>i\<close>.
- This is convenient to reproduce part of the @{ML SIMPLE_METHOD} or @{ML
- SIMPLE_METHOD'} wrapping within regular @{ML METHOD}, for example.
+ \<^descr> \<^ML>\<open>Method.insert_tac\<close>~\<open>ctxt facts i\<close> inserts \<open>facts\<close> into subgoal \<open>i\<close>.
+ This is convenient to reproduce part of the \<^ML>\<open>SIMPLE_METHOD\<close> or \<^ML>\<open>SIMPLE_METHOD'\<close> wrapping within regular \<^ML>\<open>METHOD\<close>, for example.
- \<^descr> @{ML Method.setup}~\<open>name parser description\<close> provides the functionality of
+ \<^descr> \<^ML>\<open>Method.setup\<close>~\<open>name parser description\<close> provides the functionality of
the Isar command @{command method_setup} as ML function.
\<close>
@@ -319,8 +314,8 @@
\<^medskip>
The following toy examples illustrate how the goal facts and state are
passed to proof methods. The predefined proof method called ``@{method
- tactic}'' wraps ML source of type @{ML_type tactic} (abstracted over
- @{ML_text facts}). This allows immediate experimentation without parsing of
+ tactic}'' wraps ML source of type \<^ML_type>\<open>tactic\<close> (abstracted over
+ \<^ML_text>\<open>facts\<close>). This allows immediate experimentation without parsing of
concrete syntax.
\<close>
@@ -330,16 +325,16 @@
assume a: A and b: B
have "A \<and> B"
- apply (tactic \<open>resolve_tac @{context} @{thms conjI} 1\<close>)
- using a apply (tactic \<open>resolve_tac @{context} facts 1\<close>)
- using b apply (tactic \<open>resolve_tac @{context} facts 1\<close>)
+ apply (tactic \<open>resolve_tac \<^context> @{thms conjI} 1\<close>)
+ using a apply (tactic \<open>resolve_tac \<^context> facts 1\<close>)
+ using b apply (tactic \<open>resolve_tac \<^context> facts 1\<close>)
done
have "A \<and> B"
using a and b
ML_val \<open>@{Isar.goal}\<close>
- apply (tactic \<open>Method.insert_tac @{context} facts 1\<close>)
- apply (tactic \<open>(resolve_tac @{context} @{thms conjI} THEN_ALL_NEW assume_tac @{context}) 1\<close>)
+ apply (tactic \<open>Method.insert_tac \<^context> facts 1\<close>)
+ apply (tactic \<open>(resolve_tac \<^context> @{thms conjI} THEN_ALL_NEW assume_tac \<^context>) 1\<close>)
done
end
@@ -361,14 +356,14 @@
passes-through the proof context at the end of parsing, but it is not used
in this example.
- The @{ML Attrib.thms} parser produces a list of theorems from the usual Isar
+ The \<^ML>\<open>Attrib.thms\<close> parser produces a list of theorems from the usual Isar
syntax involving attribute expressions etc.\ (syntax category @{syntax
- thms}) @{cite "isabelle-isar-ref"}. The resulting @{ML_text thms} are
- added to @{ML HOL_basic_ss} which already contains the basic Simplifier
+ thms}) @{cite "isabelle-isar-ref"}. The resulting \<^ML_text>\<open>thms\<close> are
+ added to \<^ML>\<open>HOL_basic_ss\<close> which already contains the basic Simplifier
setup for HOL.
- The tactic @{ML asm_full_simp_tac} is the one that is also used in method
- @{method simp} by default. The extra wrapping by the @{ML CHANGED} tactical
+ The tactic \<^ML>\<open>asm_full_simp_tac\<close> is the one that is also used in method
+ @{method simp} by default. The extra wrapping by the \<^ML>\<open>CHANGED\<close> tactical
ensures progress of simplification: identical goal states are filtered out
explicitly to make the raw tactic conform to standard Isar method behaviour.
@@ -422,7 +417,7 @@
method_setup my_simp' =
\<open>Attrib.thms >> (fn thms => fn ctxt =>
let
- val my_simps = Named_Theorems.get ctxt @{named_theorems my_simp}
+ val my_simps = Named_Theorems.get ctxt \<^named_theorems>\<open>my_simp\<close>
in
SIMPLE_METHOD' (fn i =>
CHANGED (asm_full_simp_tac
@@ -447,8 +442,7 @@
text \<open>
\<^medskip>
The @{method my_simp} variants defined above are ``simple'' methods, i.e.\
- the goal facts are merely inserted as goal premises by the @{ML
- SIMPLE_METHOD'} or @{ML SIMPLE_METHOD} wrapper. For proof methods that are
+ the goal facts are merely inserted as goal premises by the \<^ML>\<open>SIMPLE_METHOD'\<close> or \<^ML>\<open>SIMPLE_METHOD\<close> wrapper. For proof methods that are
similar to the standard collection of @{method simp}, @{method blast},
@{method fast}, @{method auto} there is little more that can be done.
@@ -461,7 +455,7 @@
\<^medskip>
The technical treatment of rules from the context requires further
- attention. Above we rebuild a fresh @{ML_type simpset} from the arguments
+ attention. Above we rebuild a fresh \<^ML_type>\<open>simpset\<close> from the arguments
and \<^emph>\<open>all\<close> rules retrieved from the context on every invocation of the
method. This does not scale to really large collections of rules, which
easily emerges in the context of a big theory library, for example.
@@ -471,7 +465,7 @@
retrieval. More realistic applications require efficient index-structures
that organize theorems in a customized manner, such as a discrimination net
that is indexed by the left-hand sides of rewrite rules. For variations on
- the Simplifier, re-use of the existing type @{ML_type simpset} is adequate,
+ the Simplifier, re-use of the existing type \<^ML_type>\<open>simpset\<close> is adequate,
but scalability would require it be maintained statically within the context
data, not dynamically on each tool invocation.
\<close>
@@ -510,23 +504,23 @@
string -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type attribute} represents attributes as concrete type alias.
+ \<^descr> Type \<^ML_type>\<open>attribute\<close> represents attributes as concrete type alias.
- \<^descr> @{ML Thm.rule_attribute}~\<open>thms (fn context => rule)\<close> wraps a
- context-dependent rule (mapping on @{ML_type thm}) as attribute.
+ \<^descr> \<^ML>\<open>Thm.rule_attribute\<close>~\<open>thms (fn context => rule)\<close> wraps a
+ context-dependent rule (mapping on \<^ML_type>\<open>thm\<close>) as attribute.
The \<open>thms\<close> are additional parameters: when forming an abstract closure, the
system may provide dummy facts that are propagated according to strict
evaluation discipline. In that case, \<open>rule\<close> is bypassed.
- \<^descr> @{ML Thm.declaration_attribute}~\<open>(fn thm => decl)\<close> wraps a
- theorem-dependent declaration (mapping on @{ML_type Context.generic}) as
+ \<^descr> \<^ML>\<open>Thm.declaration_attribute\<close>~\<open>(fn thm => decl)\<close> wraps a
+ theorem-dependent declaration (mapping on \<^ML_type>\<open>Context.generic\<close>) as
attribute.
When forming an abstract closure, the system may provide a dummy fact as
\<open>thm\<close>. In that case, \<open>decl\<close> is bypassed.
- \<^descr> @{ML Attrib.setup}~\<open>name parser description\<close> provides the functionality of
+ \<^descr> \<^ML>\<open>Attrib.setup\<close>~\<open>name parser description\<close> provides the functionality of
the Isar command @{command attribute_setup} as ML function.
\<close>
@@ -535,13 +529,12 @@
@{ML_antiquotation_def attributes} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation attributes} attributes
- \<close>}
+ \<close>
\<^descr> \<open>@{attributes [\<dots>]}\<close> embeds attribute source representation into the ML
- text, which is particularly useful with declarations like @{ML
- Local_Theory.note}. Attribute names are internalized at compile time, but
+ text, which is particularly useful with declarations like \<^ML>\<open>Local_Theory.note\<close>. Attribute names are internalized at compile time, but
the source is unevaluated. This means attributes with formal arguments
(types, terms, theorems) may be subject to odd effects of dynamic scoping!
\<close>
--- a/src/Doc/Implementation/Local_Theory.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Local_Theory.thy Sat Jan 05 17:24:33 2019 +0100
@@ -98,21 +98,20 @@
local_theory -> (string * thm list) * local_theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type local_theory} represents local theories. Although this is
- merely an alias for @{ML_type Proof.context}, it is semantically a subtype
- of the same: a @{ML_type local_theory} holds target information as special
- context data. Subtyping means that any value \<open>lthy:\<close>~@{ML_type local_theory}
- can be also used with operations on expecting a regular \<open>ctxt:\<close>~@{ML_type
- Proof.context}.
+ \<^descr> Type \<^ML_type>\<open>local_theory\<close> represents local theories. Although this is
+ merely an alias for \<^ML_type>\<open>Proof.context\<close>, it is semantically a subtype
+ of the same: a \<^ML_type>\<open>local_theory\<close> holds target information as special
+ context data. Subtyping means that any value \<open>lthy:\<close>~\<^ML_type>\<open>local_theory\<close>
+ can be also used with operations on expecting a regular \<open>ctxt:\<close>~\<^ML_type>\<open>Proof.context\<close>.
- \<^descr> @{ML Named_Target.init}~\<open>before_exit name thy\<close> initializes a local theory
+ \<^descr> \<^ML>\<open>Named_Target.init\<close>~\<open>before_exit name thy\<close> initializes a local theory
derived from the given background theory. An empty name refers to a \<^emph>\<open>global
theory\<close> context, and a non-empty name refers to a @{command locale} or
@{command class} context (a fully-qualified internal name is expected here).
This is useful for experimentation --- normally the Isar toplevel already
takes care to initialize the local theory context.
- \<^descr> @{ML Local_Theory.define}~\<open>((b, mx), (a, rhs)) lthy\<close> defines a local
+ \<^descr> \<^ML>\<open>Local_Theory.define\<close>~\<open>((b, mx), (a, rhs)) lthy\<close> defines a local
entity according to the specification that is given relatively to the
current \<open>lthy\<close> context. In particular the term of the RHS may refer to
earlier local entities from the auxiliary context, or hypothetical
@@ -130,9 +129,8 @@
plain declarations such as @{attribute simp}, while non-trivial rules like
@{attribute simplified} are better avoided.
- \<^descr> @{ML Local_Theory.note}~\<open>(a, ths) lthy\<close> is analogous to @{ML
- Local_Theory.define}, but defines facts instead of terms. There is also a
- slightly more general variant @{ML Local_Theory.notes} that defines several
+ \<^descr> \<^ML>\<open>Local_Theory.note\<close>~\<open>(a, ths) lthy\<close> is analogous to \<^ML>\<open>Local_Theory.define\<close>, but defines facts instead of terms. There is also a
+ slightly more general variant \<^ML>\<open>Local_Theory.notes\<close> that defines several
facts (with attribute expressions) simultaneously.
This is essentially the internal version of the @{command lemmas} command,
--- a/src/Doc/Implementation/Logic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Logic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -120,43 +120,43 @@
@{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type class} represents type classes.
+ \<^descr> Type \<^ML_type>\<open>class\<close> represents type classes.
- \<^descr> Type @{ML_type sort} represents sorts, i.e.\ finite intersections of
- classes. The empty list @{ML "[]: sort"} refers to the empty class
+ \<^descr> Type \<^ML_type>\<open>sort\<close> represents sorts, i.e.\ finite intersections of
+ classes. The empty list \<^ML>\<open>[]: sort\<close> refers to the empty class
intersection, i.e.\ the ``full sort''.
- \<^descr> Type @{ML_type arity} represents type arities. A triple \<open>(\<kappa>, \<^vec>s, s)
+ \<^descr> Type \<^ML_type>\<open>arity\<close> represents type arities. A triple \<open>(\<kappa>, \<^vec>s, s)
: arity\<close> represents \<open>\<kappa> :: (\<^vec>s)s\<close> as described above.
- \<^descr> Type @{ML_type typ} represents types; this is a datatype with constructors
- @{ML TFree}, @{ML TVar}, @{ML Type}.
+ \<^descr> Type \<^ML_type>\<open>typ\<close> represents types; this is a datatype with constructors
+ \<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>, \<^ML>\<open>Type\<close>.
- \<^descr> @{ML Term.map_atyps}~\<open>f \<tau>\<close> applies the mapping \<open>f\<close> to all atomic types
- (@{ML TFree}, @{ML TVar}) occurring in \<open>\<tau>\<close>.
+ \<^descr> \<^ML>\<open>Term.map_atyps\<close>~\<open>f \<tau>\<close> applies the mapping \<open>f\<close> to all atomic types
+ (\<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>) occurring in \<open>\<tau>\<close>.
- \<^descr> @{ML Term.fold_atyps}~\<open>f \<tau>\<close> iterates the operation \<open>f\<close> over all
- occurrences of atomic types (@{ML TFree}, @{ML TVar}) in \<open>\<tau>\<close>; the type
+ \<^descr> \<^ML>\<open>Term.fold_atyps\<close>~\<open>f \<tau>\<close> iterates the operation \<open>f\<close> over all
+ occurrences of atomic types (\<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>) in \<open>\<tau>\<close>; the type
structure is traversed from left to right.
- \<^descr> @{ML Sign.subsort}~\<open>thy (s\<^sub>1, s\<^sub>2)\<close> tests the subsort relation \<open>s\<^sub>1 \<subseteq>
+ \<^descr> \<^ML>\<open>Sign.subsort\<close>~\<open>thy (s\<^sub>1, s\<^sub>2)\<close> tests the subsort relation \<open>s\<^sub>1 \<subseteq>
s\<^sub>2\<close>.
- \<^descr> @{ML Sign.of_sort}~\<open>thy (\<tau>, s)\<close> tests whether type \<open>\<tau>\<close> is of sort \<open>s\<close>.
+ \<^descr> \<^ML>\<open>Sign.of_sort\<close>~\<open>thy (\<tau>, s)\<close> tests whether type \<open>\<tau>\<close> is of sort \<open>s\<close>.
- \<^descr> @{ML Sign.add_type}~\<open>ctxt (\<kappa>, k, mx)\<close> declares a new type constructors \<open>\<kappa>\<close>
+ \<^descr> \<^ML>\<open>Sign.add_type\<close>~\<open>ctxt (\<kappa>, k, mx)\<close> declares a new type constructor \<open>\<kappa>\<close>
with \<open>k\<close> arguments and optional mixfix syntax.
- \<^descr> @{ML Sign.add_type_abbrev}~\<open>ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)\<close> defines a new type
+ \<^descr> \<^ML>\<open>Sign.add_type_abbrev\<close>~\<open>ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)\<close> defines a new type
abbreviation \<open>(\<^vec>\<alpha>)\<kappa> = \<tau>\<close>.
- \<^descr> @{ML Sign.primitive_class}~\<open>(c, [c\<^sub>1, \<dots>, c\<^sub>n])\<close> declares a new class \<open>c\<close>,
+ \<^descr> \<^ML>\<open>Sign.primitive_class\<close>~\<open>(c, [c\<^sub>1, \<dots>, c\<^sub>n])\<close> declares a new class \<open>c\<close>,
together with class relations \<open>c \<subseteq> c\<^sub>i\<close>, for \<open>i = 1, \<dots>, n\<close>.
- \<^descr> @{ML Sign.primitive_classrel}~\<open>(c\<^sub>1, c\<^sub>2)\<close> declares the class relation
+ \<^descr> \<^ML>\<open>Sign.primitive_classrel\<close>~\<open>(c\<^sub>1, c\<^sub>2)\<close> declares the class relation
\<open>c\<^sub>1 \<subseteq> c\<^sub>2\<close>.
- \<^descr> @{ML Sign.primitive_arity}~\<open>(\<kappa>, \<^vec>s, s)\<close> declares the arity \<open>\<kappa> ::
+ \<^descr> \<^ML>\<open>Sign.primitive_arity\<close>~\<open>(\<kappa>, \<^vec>s, s)\<close> declares the arity \<open>\<kappa> ::
(\<^vec>s)s\<close>.
\<close>
@@ -170,7 +170,7 @@
@{ML_antiquotation_def "typ"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation class} embedded
;
@@{ML_antiquotation sort} sort
@@ -180,25 +180,25 @@
@@{ML_antiquotation nonterminal}) embedded
;
@@{ML_antiquotation typ} type
- \<close>}
+ \<close>
- \<^descr> \<open>@{class c}\<close> inlines the internalized class \<open>c\<close> --- as @{ML_type string}
+ \<^descr> \<open>@{class c}\<close> inlines the internalized class \<open>c\<close> --- as \<^ML_type>\<open>string\<close>
literal.
- \<^descr> \<open>@{sort s}\<close> inlines the internalized sort \<open>s\<close> --- as @{ML_type "string
- list"} literal.
+ \<^descr> \<open>@{sort s}\<close> inlines the internalized sort \<open>s\<close> --- as \<^ML_type>\<open>string
+ list\<close> literal.
\<^descr> \<open>@{type_name c}\<close> inlines the internalized type constructor \<open>c\<close> --- as
- @{ML_type string} literal.
+ \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{type_abbrev c}\<close> inlines the internalized type abbreviation \<open>c\<close> --- as
- @{ML_type string} literal.
+ \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{nonterminal c}\<close> inlines the internalized syntactic type~/ grammar
- nonterminal \<open>c\<close> --- as @{ML_type string} literal.
+ nonterminal \<open>c\<close> --- as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{typ \<tau>}\<close> inlines the internalized type \<open>\<tau>\<close> --- as constructor term for
- datatype @{ML_type typ}.
+ datatype \<^ML_type>\<open>typ\<close>.
\<close>
@@ -333,50 +333,49 @@
@{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
\end{mldecls}
- \<^descr> Type @{ML_type term} represents de-Bruijn terms, with comments in
+ \<^descr> Type \<^ML_type>\<open>term\<close> represents de-Bruijn terms, with comments in
abstractions, and explicitly named free variables and constants; this is a
datatype with constructors @{index_ML Bound}, @{index_ML Free}, @{index_ML
Var}, @{index_ML Const}, @{index_ML Abs}, @{index_ML_op "$"}.
- \<^descr> \<open>t\<close>~@{ML_text aconv}~\<open>u\<close> checks \<open>\<alpha>\<close>-equivalence of two terms. This is the
- basic equality relation on type @{ML_type term}; raw datatype equality
+ \<^descr> \<open>t\<close>~\<^ML_text>\<open>aconv\<close>~\<open>u\<close> checks \<open>\<alpha>\<close>-equivalence of two terms. This is the
+ basic equality relation on type \<^ML_type>\<open>term\<close>; raw datatype equality
should only be used for operations related to parsing or printing!
- \<^descr> @{ML Term.map_types}~\<open>f t\<close> applies the mapping \<open>f\<close> to all types occurring
+ \<^descr> \<^ML>\<open>Term.map_types\<close>~\<open>f t\<close> applies the mapping \<open>f\<close> to all types occurring
in \<open>t\<close>.
- \<^descr> @{ML Term.fold_types}~\<open>f t\<close> iterates the operation \<open>f\<close> over all
+ \<^descr> \<^ML>\<open>Term.fold_types\<close>~\<open>f t\<close> iterates the operation \<open>f\<close> over all
occurrences of types in \<open>t\<close>; the term structure is traversed from left to
right.
- \<^descr> @{ML Term.map_aterms}~\<open>f t\<close> applies the mapping \<open>f\<close> to all atomic terms
- (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML Const}) occurring in \<open>t\<close>.
+ \<^descr> \<^ML>\<open>Term.map_aterms\<close>~\<open>f t\<close> applies the mapping \<open>f\<close> to all atomic terms
+ (\<^ML>\<open>Bound\<close>, \<^ML>\<open>Free\<close>, \<^ML>\<open>Var\<close>, \<^ML>\<open>Const\<close>) occurring in \<open>t\<close>.
- \<^descr> @{ML Term.fold_aterms}~\<open>f t\<close> iterates the operation \<open>f\<close> over all
- occurrences of atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
- Const}) in \<open>t\<close>; the term structure is traversed from left to right.
+ \<^descr> \<^ML>\<open>Term.fold_aterms\<close>~\<open>f t\<close> iterates the operation \<open>f\<close> over all
+ occurrences of atomic terms (\<^ML>\<open>Bound\<close>, \<^ML>\<open>Free\<close>, \<^ML>\<open>Var\<close>, \<^ML>\<open>Const\<close>) in \<open>t\<close>; the term structure is traversed from left to right.
- \<^descr> @{ML fastype_of}~\<open>t\<close> determines the type of a well-typed term. This
+ \<^descr> \<^ML>\<open>fastype_of\<close>~\<open>t\<close> determines the type of a well-typed term. This
operation is relatively slow, despite the omission of any sanity checks.
- \<^descr> @{ML lambda}~\<open>a b\<close> produces an abstraction \<open>\<lambda>a. b\<close>, where occurrences of
+ \<^descr> \<^ML>\<open>lambda\<close>~\<open>a b\<close> produces an abstraction \<open>\<lambda>a. b\<close>, where occurrences of
the atomic term \<open>a\<close> in the body \<open>b\<close> are replaced by bound variables.
- \<^descr> @{ML betapply}~\<open>(t, u)\<close> produces an application \<open>t u\<close>, with topmost
+ \<^descr> \<^ML>\<open>betapply\<close>~\<open>(t, u)\<close> produces an application \<open>t u\<close>, with topmost
\<open>\<beta>\<close>-conversion if \<open>t\<close> is an abstraction.
- \<^descr> @{ML incr_boundvars}~\<open>j\<close> increments a term's dangling bound variables by
+ \<^descr> \<^ML>\<open>incr_boundvars\<close>~\<open>j\<close> increments a term's dangling bound variables by
the offset \<open>j\<close>. This is required when moving a subterm into a context where
it is enclosed by a different number of abstractions. Bound variables with a
matching abstraction are unaffected.
- \<^descr> @{ML Sign.declare_const}~\<open>ctxt ((c, \<sigma>), mx)\<close> declares a new constant \<open>c ::
+ \<^descr> \<^ML>\<open>Sign.declare_const\<close>~\<open>ctxt ((c, \<sigma>), mx)\<close> declares a new constant \<open>c ::
\<sigma>\<close> with optional mixfix syntax.
- \<^descr> @{ML Sign.add_abbrev}~\<open>print_mode (c, t)\<close> introduces a new term
+ \<^descr> \<^ML>\<open>Sign.add_abbrev\<close>~\<open>print_mode (c, t)\<close> introduces a new term
abbreviation \<open>c \<equiv> t\<close>.
- \<^descr> @{ML Sign.const_typargs}~\<open>thy (c, \<tau>)\<close> and @{ML Sign.const_instance}~\<open>thy
+ \<^descr> \<^ML>\<open>Sign.const_typargs\<close>~\<open>thy (c, \<tau>)\<close> and \<^ML>\<open>Sign.const_instance\<close>~\<open>thy
(c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])\<close> convert between two representations of polymorphic
constants: full type instance vs.\ compact type arguments form.
\<close>
@@ -390,7 +389,7 @@
@{ML_antiquotation_def "prop"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{ML_antiquotation const_name} |
@@{ML_antiquotation const_abbrev}) embedded
;
@@ -399,23 +398,22 @@
@@{ML_antiquotation term} term
;
@@{ML_antiquotation prop} prop
- \<close>}
+ \<close>
\<^descr> \<open>@{const_name c}\<close> inlines the internalized logical constant name \<open>c\<close> ---
- as @{ML_type string} literal.
+ as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{const_abbrev c}\<close> inlines the internalized abbreviated constant name \<open>c\<close>
- --- as @{ML_type string} literal.
+ --- as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{const c(\<^vec>\<tau>)}\<close> inlines the internalized constant \<open>c\<close> with precise
- type instantiation in the sense of @{ML Sign.const_instance} --- as @{ML
- Const} constructor term for datatype @{ML_type term}.
+ type instantiation in the sense of \<^ML>\<open>Sign.const_instance\<close> --- as \<^ML>\<open>Const\<close> constructor term for datatype \<^ML_type>\<open>term\<close>.
\<^descr> \<open>@{term t}\<close> inlines the internalized term \<open>t\<close> --- as constructor term for
- datatype @{ML_type term}.
+ datatype \<^ML_type>\<open>term\<close>.
\<^descr> \<open>@{prop \<phi>}\<close> inlines the internalized proposition \<open>\<phi>\<close> --- as constructor
- term for datatype @{ML_type term}.
+ term for datatype \<^ML_type>\<open>term\<close>.
\<close>
@@ -601,84 +599,82 @@
Defs.entry -> Defs.entry list -> theory -> theory"} \\
\end{mldecls}
- \<^descr> @{ML Thm.peek_status}~\<open>thm\<close> informs about the current status of the
+ \<^descr> \<^ML>\<open>Thm.peek_status\<close>~\<open>thm\<close> informs about the current status of the
derivation object behind the given theorem. This is a snapshot of a
potentially ongoing (parallel) evaluation of proofs. The three Boolean
values indicate the following: \<^verbatim>\<open>oracle\<close> if the finished part contains some
oracle invocation; \<^verbatim>\<open>unfinished\<close> if some future proofs are still pending;
\<^verbatim>\<open>failed\<close> if some future proof has failed, rendering the theorem invalid!
- \<^descr> @{ML Logic.all}~\<open>a B\<close> produces a Pure quantification \<open>\<And>a. B\<close>, where
+ \<^descr> \<^ML>\<open>Logic.all\<close>~\<open>a B\<close> produces a Pure quantification \<open>\<And>a. B\<close>, where
occurrences of the atomic term \<open>a\<close> in the body proposition \<open>B\<close> are replaced
- by bound variables. (See also @{ML lambda} on terms.)
+ by bound variables. (See also \<^ML>\<open>lambda\<close> on terms.)
- \<^descr> @{ML Logic.mk_implies}~\<open>(A, B)\<close> produces a Pure implication \<open>A \<Longrightarrow> B\<close>.
+ \<^descr> \<^ML>\<open>Logic.mk_implies\<close>~\<open>(A, B)\<close> produces a Pure implication \<open>A \<Longrightarrow> B\<close>.
- \<^descr> Types @{ML_type ctyp} and @{ML_type cterm} represent certified types and
+ \<^descr> Types \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close> represent certified types and
terms, respectively. These are abstract datatypes that guarantee that its
values have passed the full well-formedness (and well-typedness) checks,
relative to the declarations of type constructors, constants etc.\ in the
- background theory. The abstract types @{ML_type ctyp} and @{ML_type cterm}
+ background theory. The abstract types \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close>
are part of the same inference kernel that is mainly responsible for
- @{ML_type thm}. Thus syntactic operations on @{ML_type ctyp} and @{ML_type
- cterm} are located in the @{ML_structure Thm} module, even though theorems
+ \<^ML_type>\<open>thm\<close>. Thus syntactic operations on \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close> are located in the \<^ML_structure>\<open>Thm\<close> module, even though theorems
are not yet involved at that stage.
- \<^descr> @{ML Thm.ctyp_of}~\<open>ctxt \<tau>\<close> and @{ML Thm.cterm_of}~\<open>ctxt t\<close> explicitly
+ \<^descr> \<^ML>\<open>Thm.ctyp_of\<close>~\<open>ctxt \<tau>\<close> and \<^ML>\<open>Thm.cterm_of\<close>~\<open>ctxt t\<close> explicitly
check types and terms, respectively. This also involves some basic
normalizations, such expansion of type and term abbreviations from the
underlying theory context. Full re-certification is relatively slow and
should be avoided in tight reasoning loops.
- \<^descr> @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML Drule.mk_implies}
+ \<^descr> \<^ML>\<open>Thm.apply\<close>, \<^ML>\<open>Thm.lambda\<close>, \<^ML>\<open>Thm.all\<close>, \<^ML>\<open>Drule.mk_implies\<close>
etc.\ compose certified terms (or propositions) incrementally. This is
- equivalent to @{ML Thm.cterm_of} after unchecked @{ML_op "$"}, @{ML lambda},
- @{ML Logic.all}, @{ML Logic.mk_implies} etc., but there can be a big
+ equivalent to \<^ML>\<open>Thm.cterm_of\<close> after unchecked \<^ML_op>\<open>$\<close>, \<^ML>\<open>lambda\<close>,
+ \<^ML>\<open>Logic.all\<close>, \<^ML>\<open>Logic.mk_implies\<close> etc., but there can be a big
difference in performance when large existing entities are composed by a few
extra constructions on top. There are separate operations to decompose
certified terms and theorems to produce certified terms again.
- \<^descr> Type @{ML_type thm} represents proven propositions. This is an abstract
+ \<^descr> Type \<^ML_type>\<open>thm\<close> represents proven propositions. This is an abstract
datatype that guarantees that its values have been constructed by basic
- principles of the @{ML_structure Thm} module. Every @{ML_type thm} value
+ principles of the \<^ML_structure>\<open>Thm\<close> module. Every \<^ML_type>\<open>thm\<close> value
refers its background theory, cf.\ \secref{sec:context-theory}.
- \<^descr> @{ML Thm.transfer}~\<open>thy thm\<close> transfers the given theorem to a \<^emph>\<open>larger\<close>
+ \<^descr> \<^ML>\<open>Thm.transfer\<close>~\<open>thy thm\<close> transfers the given theorem to a \<^emph>\<open>larger\<close>
theory, see also \secref{sec:context}. This formal adjustment of the
background context has no logical significance, but is occasionally required
for formal reasons, e.g.\ when theorems that are imported from more basic
theories are used in the current situation.
- \<^descr> @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML Thm.forall_elim}, @{ML
- Thm.implies_intr}, and @{ML Thm.implies_elim} correspond to the primitive
+ \<^descr> \<^ML>\<open>Thm.assume\<close>, \<^ML>\<open>Thm.forall_intr\<close>, \<^ML>\<open>Thm.forall_elim\<close>, \<^ML>\<open>Thm.implies_intr\<close>, and \<^ML>\<open>Thm.implies_elim\<close> correspond to the primitive
inferences of \figref{fig:prim-rules}.
- \<^descr> @{ML Thm.generalize}~\<open>(\<^vec>\<alpha>, \<^vec>x)\<close> corresponds to the
+ \<^descr> \<^ML>\<open>Thm.generalize\<close>~\<open>(\<^vec>\<alpha>, \<^vec>x)\<close> corresponds to the
\<open>generalize\<close> rules of \figref{fig:subst-rules}. Here collections of type and
term variables are generalized simultaneously, specified by the given basic
names.
- \<^descr> @{ML Thm.instantiate}~\<open>(\<^vec>\<alpha>\<^sub>s, \<^vec>x\<^sub>\<tau>)\<close> corresponds to the
+ \<^descr> \<^ML>\<open>Thm.instantiate\<close>~\<open>(\<^vec>\<alpha>\<^sub>s, \<^vec>x\<^sub>\<tau>)\<close> corresponds to the
\<open>instantiate\<close> rules of \figref{fig:subst-rules}. Type variables are
substituted before term variables. Note that the types in \<open>\<^vec>x\<^sub>\<tau>\<close> refer
to the instantiated versions.
- \<^descr> @{ML Thm.add_axiom}~\<open>ctxt (name, A)\<close> declares an arbitrary proposition as
+ \<^descr> \<^ML>\<open>Thm.add_axiom\<close>~\<open>ctxt (name, A)\<close> declares an arbitrary proposition as
axiom, and retrieves it as a theorem from the resulting theory, cf.\ \<open>axiom\<close>
in \figref{fig:prim-rules}. Note that the low-level representation in the
axiom table may differ slightly from the returned theorem.
- \<^descr> @{ML Thm.add_oracle}~\<open>(binding, oracle)\<close> produces a named oracle rule,
+ \<^descr> \<^ML>\<open>Thm.add_oracle\<close>~\<open>(binding, oracle)\<close> produces a named oracle rule,
essentially generating arbitrary axioms on the fly, cf.\ \<open>axiom\<close> in
\figref{fig:prim-rules}.
- \<^descr> @{ML Thm.add_def}~\<open>ctxt unchecked overloaded (name, c \<^vec>x \<equiv> t)\<close>
+ \<^descr> \<^ML>\<open>Thm.add_def\<close>~\<open>ctxt unchecked overloaded (name, c \<^vec>x \<equiv> t)\<close>
states a definitional axiom for an existing constant \<open>c\<close>. Dependencies are
- recorded via @{ML Theory.add_deps}, unless the \<open>unchecked\<close> option is set.
+ recorded via \<^ML>\<open>Theory.add_deps\<close>, unless the \<open>unchecked\<close> option is set.
Note that the low-level representation in the axiom table may differ
slightly from the returned theorem.
- \<^descr> @{ML Theory.add_deps}~\<open>ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>\<close> declares dependencies of
+ \<^descr> \<^ML>\<open>Theory.add_deps\<close>~\<open>ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>\<close> declares dependencies of
a named specification for constant \<open>c\<^sub>\<tau>\<close>, relative to existing
specifications for constants \<open>\<^vec>d\<^sub>\<sigma>\<close>. This also works for type
constructors.
@@ -694,7 +690,7 @@
@{ML_antiquotation_def "lemma"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation ctyp} typ
;
@@{ML_antiquotation cterm} term
@@ -707,23 +703,23 @@
;
@@{ML_antiquotation lemma} ('(' @'open' ')')? ((prop +) + @'and') \<newline>
@'by' method method?
- \<close>}
+ \<close>
\<^descr> \<open>@{ctyp \<tau>}\<close> produces a certified type wrt.\ the current background theory
- --- as abstract value of type @{ML_type ctyp}.
+ --- as abstract value of type \<^ML_type>\<open>ctyp\<close>.
\<^descr> \<open>@{cterm t}\<close> and \<open>@{cprop \<phi>}\<close> produce a certified term wrt.\ the current
- background theory --- as abstract value of type @{ML_type cterm}.
+ background theory --- as abstract value of type \<^ML_type>\<open>cterm\<close>.
\<^descr> \<open>@{thm a}\<close> produces a singleton fact --- as abstract value of type
- @{ML_type thm}.
+ \<^ML_type>\<open>thm\<close>.
\<^descr> \<open>@{thms a}\<close> produces a general fact --- as abstract value of type
- @{ML_type "thm list"}.
+ \<^ML_type>\<open>thm list\<close>.
\<^descr> \<open>@{lemma \<phi> by meth}\<close> produces a fact that is proven on the spot according
to the minimal proof, which imitates a terminal Isar proof. The result is an
- abstract value of type @{ML_type thm} or @{ML_type "thm list"}, depending on
+ abstract value of type \<^ML_type>\<open>thm\<close> or \<^ML_type>\<open>thm list\<close>, depending on
the number of propositions given here.
The internal derivation object lacks a proper theorem name, but it is
@@ -800,17 +796,17 @@
@{index_ML Logic.dest_type: "term -> typ"} \\
\end{mldecls}
- \<^descr> @{ML Conjunction.intr} derives \<open>A &&& B\<close> from \<open>A\<close> and \<open>B\<close>.
+ \<^descr> \<^ML>\<open>Conjunction.intr\<close> derives \<open>A &&& B\<close> from \<open>A\<close> and \<open>B\<close>.
- \<^descr> @{ML Conjunction.elim} derives \<open>A\<close> and \<open>B\<close> from \<open>A &&& B\<close>.
+ \<^descr> \<^ML>\<open>Conjunction.elim\<close> derives \<open>A\<close> and \<open>B\<close> from \<open>A &&& B\<close>.
- \<^descr> @{ML Drule.mk_term} derives \<open>TERM t\<close>.
+ \<^descr> \<^ML>\<open>Drule.mk_term\<close> derives \<open>TERM t\<close>.
- \<^descr> @{ML Drule.dest_term} recovers term \<open>t\<close> from \<open>TERM t\<close>.
+ \<^descr> \<^ML>\<open>Drule.dest_term\<close> recovers term \<open>t\<close> from \<open>TERM t\<close>.
- \<^descr> @{ML Logic.mk_type}~\<open>\<tau>\<close> produces the term \<open>TYPE(\<tau>)\<close>.
+ \<^descr> \<^ML>\<open>Logic.mk_type\<close>~\<open>\<tau>\<close> produces the term \<open>TYPE(\<tau>)\<close>.
- \<^descr> @{ML Logic.dest_type}~\<open>TYPE(\<tau>)\<close> recovers the type \<open>\<tau>\<close>.
+ \<^descr> \<^ML>\<open>Logic.dest_type\<close>~\<open>TYPE(\<tau>)\<close> recovers the type \<open>\<tau>\<close>.
\<close>
@@ -846,17 +842,16 @@
@{index_ML Thm.strip_shyps: "thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML Thm.extra_shyps}~\<open>thm\<close> determines the extraneous sort hypotheses of
+ \<^descr> \<^ML>\<open>Thm.extra_shyps\<close>~\<open>thm\<close> determines the extraneous sort hypotheses of
the given theorem, i.e.\ the sorts that are not present within type
variables of the statement.
- \<^descr> @{ML Thm.strip_shyps}~\<open>thm\<close> removes any extraneous sort hypotheses that
+ \<^descr> \<^ML>\<open>Thm.strip_shyps\<close>~\<open>thm\<close> removes any extraneous sort hypotheses that
can be witnessed from the type signature.
\<close>
text %mlex \<open>
- The following artificial example demonstrates the derivation of @{prop
- False} with a pending sort hypothesis involving a logically empty sort.
+ The following artificial example demonstrates the derivation of \<^prop>\<open>False\<close> with a pending sort hypothesis involving a logically empty sort.
\<close>
class empty =
@@ -865,7 +860,7 @@
theorem (in empty) false: False
using bad by blast
-ML_val \<open>@{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])\<close>
+ML_val \<open>\<^assert> (Thm.extra_shyps @{thm false} = [\<^sort>\<open>empty\<close>])\<close>
text \<open>
Thanks to the inference kernel managing sort hypothesis according to their
@@ -951,7 +946,7 @@
@{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML Simplifier.norm_hhf}~\<open>ctxt thm\<close> normalizes the given theorem
+ \<^descr> \<^ML>\<open>Simplifier.norm_hhf\<close>~\<open>ctxt thm\<close> normalizes the given theorem
according to the canonical form specified above. This is occasionally
helpful to repair some low-level tools that do not handle Hereditary Harrop
Formulae properly.
@@ -1032,7 +1027,7 @@
\<^descr> \<open>rule\<^sub>1 RSN (i, rule\<^sub>2)\<close> resolves the conclusion of \<open>rule\<^sub>1\<close> with the
\<open>i\<close>-th premise of \<open>rule\<^sub>2\<close>, according to the @{inference resolution}
principle explained above. Unless there is precisely one resolvent it raises
- exception @{ML THM}.
+ exception \<^ML>\<open>THM\<close>.
This corresponds to the rule attribute @{attribute THEN} in Isar source
language.
@@ -1044,7 +1039,7 @@
with the \<open>i\<close>-th premise of \<open>rule\<^sub>2\<close>, accumulating multiple results in one
big list. Note that such strict enumerations of higher-order unifications
can be inefficient compared to the lazy variant seen in elementary tactics
- like @{ML resolve_tac}.
+ like \<^ML>\<open>resolve_tac\<close>.
\<^descr> \<open>rules\<^sub>1 RL rules\<^sub>2\<close> abbreviates \<open>rules\<^sub>1 RLN (1, rules\<^sub>2)\<close>.
@@ -1196,32 +1191,32 @@
@{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
\end{mldecls}
- \<^descr> Type @{ML_type proof} represents proof terms; this is a datatype with
+ \<^descr> Type \<^ML_type>\<open>proof\<close> represents proof terms; this is a datatype with
constructors @{index_ML Abst}, @{index_ML AbsP}, @{index_ML_op "%"},
@{index_ML_op "%%"}, @{index_ML PBound}, @{index_ML MinProof}, @{index_ML
Hyp}, @{index_ML PAxm}, @{index_ML Oracle}, @{index_ML Promise}, @{index_ML
PThm} as explained above. %FIXME OfClass (!?)
- \<^descr> Type @{ML_type proof_body} represents the nested proof information of a
+ \<^descr> Type \<^ML_type>\<open>proof_body\<close> represents the nested proof information of a
named theorem, consisting of a digest of oracles and named theorem over some
proof term. The digest only covers the directly visible part of the proof:
in order to get the full information, the implicit graph of nested theorems
- needs to be traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
+ needs to be traversed (e.g.\ using \<^ML>\<open>Proofterm.fold_body_thms\<close>).
- \<^descr> @{ML Thm.proof_of}~\<open>thm\<close> and @{ML Thm.proof_body_of}~\<open>thm\<close> produce the
+ \<^descr> \<^ML>\<open>Thm.proof_of\<close>~\<open>thm\<close> and \<^ML>\<open>Thm.proof_body_of\<close>~\<open>thm\<close> produce the
proof term or proof body (with digest of oracles and theorems) from a given
theorem. Note that this involves a full join of internal futures that
fulfill pending proof promises, and thus disrupts the natural bottom-up
construction of proofs by introducing dynamic ad-hoc dependencies. Parallel
performance may suffer by inspecting proof terms at run-time.
- \<^descr> @{ML Proofterm.proofs} specifies the detail of proof recording within
- @{ML_type thm} values produced by the inference kernel: @{ML 0} records only
- the names of oracles, @{ML 1} records oracle names and propositions, @{ML 2}
+ \<^descr> \<^ML>\<open>Proofterm.proofs\<close> specifies the detail of proof recording within
+ \<^ML_type>\<open>thm\<close> values produced by the inference kernel: \<^ML>\<open>0\<close> records only
+ the names of oracles, \<^ML>\<open>1\<close> records oracle names and propositions, \<^ML>\<open>2\<close>
additionally records full proof terms. Officially named theorems that
contribute to a result are recorded in any case.
- \<^descr> @{ML Reconstruct.reconstruct_proof}~\<open>ctxt prop prf\<close> turns the implicit
+ \<^descr> \<^ML>\<open>Reconstruct.reconstruct_proof\<close>~\<open>ctxt prop prf\<close> turns the implicit
proof term \<open>prf\<close> into a full proof of the given proposition.
Reconstruction may fail if \<open>prf\<close> is not a proof of \<open>prop\<close>, or if it does not
@@ -1229,21 +1224,21 @@
for proofs that are constructed manually, but not for those produced
automatically by the inference kernel.
- \<^descr> @{ML Reconstruct.expand_proof}~\<open>ctxt [thm\<^sub>1, \<dots>, thm\<^sub>n] prf\<close> expands and
+ \<^descr> \<^ML>\<open>Reconstruct.expand_proof\<close>~\<open>ctxt [thm\<^sub>1, \<dots>, thm\<^sub>n] prf\<close> expands and
reconstructs the proofs of all specified theorems, with the given (full)
proof. Theorems that are not unique specified via their name may be
disambiguated by giving their proposition.
- \<^descr> @{ML Proof_Checker.thm_of_proof}~\<open>thy prf\<close> turns the given (full) proof
+ \<^descr> \<^ML>\<open>Proof_Checker.thm_of_proof\<close>~\<open>thy prf\<close> turns the given (full) proof
into a theorem, by replaying it using only primitive rules of the inference
kernel.
- \<^descr> @{ML Proof_Syntax.read_proof}~\<open>thy b\<^sub>1 b\<^sub>2 s\<close> reads in a proof term. The
+ \<^descr> \<^ML>\<open>Proof_Syntax.read_proof\<close>~\<open>thy b\<^sub>1 b\<^sub>2 s\<close> reads in a proof term. The
Boolean flags indicate the use of sort and type information. Usually, typing
information is left implicit and is inferred during proof reconstruction.
%FIXME eliminate flags!?
- \<^descr> @{ML Proof_Syntax.pretty_proof}~\<open>ctxt prf\<close> pretty-prints the given proof
+ \<^descr> \<^ML>\<open>Proof_Syntax.pretty_proof\<close>~\<open>ctxt prf\<close> pretty-prints the given proof
term.
\<close>
--- a/src/Doc/Implementation/ML.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/ML.thy Sat Jan 05 17:24:33 2019 +0100
@@ -115,34 +115,32 @@
\<^medskip>
\begin{tabular}{lll}
variant & example & ML categories \\\hline
- lower-case & @{ML_text foo_bar} & values, types, record fields \\
- capitalized & @{ML_text Foo_Bar} & datatype constructors, structures, functors \\
- upper-case & @{ML_text FOO_BAR} & special values, exception constructors, signatures \\
+ lower-case & \<^ML_text>\<open>foo_bar\<close> & values, types, record fields \\
+ capitalized & \<^ML_text>\<open>Foo_Bar\<close> & datatype constructors, structures, functors \\
+ upper-case & \<^ML_text>\<open>FOO_BAR\<close> & special values, exception constructors, signatures \\
\end{tabular}
\<^medskip>
For historical reasons, many capitalized names omit underscores, e.g.\
- old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}. Genuine
+ old-style \<^ML_text>\<open>FooBar\<close> instead of \<^ML_text>\<open>Foo_Bar\<close>. Genuine
mixed-case names are \<^emph>\<open>not\<close> used, because clear division of words is
essential for readability.\<^footnote>\<open>Camel-case was invented to workaround the lack
of underscore in some early non-ASCII character sets. Later it became
habitual in some language communities that are now strong in numbers.\<close>
A single (capital) character does not count as ``word'' in this respect:
- some Isabelle/ML names are suffixed by extra markers like this: @{ML_text
- foo_barT}.
-
- Name variants are produced by adding 1--3 primes, e.g.\ @{ML_text foo'},
- @{ML_text foo''}, or @{ML_text foo'''}, but not @{ML_text foo''''} or more.
- Decimal digits scale better to larger numbers, e.g.\ @{ML_text foo0},
- @{ML_text foo1}, @{ML_text foo42}.
+ some Isabelle/ML names are suffixed by extra markers like this: \<^ML_text>\<open>foo_barT\<close>.
+
+ Name variants are produced by adding 1--3 primes, e.g.\ \<^ML_text>\<open>foo'\<close>,
+ \<^ML_text>\<open>foo''\<close>, or \<^ML_text>\<open>foo'''\<close>, but not \<^ML_text>\<open>foo''''\<close> or more.
+ Decimal digits scale better to larger numbers, e.g.\ \<^ML_text>\<open>foo0\<close>,
+ \<^ML_text>\<open>foo1\<close>, \<^ML_text>\<open>foo42\<close>.
\<close>
paragraph \<open>Scopes.\<close>
text \<open>
Apart from very basic library modules, ML structures are not ``opened'', but
- names are referenced with explicit qualification, as in @{ML
- Syntax.string_of_term} for example. When devising names for structures and
+ names are referenced with explicit qualification, as in \<^ML>\<open>Syntax.string_of_term\<close> for example. When devising names for structures and
their components it is important to aim at eye-catching compositions of both
parts, because this is how they are seen in the sources and documentation.
For the same reasons, aliases of well-known library functions should be
@@ -150,8 +148,8 @@
Local names of function abstraction or case/let bindings are typically
shorter, sometimes using only rudiments of ``words'', while still avoiding
- cryptic shorthands. An auxiliary function called @{ML_text helper},
- @{ML_text aux}, or @{ML_text f} is considered bad style.
+ cryptic shorthands. An auxiliary function called \<^ML_text>\<open>helper\<close>,
+ \<^ML_text>\<open>aux\<close>, or \<^ML_text>\<open>f\<close> is considered bad style.
Example:
@@ -187,15 +185,13 @@
text \<open>
Here are some specific name forms that occur frequently in the sources.
- \<^item> A function that maps @{ML_text foo} to @{ML_text bar} is called @{ML_text
- foo_to_bar} or @{ML_text bar_of_foo} (never @{ML_text foo2bar}, nor
- @{ML_text bar_from_foo}, nor @{ML_text bar_for_foo}, nor @{ML_text
- bar4foo}).
-
- \<^item> The name component @{ML_text legacy} means that the operation is about to
+ \<^item> A function that maps \<^ML_text>\<open>foo\<close> to \<^ML_text>\<open>bar\<close> is called \<^ML_text>\<open>foo_to_bar\<close> or \<^ML_text>\<open>bar_of_foo\<close> (never \<^ML_text>\<open>foo2bar\<close>, nor
+ \<^ML_text>\<open>bar_from_foo\<close>, nor \<^ML_text>\<open>bar_for_foo\<close>, nor \<^ML_text>\<open>bar4foo\<close>).
+
+ \<^item> The name component \<^ML_text>\<open>legacy\<close> means that the operation is about to
be discontinued soon.
- \<^item> The name component @{ML_text global} means that this works with the
+ \<^item> The name component \<^ML_text>\<open>global\<close> means that this works with the
background theory instead of the regular local context
(\secref{sec:context}), sometimes for historical reasons, sometimes due a
genuine lack of locality of the concept involved, sometimes as a fall-back
@@ -207,58 +203,57 @@
(\secref{sec:context} and \chref{ch:local-theory}) have firm naming
conventions as follows:
- \<^item> theories are called @{ML_text thy}, rarely @{ML_text theory}
- (never @{ML_text thry})
-
- \<^item> proof contexts are called @{ML_text ctxt}, rarely @{ML_text
- context} (never @{ML_text ctx})
-
- \<^item> generic contexts are called @{ML_text context}
-
- \<^item> local theories are called @{ML_text lthy}, except for local
+ \<^item> theories are called \<^ML_text>\<open>thy\<close>, rarely \<^ML_text>\<open>theory\<close>
+ (never \<^ML_text>\<open>thry\<close>)
+
+ \<^item> proof contexts are called \<^ML_text>\<open>ctxt\<close>, rarely \<^ML_text>\<open>context\<close> (never \<^ML_text>\<open>ctx\<close>)
+
+ \<^item> generic contexts are called \<^ML_text>\<open>context\<close>
+
+ \<^item> local theories are called \<^ML_text>\<open>lthy\<close>, except for local
theories that are treated as proof context (which is a semantic
super-type)
Variations with primed or decimal numbers are always possible, as well as
- semantic prefixes like @{ML_text foo_thy} or @{ML_text bar_ctxt}, but the
+ semantic prefixes like \<^ML_text>\<open>foo_thy\<close> or \<^ML_text>\<open>bar_ctxt\<close>, but the
base conventions above need to be preserved. This allows to emphasize their
data flow via plain regular expressions in the text editor.
\<^item> The main logical entities (\secref{ch:logic}) have established naming
convention as follows:
- \<^item> sorts are called @{ML_text S}
-
- \<^item> types are called @{ML_text T}, @{ML_text U}, or @{ML_text ty} (never
- @{ML_text t})
-
- \<^item> terms are called @{ML_text t}, @{ML_text u}, or @{ML_text tm} (never
- @{ML_text trm})
-
- \<^item> certified types are called @{ML_text cT}, rarely @{ML_text T}, with
+ \<^item> sorts are called \<^ML_text>\<open>S\<close>
+
+ \<^item> types are called \<^ML_text>\<open>T\<close>, \<^ML_text>\<open>U\<close>, or \<^ML_text>\<open>ty\<close> (never
+ \<^ML_text>\<open>t\<close>)
+
+ \<^item> terms are called \<^ML_text>\<open>t\<close>, \<^ML_text>\<open>u\<close>, or \<^ML_text>\<open>tm\<close> (never
+ \<^ML_text>\<open>trm\<close>)
+
+ \<^item> certified types are called \<^ML_text>\<open>cT\<close>, rarely \<^ML_text>\<open>T\<close>, with
variants as for types
- \<^item> certified terms are called @{ML_text ct}, rarely @{ML_text t}, with
- variants as for terms (never @{ML_text ctrm})
-
- \<^item> theorems are called @{ML_text th}, or @{ML_text thm}
+ \<^item> certified terms are called \<^ML_text>\<open>ct\<close>, rarely \<^ML_text>\<open>t\<close>, with
+ variants as for terms (never \<^ML_text>\<open>ctrm\<close>)
+
+ \<^item> theorems are called \<^ML_text>\<open>th\<close>, or \<^ML_text>\<open>thm\<close>
Proper semantic names override these conventions completely. For example,
- the left-hand side of an equation (as a term) can be called @{ML_text lhs}
- (not @{ML_text lhs_tm}). Or a term that is known to be a variable can be
- called @{ML_text v} or @{ML_text x}.
+ the left-hand side of an equation (as a term) can be called \<^ML_text>\<open>lhs\<close>
+ (not \<^ML_text>\<open>lhs_tm\<close>). Or a term that is known to be a variable can be
+ called \<^ML_text>\<open>v\<close> or \<^ML_text>\<open>x\<close>.
\<^item> Tactics (\secref{sec:tactics}) are sufficiently important to have specific
naming conventions. The name of a basic tactic definition always has a
- @{ML_text "_tac"} suffix, the subgoal index (if applicable) is always called
- @{ML_text i}, and the goal state (if made explicit) is usually called
- @{ML_text st} instead of the somewhat misleading @{ML_text thm}. Any other
+ \<^ML_text>\<open>_tac\<close> suffix, the subgoal index (if applicable) is always called
+ \<^ML_text>\<open>i\<close>, and the goal state (if made explicit) is usually called
+ \<^ML_text>\<open>st\<close> instead of the somewhat misleading \<^ML_text>\<open>thm\<close>. Any other
arguments are given before the latter two, and the general context is given
first. Example:
@{verbatim [display] \<open> fun my_tac ctxt arg1 arg2 i st = ...\<close>}
- Note that the goal state @{ML_text st} above is rarely made explicit, if
+ Note that the goal state \<^ML_text>\<open>st\<close> above is rarely made explicit, if
tactic combinators (tacticals) are used as usual.
A tactic that requires a proof context needs to make that explicit as seen
@@ -314,16 +309,16 @@
c);
\<close>}
- Some special infixes (e.g.\ @{ML_text "|>"}) work better at the start of the
+ Some special infixes (e.g.\ \<^ML_text>\<open>|>\<close>) work better at the start of the
line, but punctuation is always at the end.
Function application follows the tradition of \<open>\<lambda>\<close>-calculus, not informal
- mathematics. For example: @{ML_text "f a b"} for a curried function, or
- @{ML_text "g (a, b)"} for a tupled function. Note that the space between
- @{ML_text g} and the pair @{ML_text "(a, b)"} follows the important
- principle of \<^emph>\<open>compositionality\<close>: the layout of @{ML_text "g p"} does not
- change when @{ML_text "p"} is refined to the concrete pair @{ML_text "(a,
- b)"}.
+ mathematics. For example: \<^ML_text>\<open>f a b\<close> for a curried function, or
+ \<^ML_text>\<open>g (a, b)\<close> for a tupled function. Note that the space between
+ \<^ML_text>\<open>g\<close> and the pair \<^ML_text>\<open>(a, b)\<close> follows the important
+ principle of \<^emph>\<open>compositionality\<close>: the layout of \<^ML_text>\<open>g p\<close> does not
+ change when \<^ML_text>\<open>p\<close> is refined to the concrete pair \<^ML_text>\<open>(a,
+ b)\<close>.
\<close>
paragraph \<open>Indentation\<close>
@@ -372,13 +367,13 @@
paragraph \<open>Complex expressions\<close>
text \<open>
- that consist of multi-clausal function definitions, @{ML_text handle},
- @{ML_text case}, @{ML_text let} (and combinations) require special
+ that consist of multi-clausal function definitions, \<^ML_text>\<open>handle\<close>,
+ \<^ML_text>\<open>case\<close>, \<^ML_text>\<open>let\<close> (and combinations) require special
attention. The syntax of Standard ML is quite ambitious and admits a lot of
variance that can distort the meaning of the text.
- Multiple clauses of @{ML_text fun}, @{ML_text fn}, @{ML_text handle},
- @{ML_text case} get extra indentation to indicate the nesting clearly.
+ Multiple clauses of \<^ML_text>\<open>fun\<close>, \<^ML_text>\<open>fn\<close>, \<^ML_text>\<open>handle\<close>,
+ \<^ML_text>\<open>case\<close> get extra indentation to indicate the nesting clearly.
Example:
@{verbatim [display]
@@ -397,7 +392,7 @@
| foo p2 =
expr2\<close>}
- Body expressions consisting of @{ML_text case} or @{ML_text let} require
+ Body expressions consisting of \<^ML_text>\<open>case\<close> or \<^ML_text>\<open>let\<close> require
care to maintain compositionality, to prevent loss of logical indentation
where it is especially important to see the structure of the text. Example:
@@ -428,14 +423,14 @@
...
end\<close>}
- Extra parentheses around @{ML_text case} expressions are optional, but help
+ Extra parentheses around \<^ML_text>\<open>case\<close> expressions are optional, but help
to analyse the nesting based on character matching in the text editor.
\<^medskip>
There are two main exceptions to the overall principle of compositionality
in the layout of complex expressions.
- \<^enum> @{ML_text "if"} expressions are iterated as if ML had multi-branch
+ \<^enum> \<^ML_text>\<open>if\<close> expressions are iterated as if ML had multi-branch
conditionals, e.g.
@{verbatim [display]
@@ -445,7 +440,7 @@
else if b2 then e2
else e3\<close>}
- \<^enum> @{ML_text fn} abstractions are often layed-out as if they would lack any
+ \<^enum> \<^ML_text>\<open>fn\<close> abstractions are often laid out as if they would lack any
structure by themselves. This traditional form is motivated by the
possibility to shift function arguments back and forth wrt.\ additional
combinators. Example:
@@ -456,12 +451,12 @@
fun foo x y = fold (fn z =>
expr)\<close>}
- Here the visual appearance is that of three arguments @{ML_text x},
- @{ML_text y}, @{ML_text z} in a row.
+ Here the visual appearance is that of three arguments \<^ML_text>\<open>x\<close>,
+ \<^ML_text>\<open>y\<close>, \<^ML_text>\<open>z\<close> in a row.
Such weakly structured layout should be use with great care. Here are some
- counter-examples involving @{ML_text let} expressions:
+ counter-examples involving \<^ML_text>\<open>let\<close> expressions:
@{verbatim [display]
\<open> (* WRONG *)
@@ -537,7 +532,7 @@
more commands that refer to ML source, such as @{command_ref setup} or
@{command_ref declaration}. Even more fine-grained embedding of ML into Isar
is encountered in the proof method @{method_ref tactic}, which refines the
- pending goal state via a given expression of type @{ML_type tactic}.
+ pending goal state via a given expression of type \<^ML_type>\<open>tactic\<close>.
\<close>
text %mlex \<open>
@@ -552,8 +547,7 @@
\<close>
text \<open>
- Here the ML environment is already managed by Isabelle, i.e.\ the @{ML
- factorial} function is not yet accessible in the preceding paragraph, nor in
+ Here the ML environment is already managed by Isabelle, i.e.\ the \<^ML>\<open>factorial\<close> function is not yet accessible in the preceding paragraph, nor in
a different theory that is independent from the current one in the import
hierarchy.
@@ -589,8 +583,7 @@
Two further ML commands are useful in certain situations: @{command_ref
ML_val} and @{command_ref ML_command} are \<^emph>\<open>diagnostic\<close> in the sense that
there is no effect on the underlying environment, and can thus be used
- anywhere. The examples below produce long strings of digits by invoking @{ML
- factorial}: @{command ML_val} takes care of printing the ML toplevel result,
+ anywhere. The examples below produce long strings of digits by invoking \<^ML>\<open>factorial\<close>: @{command ML_val} takes care of printing the ML toplevel result,
but @{command ML_command} is silent so we produce an explicit output
message.
\<close>
@@ -624,19 +617,19 @@
@{index_ML ML_Thms.bind_thm: "string * thm -> unit"} \\
\end{mldecls}
- \<^descr> @{ML "Context.the_generic_context ()"} refers to the theory context of
+ \<^descr> \<^ML>\<open>Context.the_generic_context ()\<close> refers to the theory context of
the ML toplevel --- at compile time. ML code needs to take care to refer to
- @{ML "Context.the_generic_context ()"} correctly. Recall that evaluation
+ \<^ML>\<open>Context.the_generic_context ()\<close> correctly. Recall that evaluation
of a function body is delayed until actual run-time.
- \<^descr> @{ML "Context.>>"}~\<open>f\<close> applies context transformation \<open>f\<close> to the implicit
+ \<^descr> \<^ML>\<open>Context.>>\<close>~\<open>f\<close> applies context transformation \<open>f\<close> to the implicit
context of the ML toplevel.
- \<^descr> @{ML ML_Thms.bind_thms}~\<open>(name, thms)\<close> stores a list of theorems produced
+ \<^descr> \<^ML>\<open>ML_Thms.bind_thms\<close>~\<open>(name, thms)\<close> stores a list of theorems produced
in ML both in the (global) theory context and the ML toplevel, associating
it with the provided name.
- \<^descr> @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but refers to
+ \<^descr> \<^ML>\<open>ML_Thms.bind_thm\<close> is similar to \<^ML>\<open>ML_Thms.bind_thms\<close> but refers to
a singleton fact.
It is important to note that the above functions are really restricted to
@@ -654,9 +647,9 @@
\<^emph>\<open>ML antiquotation\<close>. The standard token language of ML is augmented by
special syntactic entities of the following form:
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def antiquote}: '@{' name args '}'
- \<close>}
+ \<close>
Here @{syntax name} and @{syntax args} are outer syntax categories, as
defined in @{cite "isabelle-isar-ref"}.
@@ -692,11 +685,11 @@
@{ML_antiquotation_def "print"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation make_string}
;
@@{ML_antiquotation print} embedded?
- \<close>}
+ \<close>
\<^descr> \<open>@{make_string}\<close> inlines a function to print arbitrary values similar to
the ML toplevel. The result is compiler dependent and may fall back on "?"
@@ -705,7 +698,7 @@
\<^descr> \<open>@{print f}\<close> uses the ML function \<open>f: string -> unit\<close> to output the result
of \<open>@{make_string}\<close> above, together with the source position of the
- antiquotation. The default output function is @{ML writeln}.
+ antiquotation. The default output function is \<^ML>\<open>writeln\<close>.
\<close>
text %mlex \<open>
@@ -717,10 +710,10 @@
val x = 42;
val y = true;
- writeln (@{make_string} {x = x, y = y});
-
- @{print} {x = x, y = y};
- @{print tracing} {x = x, y = y};
+ writeln (\<^make_string> {x = x, y = y});
+
+ \<^print> {x = x, y = y};
+ \<^print>\<open>tracing\<close> {x = x, y = y};
\<close>
@@ -865,23 +858,23 @@
@{index_ML fold_map: "('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b"} \\
\end{mldecls}
- \<^descr> @{ML fold}~\<open>f\<close> lifts the parametrized update function \<open>f\<close> to a list of
+ \<^descr> \<^ML>\<open>fold\<close>~\<open>f\<close> lifts the parametrized update function \<open>f\<close> to a list of
parameters.
- \<^descr> @{ML fold_rev}~\<open>f\<close> is similar to @{ML fold}~\<open>f\<close>, but works inside-out, as
+ \<^descr> \<^ML>\<open>fold_rev\<close>~\<open>f\<close> is similar to \<^ML>\<open>fold\<close>~\<open>f\<close>, but works inside-out, as
if the list would be reversed.
- \<^descr> @{ML fold_map}~\<open>f\<close> lifts the parametrized update function \<open>f\<close> (with
+ \<^descr> \<^ML>\<open>fold_map\<close>~\<open>f\<close> lifts the parametrized update function \<open>f\<close> (with
side-result) to a list of parameters and cumulative side-results.
\begin{warn}
The literature on functional programming provides a confusing multitude of
combinators called \<open>foldl\<close>, \<open>foldr\<close> etc. SML97 provides its own variations
- as @{ML List.foldl} and @{ML List.foldr}, while the classic Isabelle library
- also has the historic @{ML Library.foldl} and @{ML Library.foldr}. To avoid
+ as \<^ML>\<open>List.foldl\<close> and \<^ML>\<open>List.foldr\<close>, while the classic Isabelle library
+ also has the historic \<^ML>\<open>Library.foldl\<close> and \<^ML>\<open>Library.foldr\<close>. To avoid
unnecessary complication, all these historical versions should be ignored,
- and the canonical @{ML fold} (or @{ML fold_rev}) used exclusively.
+ and the canonical \<^ML>\<open>fold\<close> (or \<^ML>\<open>fold_rev\<close>) used exclusively.
\end{warn}
\<close>
@@ -897,12 +890,11 @@
|> fold (Buffer.add o string_of_int) (0 upto 9)
|> Buffer.content;
- @{assert} (s = "digits: 0123456789");
+ \<^assert> (s = "digits: 0123456789");
\<close>
text \<open>
- Note how @{ML "fold (Buffer.add o string_of_int)"} above saves an extra @{ML
- "map"} over the given list. This kind of peephole optimization reduces both
+ Note how \<^ML>\<open>fold (Buffer.add o string_of_int)\<close> above saves an extra \<^ML>\<open>map\<close> over the given list. This kind of peephole optimization reduces both
the code size and the tree structures in memory (``deforestation''), but it
requires some practice to read and write fluently.
@@ -931,28 +923,26 @@
\<close>
text \<open>
- The slowness of @{ML slow_content} is due to the @{ML implode} of the
+ The slowness of \<^ML>\<open>slow_content\<close> is due to the \<^ML>\<open>implode\<close> of the
recursive results, because it copies previously produced strings again and
again.
- The incremental @{ML add_content} avoids this by operating on a buffer that
- is passed through in a linear fashion. Using @{ML_text "#>"} and contraction
+ The incremental \<^ML>\<open>add_content\<close> avoids this by operating on a buffer that
+ is passed through in a linear fashion. Using \<^ML_text>\<open>#>\<close> and contraction
over the actual buffer argument saves some additional boiler-plate. Of
- course, the two @{ML "Buffer.add"} invocations with concatenated strings
+ course, the two \<^ML>\<open>Buffer.add\<close> invocations with concatenated strings
could have been split into smaller parts, but this would have obfuscated the
source without making a big difference in performance. Here we have done
some peephole-optimization for the sake of readability.
- Another benefit of @{ML add_content} is its ``open'' form as a function on
+ Another benefit of \<^ML>\<open>add_content\<close> is its ``open'' form as a function on
buffers that can be continued in further linear transformations, folding
- etc. Thus it is more compositional than the naive @{ML slow_content}. As
+ etc. Thus it is more compositional than the naive \<^ML>\<open>slow_content\<close>. As
realistic example, compare the old-style
- @{ML "Term.maxidx_of_term: term -> int"} with the newer @{ML
- "Term.maxidx_term: term -> int -> int"} in Isabelle/Pure.
-
- Note that @{ML fast_content} above is only defined as example. In many
- practical situations, it is customary to provide the incremental @{ML
- add_content} only and leave the initialization and termination to the
+ \<^ML>\<open>Term.maxidx_of_term: term -> int\<close> with the newer \<^ML>\<open>Term.maxidx_term: term -> int -> int\<close> in Isabelle/Pure.
+
+ Note that \<^ML>\<open>fast_content\<close> above is only defined as example. In many
+ practical situations, it is customary to provide the incremental \<^ML>\<open>add_content\<close> only and leave the initialization and termination to the
concrete application to the user.
\<close>
@@ -985,10 +975,10 @@
@{index_ML error: "string -> 'a"} % FIXME Output.error_message (!?) \\
\end{mldecls}
- \<^descr> @{ML writeln}~\<open>text\<close> outputs \<open>text\<close> as regular message. This is the
+ \<^descr> \<^ML>\<open>writeln\<close>~\<open>text\<close> outputs \<open>text\<close> as regular message. This is the
primary message output operation of Isabelle and should be used by default.
- \<^descr> @{ML tracing}~\<open>text\<close> outputs \<open>text\<close> as special tracing message, indicating
+ \<^descr> \<^ML>\<open>tracing\<close>~\<open>text\<close> outputs \<open>text\<close> as special tracing message, indicating
potential high-volume output to the front-end (hundreds or thousands of
messages issued by a single command). The idea is to allow the
user-interface to downgrade the quality of message display to achieve higher
@@ -998,27 +988,26 @@
e.g.\ switch to a different output window. So this channel should not be
used for regular output.
- \<^descr> @{ML warning}~\<open>text\<close> outputs \<open>text\<close> as warning, which typically means some
+ \<^descr> \<^ML>\<open>warning\<close>~\<open>text\<close> outputs \<open>text\<close> as warning, which typically means some
extra emphasis on the front-end side (color highlighting, icons, etc.).
- \<^descr> @{ML error}~\<open>text\<close> raises exception @{ML ERROR}~\<open>text\<close> and thus lets the
+ \<^descr> \<^ML>\<open>error\<close>~\<open>text\<close> raises exception \<^ML>\<open>ERROR\<close>~\<open>text\<close> and thus lets the
Isar toplevel print \<open>text\<close> on the error channel, which typically means some
extra emphasis on the front-end side (color highlighting, icons, etc.).
This assumes that the exception is not handled before the command
- terminates. Handling exception @{ML ERROR}~\<open>text\<close> is a perfectly legal
+ terminates. Handling exception \<^ML>\<open>ERROR\<close>~\<open>text\<close> is a perfectly legal
alternative: it means that the error is absorbed without any message output.
\begin{warn}
- The actual error channel is accessed via @{ML Output.error_message}, but
+ The actual error channel is accessed via \<^ML>\<open>Output.error_message\<close>, but
this is normally not used directly in user code.
\end{warn}
\begin{warn}
Regular Isabelle/ML code should output messages exclusively by the official
- channels. Using raw I/O on \<^emph>\<open>stdout\<close> or \<^emph>\<open>stderr\<close> instead (e.g.\ via @{ML
- TextIO.output}) is apt to cause problems in the presence of parallel and
+ channels. Using raw I/O on \<^emph>\<open>stdout\<close> or \<^emph>\<open>stderr\<close> instead (e.g.\ via \<^ML>\<open>TextIO.output\<close>) is apt to cause problems in the presence of parallel and
asynchronous processing of Isabelle theories. Such raw output might be
displayed by the front-end in some system console log, with a low chance
that the user will ever see it. Moreover, as a genuine side-effect on global
@@ -1029,7 +1018,7 @@
\begin{warn}
The message channels should be used in a message-oriented manner. This means
that multi-line output that logically belongs together is issued by a single
- invocation of @{ML writeln} etc.\ with the functional concatenation of all
+ invocation of \<^ML>\<open>writeln\<close> etc.\ with the functional concatenation of all
message constituents.
\end{warn}
\<close>
@@ -1081,11 +1070,11 @@
text \<open>
These are meant to provide informative feedback about malformed input etc.
- The \<^emph>\<open>error\<close> function raises the corresponding @{ML ERROR} exception, with a
- plain text message as argument. @{ML ERROR} exceptions can be handled
+ The \<^emph>\<open>error\<close> function raises the corresponding \<^ML>\<open>ERROR\<close> exception, with a
+ plain text message as argument. \<^ML>\<open>ERROR\<close> exceptions can be handled
internally, in order to be ignored, turned into other exceptions, or
cascaded by appending messages. If the corresponding Isabelle/Isar command
- terminates with an @{ML ERROR} exception state, the system will print the
+ terminates with an \<^ML>\<open>ERROR\<close> exception state, the system will print the
result on the error channel (see \secref{sec:message-channels}).
It is considered bad style to refer to internal function names or values in
@@ -1109,7 +1098,7 @@
purpose is to determine quickly what has happened where. Traditionally, the
(short) exception message would include the name of an ML function, although
this is no longer necessary, because the ML runtime system attaches detailed
- source position stemming from the corresponding @{ML_text raise} keyword.
+ source position stemming from the corresponding \<^ML_text>\<open>raise\<close> keyword.
\<^medskip>
User modules can always introduce their own custom exceptions locally, e.g.\
@@ -1123,7 +1112,7 @@
text \<open>
These indicate arbitrary system events: both the ML runtime system and the
Isabelle/ML infrastructure signal various exceptional situations by raising
- the special @{ML Exn.Interrupt} exception in user code.
+ the special \<^ML>\<open>Exn.Interrupt\<close> exception in user code.
This is the one and only way that physical events can intrude an Isabelle/ML
program. Such an interrupt can mean out-of-memory, stack overflow, timeout,
@@ -1160,32 +1149,32 @@
@{index_ML Runtime.exn_trace: "(unit -> 'a) -> 'a"} \\
\end{mldecls}
- \<^descr> @{ML try}~\<open>f x\<close> makes the partiality of evaluating \<open>f x\<close> explicit via the
+ \<^descr> \<^ML>\<open>try\<close>~\<open>f x\<close> makes the partiality of evaluating \<open>f x\<close> explicit via the
option datatype. Interrupts are \<^emph>\<open>not\<close> handled here, i.e.\ this form serves
- as safe replacement for the \<^emph>\<open>unsafe\<close> version @{ML_text "(SOME"}~\<open>f
- x\<close>~@{ML_text "handle _ => NONE)"} that is occasionally seen in books about
+ as safe replacement for the \<^emph>\<open>unsafe\<close> version \<^ML_text>\<open>(SOME\<close>~\<open>f
+ x\<close>~\<^ML_text>\<open>handle _ => NONE)\<close> that is occasionally seen in books about
SML97, but not in Isabelle/ML.
- \<^descr> @{ML can} is similar to @{ML try} with more abstract result.
-
- \<^descr> @{ML ERROR}~\<open>msg\<close> represents user errors; this exception is normally
- raised indirectly via the @{ML error} function (see
+ \<^descr> \<^ML>\<open>can\<close> is similar to \<^ML>\<open>try\<close> with more abstract result.
+
+ \<^descr> \<^ML>\<open>ERROR\<close>~\<open>msg\<close> represents user errors; this exception is normally
+ raised indirectly via the \<^ML>\<open>error\<close> function (see
\secref{sec:message-channels}).
- \<^descr> @{ML Fail}~\<open>msg\<close> represents general program failures.
-
- \<^descr> @{ML Exn.is_interrupt} identifies interrupts robustly, without mentioning
+ \<^descr> \<^ML>\<open>Fail\<close>~\<open>msg\<close> represents general program failures.
+
+ \<^descr> \<^ML>\<open>Exn.is_interrupt\<close> identifies interrupts robustly, without mentioning
concrete exception constructors in user code. Handled interrupts need to be
re-raised promptly!
- \<^descr> @{ML Exn.reraise}~\<open>exn\<close> raises exception \<open>exn\<close> while preserving its implicit
+ \<^descr> \<^ML>\<open>Exn.reraise\<close>~\<open>exn\<close> raises exception \<open>exn\<close> while preserving its implicit
position information (if possible, depending on the ML platform).
- \<^descr> @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~\<open>e\<close>@{ML_text ")"} evaluates
+ \<^descr> \<^ML>\<open>Runtime.exn_trace\<close>~\<^ML_text>\<open>(fn () =>\<close>~\<open>e\<close>\<^ML_text>\<open>)\<close> evaluates
expression \<open>e\<close> while printing a full trace of its stack of nested exceptions
(if possible, depending on the ML platform).
- Inserting @{ML Runtime.exn_trace} into ML code temporarily is useful for
+ Inserting \<^ML>\<open>Runtime.exn_trace\<close> into ML code temporarily is useful for
debugging, but not suitable for production code.
\<close>
@@ -1195,16 +1184,15 @@
@{ML_antiquotation_def "undefined"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- \<^descr> \<open>@{assert}\<close> inlines a function @{ML_type "bool -> unit"} that raises @{ML
- Fail} if the argument is @{ML false}. Due to inlining the source position of
+ \<^descr> \<open>@{assert}\<close> inlines a function \<^ML_type>\<open>bool -> unit\<close> that raises \<^ML>\<open>Fail\<close> if the argument is \<^ML>\<open>false\<close>. Due to inlining the source position of
failed assertions is included in the error output.
- \<^descr> \<open>@{undefined}\<close> inlines @{verbatim raise}~@{ML Match}, i.e.\ the ML program
+ \<^descr> \<open>@{undefined}\<close> inlines \<^verbatim>\<open>raise\<close>~\<^ML>\<open>Match\<close>, i.e.\ the ML program
behaves as in some function application of an undefined case.
\<close>
text %mlex \<open>
- The ML function @{ML undefined} is defined in \<^file>\<open>~~/src/Pure/library.ML\<close>
+ The ML function \<^ML>\<open>undefined\<close> is defined in \<^file>\<open>~~/src/Pure/library.ML\<close>
as follows:
\<close>
@@ -1216,7 +1204,7 @@
instead:
\<close>
-ML \<open>fun undefined _ = @{undefined}\<close>
+ML \<open>fun undefined _ = \<^undefined>\<close>
text \<open>
\<^medskip>
@@ -1284,33 +1272,30 @@
@{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "Symbol.symbol"} represents individual Isabelle symbols.
-
- \<^descr> @{ML "Symbol.explode"}~\<open>str\<close> produces a symbol list from the packed form.
- This function supersedes @{ML "String.explode"} for virtually all purposes
+ \<^descr> Type \<^ML_type>\<open>Symbol.symbol\<close> represents individual Isabelle symbols.
+
+ \<^descr> \<^ML>\<open>Symbol.explode\<close>~\<open>str\<close> produces a symbol list from the packed form.
+ This function supersedes \<^ML>\<open>String.explode\<close> for virtually all purposes
of manipulating text in Isabelle!\<^footnote>\<open>The runtime overhead for exploded strings
is mainly that of the list structure: individual symbols that happen to be a
singleton string do not require extra memory in Poly/ML.\<close>
- \<^descr> @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
- "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard symbols
+ \<^descr> \<^ML>\<open>Symbol.is_letter\<close>, \<^ML>\<open>Symbol.is_digit\<close>, \<^ML>\<open>Symbol.is_quasi\<close>, \<^ML>\<open>Symbol.is_blank\<close> classify standard symbols
according to fixed syntactic conventions of Isabelle, cf.\ @{cite
"isabelle-isar-ref"}.
- \<^descr> Type @{ML_type "Symbol.sym"} is a concrete datatype that represents the
- different kinds of symbols explicitly, with constructors @{ML
- "Symbol.Char"}, @{ML "Symbol.UTF8"}, @{ML "Symbol.Sym"}, @{ML
- "Symbol.Control"}, @{ML "Symbol.Malformed"}.
-
- \<^descr> @{ML "Symbol.decode"} converts the string representation of a symbol into
+ \<^descr> Type \<^ML_type>\<open>Symbol.sym\<close> is a concrete datatype that represents the
+ different kinds of symbols explicitly, with constructors \<^ML>\<open>Symbol.Char\<close>, \<^ML>\<open>Symbol.UTF8\<close>, \<^ML>\<open>Symbol.Sym\<close>, \<^ML>\<open>Symbol.Control\<close>, \<^ML>\<open>Symbol.Malformed\<close>.
+
+ \<^descr> \<^ML>\<open>Symbol.decode\<close> converts the string representation of a symbol into
the datatype version.
\<close>
paragraph \<open>Historical note.\<close>
text \<open>
- In the original SML90 standard the primitive ML type @{ML_type char} did not
- exists, and @{ML_text "explode: string -> string list"} produced a list of
- singleton strings like @{ML "raw_explode: string -> string list"} in
+ In the original SML90 standard the primitive ML type \<^ML_type>\<open>char\<close> did not
+ exist, and \<^ML_text>\<open>explode: string -> string list\<close> produced a list of
+ singleton strings like \<^ML>\<open>raw_explode: string -> string list\<close> in
Isabelle/ML today. When SML97 came out, Isabelle did not adopt its somewhat
anachronistic 8-bit or 16-bit characters, but the idea of exploding a string
into a list of small strings was extended to ``symbols'' as explained above.
@@ -1327,8 +1312,7 @@
of its operations simply do not fit with important Isabelle/ML conventions
(like ``canonical argument order'', see
\secref{sec:canonical-argument-order}), others cause problems with the
- parallel evaluation model of Isabelle/ML (such as @{ML TextIO.print} or @{ML
- OS.Process.system}).
+ parallel evaluation model of Isabelle/ML (such as \<^ML>\<open>TextIO.print\<close> or \<^ML>\<open>OS.Process.system\<close>).
Subsequently we give a brief overview of important operations on basic ML
data types.
@@ -1342,7 +1326,7 @@
@{index_ML_type char} \\
\end{mldecls}
- \<^descr> Type @{ML_type char} is \<^emph>\<open>not\<close> used. The smallest textual unit in Isabelle
+ \<^descr> Type \<^ML_type>\<open>char\<close> is \<^emph>\<open>not\<close> used. The smallest textual unit in Isabelle
is represented as a ``symbol'' (see \secref{sec:symbols}).
\<close>
@@ -1354,7 +1338,7 @@
@{index_ML_type string} \\
\end{mldecls}
- \<^descr> Type @{ML_type string} represents immutable vectors of 8-bit characters.
+ \<^descr> Type \<^ML_type>\<open>string\<close> represents immutable vectors of 8-bit characters.
There are operations in SML to convert back and forth to actual byte
vectors, which are seldom used.
@@ -1362,11 +1346,10 @@
Isabelle-specific purposes with the following implicit substructures packed
into the string content:
- \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}), with @{ML
- Symbol.explode} as key operation;
+ \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}), with \<^ML>\<open>Symbol.explode\<close> as key operation;
\<^enum> XML tree structure via YXML (see also @{cite "isabelle-system"}), with
- @{ML YXML.parse_body} as key operation.
+ \<^ML>\<open>YXML.parse_body\<close> as key operation.
Note that Isabelle/ML string literals may refer Isabelle symbols like
``\<^verbatim>\<open>\<alpha>\<close>'' natively, \<^emph>\<open>without\<close> escaping the backslash. This is a consequence
@@ -1382,8 +1365,8 @@
ML_val \<open>
val s = "\<A>";
- @{assert} (length (Symbol.explode s) = 1);
- @{assert} (size s = 4);
+ \<^assert> (length (Symbol.explode s) = 1);
+ \<^assert> (size s = 4);
\<close>
text \<open>
@@ -1403,13 +1386,13 @@
@{index_ML_type int} \\
\end{mldecls}
- \<^descr> Type @{ML_type int} represents regular mathematical integers, which are
+ \<^descr> Type \<^ML_type>\<open>int\<close> represents regular mathematical integers, which are
\<^emph>\<open>unbounded\<close>. Overflow is treated properly, but should never happen in
practice.\<^footnote>\<open>The size limit for integer bit patterns in memory is 64\,MB for
32-bit Poly/ML, and much higher for 64-bit systems.\<close>
- Structure @{ML_structure IntInf} of SML97 is obsolete and superseded by
- @{ML_structure Int}. Structure @{ML_structure Integer} in
+ Structure \<^ML_structure>\<open>IntInf\<close> of SML97 is obsolete and superseded by
+ \<^ML_structure>\<open>Int\<close>. Structure \<^ML_structure>\<open>Integer\<close> in
\<^file>\<open>~~/src/Pure/General/integer.ML\<close> provides some additional operations.
\<close>
@@ -1421,7 +1404,7 @@
@{index_ML_type Rat.rat} \\
\end{mldecls}
- \<^descr> Type @{ML_type Rat.rat} represents rational numbers, based on the
+ \<^descr> Type \<^ML_type>\<open>Rat.rat\<close> represents rational numbers, based on the
unbounded integers of Poly/ML.
Literal rationals may be written with special antiquotation syntax
@@ -1441,11 +1424,11 @@
@{index_ML seconds: "real -> Time.time"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Time.time} represents time abstractly according to the
+ \<^descr> Type \<^ML_type>\<open>Time.time\<close> represents time abstractly according to the
SML97 basis library definition. This is adequate for internal ML operations,
but awkward in concrete time specifications.
- \<^descr> @{ML seconds}~\<open>s\<close> turns the concrete scalar \<open>s\<close> (measured in seconds) into
+ \<^descr> \<^ML>\<open>seconds\<close>~\<open>s\<close> turns the concrete scalar \<open>s\<close> (measured in seconds) into
an abstract time value. Floating point numbers are easy to use as
configuration options in the context (see \secref{sec:config-options}) or
system options that are maintained externally.
@@ -1467,8 +1450,8 @@
\<close>
text \<open>
- Apart from @{ML Option.map} most other operations defined in structure
- @{ML_structure Option} are alien to Isabelle/ML and never used. The
+ Apart from \<^ML>\<open>Option.map\<close> most other operations defined in structure
+ \<^ML_structure>\<open>Option\<close> are alien to Isabelle/ML and never used. The
operations shown above are defined in \<^file>\<open>~~/src/Pure/General/basics.ML\<close>.
\<close>
@@ -1490,29 +1473,29 @@
@{index_ML update: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
\end{mldecls}
- \<^descr> @{ML cons}~\<open>x xs\<close> evaluates to \<open>x :: xs\<close>.
+ \<^descr> \<^ML>\<open>cons\<close>~\<open>x xs\<close> evaluates to \<open>x :: xs\<close>.
Tupled infix operators are a historical accident in Standard ML. The curried
- @{ML cons} amends this, but it should be only used when partial application
+ \<^ML>\<open>cons\<close> amends this, but it should be only used when partial application
is required.
- \<^descr> @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat lists as a
+ \<^descr> \<^ML>\<open>member\<close>, \<^ML>\<open>insert\<close>, \<^ML>\<open>remove\<close>, \<^ML>\<open>update\<close> treat lists as a
set-like container that maintains the order of elements. See
\<^file>\<open>~~/src/Pure/library.ML\<close> for the full specifications (written in ML).
- There are some further derived operations like @{ML union} or @{ML inter}.
-
- Note that @{ML insert} is conservative about elements that are already a
- @{ML member} of the list, while @{ML update} ensures that the latest entry
+ There are some further derived operations like \<^ML>\<open>union\<close> or \<^ML>\<open>inter\<close>.
+
+ Note that \<^ML>\<open>insert\<close> is conservative about elements that are already a
+ \<^ML>\<open>member\<close> of the list, while \<^ML>\<open>update\<close> ensures that the latest entry
is always put in front. The latter discipline is often more appropriate in
declarations of context data (\secref{sec:context-data}) that are issued by
the user in Isar source: later declarations take precedence over earlier
ones. \<close>
text %mlex \<open>
- Using canonical @{ML fold} together with @{ML cons} (or similar standard
+ Using canonical \<^ML>\<open>fold\<close> together with \<^ML>\<open>cons\<close> (or similar standard
operations) alternates the orientation of data. This is quite natural and
- should not be altered forcible by inserting extra applications of @{ML rev}.
- The alternative @{ML fold_rev} can be used in the few situations, where
+ should not be altered forcibly by inserting extra applications of \<^ML>\<open>rev\<close>.
+ The alternative \<^ML>\<open>fold_rev\<close> can be used in the few situations, where
alternation should be prevented.
\<close>
@@ -1520,10 +1503,10 @@
val items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
val list1 = fold cons items [];
- @{assert} (list1 = rev items);
+ \<^assert> (list1 = rev items);
val list2 = fold_rev cons items [];
- @{assert} (list2 = items);
+ \<^assert> (list2 = items);
\<close>
text \<open>
@@ -1537,11 +1520,10 @@
text \<open>
Here the first list is treated conservatively: only the new elements from
- the second list are inserted. The inside-out order of insertion via @{ML
- fold_rev} attempts to preserve the order of elements in the result.
+ the second list are inserted. The inside-out order of insertion via \<^ML>\<open>fold_rev\<close> attempts to preserve the order of elements in the result.
This way of merging lists is typical for context data
- (\secref{sec:context-data}). See also @{ML merge} as defined in
+ (\secref{sec:context-data}). See also \<^ML>\<open>merge\<close> as defined in
\<^file>\<open>~~/src/Pure/library.ML\<close>.
\<close>
@@ -1562,7 +1544,7 @@
@{index_ML AList.update: "('a * 'a -> bool) -> 'a * 'b -> ('a * 'b) list -> ('a * 'b) list"} \\
\end{mldecls}
- \<^descr> @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update} implement the
+ \<^descr> \<^ML>\<open>AList.lookup\<close>, \<^ML>\<open>AList.defined\<close>, \<^ML>\<open>AList.update\<close> implement the
main ``framework operations'' for mappings in Isabelle/ML, following
standard conventions for their names and types.
@@ -1570,7 +1552,7 @@
via an explicit option element. There is no choice to raise an exception,
without changing the name to something like \<open>the_element\<close> or \<open>get\<close>.
- The \<open>defined\<close> operation is essentially a contraction of @{ML is_some} and
+ The \<open>defined\<close> operation is essentially a contraction of \<^ML>\<open>is_some\<close> and
\<^verbatim>\<open>lookup\<close>, but this is sufficiently frequent to justify its independent
existence. This also gives the implementation some opportunity for peep-hole
optimization.
@@ -1600,15 +1582,15 @@
are notorious for causing problems. In a highly parallel system, both
correctness \<^emph>\<open>and\<close> performance are easily degraded when using mutable data.
- The unwieldy name of @{ML Unsynchronized.ref} for the constructor for
+ The unwieldy name of \<^ML>\<open>Unsynchronized.ref\<close> for the constructor for
references in Isabelle/ML emphasizes the inconveniences caused by
- mutability. Existing operations @{ML "!"} and @{ML_op ":="} are unchanged,
+ mutability. Existing operations \<^ML>\<open>!\<close> and \<^ML_op>\<open>:=\<close> are unchanged,
but should be used with special precautions, say in a strictly local
situation that is guaranteed to be restricted to sequential evaluation ---
now and in the future.
\begin{warn}
- Never @{ML_text "open Unsynchronized"}, not even in a local scope!
+ Never \<^ML_text>\<open>open Unsynchronized\<close>, not even in a local scope!
Pretending that mutable state is no problem is a very bad idea.
\end{warn}
\<close>
@@ -1746,10 +1728,10 @@
@{index_ML serial_string: "unit -> string"} \\
\end{mldecls}
- \<^descr> @{ML File.tmp_path}~\<open>path\<close> relocates the base component of \<open>path\<close> into the
+ \<^descr> \<^ML>\<open>File.tmp_path\<close>~\<open>path\<close> relocates the base component of \<open>path\<close> into the
unique temporary directory of the running Isabelle/ML process.
- \<^descr> @{ML serial_string}~\<open>()\<close> creates a new serial number that is unique over
+ \<^descr> \<^ML>\<open>serial_string\<close>~\<open>()\<close> creates a new serial number that is unique over
the runtime of the Isabelle/ML process.
\<close>
@@ -1760,7 +1742,7 @@
ML_val \<open>
val tmp1 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
val tmp2 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
- @{assert} (tmp1 <> tmp2);
+ \<^assert> (tmp1 <> tmp2);
\<close>
@@ -1790,21 +1772,21 @@
('a -> ('b * 'a) option) -> 'b"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a Synchronized.var"} represents synchronized variables
- with state of type @{ML_type 'a}.
-
- \<^descr> @{ML Synchronized.var}~\<open>name x\<close> creates a synchronized variable that is
+ \<^descr> Type \<^ML_type>\<open>'a Synchronized.var\<close> represents synchronized variables
+ with state of type \<^ML_type>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Synchronized.var\<close>~\<open>name x\<close> creates a synchronized variable that is
initialized with value \<open>x\<close>. The \<open>name\<close> is used for tracing.
- \<^descr> @{ML Synchronized.guarded_access}~\<open>var f\<close> lets the function \<open>f\<close> operate
+ \<^descr> \<^ML>\<open>Synchronized.guarded_access\<close>~\<open>var f\<close> lets the function \<open>f\<close> operate
within a critical section on the state \<open>x\<close> as follows: if \<open>f x\<close> produces
- @{ML NONE}, it continues to wait on the internal condition variable,
+ \<^ML>\<open>NONE\<close>, it continues to wait on the internal condition variable,
expecting that some other thread will eventually change the content in a
- suitable manner; if \<open>f x\<close> produces @{ML SOME}~\<open>(y, x')\<close> it is satisfied and
+ suitable manner; if \<open>f x\<close> produces \<^ML>\<open>SOME\<close>~\<open>(y, x')\<close> it is satisfied and
assigns the new state value \<open>x'\<close>, broadcasts a signal to all waiting threads
on the associated condition variable, and returns the result \<open>y\<close>.
- There are some further variants of the @{ML Synchronized.guarded_access}
+ There are some further variants of the \<^ML>\<open>Synchronized.guarded_access\<close>
combinator, see \<^file>\<open>~~/src/Pure/Concurrent/synchronized.ML\<close> for details.
\<close>
@@ -1826,7 +1808,7 @@
val a = next ();
val b = next ();
- @{assert} (a <> b);
+ \<^assert> (a <> b);
\<close>
text \<open>
@@ -1892,29 +1874,28 @@
@{index_ML Par_Exn.release_first: "'a Exn.result list -> 'a list"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a Exn.result"} represents the disjoint sum of ML results
- explicitly, with constructor @{ML Exn.Res} for regular values and @{ML
- "Exn.Exn"} for exceptions.
-
- \<^descr> @{ML Exn.capture}~\<open>f x\<close> manages the evaluation of \<open>f x\<close> such that
- exceptions are made explicit as @{ML "Exn.Exn"}. Note that this includes
+ \<^descr> Type \<^ML_type>\<open>'a Exn.result\<close> represents the disjoint sum of ML results
+ explicitly, with constructor \<^ML>\<open>Exn.Res\<close> for regular values and \<^ML>\<open>Exn.Exn\<close> for exceptions.
+
+ \<^descr> \<^ML>\<open>Exn.capture\<close>~\<open>f x\<close> manages the evaluation of \<open>f x\<close> such that
+ exceptions are made explicit as \<^ML>\<open>Exn.Exn\<close>. Note that this includes
physical interrupts (see also \secref{sec:exceptions}), so the same
precautions apply to user code: interrupts must not be absorbed
accidentally!
- \<^descr> @{ML Exn.interruptible_capture} is similar to @{ML Exn.capture}, but
+ \<^descr> \<^ML>\<open>Exn.interruptible_capture\<close> is similar to \<^ML>\<open>Exn.capture\<close>, but
interrupts are immediately re-raised as required for user code.
- \<^descr> @{ML Exn.release}~\<open>result\<close> releases the original runtime result, exposing
+ \<^descr> \<^ML>\<open>Exn.release\<close>~\<open>result\<close> releases the original runtime result, exposing
its regular value or raising the reified exception.
- \<^descr> @{ML Par_Exn.release_all}~\<open>results\<close> combines results that were produced
+ \<^descr> \<^ML>\<open>Par_Exn.release_all\<close>~\<open>results\<close> combines results that were produced
independently (e.g.\ by parallel evaluation). If all results are regular
values, that list is returned. Otherwise, the collection of all exceptions
is raised, wrapped-up as collective parallel exception. Note that the latter
prevents access to individual exceptions by conventional \<^verbatim>\<open>handle\<close> of ML.
- \<^descr> @{ML Par_Exn.release_first} is similar to @{ML Par_Exn.release_all}, but
+ \<^descr> \<^ML>\<open>Par_Exn.release_first\<close> is similar to \<^ML>\<open>Par_Exn.release_all\<close>, but
only the first (meaningful) exception that has occurred in the original
evaluation process is raised again, the others are ignored. That single
exception may get handled by conventional means in ML.
@@ -1944,23 +1925,23 @@
@{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
\end{mldecls}
- \<^descr> @{ML Par_List.map}~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> is like @{ML "map"}~\<open>f [x\<^sub>1, \<dots>,
+ \<^descr> \<^ML>\<open>Par_List.map\<close>~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> is like \<^ML>\<open>map\<close>~\<open>f [x\<^sub>1, \<dots>,
x\<^sub>n]\<close>, but the evaluation of \<open>f x\<^sub>i\<close> for \<open>i = 1, \<dots>, n\<close> is performed in
parallel.
An exception in any \<open>f x\<^sub>i\<close> cancels the overall evaluation process. The
- final result is produced via @{ML Par_Exn.release_first} as explained above,
+ final result is produced via \<^ML>\<open>Par_Exn.release_first\<close> as explained above,
which means the first program exception that happened to occur in the
parallel evaluation is propagated, and all other failures are ignored.
- \<^descr> @{ML Par_List.get_some}~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> produces some \<open>f x\<^sub>i\<close> that is of
+ \<^descr> \<^ML>\<open>Par_List.get_some\<close>~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> produces some \<open>f x\<^sub>i\<close> that is of
the form \<open>SOME y\<^sub>i\<close>, if that exists, otherwise \<open>NONE\<close>. Thus it is similar to
- @{ML Library.get_first}, but subject to a non-deterministic parallel choice
+ \<^ML>\<open>Library.get_first\<close>, but subject to a non-deterministic parallel choice
process. The first successful result cancels the overall evaluation process;
- other exceptions are propagated as for @{ML Par_List.map}.
+ other exceptions are propagated as for \<^ML>\<open>Par_List.map\<close>.
This generic parallel choice combinator is the basis for derived forms, such
- as @{ML Par_List.find_some}, @{ML Par_List.exists}, @{ML Par_List.forall}.
+ as \<^ML>\<open>Par_List.find_some\<close>, \<^ML>\<open>Par_List.exists\<close>, \<^ML>\<open>Par_List.forall\<close>.
\<close>
text %mlex \<open>
@@ -2010,18 +1991,18 @@
@{index_ML Lazy.force: "'a lazy -> 'a"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a lazy"} represents lazy values over type \<^verbatim>\<open>'a\<close>.
-
- \<^descr> @{ML Lazy.lazy}~\<open>(fn () => e)\<close> wraps the unevaluated expression \<open>e\<close> as
+ \<^descr> Type \<^ML_type>\<open>'a lazy\<close> represents lazy values over type \<^verbatim>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Lazy.lazy\<close>~\<open>(fn () => e)\<close> wraps the unevaluated expression \<open>e\<close> as
unfinished lazy value.
- \<^descr> @{ML Lazy.value}~\<open>a\<close> wraps the value \<open>a\<close> as finished lazy value. When
+ \<^descr> \<^ML>\<open>Lazy.value\<close>~\<open>a\<close> wraps the value \<open>a\<close> as finished lazy value. When
forced, it returns \<open>a\<close> without any further evaluation.
There is very low overhead for this proforma wrapping of strict values as
lazy values.
- \<^descr> @{ML Lazy.force}~\<open>x\<close> produces the result of the lazy value in a
+ \<^descr> \<^ML>\<open>Lazy.force\<close>~\<open>x\<close> produces the result of the lazy value in a
thread-safe manner as explained above. Thus it may cause the current thread
to wait on a pending evaluation attempt by another thread.
\<close>
@@ -2098,33 +2079,32 @@
@{index_ML Future.fulfill: "'a future -> 'a -> unit"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a future"} represents future values over type \<^verbatim>\<open>'a\<close>.
-
- \<^descr> @{ML Future.fork}~\<open>(fn () => e)\<close> registers the unevaluated expression \<open>e\<close>
+ \<^descr> Type \<^ML_type>\<open>'a future\<close> represents future values over type \<^verbatim>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Future.fork\<close>~\<open>(fn () => e)\<close> registers the unevaluated expression \<open>e\<close>
as unfinished future value, to be evaluated eventually on the parallel
- worker-thread farm. This is a shorthand for @{ML Future.forks} below, with
+ worker-thread farm. This is a shorthand for \<^ML>\<open>Future.forks\<close> below, with
default parameters and a single expression.
- \<^descr> @{ML Future.forks}~\<open>params exprs\<close> is the general interface to fork several
+ \<^descr> \<^ML>\<open>Future.forks\<close>~\<open>params exprs\<close> is the general interface to fork several
futures simultaneously. The \<open>params\<close> consist of the following fields:
- \<^item> \<open>name : string\<close> (default @{ML "\"\""}) specifies a common name for the
+ \<^item> \<open>name : string\<close> (default \<^ML>\<open>""\<close>) specifies a common name for the
tasks of the forked futures, which serves diagnostic purposes.
- \<^item> \<open>group : Future.group option\<close> (default @{ML NONE}) specifies an optional
- task group for the forked futures. @{ML NONE} means that a new sub-group
+ \<^item> \<open>group : Future.group option\<close> (default \<^ML>\<open>NONE\<close>) specifies an optional
+ task group for the forked futures. \<^ML>\<open>NONE\<close> means that a new sub-group
of the current worker-thread task context is created. If this is not a
worker thread, the group will be a new root in the group hierarchy.
- \<^item> \<open>deps : Future.task list\<close> (default @{ML "[]"}) specifies dependencies on
+ \<^item> \<open>deps : Future.task list\<close> (default \<^ML>\<open>[]\<close>) specifies dependencies on
other future tasks, i.e.\ the adjacency relation in the global task queue.
Dependencies on already finished tasks are ignored.
- \<^item> \<open>pri : int\<close> (default @{ML 0}) specifies a priority within the task
+ \<^item> \<open>pri : int\<close> (default \<^ML>\<open>0\<close>) specifies a priority within the task
queue.
- Typically there is only little deviation from the default priority @{ML
- 0}. As a rule of thumb, @{ML "~1"} means ``low priority" and @{ML 1} means
+ Typically there is only little deviation from the default priority \<^ML>\<open>0\<close>. As a rule of thumb, \<^ML>\<open>~1\<close> means ``low priority'' and \<^ML>\<open>1\<close> means
``high priority''.
Note that the task priority only affects the position in the queue, not
@@ -2133,7 +2113,7 @@
Higher priority tasks that are queued later need to wait until this (or
another) worker thread becomes free again.
- \<^item> \<open>interrupts : bool\<close> (default @{ML true}) tells whether the worker thread
+ \<^item> \<open>interrupts : bool\<close> (default \<^ML>\<open>true\<close>) tells whether the worker thread
that processes the corresponding task is initially put into interruptible
state. This state may change again while running, by modifying the thread
attributes.
@@ -2142,7 +2122,7 @@
the responsibility of the programmer that this special state is retained
only briefly.
- \<^descr> @{ML Future.join}~\<open>x\<close> retrieves the value of an already finished future,
+ \<^descr> \<^ML>\<open>Future.join\<close>~\<open>x\<close> retrieves the value of an already finished future,
which may lead to an exception, according to the result of its previous
evaluation.
@@ -2164,8 +2144,8 @@
explicitly when forked (see \<open>deps\<close> above). Thus the evaluation can work from
the bottom up, without join conflicts and wait states.
- \<^descr> @{ML Future.joins}~\<open>xs\<close> joins the given list of futures simultaneously,
- which is more efficient than @{ML "map Future.join"}~\<open>xs\<close>.
+ \<^descr> \<^ML>\<open>Future.joins\<close>~\<open>xs\<close> joins the given list of futures simultaneously,
+ which is more efficient than \<^ML>\<open>map Future.join\<close>~\<open>xs\<close>.
Based on the dependency graph of tasks, the current thread takes over the
responsibility to evaluate future expressions that are required for the main
@@ -2173,23 +2153,22 @@
presently evaluated on other threads only happens as last resort, when no
other unfinished futures are left over.
- \<^descr> @{ML Future.value}~\<open>a\<close> wraps the value \<open>a\<close> as finished future value,
+ \<^descr> \<^ML>\<open>Future.value\<close>~\<open>a\<close> wraps the value \<open>a\<close> as finished future value,
bypassing the worker-thread farm. When joined, it returns \<open>a\<close> without any
further evaluation.
There is very low overhead for this proforma wrapping of strict values as
futures.
- \<^descr> @{ML Future.map}~\<open>f x\<close> is a fast-path implementation of @{ML
- Future.fork}~\<open>(fn () => f (\<close>@{ML Future.join}~\<open>x))\<close>, which avoids the full
+ \<^descr> \<^ML>\<open>Future.map\<close>~\<open>f x\<close> is a fast-path implementation of \<^ML>\<open>Future.fork\<close>~\<open>(fn () => f (\<close>\<^ML>\<open>Future.join\<close>~\<open>x))\<close>, which avoids the full
overhead of the task queue and worker-thread farm as far as possible. The
function \<open>f\<close> is supposed to be some trivial post-processing or projection of
the future result.
- \<^descr> @{ML Future.cancel}~\<open>x\<close> cancels the task group of the given future, using
- @{ML Future.cancel_group} below.
-
- \<^descr> @{ML Future.cancel_group}~\<open>group\<close> cancels all tasks of the given task
+ \<^descr> \<^ML>\<open>Future.cancel\<close>~\<open>x\<close> cancels the task group of the given future, using
+ \<^ML>\<open>Future.cancel_group\<close> below.
+
+ \<^descr> \<^ML>\<open>Future.cancel_group\<close>~\<open>group\<close> cancels all tasks of the given task
group for all time. Threads that are presently processing a task of the
given group are interrupted: it may take some time until they are actually
terminated. Tasks that are queued but not yet processed are dequeued and
@@ -2197,10 +2176,10 @@
any further attempt to fork a future that belongs to it will yield a
canceled result as well.
- \<^descr> @{ML Future.promise}~\<open>abort\<close> registers a passive future with the given
+ \<^descr> \<^ML>\<open>Future.promise\<close>~\<open>abort\<close> registers a passive future with the given
\<open>abort\<close> operation: it is invoked when the future task group is canceled.
- \<^descr> @{ML Future.fulfill}~\<open>x a\<close> finishes the passive future \<open>x\<close> by the given
+ \<^descr> \<^ML>\<open>Future.fulfill\<close>~\<open>x a\<close> finishes the passive future \<open>x\<close> by the given
value \<open>a\<close>. If the promise has already been canceled, the attempt to fulfill
it causes an exception.
\<close>
--- a/src/Doc/Implementation/Prelim.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Prelim.thy Sat Jan 05 17:24:33 2019 +0100
@@ -116,22 +116,22 @@
@{index_ML Theory.ancestors_of: "theory -> theory list"} \\
\end{mldecls}
- \<^descr> Type @{ML_type theory} represents theory contexts.
+ \<^descr> Type \<^ML_type>\<open>theory\<close> represents theory contexts.
- \<^descr> @{ML "Context.eq_thy"}~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> check strict identity of two
+ \<^descr> \<^ML>\<open>Context.eq_thy\<close>~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> checks strict identity of two
theories.
- \<^descr> @{ML "Context.subthy"}~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> compares theories according to the
+ \<^descr> \<^ML>\<open>Context.subthy\<close>~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> compares theories according to the
intrinsic graph structure of the construction. This sub-theory relation is a
nominal approximation of inclusion (\<open>\<subseteq>\<close>) of the corresponding content
(according to the semantics of the ML modules that implement the data).
- \<^descr> @{ML "Theory.begin_theory"}~\<open>name parents\<close> constructs a new theory based
+ \<^descr> \<^ML>\<open>Theory.begin_theory\<close>~\<open>name parents\<close> constructs a new theory based
on the given parents. This ML function is normally not invoked directly.
- \<^descr> @{ML "Theory.parents_of"}~\<open>thy\<close> returns the direct ancestors of \<open>thy\<close>.
+ \<^descr> \<^ML>\<open>Theory.parents_of\<close>~\<open>thy\<close> returns the direct ancestors of \<open>thy\<close>.
- \<^descr> @{ML "Theory.ancestors_of"}~\<open>thy\<close> returns all ancestors of \<open>thy\<close> (not
+ \<^descr> \<^ML>\<open>Theory.ancestors_of\<close>~\<open>thy\<close> returns all ancestors of \<open>thy\<close> (not
including \<open>thy\<close> itself).
\<close>
@@ -141,11 +141,11 @@
@{ML_antiquotation_def "theory_context"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation theory} embedded?
;
@@{ML_antiquotation theory_context} embedded
- \<close>}
+ \<close>
\<^descr> \<open>@{theory}\<close> refers to the background theory of the current context --- as
abstract value.
@@ -154,8 +154,7 @@
background theory of the current context --- as abstract value.
\<^descr> \<open>@{theory_context A}\<close> is similar to \<open>@{theory A}\<close>, but presents the result
- as initial @{ML_type Proof.context} (see also @{ML
- Proof_Context.init_global}).
+ as initial \<^ML_type>\<open>Proof.context\<close> (see also \<^ML>\<open>Proof_Context.init_global\<close>).
\<close>
@@ -194,15 +193,15 @@
@{index_ML Proof_Context.transfer: "theory -> Proof.context -> Proof.context"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.context} represents proof contexts.
+ \<^descr> Type \<^ML_type>\<open>Proof.context\<close> represents proof contexts.
- \<^descr> @{ML Proof_Context.init_global}~\<open>thy\<close> produces a proof context derived
+ \<^descr> \<^ML>\<open>Proof_Context.init_global\<close>~\<open>thy\<close> produces a proof context derived
from \<open>thy\<close>, initializing all data.
- \<^descr> @{ML Proof_Context.theory_of}~\<open>ctxt\<close> selects the background theory from
+ \<^descr> \<^ML>\<open>Proof_Context.theory_of\<close>~\<open>ctxt\<close> selects the background theory from
\<open>ctxt\<close>.
- \<^descr> @{ML Proof_Context.transfer}~\<open>thy ctxt\<close> promotes the background theory of
+ \<^descr> \<^ML>\<open>Proof_Context.transfer\<close>~\<open>thy ctxt\<close> promotes the background theory of
\<open>ctxt\<close> to the super theory \<open>thy\<close>.
\<close>
@@ -242,15 +241,14 @@
@{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Context.generic} is the direct sum of @{ML_type "theory"}
- and @{ML_type "Proof.context"}, with the datatype constructors @{ML
- "Context.Theory"} and @{ML "Context.Proof"}.
+ \<^descr> Type \<^ML_type>\<open>Context.generic\<close> is the direct sum of \<^ML_type>\<open>theory\<close>
+ and \<^ML_type>\<open>Proof.context\<close>, with the datatype constructors \<^ML>\<open>Context.Theory\<close> and \<^ML>\<open>Context.Proof\<close>.
- \<^descr> @{ML Context.theory_of}~\<open>context\<close> always produces a theory from the
- generic \<open>context\<close>, using @{ML "Proof_Context.theory_of"} as required.
+ \<^descr> \<^ML>\<open>Context.theory_of\<close>~\<open>context\<close> always produces a theory from the
+ generic \<open>context\<close>, using \<^ML>\<open>Proof_Context.theory_of\<close> as required.
- \<^descr> @{ML Context.proof_of}~\<open>context\<close> always produces a proof context from the
- generic \<open>context\<close>, using @{ML "Proof_Context.init_global"} as required (note
+ \<^descr> \<^ML>\<open>Context.proof_of\<close>~\<open>context\<close> always produces a proof context from the
+ generic \<open>context\<close>, using \<^ML>\<open>Proof_Context.init_global\<close> as required (note
that this re-initializes the context data with each invocation).
\<close>
@@ -282,7 +280,7 @@
Implementing \<open>merge\<close> can be tricky. The general idea is that \<open>merge (data\<^sub>1,
data\<^sub>2)\<close> inserts those parts of \<open>data\<^sub>2\<close> into \<open>data\<^sub>1\<close> that are not yet
- present, while keeping the general order of things. The @{ML Library.merge}
+ present, while keeping the general order of things. The \<^ML>\<open>Library.merge\<close>
function on plain lists may serve as canonical template.
Particularly note that shared parts of the data must not be duplicated by
@@ -342,15 +340,14 @@
@{index_ML_functor Generic_Data} \\
\end{mldecls}
- \<^descr> @{ML_functor Theory_Data}\<open>(spec)\<close> declares data for type @{ML_type theory}
+ \<^descr> \<^ML_functor>\<open>Theory_Data\<close>\<open>(spec)\<close> declares data for type \<^ML_type>\<open>theory\<close>
according to the specification provided as argument structure. The resulting
structure provides data init and access operations as described above.
- \<^descr> @{ML_functor Proof_Data}\<open>(spec)\<close> is analogous to @{ML_functor Theory_Data}
- for type @{ML_type Proof.context}.
+ \<^descr> \<^ML_functor>\<open>Proof_Data\<close>\<open>(spec)\<close> is analogous to \<^ML_functor>\<open>Theory_Data\<close>
+ for type \<^ML_type>\<open>Proof.context\<close>.
- \<^descr> @{ML_functor Generic_Data}\<open>(spec)\<close> is analogous to @{ML_functor
- Theory_Data} for type @{ML_type Context.generic}. \<close>
+ \<^descr> \<^ML_functor>\<open>Generic_Data\<close>\<open>(spec)\<close> is analogous to \<^ML_functor>\<open>Theory_Data\<close> for type \<^ML_type>\<open>Context.generic\<close>. \<close>
text %mlex \<open>
The following artificial example demonstrates theory data: we maintain a set
@@ -397,32 +394,29 @@
\<close>
text \<open>
- Type @{ML_type "term Ord_List.T"} is used for reasonably efficient
+ Type \<^ML_type>\<open>term Ord_List.T\<close> is used for reasonably efficient
representation of a set of terms: all operations are linear in the number of
stored elements. Here we assume that users of this module do not care about
the declaration order, since that data structure forces its own arrangement
of elements.
- Observe how the @{ML_text merge} operation joins the data slots of the two
- constituents: @{ML Ord_List.union} prevents duplication of common data from
+ Observe how the \<^ML_text>\<open>merge\<close> operation joins the data slots of the two
+ constituents: \<^ML>\<open>Ord_List.union\<close> prevents duplication of common data from
different branches, thus avoiding the danger of exponential blowup. Plain
list append etc.\ must never be used for theory data merges!
\<^medskip>
Our intended invariant is achieved as follows:
- \<^enum> @{ML Wellformed_Terms.add} only admits terms that have passed the @{ML
- Sign.cert_term} check of the given theory at that point.
+ \<^enum> \<^ML>\<open>Wellformed_Terms.add\<close> only admits terms that have passed the \<^ML>\<open>Sign.cert_term\<close> check of the given theory at that point.
- \<^enum> Wellformedness in the sense of @{ML Sign.cert_term} is monotonic wrt.\
+ \<^enum> Wellformedness in the sense of \<^ML>\<open>Sign.cert_term\<close> is monotonic wrt.\
the sub-theory relation. So our data can move upwards in the hierarchy
(via extension or merges), and maintain wellformedness without further
checks.
- Note that all basic operations of the inference kernel (which includes @{ML
- Sign.cert_term}) observe this monotonicity principle, but other user-space
- tools don't. For example, fully-featured type-inference via @{ML
- Syntax.check_term} (cf.\ \secref{sec:term-check}) is not necessarily
+ Note that all basic operations of the inference kernel (which includes \<^ML>\<open>Sign.cert_term\<close>) observe this monotonicity principle, but other user-space
+ tools don't. For example, fully-featured type-inference via \<^ML>\<open>Syntax.check_term\<close> (cf.\ \secref{sec:term-check}) is not necessarily
monotonic wrt.\ the background theory, since constraints of term constants
can be modified by later declarations, for example.
@@ -496,62 +490,61 @@
string Config.T"} \\
\end{mldecls}
- \<^descr> @{ML Config.get}~\<open>ctxt config\<close> gets the value of \<open>config\<close> in the given
+ \<^descr> \<^ML>\<open>Config.get\<close>~\<open>ctxt config\<close> gets the value of \<open>config\<close> in the given
context.
- \<^descr> @{ML Config.map}~\<open>config f ctxt\<close> updates the context by updating the value
+ \<^descr> \<^ML>\<open>Config.map\<close>~\<open>config f ctxt\<close> updates the context by updating the value
of \<open>config\<close>.
- \<^descr> \<open>config =\<close>~@{ML Attrib.setup_config_bool}~\<open>name default\<close> creates a named
- configuration option of type @{ML_type bool}, with the given \<open>default\<close>
+ \<^descr> \<open>config =\<close>~\<^ML>\<open>Attrib.setup_config_bool\<close>~\<open>name default\<close> creates a named
+ configuration option of type \<^ML_type>\<open>bool\<close>, with the given \<open>default\<close>
depending on the application context. The resulting \<open>config\<close> can be used to
get/map its value in a given context. There is an implicit update of the
background theory that registers the option as attribute with some concrete
syntax.
- \<^descr> @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
- Attrib.config_string} work like @{ML Attrib.config_bool}, but for types
- @{ML_type int} and @{ML_type string}, respectively.
+ \<^descr> \<^ML>\<open>Attrib.config_int\<close>, \<^ML>\<open>Attrib.config_real\<close>, and \<^ML>\<open>Attrib.config_string\<close> work like \<^ML>\<open>Attrib.config_bool\<close>, but for types
+ \<^ML_type>\<open>int\<close> and \<^ML_type>\<open>string\<close>, respectively.
\<close>
text %mlex \<open>
The following example shows how to declare and use a Boolean configuration
- option called \<open>my_flag\<close> with constant default value @{ML false}.
+ option called \<open>my_flag\<close> with constant default value \<^ML>\<open>false\<close>.
\<close>
ML \<open>
val my_flag =
- Attrib.setup_config_bool @{binding my_flag} (K false)
+ Attrib.setup_config_bool \<^binding>\<open>my_flag\<close> (K false)
\<close>
text \<open>
Now the user can refer to @{attribute my_flag} in declarations, while ML
- tools can retrieve the current value from the context via @{ML Config.get}.
+ tools can retrieve the current value from the context via \<^ML>\<open>Config.get\<close>.
\<close>
-ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
+ML_val \<open>\<^assert> (Config.get \<^context> my_flag = false)\<close>
declare [[my_flag = true]]
-ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
+ML_val \<open>\<^assert> (Config.get \<^context> my_flag = true)\<close>
notepad
begin
{
note [[my_flag = false]]
- ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
+ ML_val \<open>\<^assert> (Config.get \<^context> my_flag = false)\<close>
}
- ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
+ ML_val \<open>\<^assert> (Config.get \<^context> my_flag = true)\<close>
end
text \<open>
- Here is another example involving ML type @{ML_type real} (floating-point
+ Here is another example involving ML type \<^ML_type>\<open>real\<close> (floating-point
numbers).
\<close>
ML \<open>
val airspeed_velocity =
- Attrib.setup_config_real @{binding airspeed_velocity} (K 0.0)
+ Attrib.setup_config_real \<^binding>\<open>airspeed_velocity\<close> (K 0.0)
\<close>
declare [[airspeed_velocity = 10]]
@@ -630,43 +623,43 @@
@{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
\end{mldecls}
- \<^descr> @{ML Name.internal}~\<open>name\<close> produces an internal name by adding one
+ \<^descr> \<^ML>\<open>Name.internal\<close>~\<open>name\<close> produces an internal name by adding one
underscore.
- \<^descr> @{ML Name.skolem}~\<open>name\<close> produces a Skolem name by adding two underscores.
+ \<^descr> \<^ML>\<open>Name.skolem\<close>~\<open>name\<close> produces a Skolem name by adding two underscores.
- \<^descr> Type @{ML_type Name.context} represents the context of already used names;
- the initial value is @{ML "Name.context"}.
+ \<^descr> Type \<^ML_type>\<open>Name.context\<close> represents the context of already used names;
+ the initial value is \<^ML>\<open>Name.context\<close>.
- \<^descr> @{ML Name.declare}~\<open>name\<close> enters a used name into the context.
+ \<^descr> \<^ML>\<open>Name.declare\<close>~\<open>name\<close> enters a used name into the context.
- \<^descr> @{ML Name.invent}~\<open>context name n\<close> produces \<open>n\<close> fresh names derived from
+ \<^descr> \<^ML>\<open>Name.invent\<close>~\<open>context name n\<close> produces \<open>n\<close> fresh names derived from
\<open>name\<close>.
- \<^descr> @{ML Name.variant}~\<open>name context\<close> produces a fresh variant of \<open>name\<close>; the
+ \<^descr> \<^ML>\<open>Name.variant\<close>~\<open>name context\<close> produces a fresh variant of \<open>name\<close>; the
result is declared to the context.
- \<^descr> @{ML Variable.names_of}~\<open>ctxt\<close> retrieves the context of declared type and
+ \<^descr> \<^ML>\<open>Variable.names_of\<close>~\<open>ctxt\<close> retrieves the context of declared type and
term variable names. Projecting a proof context down to a primitive name
context is occasionally useful when invoking lower-level operations. Regular
management of ``fresh variables'' is done by suitable operations of
- structure @{ML_structure Variable}, which is also able to provide an
+ structure \<^ML_structure>\<open>Variable\<close>, which is also able to provide an
official status of ``locally fixed variable'' within the logical environment
(cf.\ \secref{sec:variables}).
\<close>
text %mlex \<open>
The following simple examples demonstrate how to produce fresh names from
- the initial @{ML Name.context}.
+ the initial \<^ML>\<open>Name.context\<close>.
\<close>
ML_val \<open>
val list1 = Name.invent Name.context "a" 5;
- @{assert} (list1 = ["a", "b", "c", "d", "e"]);
+ \<^assert> (list1 = ["a", "b", "c", "d", "e"]);
val list2 =
#1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] Name.context);
- @{assert} (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
+ \<^assert> (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
\<close>
text \<open>
@@ -677,14 +670,14 @@
begin
ML_val \<open>
- val names = Variable.names_of @{context};
+ val names = Variable.names_of \<^context>;
val list1 = Name.invent names "a" 5;
- @{assert} (list1 = ["d", "e", "f", "g", "h"]);
+ \<^assert> (list1 = ["d", "e", "f", "g", "h"]);
val list2 =
#1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] names);
- @{assert} (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
+ \<^assert> (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
\<close>
end
@@ -726,8 +719,8 @@
@{index_ML_type indexname: "string * int"} \\
\end{mldecls}
- \<^descr> Type @{ML_type indexname} represents indexed names. This is an
- abbreviation for @{ML_type "string * int"}. The second component is usually
+ \<^descr> Type \<^ML_type>\<open>indexname\<close> represents indexed names. This is an
+ abbreviation for \<^ML_type>\<open>string * int\<close>. The second component is usually
non-negative, except for situations where \<open>(x, -1)\<close> is used to inject basic
names into this type. Other negative indexes should not be used.
\<close>
@@ -765,13 +758,13 @@
@{index_ML Long_Name.explode: "string -> string list"} \\
\end{mldecls}
- \<^descr> @{ML Long_Name.base_name}~\<open>name\<close> returns the base name of a long name.
+ \<^descr> \<^ML>\<open>Long_Name.base_name\<close>~\<open>name\<close> returns the base name of a long name.
- \<^descr> @{ML Long_Name.qualifier}~\<open>name\<close> returns the qualifier of a long name.
+ \<^descr> \<^ML>\<open>Long_Name.qualifier\<close>~\<open>name\<close> returns the qualifier of a long name.
- \<^descr> @{ML Long_Name.append}~\<open>name\<^sub>1 name\<^sub>2\<close> appends two long names.
+ \<^descr> \<^ML>\<open>Long_Name.append\<close>~\<open>name\<^sub>1 name\<^sub>2\<close> appends two long names.
- \<^descr> @{ML Long_Name.implode}~\<open>names\<close> and @{ML Long_Name.explode}~\<open>name\<close> convert
+ \<^descr> \<^ML>\<open>Long_Name.implode\<close>~\<open>names\<close> and \<^ML>\<open>Long_Name.explode\<close>~\<open>name\<close> convert
between the packed string representation and the explicit list form of long
names.
\<close>
@@ -860,75 +853,74 @@
@{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"}
\end{mldecls}
- \<^descr> Type @{ML_type binding} represents the abstract concept of name bindings.
+ \<^descr> Type \<^ML_type>\<open>binding\<close> represents the abstract concept of name bindings.
- \<^descr> @{ML Binding.empty} is the empty binding.
+ \<^descr> \<^ML>\<open>Binding.empty\<close> is the empty binding.
- \<^descr> @{ML Binding.name}~\<open>name\<close> produces a binding with base name \<open>name\<close>. Note
+ \<^descr> \<^ML>\<open>Binding.name\<close>~\<open>name\<close> produces a binding with base name \<open>name\<close>. Note
that this lacks proper source position information; see also the ML
antiquotation @{ML_antiquotation binding}.
- \<^descr> @{ML Binding.qualify}~\<open>mandatory name binding\<close> prefixes qualifier \<open>name\<close>
+ \<^descr> \<^ML>\<open>Binding.qualify\<close>~\<open>mandatory name binding\<close> prefixes qualifier \<open>name\<close>
to \<open>binding\<close>. The \<open>mandatory\<close> flag tells if this name component always needs
to be given in name space accesses --- this is mostly \<open>false\<close> in practice.
Note that this part of qualification is typically used in derived
specification mechanisms.
- \<^descr> @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but affects the
+ \<^descr> \<^ML>\<open>Binding.prefix\<close> is similar to \<^ML>\<open>Binding.qualify\<close>, but affects the
system prefix. This part of extra qualification is typically used in the
infrastructure for modular specifications, notably ``local theory targets''
(see also \chref{ch:local-theory}).
- \<^descr> @{ML Binding.concealed}~\<open>binding\<close> indicates that the binding shall refer
+ \<^descr> \<^ML>\<open>Binding.concealed\<close>~\<open>binding\<close> indicates that the binding shall refer
to an entity that serves foundational purposes only. This flag helps to mark
implementation details of specification mechanism etc. Other tools should
- not depend on the particulars of concealed entities (cf.\ @{ML
- Name_Space.is_concealed}).
+ not depend on the particulars of concealed entities (cf.\ \<^ML>\<open>Name_Space.is_concealed\<close>).
- \<^descr> @{ML Binding.print}~\<open>binding\<close> produces a string representation for
+ \<^descr> \<^ML>\<open>Binding.print\<close>~\<open>binding\<close> produces a string representation for
human-readable output, together with some formal markup that might get used
in GUI front-ends, for example.
- \<^descr> Type @{ML_type Name_Space.naming} represents the abstract concept of a
+ \<^descr> Type \<^ML_type>\<open>Name_Space.naming\<close> represents the abstract concept of a
naming policy.
- \<^descr> @{ML Name_Space.global_naming} is the default naming policy: it is global
+ \<^descr> \<^ML>\<open>Name_Space.global_naming\<close> is the default naming policy: it is global
and lacks any path prefix. In a regular theory context this is augmented by
a path prefix consisting of the theory name.
- \<^descr> @{ML Name_Space.add_path}~\<open>path naming\<close> augments the naming policy by
+ \<^descr> \<^ML>\<open>Name_Space.add_path\<close>~\<open>path naming\<close> augments the naming policy by
extending its path component.
- \<^descr> @{ML Name_Space.full_name}~\<open>naming binding\<close> turns a name binding (usually
+ \<^descr> \<^ML>\<open>Name_Space.full_name\<close>~\<open>naming binding\<close> turns a name binding (usually
a basic name) into the fully qualified internal name, according to the given
naming policy.
- \<^descr> Type @{ML_type Name_Space.T} represents name spaces.
+ \<^descr> Type \<^ML_type>\<open>Name_Space.T\<close> represents name spaces.
- \<^descr> @{ML Name_Space.empty}~\<open>kind\<close> and @{ML Name_Space.merge}~\<open>(space\<^sub>1,
+ \<^descr> \<^ML>\<open>Name_Space.empty\<close>~\<open>kind\<close> and \<^ML>\<open>Name_Space.merge\<close>~\<open>(space\<^sub>1,
space\<^sub>2)\<close> are the canonical operations for maintaining name spaces according
to theory data management (\secref{sec:context-data}); \<open>kind\<close> is a formal
comment to characterize the purpose of a name space.
- \<^descr> @{ML Name_Space.declare}~\<open>context strict binding space\<close> enters a name
+ \<^descr> \<^ML>\<open>Name_Space.declare\<close>~\<open>context strict binding space\<close> enters a name
binding as fully qualified internal name into the name space, using the
naming of the context.
- \<^descr> @{ML Name_Space.intern}~\<open>space name\<close> internalizes a (partially qualified)
+ \<^descr> \<^ML>\<open>Name_Space.intern\<close>~\<open>space name\<close> internalizes a (partially qualified)
external name.
This operation is mostly for parsing! Note that fully qualified names
- stemming from declarations are produced via @{ML "Name_Space.full_name"} and
- @{ML "Name_Space.declare"} (or their derivatives for @{ML_type theory} and
- @{ML_type Proof.context}).
+ stemming from declarations are produced via \<^ML>\<open>Name_Space.full_name\<close> and
+ \<^ML>\<open>Name_Space.declare\<close> (or their derivatives for \<^ML_type>\<open>theory\<close> and
+ \<^ML_type>\<open>Proof.context\<close>).
- \<^descr> @{ML Name_Space.extern}~\<open>ctxt space name\<close> externalizes a (fully qualified)
+ \<^descr> \<^ML>\<open>Name_Space.extern\<close>~\<open>ctxt space name\<close> externalizes a (fully qualified)
internal name.
This operation is mostly for printing! User code should not rely on the
precise result too much.
- \<^descr> @{ML Name_Space.is_concealed}~\<open>space name\<close> indicates whether \<open>name\<close> refers
+ \<^descr> \<^ML>\<open>Name_Space.is_concealed\<close>~\<open>space name\<close> indicates whether \<open>name\<close> refers
to a strictly private entity that other tools are supposed to ignore!
\<close>
@@ -937,13 +929,13 @@
@{ML_antiquotation_def "binding"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation binding} embedded
- \<close>}
+ \<close>
\<^descr> \<open>@{binding name}\<close> produces a binding with base name \<open>name\<close> and the source
position taken from the concrete syntax of this antiquotation. In many
- situations this is more appropriate than the more basic @{ML Binding.name}
+ situations this is more appropriate than the more basic \<^ML>\<open>Binding.name\<close>
function.
\<close>
@@ -952,7 +944,7 @@
inlined into the text:
\<close>
-ML_val \<open>Binding.pos_of @{binding here}\<close>
+ML_val \<open>Binding.pos_of \<^binding>\<open>here\<close>\<close>
text \<open>
\<^medskip>
@@ -961,7 +953,7 @@
ML_command
\<open>writeln
- ("Look here" ^ Position.here (Binding.pos_of @{binding here}))\<close>
+ ("Look here" ^ Position.here (Binding.pos_of \<^binding>\<open>here\<close>))\<close>
text \<open>
This illustrates a key virtue of formalized bindings as opposed to raw
@@ -973,6 +965,6 @@
occasionally useful for experimentation and diagnostic purposes:
\<close>
-ML_command \<open>warning ("Look here" ^ Position.here @{here})\<close>
+ML_command \<open>warning ("Look here" ^ Position.here \<^here>)\<close>
end
--- a/src/Doc/Implementation/Proof.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Proof.thy Sat Jan 05 17:24:33 2019 +0100
@@ -107,40 +107,40 @@
((string * (string * typ)) list * term) * Proof.context"} \\
\end{mldecls}
- \<^descr> @{ML Variable.add_fixes}~\<open>xs ctxt\<close> fixes term variables \<open>xs\<close>, returning
+ \<^descr> \<^ML>\<open>Variable.add_fixes\<close>~\<open>xs ctxt\<close> fixes term variables \<open>xs\<close>, returning
the resulting internal names. By default, the internal representation
coincides with the external one, which also means that the given variables
must not be fixed already. There is a different policy within a local proof
body: the given names are just hints for newly invented Skolem variables.
- \<^descr> @{ML Variable.variant_fixes} is similar to @{ML Variable.add_fixes}, but
+ \<^descr> \<^ML>\<open>Variable.variant_fixes\<close> is similar to \<^ML>\<open>Variable.add_fixes\<close>, but
always produces fresh variants of the given names.
- \<^descr> @{ML Variable.declare_term}~\<open>t ctxt\<close> declares term \<open>t\<close> to belong to the
+ \<^descr> \<^ML>\<open>Variable.declare_term\<close>~\<open>t ctxt\<close> declares term \<open>t\<close> to belong to the
context. This automatically fixes new type variables, but not term
variables. Syntactic constraints for type and term variables are declared
uniformly, though.
- \<^descr> @{ML Variable.declare_constraints}~\<open>t ctxt\<close> declares syntactic constraints
+ \<^descr> \<^ML>\<open>Variable.declare_constraints\<close>~\<open>t ctxt\<close> declares syntactic constraints
from term \<open>t\<close>, without making it part of the context yet.
- \<^descr> @{ML Variable.export}~\<open>inner outer thms\<close> generalizes fixed type and term
+ \<^descr> \<^ML>\<open>Variable.export\<close>~\<open>inner outer thms\<close> generalizes fixed type and term
variables in \<open>thms\<close> according to the difference of the \<open>inner\<close> and \<open>outer\<close>
context, following the principles sketched above.
- \<^descr> @{ML Variable.polymorphic}~\<open>ctxt ts\<close> generalizes type variables in \<open>ts\<close> as
+ \<^descr> \<^ML>\<open>Variable.polymorphic\<close>~\<open>ctxt ts\<close> generalizes type variables in \<open>ts\<close> as
far as possible, even those occurring in fixed term variables. The default
policy of type-inference is to fix newly introduced type variables, which is
- essentially reversed with @{ML Variable.polymorphic}: here the given terms
+ essentially reversed with \<^ML>\<open>Variable.polymorphic\<close>: here the given terms
are detached from the context as far as possible.
- \<^descr> @{ML Variable.import}~\<open>open thms ctxt\<close> invents fixed type and term
+ \<^descr> \<^ML>\<open>Variable.import\<close>~\<open>open thms ctxt\<close> invents fixed type and term
variables for the schematic ones occurring in \<open>thms\<close>. The \<open>open\<close> flag
indicates whether the fixed names should be accessible to the user,
otherwise newly introduced names are marked as ``internal''
(\secref{sec:names}).
- \<^descr> @{ML Variable.focus}~\<open>bindings B\<close> decomposes the outermost \<open>\<And>\<close> prefix of
+ \<^descr> \<^ML>\<open>Variable.focus\<close>~\<open>bindings B\<close> decomposes the outermost \<open>\<And>\<close> prefix of
proposition \<open>B\<close>, using the given name bindings.
\<close>
@@ -151,7 +151,7 @@
ML_val \<open>
(*static compile-time context -- for testing only*)
- val ctxt0 = @{context};
+ val ctxt0 = \<^context>;
(*locally fixed parameters -- no type assignment yet*)
val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
@@ -177,7 +177,7 @@
\<close>
ML_val \<open>
- val ctxt0 = @{context};
+ val ctxt0 = \<^context>;
val ([x1, x2, x3], ctxt1) =
ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
\<close>
@@ -192,7 +192,7 @@
notepad
begin
ML_prf %"ML"
- \<open>val ctxt0 = @{context};
+ \<open>val ctxt0 = \<^context>;
val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
val ([x2], ctxt2) = ctxt1 |> Variable.add_fixes ["x"];
@@ -203,7 +203,7 @@
end
text \<open>
- In this situation @{ML Variable.add_fixes} and @{ML Variable.variant_fixes}
+ In this situation \<^ML>\<open>Variable.add_fixes\<close> and \<^ML>\<open>Variable.variant_fixes\<close>
are very similar, but identical name proposals given in a row are only
accepted by the second version.
\<close>
@@ -273,41 +273,39 @@
@{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Assumption.export} represents arbitrary export rules, which
- is any function of type @{ML_type "bool -> cterm list -> thm -> thm"}, where
- the @{ML_type "bool"} indicates goal mode, and the @{ML_type "cterm list"}
+ \<^descr> Type \<^ML_type>\<open>Assumption.export\<close> represents arbitrary export rules, which
+ is any function of type \<^ML_type>\<open>bool -> cterm list -> thm -> thm\<close>, where
+ the \<^ML_type>\<open>bool\<close> indicates goal mode, and the \<^ML_type>\<open>cterm list\<close>
the collection of assumptions to be discharged simultaneously.
- \<^descr> @{ML Assumption.assume}~\<open>ctxt A\<close> turns proposition \<open>A\<close> into a primitive
+ \<^descr> \<^ML>\<open>Assumption.assume\<close>~\<open>ctxt A\<close> turns proposition \<open>A\<close> into a primitive
assumption \<open>A \<turnstile> A'\<close>, where the conclusion \<open>A'\<close> is in HHF normal form.
- \<^descr> @{ML Assumption.add_assms}~\<open>r As\<close> augments the context by assumptions \<open>As\<close>
+ \<^descr> \<^ML>\<open>Assumption.add_assms\<close>~\<open>r As\<close> augments the context by assumptions \<open>As\<close>
with export rule \<open>r\<close>. The resulting facts are hypothetical theorems as
- produced by the raw @{ML Assumption.assume}.
+ produced by the raw \<^ML>\<open>Assumption.assume\<close>.
- \<^descr> @{ML Assumption.add_assumes}~\<open>As\<close> is a special case of @{ML
- Assumption.add_assms} where the export rule performs \<open>\<Longrightarrow>\<hyphen>intro\<close> or
+ \<^descr> \<^ML>\<open>Assumption.add_assumes\<close>~\<open>As\<close> is a special case of \<^ML>\<open>Assumption.add_assms\<close> where the export rule performs \<open>\<Longrightarrow>\<hyphen>intro\<close> or
\<open>#\<Longrightarrow>\<hyphen>intro\<close>, depending on goal mode.
- \<^descr> @{ML Assumption.export}~\<open>is_goal inner outer thm\<close> exports result \<open>thm\<close>
+ \<^descr> \<^ML>\<open>Assumption.export\<close>~\<open>is_goal inner outer thm\<close> exports result \<open>thm\<close>
from the \<open>inner\<close> context back into the \<open>outer\<close> one; \<open>is_goal = true\<close>
means this is a goal context. The result is in HHF normal form. Note that
- @{ML "Proof_Context.export"} combines @{ML "Variable.export"} and @{ML
- "Assumption.export"} in the canonical way.
+ \<^ML>\<open>Proof_Context.export\<close> combines \<^ML>\<open>Variable.export\<close> and \<^ML>\<open>Assumption.export\<close> in the canonical way.
\<close>
text %mlex \<open>
The following example demonstrates how rules can be derived by building up a
context of assumptions first, and exporting some local fact afterwards. We
- refer to @{theory Pure} equality here for testing purposes.
+ refer to \<^theory>\<open>Pure\<close> equality here for testing purposes.
\<close>
ML_val \<open>
(*static compile-time context -- for testing only*)
- val ctxt0 = @{context};
+ val ctxt0 = \<^context>;
val ([eq], ctxt1) =
- ctxt0 |> Assumption.add_assumes [@{cprop "x \<equiv> y"}];
+ ctxt0 |> Assumption.add_assumes [\<^cprop>\<open>x \<equiv> y\<close>];
val eq' = Thm.symmetric eq;
(*back to original context -- discharges assumption*)
@@ -317,8 +315,8 @@
text \<open>
Note that the variables of the resulting rule are not generalized. This
would have required to fix them properly in the context beforehand, and
- export wrt.\ variables afterwards (cf.\ @{ML Variable.export} or the
- combined @{ML "Proof_Context.export"}).
+ export wrt.\ variables afterwards (cf.\ \<^ML>\<open>Variable.export\<close> or the
+ combined \<^ML>\<open>Proof_Context.export\<close>).
\<close>
@@ -389,46 +387,45 @@
Proof.context -> ((string * cterm) list * thm list) * Proof.context"} \\
\end{mldecls}
- \<^descr> @{ML SUBPROOF}~\<open>tac ctxt i\<close> decomposes the structure of the specified
+ \<^descr> \<^ML>\<open>SUBPROOF\<close>~\<open>tac ctxt i\<close> decomposes the structure of the specified
sub-goal, producing an extended context and a reduced goal, which needs to
be solved by the given tactic. All schematic parameters of the goal are
imported into the context as fixed ones, which may not be instantiated in
the sub-proof.
- \<^descr> @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
- Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are slightly more
+ \<^descr> \<^ML>\<open>Subgoal.FOCUS\<close>, \<^ML>\<open>Subgoal.FOCUS_PREMS\<close>, and \<^ML>\<open>Subgoal.FOCUS_PARAMS\<close> are similar to \<^ML>\<open>SUBPROOF\<close>, but are slightly more
flexible: only the specified parts of the subgoal are imported into the
context, and the body tactic may introduce new subgoals and schematic
variables.
- \<^descr> @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML Subgoal.focus_params}
+ \<^descr> \<^ML>\<open>Subgoal.focus\<close>, \<^ML>\<open>Subgoal.focus_prems\<close>, \<^ML>\<open>Subgoal.focus_params\<close>
extract the focus information from a goal state in the same way as the
corresponding tacticals above. This is occasionally useful to experiment
without writing actual tactics yet.
- \<^descr> @{ML Goal.prove}~\<open>ctxt xs As C tac\<close> states goal \<open>C\<close> in the context
+ \<^descr> \<^ML>\<open>Goal.prove\<close>~\<open>ctxt xs As C tac\<close> states goal \<open>C\<close> in the context
augmented by fixed variables \<open>xs\<close> and assumptions \<open>As\<close>, and applies tactic
\<open>tac\<close> to solve it. The latter may depend on the local assumptions being
presented as facts. The result is in HHF normal form.
- \<^descr> @{ML Goal.prove_common}~\<open>ctxt fork_pri\<close> is the common form to state and
- prove a simultaneous goal statement, where @{ML Goal.prove} is a convenient
+ \<^descr> \<^ML>\<open>Goal.prove_common\<close>~\<open>ctxt fork_pri\<close> is the common form to state and
+ prove a simultaneous goal statement, where \<^ML>\<open>Goal.prove\<close> is a convenient
shorthand that is most frequently used in applications.
The given list of simultaneous conclusions is encoded in the goal state by
- means of Pure conjunction: @{ML Goal.conjunction_tac} will turn this into a
+ means of Pure conjunction: \<^ML>\<open>Goal.conjunction_tac\<close> will turn this into a
collection of individual subgoals, but note that the original multi-goal
state is usually required for advanced induction.
It is possible to provide an optional priority for a forked proof, typically
- @{ML "SOME ~1"}, while @{ML NONE} means the proof is immediate (sequential)
- as for @{ML Goal.prove}. Note that a forked proof does not exhibit any
+ \<^ML>\<open>SOME ~1\<close>, while \<^ML>\<open>NONE\<close> means the proof is immediate (sequential)
+ as for \<^ML>\<open>Goal.prove\<close>. Note that a forked proof does not exhibit any
failures in the usual way via exceptions in ML, but accumulates error
situations under the execution id of the running transaction. Thus the
system is able to expose error messages ultimately to the end-user, even
though the subsequent ML code misses them.
- \<^descr> @{ML Obtain.result}~\<open>tac thms ctxt\<close> eliminates the given facts using a
+ \<^descr> \<^ML>\<open>Obtain.result\<close>~\<open>tac thms ctxt\<close> eliminates the given facts using a
tactic, which results in additional fixed variables and assumptions in the
context. Final results need to be exported explicitly.
\<close>
@@ -446,7 +443,7 @@
ML_val
\<open>val {goal, context = goal_ctxt, ...} = @{Isar.goal};
val (focus as {params, asms, concl, ...}, goal') =
- Subgoal.focus goal_ctxt 1 (SOME [@{binding x}]) goal;
+ Subgoal.focus goal_ctxt 1 (SOME [\<^binding>\<open>x\<close>]) goal;
val [A, B] = #prems focus;
val [(_, x)] = #params focus;\<close>
sorry
@@ -455,7 +452,7 @@
text \<open>
\<^medskip>
The next example demonstrates forward-elimination in a local context, using
- @{ML Obtain.result}.
+ \<^ML>\<open>Obtain.result\<close>.
\<close>
notepad
@@ -463,7 +460,7 @@
assume ex: "\<exists>x. B x"
ML_prf %"ML"
- \<open>val ctxt0 = @{context};
+ \<open>val ctxt0 = \<^context>;
val (([(_, x)], [B]), ctxt1) = ctxt0
|> Obtain.result (fn _ => eresolve_tac ctxt0 @{thms exE} 1) [@{thm ex}];\<close>
ML_prf %"ML"
--- a/src/Doc/Implementation/Syntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Syntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -81,46 +81,43 @@
@{index_ML Syntax.string_of_term: "Proof.context -> term -> string"} \\
\end{mldecls}
- \<^descr> @{ML Syntax.read_typs}~\<open>ctxt strs\<close> parses and checks a simultaneous list
+ \<^descr> \<^ML>\<open>Syntax.read_typs\<close>~\<open>ctxt strs\<close> parses and checks a simultaneous list
of source strings as types of the logic.
- \<^descr> @{ML Syntax.read_terms}~\<open>ctxt strs\<close> parses and checks a simultaneous list
+ \<^descr> \<^ML>\<open>Syntax.read_terms\<close>~\<open>ctxt strs\<close> parses and checks a simultaneous list
of source strings as terms of the logic. Type-reconstruction puts all parsed
terms into the same scope: types of free variables ultimately need to
coincide.
If particular type-constraints are required for some of the arguments, the
read operations needs to be split into its parse and check phases. Then it
- is possible to use @{ML Type.constraint} on the intermediate pre-terms
+ is possible to use \<^ML>\<open>Type.constraint\<close> on the intermediate pre-terms
(\secref{sec:term-check}).
- \<^descr> @{ML Syntax.read_props}~\<open>ctxt strs\<close> parses and checks a simultaneous list
+ \<^descr> \<^ML>\<open>Syntax.read_props\<close>~\<open>ctxt strs\<close> parses and checks a simultaneous list
of source strings as terms of the logic, with an implicit type-constraint
- for each argument to enforce type @{typ prop}; this also affects the inner
- syntax for parsing. The remaining type-reconstruction works as for @{ML
- Syntax.read_terms}.
+ for each argument to enforce type \<^typ>\<open>prop\<close>; this also affects the inner
+ syntax for parsing. The remaining type-reconstruction works as for \<^ML>\<open>Syntax.read_terms\<close>.
- \<^descr> @{ML Syntax.read_typ}, @{ML Syntax.read_term}, @{ML Syntax.read_prop} are
+ \<^descr> \<^ML>\<open>Syntax.read_typ\<close>, \<^ML>\<open>Syntax.read_term\<close>, \<^ML>\<open>Syntax.read_prop\<close> are
like the simultaneous versions, but operate on a single argument only. This
convenient shorthand is adequate in situations where a single item in its
- own scope is processed. Do not use @{ML "map o Syntax.read_term"} where @{ML
- Syntax.read_terms} is actually intended!
+ own scope is processed. Do not use \<^ML>\<open>map o Syntax.read_term\<close> where \<^ML>\<open>Syntax.read_terms\<close> is actually intended!
- \<^descr> @{ML Syntax.pretty_typ}~\<open>ctxt T\<close> and @{ML Syntax.pretty_term}~\<open>ctxt t\<close>
+ \<^descr> \<^ML>\<open>Syntax.pretty_typ\<close>~\<open>ctxt T\<close> and \<^ML>\<open>Syntax.pretty_term\<close>~\<open>ctxt t\<close>
uncheck and pretty-print the given type or term, respectively. Although the
uncheck phase acts on a simultaneous list as well, this is rarely used in
practice, so only the singleton case is provided as combined pretty
operation. There is no distinction of term vs.\ proposition.
- \<^descr> @{ML Syntax.string_of_typ} and @{ML Syntax.string_of_term} are convenient
- compositions of @{ML Syntax.pretty_typ} and @{ML Syntax.pretty_term} with
- @{ML Pretty.string_of} for output. The result may be concatenated with other
+ \<^descr> \<^ML>\<open>Syntax.string_of_typ\<close> and \<^ML>\<open>Syntax.string_of_term\<close> are convenient
+ compositions of \<^ML>\<open>Syntax.pretty_typ\<close> and \<^ML>\<open>Syntax.pretty_term\<close> with
+ \<^ML>\<open>Pretty.string_of\<close> for output. The result may be concatenated with other
strings, as long as there is no further formatting and line-breaking
involved.
- @{ML Syntax.read_term}, @{ML Syntax.read_prop}, and @{ML
- Syntax.string_of_term} are the most important operations in practice.
+ \<^ML>\<open>Syntax.read_term\<close>, \<^ML>\<open>Syntax.read_prop\<close>, and \<^ML>\<open>Syntax.string_of_term\<close> are the most important operations in practice.
\<^medskip>
Note that the string values that are passed in and out are annotated by the
@@ -130,8 +127,8 @@
datatype, encoded as concrete string for historical reasons.
The standard way to provide the required position markup for input works via
- the outer syntax parser wrapper @{ML Parse.inner_syntax}, which is already
- part of @{ML Parse.typ}, @{ML Parse.term}, @{ML Parse.prop}. So a string
+ the outer syntax parser wrapper \<^ML>\<open>Parse.inner_syntax\<close>, which is already
+ part of \<^ML>\<open>Parse.typ\<close>, \<^ML>\<open>Parse.term\<close>, \<^ML>\<open>Parse.prop\<close>. So a string
obtained from one of the latter may be directly passed to the corresponding
read operation: this yields PIDE markup of the input and precise positions
for warning and error messages.
@@ -168,27 +165,26 @@
@{index_ML Syntax.unparse_term: "Proof.context -> term -> Pretty.T"} \\
\end{mldecls}
- \<^descr> @{ML Syntax.parse_typ}~\<open>ctxt str\<close> parses a source string as pre-type that
+ \<^descr> \<^ML>\<open>Syntax.parse_typ\<close>~\<open>ctxt str\<close> parses a source string as pre-type that
is ready to be used with subsequent check operations.
- \<^descr> @{ML Syntax.parse_term}~\<open>ctxt str\<close> parses a source string as pre-term that
+ \<^descr> \<^ML>\<open>Syntax.parse_term\<close>~\<open>ctxt str\<close> parses a source string as pre-term that
is ready to be used with subsequent check operations.
- \<^descr> @{ML Syntax.parse_prop}~\<open>ctxt str\<close> parses a source string as pre-term that
+ \<^descr> \<^ML>\<open>Syntax.parse_prop\<close>~\<open>ctxt str\<close> parses a source string as pre-term that
is ready to be used with subsequent check operations. The inner syntax
- category is @{typ prop} and a suitable type-constraint is included to ensure
+ category is \<^typ>\<open>prop\<close> and a suitable type-constraint is included to ensure
that this information is observed in subsequent type reconstruction.
- \<^descr> @{ML Syntax.unparse_typ}~\<open>ctxt T\<close> unparses a type after uncheck
+ \<^descr> \<^ML>\<open>Syntax.unparse_typ\<close>~\<open>ctxt T\<close> unparses a type after uncheck
operations, to turn it into a pretty tree.
- \<^descr> @{ML Syntax.unparse_term}~\<open>ctxt T\<close> unparses a term after uncheck
+ \<^descr> \<^ML>\<open>Syntax.unparse_term\<close>~\<open>ctxt T\<close> unparses a term after uncheck
operations, to turn it into a pretty tree. There is no distinction for
propositions here.
- These operations always operate on a single item; use the combinator @{ML
- map} to apply them to a list.
+ These operations always operate on a single item; use the combinator \<^ML>\<open>map\<close> to apply them to a list.
\<close>
@@ -230,36 +226,34 @@
@{index_ML Syntax.uncheck_terms: "Proof.context -> term list -> term list"} \\
\end{mldecls}
- \<^descr> @{ML Syntax.check_typs}~\<open>ctxt Ts\<close> checks a simultaneous list of pre-types
+ \<^descr> \<^ML>\<open>Syntax.check_typs\<close>~\<open>ctxt Ts\<close> checks a simultaneous list of pre-types
as types of the logic. Typically, this involves normalization of type
synonyms.
- \<^descr> @{ML Syntax.check_terms}~\<open>ctxt ts\<close> checks a simultaneous list of pre-terms
+ \<^descr> \<^ML>\<open>Syntax.check_terms\<close>~\<open>ctxt ts\<close> checks a simultaneous list of pre-terms
as terms of the logic. Typically, this involves type-inference and
normalization term abbreviations. The types within the given terms are
- treated in the same way as for @{ML Syntax.check_typs}.
+ treated in the same way as for \<^ML>\<open>Syntax.check_typs\<close>.
Applications sometimes need to check several types and terms together. The
- standard approach uses @{ML Logic.mk_type} to embed the language of types
+ standard approach uses \<^ML>\<open>Logic.mk_type\<close> to embed the language of types
into that of terms; all arguments are appended into one list of terms that
- is checked; afterwards the type arguments are recovered with @{ML
- Logic.dest_type}.
+ is checked; afterwards the type arguments are recovered with \<^ML>\<open>Logic.dest_type\<close>.
- \<^descr> @{ML Syntax.check_props}~\<open>ctxt ts\<close> checks a simultaneous list of pre-terms
- as terms of the logic, such that all terms are constrained by type @{typ
- prop}. The remaining check operation works as @{ML Syntax.check_terms}
+ \<^descr> \<^ML>\<open>Syntax.check_props\<close>~\<open>ctxt ts\<close> checks a simultaneous list of pre-terms
+ as terms of the logic, such that all terms are constrained by type \<^typ>\<open>prop\<close>. The remaining check operation works as \<^ML>\<open>Syntax.check_terms\<close>
above.
- \<^descr> @{ML Syntax.uncheck_typs}~\<open>ctxt Ts\<close> unchecks a simultaneous list of types
+ \<^descr> \<^ML>\<open>Syntax.uncheck_typs\<close>~\<open>ctxt Ts\<close> unchecks a simultaneous list of types
of the logic, in preparation of pretty printing.
- \<^descr> @{ML Syntax.uncheck_terms}~\<open>ctxt ts\<close> unchecks a simultaneous list of terms
+ \<^descr> \<^ML>\<open>Syntax.uncheck_terms\<close>~\<open>ctxt ts\<close> unchecks a simultaneous list of terms
of the logic, in preparation of pretty printing. There is no distinction for
propositions here.
These operations always operate simultaneously on a list; use the combinator
- @{ML singleton} to apply them to a single item.
+ \<^ML>\<open>singleton\<close> to apply them to a single item.
\<close>
end
--- a/src/Doc/Implementation/Tactic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Tactic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -67,17 +67,17 @@
@{index_ML Goal.conclude: "thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML "Goal.init"}~\<open>C\<close> initializes a tactical goal from the well-formed
+ \<^descr> \<^ML>\<open>Goal.init\<close>~\<open>C\<close> initializes a tactical goal from the well-formed
proposition \<open>C\<close>.
- \<^descr> @{ML "Goal.finish"}~\<open>ctxt thm\<close> checks whether theorem \<open>thm\<close> is a solved
+ \<^descr> \<^ML>\<open>Goal.finish\<close>~\<open>ctxt thm\<close> checks whether theorem \<open>thm\<close> is a solved
goal (no subgoals), and concludes the result by removing the goal
protection. The context is only required for printing error messages.
- \<^descr> @{ML "Goal.protect"}~\<open>n thm\<close> protects the statement of theorem \<open>thm\<close>. The
+ \<^descr> \<^ML>\<open>Goal.protect\<close>~\<open>n thm\<close> protects the statement of theorem \<open>thm\<close>. The
parameter \<open>n\<close> indicates the number of premises to be retained.
- \<^descr> @{ML "Goal.conclude"}~\<open>thm\<close> removes the goal protection, even if there are
+ \<^descr> \<^ML>\<open>Goal.conclude\<close>~\<open>thm\<close> removes the goal protection, even if there are
pending subgoals.
\<close>
@@ -151,7 +151,7 @@
(\secref{sec:struct-goals}); others are not checked explicitly, and
violating them merely results in ill-behaved tactics experienced by the user
(e.g.\ tactics that insist in being applicable only to singleton goals, or
- prevent composition via standard tacticals such as @{ML REPEAT}).
+ prevent composition via standard tacticals such as \<^ML>\<open>REPEAT\<close>).
\<close>
text %mlref \<open>
@@ -167,42 +167,42 @@
@{index_ML PREFER_GOAL: "tactic -> int -> tactic"} \\
\end{mldecls}
- \<^descr> Type @{ML_type tactic} represents tactics. The well-formedness conditions
+ \<^descr> Type \<^ML_type>\<open>tactic\<close> represents tactics. The well-formedness conditions
described above need to be observed. See also \<^file>\<open>~~/src/Pure/General/seq.ML\<close>
for the underlying implementation of lazy sequences.
- \<^descr> Type @{ML_type "int -> tactic"} represents tactics with explicit subgoal
+ \<^descr> Type \<^ML_type>\<open>int -> tactic\<close> represents tactics with explicit subgoal
addressing, with well-formedness conditions as described above.
- \<^descr> @{ML no_tac} is a tactic that always fails, returning the empty sequence.
+ \<^descr> \<^ML>\<open>no_tac\<close> is a tactic that always fails, returning the empty sequence.
- \<^descr> @{ML all_tac} is a tactic that always succeeds, returning a singleton
+ \<^descr> \<^ML>\<open>all_tac\<close> is a tactic that always succeeds, returning a singleton
sequence with unchanged goal state.
- \<^descr> @{ML print_tac}~\<open>ctxt message\<close> is like @{ML all_tac}, but prints a message
+ \<^descr> \<^ML>\<open>print_tac\<close>~\<open>ctxt message\<close> is like \<^ML>\<open>all_tac\<close>, but prints a message
together with the goal state on the tracing channel.
- \<^descr> @{ML PRIMITIVE}~\<open>rule\<close> turns a primitive inference rule into a tactic with
- unique result. Exception @{ML THM} is considered a regular tactic failure
+ \<^descr> \<^ML>\<open>PRIMITIVE\<close>~\<open>rule\<close> turns a primitive inference rule into a tactic with
+ unique result. Exception \<^ML>\<open>THM\<close> is considered a regular tactic failure
and produces an empty result; other exceptions are passed through.
- \<^descr> @{ML SUBGOAL}~\<open>(fn (subgoal, i) => tactic)\<close> is the most basic form to
+ \<^descr> \<^ML>\<open>SUBGOAL\<close>~\<open>(fn (subgoal, i) => tactic)\<close> is the most basic form to
produce a tactic with subgoal addressing. The given abstraction over the
subgoal term and subgoal number allows to peek at the relevant information
of the full goal state. The subgoal range is checked as required above.
- \<^descr> @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the subgoal as
- @{ML_type cterm} instead of raw @{ML_type term}. This avoids expensive
+ \<^descr> \<^ML>\<open>CSUBGOAL\<close> is similar to \<^ML>\<open>SUBGOAL\<close>, but passes the subgoal as
+ \<^ML_type>\<open>cterm\<close> instead of raw \<^ML_type>\<open>term\<close>. This avoids expensive
re-certification in situations where the subgoal is used directly for
primitive inferences.
- \<^descr> @{ML SELECT_GOAL}~\<open>tac i\<close> confines a tactic to the specified subgoal \<open>i\<close>.
+ \<^descr> \<^ML>\<open>SELECT_GOAL\<close>~\<open>tac i\<close> confines a tactic to the specified subgoal \<open>i\<close>.
This rearranges subgoals and the main goal protection
(\secref{sec:tactical-goals}), while retaining the syntactic context of the
overall goal state (concerning schematic variables etc.).
- \<^descr> @{ML PREFER_GOAL}~\<open>tac i\<close> rearranges subgoals to put \<open>i\<close> in front. This is
- similar to @{ML SELECT_GOAL}, but without changing the main goal protection.
+ \<^descr> \<^ML>\<open>PREFER_GOAL\<close>~\<open>tac i\<close> rearranges subgoals to put \<open>i\<close> in front. This is
+ similar to \<^ML>\<open>SELECT_GOAL\<close>, but without changing the main goal protection.
\<close>
@@ -256,27 +256,26 @@
@{index_ML bimatch_tac: "Proof.context -> (bool * thm) list -> int -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML resolve_tac}~\<open>ctxt thms i\<close> refines the goal state using the given
+ \<^descr> \<^ML>\<open>resolve_tac\<close>~\<open>ctxt thms i\<close> refines the goal state using the given
theorems, which should normally be introduction rules. The tactic resolves a
rule's conclusion with subgoal \<open>i\<close>, replacing it by the corresponding
versions of the rule's premises.
- \<^descr> @{ML eresolve_tac}~\<open>ctxt thms i\<close> performs elim-resolution with the given
+ \<^descr> \<^ML>\<open>eresolve_tac\<close>~\<open>ctxt thms i\<close> performs elim-resolution with the given
theorems, which are normally be elimination rules.
- Note that @{ML_text "eresolve_tac ctxt [asm_rl]"} is equivalent to @{ML_text
- "assume_tac ctxt"}, which facilitates mixing of assumption steps with
+ Note that \<^ML_text>\<open>eresolve_tac ctxt [asm_rl]\<close> is equivalent to \<^ML_text>\<open>assume_tac ctxt\<close>, which facilitates mixing of assumption steps with
genuine eliminations.
- \<^descr> @{ML dresolve_tac}~\<open>ctxt thms i\<close> performs destruct-resolution with the
+ \<^descr> \<^ML>\<open>dresolve_tac\<close>~\<open>ctxt thms i\<close> performs destruct-resolution with the
given theorems, which should normally be destruction rules. This replaces an
assumption by the result of applying one of the rules.
- \<^descr> @{ML forward_tac} is like @{ML dresolve_tac} except that the selected
+ \<^descr> \<^ML>\<open>forward_tac\<close> is like \<^ML>\<open>dresolve_tac\<close> except that the selected
assumption is not deleted. It applies a rule to an assumption, adding the
result as a new assumption.
- \<^descr> @{ML biresolve_tac}~\<open>ctxt brls i\<close> refines the proof state by resolution or
+ \<^descr> \<^ML>\<open>biresolve_tac\<close>~\<open>ctxt brls i\<close> refines the proof state by resolution or
elim-resolution on each rule, as indicated by its flag. It affects subgoal
\<open>i\<close> of the proof state.
@@ -285,18 +284,18 @@
mixture of introduction and elimination rules, which is useful to organize
the search process systematically in proof tools.
- \<^descr> @{ML assume_tac}~\<open>ctxt i\<close> attempts to solve subgoal \<open>i\<close> by assumption
+ \<^descr> \<^ML>\<open>assume_tac\<close>~\<open>ctxt i\<close> attempts to solve subgoal \<open>i\<close> by assumption
(modulo higher-order unification).
- \<^descr> @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks only for
+ \<^descr> \<^ML>\<open>eq_assume_tac\<close> is similar to \<^ML>\<open>assume_tac\<close>, but checks only for
immediate \<open>\<alpha>\<close>-convertibility instead of using unification. It succeeds (with
a unique next state) if one of the assumptions is equal to the subgoal's
conclusion. Since it does not instantiate variables, it cannot make other
subgoals unprovable.
- \<^descr> @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML bimatch_tac}
- are similar to @{ML resolve_tac}, @{ML eresolve_tac}, @{ML dresolve_tac},
- and @{ML biresolve_tac}, respectively, but do not instantiate schematic
+ \<^descr> \<^ML>\<open>match_tac\<close>, \<^ML>\<open>ematch_tac\<close>, \<^ML>\<open>dmatch_tac\<close>, and \<^ML>\<open>bimatch_tac\<close>
+ are similar to \<^ML>\<open>resolve_tac\<close>, \<^ML>\<open>eresolve_tac\<close>, \<^ML>\<open>dresolve_tac\<close>,
+ and \<^ML>\<open>biresolve_tac\<close>, respectively, but do not instantiate schematic
variables in the goal state.\<^footnote>\<open>Strictly speaking, matching means to treat the
unknowns in the goal state as constants, but these tactics merely discard
unifiers that would update the goal state. In rare situations (where the
@@ -371,36 +370,36 @@
@{index_ML rename_tac: "string list -> int -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML Rule_Insts.res_inst_tac}~\<open>ctxt insts thm i\<close> instantiates the rule
+ \<^descr> \<^ML>\<open>Rule_Insts.res_inst_tac\<close>~\<open>ctxt insts thm i\<close> instantiates the rule
\<open>thm\<close> with the instantiations \<open>insts\<close>, as described above, and then performs
resolution on subgoal \<open>i\<close>.
- \<^descr> @{ML Rule_Insts.eres_inst_tac} is like @{ML Rule_Insts.res_inst_tac}, but
+ \<^descr> \<^ML>\<open>Rule_Insts.eres_inst_tac\<close> is like \<^ML>\<open>Rule_Insts.res_inst_tac\<close>, but
performs elim-resolution.
- \<^descr> @{ML Rule_Insts.dres_inst_tac} is like @{ML Rule_Insts.res_inst_tac}, but
+ \<^descr> \<^ML>\<open>Rule_Insts.dres_inst_tac\<close> is like \<^ML>\<open>Rule_Insts.res_inst_tac\<close>, but
performs destruct-resolution.
- \<^descr> @{ML Rule_Insts.forw_inst_tac} is like @{ML Rule_Insts.dres_inst_tac}
+ \<^descr> \<^ML>\<open>Rule_Insts.forw_inst_tac\<close> is like \<^ML>\<open>Rule_Insts.dres_inst_tac\<close>
except that the selected assumption is not deleted.
- \<^descr> @{ML Rule_Insts.subgoal_tac}~\<open>ctxt \<phi> i\<close> adds the proposition \<open>\<phi>\<close> as local
+ \<^descr> \<^ML>\<open>Rule_Insts.subgoal_tac\<close>~\<open>ctxt \<phi> i\<close> adds the proposition \<open>\<phi>\<close> as local
premise to subgoal \<open>i\<close>, and poses the same as a new subgoal \<open>i + 1\<close> (in the
original context).
- \<^descr> @{ML Rule_Insts.thin_tac}~\<open>ctxt \<phi> i\<close> deletes the specified premise from
+ \<^descr> \<^ML>\<open>Rule_Insts.thin_tac\<close>~\<open>ctxt \<phi> i\<close> deletes the specified premise from
subgoal \<open>i\<close>. Note that \<open>\<phi>\<close> may contain schematic variables, to abbreviate
the intended proposition; the first matching subgoal premise will be
deleted. Removing useless premises from a subgoal increases its readability
and can make search tactics run faster.
- \<^descr> @{ML rename_tac}~\<open>names i\<close> renames the innermost parameters of subgoal \<open>i\<close>
+ \<^descr> \<^ML>\<open>rename_tac\<close>~\<open>names i\<close> renames the innermost parameters of subgoal \<open>i\<close>
according to the provided \<open>names\<close> (which need to be distinct identifiers).
For historical reasons, the above instantiation tactics take unparsed string
arguments, which makes them hard to use in general ML code. The slightly
- more advanced @{ML Subgoal.FOCUS} combinator of \secref{sec:struct-goals}
+ more advanced \<^ML>\<open>Subgoal.FOCUS\<close> combinator of \secref{sec:struct-goals}
allows to refer to internal goal structure with explicit context management.
\<close>
@@ -421,20 +420,20 @@
@{index_ML flexflex_tac: "Proof.context -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML rotate_tac}~\<open>n i\<close> rotates the premises of subgoal \<open>i\<close> by \<open>n\<close>
+ \<^descr> \<^ML>\<open>rotate_tac\<close>~\<open>n i\<close> rotates the premises of subgoal \<open>i\<close> by \<open>n\<close>
positions: from right to left if \<open>n\<close> is positive, and from left to right if
\<open>n\<close> is negative.
- \<^descr> @{ML distinct_subgoals_tac} removes duplicate subgoals from a proof state.
+ \<^descr> \<^ML>\<open>distinct_subgoals_tac\<close> removes duplicate subgoals from a proof state.
This is potentially inefficient.
- \<^descr> @{ML flexflex_tac} removes all flex-flex pairs from the proof state by
+ \<^descr> \<^ML>\<open>flexflex_tac\<close> removes all flex-flex pairs from the proof state by
applying the trivial unifier. This drastic step loses information. It is
already part of the Isar infrastructure for facts resulting from goals, and
rarely needs to be invoked manually.
Flex-flex constraints arise from difficult cases of higher-order
- unification. To prevent this, use @{ML Rule_Insts.res_inst_tac} to
+ unification. To prevent this, use \<^ML>\<open>Rule_Insts.res_inst_tac\<close> to
instantiate some variables in a rule. Normally flex-flex constraints can be
ignored; they often disappear as unknowns get instantiated.
\<close>
@@ -456,17 +455,17 @@
@{index_ML_op COMP: "thm * thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML compose_tac}~\<open>ctxt (flag, rule, m) i\<close> refines subgoal \<open>i\<close> using
+ \<^descr> \<^ML>\<open>compose_tac\<close>~\<open>ctxt (flag, rule, m) i\<close> refines subgoal \<open>i\<close> using
\<open>rule\<close>, without lifting. The \<open>rule\<close> is taken to have the form \<open>\<psi>\<^sub>1 \<Longrightarrow> \<dots> \<psi>\<^sub>m \<Longrightarrow>
\<psi>\<close>, where \<open>\<psi>\<close> need not be atomic; thus \<open>m\<close> determines the number of new
subgoals. If \<open>flag\<close> is \<open>true\<close> then it performs elim-resolution --- it solves
the first premise of \<open>rule\<close> by assumption and deletes that assumption.
- \<^descr> @{ML Drule.compose}~\<open>(thm\<^sub>1, i, thm\<^sub>2)\<close> uses \<open>thm\<^sub>1\<close>, regarded as an
+ \<^descr> \<^ML>\<open>Drule.compose\<close>~\<open>(thm\<^sub>1, i, thm\<^sub>2)\<close> uses \<open>thm\<^sub>1\<close>, regarded as an
atomic formula, to solve premise \<open>i\<close> of \<open>thm\<^sub>2\<close>. Let \<open>thm\<^sub>1\<close> and \<open>thm\<^sub>2\<close> be
\<open>\<psi>\<close> and \<open>\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>\<close>. The unique \<open>s\<close> that unifies \<open>\<psi>\<close> and \<open>\<phi>\<^sub>i\<close> yields
the theorem \<open>(\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>i\<^sub>-\<^sub>1 \<Longrightarrow> \<phi>\<^sub>i\<^sub>+\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>)s\<close>. Multiple results are
- considered as error (exception @{ML THM}).
+ considered as error (exception \<^ML>\<open>THM\<close>).
\<^descr> \<open>thm\<^sub>1 COMP thm\<^sub>2\<close> is the same as \<open>Drule.compose (thm\<^sub>1, 1, thm\<^sub>2)\<close>.
@@ -495,9 +494,9 @@
text \<open>
Sequential composition and alternative choices are the most basic ways to
combine tactics, similarly to ``\<^verbatim>\<open>,\<close>'' and ``\<^verbatim>\<open>|\<close>'' in Isar method notation.
- This corresponds to @{ML_op "THEN"} and @{ML_op "ORELSE"} in ML, but there
+ This corresponds to \<^ML_op>\<open>THEN\<close> and \<^ML_op>\<open>ORELSE\<close> in ML, but there
are further possibilities for fine-tuning alternation of tactics such as
- @{ML_op "APPEND"}. Further details become visible in ML due to explicit
+ \<^ML_op>\<open>APPEND\<close>. Further details become visible in ML due to explicit
subgoal addressing.
\<close>
@@ -516,34 +515,32 @@
@{index_ML "FIRST'": "('a -> tactic) list -> 'a -> tactic"} \\
\end{mldecls}
- \<^descr> \<open>tac\<^sub>1\<close>~@{ML_op THEN}~\<open>tac\<^sub>2\<close> is the sequential composition of \<open>tac\<^sub>1\<close> and
+ \<^descr> \<open>tac\<^sub>1\<close>~\<^ML_op>\<open>THEN\<close>~\<open>tac\<^sub>2\<close> is the sequential composition of \<open>tac\<^sub>1\<close> and
\<open>tac\<^sub>2\<close>. Applied to a goal state, it returns all states reachable in two
steps by applying \<open>tac\<^sub>1\<close> followed by \<open>tac\<^sub>2\<close>. First, it applies \<open>tac\<^sub>1\<close> to
the goal state, getting a sequence of possible next states; then, it applies
\<open>tac\<^sub>2\<close> to each of these and concatenates the results to produce again one
flat sequence of states.
- \<^descr> \<open>tac\<^sub>1\<close>~@{ML_op ORELSE}~\<open>tac\<^sub>2\<close> makes a choice between \<open>tac\<^sub>1\<close> and
+ \<^descr> \<open>tac\<^sub>1\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>tac\<^sub>2\<close> makes a choice between \<open>tac\<^sub>1\<close> and
\<open>tac\<^sub>2\<close>. Applied to a state, it tries \<open>tac\<^sub>1\<close> and returns the result if
non-empty; if \<open>tac\<^sub>1\<close> fails then it uses \<open>tac\<^sub>2\<close>. This is a deterministic
choice: if \<open>tac\<^sub>1\<close> succeeds then \<open>tac\<^sub>2\<close> is excluded from the result.
- \<^descr> \<open>tac\<^sub>1\<close>~@{ML_op APPEND}~\<open>tac\<^sub>2\<close> concatenates the possible results of
- \<open>tac\<^sub>1\<close> and \<open>tac\<^sub>2\<close>. Unlike @{ML_op "ORELSE"} there is \<^emph>\<open>no commitment\<close> to
- either tactic, so @{ML_op "APPEND"} helps to avoid incompleteness during
+ \<^descr> \<open>tac\<^sub>1\<close>~\<^ML_op>\<open>APPEND\<close>~\<open>tac\<^sub>2\<close> concatenates the possible results of
+ \<open>tac\<^sub>1\<close> and \<open>tac\<^sub>2\<close>. Unlike \<^ML_op>\<open>ORELSE\<close> there is \<^emph>\<open>no commitment\<close> to
+ either tactic, so \<^ML_op>\<open>APPEND\<close> helps to avoid incompleteness during
search, at the cost of potential inefficiencies.
- \<^descr> @{ML EVERY}~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>n]\<close> abbreviates \<open>tac\<^sub>1\<close>~@{ML_op
- THEN}~\<open>\<dots>\<close>~@{ML_op THEN}~\<open>tac\<^sub>n\<close>. Note that @{ML "EVERY []"} is the same as
- @{ML all_tac}: it always succeeds.
+ \<^descr> \<^ML>\<open>EVERY\<close>~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>n]\<close> abbreviates \<open>tac\<^sub>1\<close>~\<^ML_op>\<open>THEN\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>THEN\<close>~\<open>tac\<^sub>n\<close>. Note that \<^ML>\<open>EVERY []\<close> is the same as
+ \<^ML>\<open>all_tac\<close>: it always succeeds.
- \<^descr> @{ML FIRST}~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>n]\<close> abbreviates \<open>tac\<^sub>1\<close>~@{ML_op
- ORELSE}~\<open>\<dots>\<close>~@{ML_op "ORELSE"}~\<open>tac\<^sub>n\<close>. Note that @{ML "FIRST []"} is the
- same as @{ML no_tac}: it always fails.
+ \<^descr> \<^ML>\<open>FIRST\<close>~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>n]\<close> abbreviates \<open>tac\<^sub>1\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>tac\<^sub>n\<close>. Note that \<^ML>\<open>FIRST []\<close> is the
+ same as \<^ML>\<open>no_tac\<close>: it always fails.
- \<^descr> @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for tactics
- with explicit subgoal addressing. So \<open>(tac\<^sub>1\<close>~@{ML_op THEN'}~\<open>tac\<^sub>2) i\<close> is
- the same as \<open>(tac\<^sub>1 i\<close>~@{ML_op THEN}~\<open>tac\<^sub>2 i)\<close>.
+ \<^descr> \<^ML_op>\<open>THEN'\<close> is the lifted version of \<^ML_op>\<open>THEN\<close>, for tactics
+ with explicit subgoal addressing. So \<open>(tac\<^sub>1\<close>~\<^ML_op>\<open>THEN'\<close>~\<open>tac\<^sub>2) i\<close> is
+ the same as \<open>(tac\<^sub>1 i\<close>~\<^ML_op>\<open>THEN\<close>~\<open>tac\<^sub>2 i)\<close>.
The other primed tacticals work analogously.
\<close>
@@ -565,43 +562,42 @@
@{index_ML "REPEAT_DETERM_N": "int -> tactic -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML TRY}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns the resulting
+ \<^descr> \<^ML>\<open>TRY\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns the resulting
sequence, if non-empty; otherwise it returns the original state. Thus, it
applies \<open>tac\<close> at most once.
Note that for tactics with subgoal addressing, the combinator can be applied
- via functional composition: @{ML "TRY"}~@{ML_op o}~\<open>tac\<close>. There is no need
+ via functional composition: \<^ML>\<open>TRY\<close>~\<^ML_op>\<open>o\<close>~\<open>tac\<close>. There is no need
for \<^verbatim>\<open>TRY'\<close>.
- \<^descr> @{ML REPEAT}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and, recursively, to
+ \<^descr> \<^ML>\<open>REPEAT\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and, recursively, to
each element of the resulting sequence. The resulting sequence consists of
those states that make \<open>tac\<close> fail. Thus, it applies \<open>tac\<close> as many times as
possible (including zero times), and allows backtracking over each
- invocation of \<open>tac\<close>. @{ML REPEAT} is more general than @{ML REPEAT_DETERM},
+ invocation of \<open>tac\<close>. \<^ML>\<open>REPEAT\<close> is more general than \<^ML>\<open>REPEAT_DETERM\<close>,
but requires more space.
- \<^descr> @{ML REPEAT1}~\<open>tac\<close> is like @{ML REPEAT}~\<open>tac\<close> but it always applies \<open>tac\<close>
+ \<^descr> \<^ML>\<open>REPEAT1\<close>~\<open>tac\<close> is like \<^ML>\<open>REPEAT\<close>~\<open>tac\<close> but it always applies \<open>tac\<close>
at least once, failing if this is impossible.
- \<^descr> @{ML REPEAT_DETERM}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and,
+ \<^descr> \<^ML>\<open>REPEAT_DETERM\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and,
recursively, to the head of the resulting sequence. It returns the first
state to make \<open>tac\<close> fail. It is deterministic, discarding alternative
outcomes.
- \<^descr> @{ML REPEAT_DETERM_N}~\<open>n tac\<close> is like @{ML REPEAT_DETERM}~\<open>tac\<close> but the
- number of repetitions is bound by \<open>n\<close> (where @{ML "~1"} means \<open>\<infinity>\<close>).
+ \<^descr> \<^ML>\<open>REPEAT_DETERM_N\<close>~\<open>n tac\<close> is like \<^ML>\<open>REPEAT_DETERM\<close>~\<open>tac\<close> but the
+ number of repetitions is bound by \<open>n\<close> (where \<^ML>\<open>~1\<close> means \<open>\<infinity>\<close>).
\<close>
text %mlex \<open>
The basic tactics and tacticals considered above follow some algebraic laws:
- \<^item> @{ML all_tac} is the identity element of the tactical @{ML_op "THEN"}.
+ \<^item> \<^ML>\<open>all_tac\<close> is the identity element of the tactical \<^ML_op>\<open>THEN\<close>.
- \<^item> @{ML no_tac} is the identity element of @{ML_op "ORELSE"} and @{ML_op
- "APPEND"}. Also, it is a zero element for @{ML_op "THEN"}, which means that
- \<open>tac\<close>~@{ML_op THEN}~@{ML no_tac} is equivalent to @{ML no_tac}.
+ \<^item> \<^ML>\<open>no_tac\<close> is the identity element of \<^ML_op>\<open>ORELSE\<close> and \<^ML_op>\<open>APPEND\<close>. Also, it is a zero element for \<^ML_op>\<open>THEN\<close>, which means that
+ \<open>tac\<close>~\<^ML_op>\<open>THEN\<close>~\<^ML>\<open>no_tac\<close> is equivalent to \<^ML>\<open>no_tac\<close>.
- \<^item> @{ML TRY} and @{ML REPEAT} can be expressed as (recursive) functions over
+ \<^item> \<^ML>\<open>TRY\<close> and \<^ML>\<open>REPEAT\<close> can be expressed as (recursive) functions over
more basic combinators (ignoring some internal implementation tricks):
\<close>
@@ -611,15 +607,14 @@
\<close>
text \<open>
- If \<open>tac\<close> can return multiple outcomes then so can @{ML REPEAT}~\<open>tac\<close>. @{ML
- REPEAT} uses @{ML_op "ORELSE"} and not @{ML_op "APPEND"}, it applies \<open>tac\<close>
+ If \<open>tac\<close> can return multiple outcomes then so can \<^ML>\<open>REPEAT\<close>~\<open>tac\<close>. \<^ML>\<open>REPEAT\<close> uses \<^ML_op>\<open>ORELSE\<close> and not \<^ML_op>\<open>APPEND\<close>, it applies \<open>tac\<close>
as many times as possible in each outcome.
\begin{warn}
Note the explicit abstraction over the goal state in the ML definition of
- @{ML REPEAT}. Recursive tacticals must be coded in this awkward fashion to
+ \<^ML>\<open>REPEAT\<close>. Recursive tacticals must be coded in this awkward fashion to
avoid infinite recursion of eager functional evaluation in Standard ML. The
- following attempt would make @{ML REPEAT}~\<open>tac\<close> loop:
+ following attempt would make \<^ML>\<open>REPEAT\<close>~\<open>tac\<close> loop:
\end{warn}
\<close>
@@ -632,7 +627,7 @@
subsection \<open>Applying tactics to subgoal ranges\<close>
text \<open>
- Tactics with explicit subgoal addressing @{ML_type "int -> tactic"} can be
+ Tactics with explicit subgoal addressing \<^ML_type>\<open>int -> tactic\<close> can be
used together with tacticals that act like ``subgoal quantifiers'': guided
by success of the body tactic a certain range of subgoals is covered. Thus
the body tactic is applied to \<^emph>\<open>all\<close> subgoals, \<^emph>\<open>some\<close> subgoal etc.
@@ -655,26 +650,23 @@
@{index_ML RANGE: "(int -> tactic) list -> int -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML ALLGOALS}~\<open>tac\<close> is equivalent to \<open>tac n\<close>~@{ML_op THEN}~\<open>\<dots>\<close>~@{ML_op
- THEN}~\<open>tac 1\<close>. It applies the \<open>tac\<close> to all the subgoals, counting downwards.
+ \<^descr> \<^ML>\<open>ALLGOALS\<close>~\<open>tac\<close> is equivalent to \<open>tac n\<close>~\<^ML_op>\<open>THEN\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>THEN\<close>~\<open>tac 1\<close>. It applies the \<open>tac\<close> to all the subgoals, counting downwards.
+
+ \<^descr> \<^ML>\<open>SOMEGOAL\<close>~\<open>tac\<close> is equivalent to \<open>tac n\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>tac 1\<close>. It applies \<open>tac\<close> to one subgoal, counting downwards.
- \<^descr> @{ML SOMEGOAL}~\<open>tac\<close> is equivalent to \<open>tac n\<close>~@{ML_op ORELSE}~\<open>\<dots>\<close>~@{ML_op
- ORELSE}~\<open>tac 1\<close>. It applies \<open>tac\<close> to one subgoal, counting downwards.
+ \<^descr> \<^ML>\<open>FIRSTGOAL\<close>~\<open>tac\<close> is equivalent to \<open>tac 1\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>ORELSE\<close>~\<open>tac n\<close>. It applies \<open>tac\<close> to one subgoal, counting upwards.
- \<^descr> @{ML FIRSTGOAL}~\<open>tac\<close> is equivalent to \<open>tac 1\<close>~@{ML_op ORELSE}~\<open>\<dots>\<close>~@{ML_op
- ORELSE}~\<open>tac n\<close>. It applies \<open>tac\<close> to one subgoal, counting upwards.
-
- \<^descr> @{ML HEADGOAL}~\<open>tac\<close> is equivalent to \<open>tac 1\<close>. It applies \<open>tac\<close>
+ \<^descr> \<^ML>\<open>HEADGOAL\<close>~\<open>tac\<close> is equivalent to \<open>tac 1\<close>. It applies \<open>tac\<close>
unconditionally to the first subgoal.
- \<^descr> @{ML REPEAT_SOME}~\<open>tac\<close> applies \<open>tac\<close> once or more to a subgoal, counting
+ \<^descr> \<^ML>\<open>REPEAT_SOME\<close>~\<open>tac\<close> applies \<open>tac\<close> once or more to a subgoal, counting
downwards.
- \<^descr> @{ML REPEAT_FIRST}~\<open>tac\<close> applies \<open>tac\<close> once or more to a subgoal, counting
+ \<^descr> \<^ML>\<open>REPEAT_FIRST\<close>~\<open>tac\<close> applies \<open>tac\<close> once or more to a subgoal, counting
upwards.
- \<^descr> @{ML RANGE}~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>k] i\<close> is equivalent to \<open>tac\<^sub>k (i + k -
- 1)\<close>~@{ML_op THEN}~\<open>\<dots>\<close>~@{ML_op THEN}~\<open>tac\<^sub>1 i\<close>. It applies the given list of
+ \<^descr> \<^ML>\<open>RANGE\<close>~\<open>[tac\<^sub>1, \<dots>, tac\<^sub>k] i\<close> is equivalent to \<open>tac\<^sub>k (i + k -
+ 1)\<close>~\<^ML_op>\<open>THEN\<close>~\<open>\<dots>\<close>~\<^ML_op>\<open>THEN\<close>~\<open>tac\<^sub>1 i\<close>. It applies the given list of
tactics to the corresponding range of subgoals, counting downwards.
\<close>
@@ -682,7 +674,7 @@
subsection \<open>Control and search tacticals\<close>
text \<open>
- A predicate on theorems @{ML_type "thm -> bool"} can test whether a goal
+ A predicate on theorems \<^ML_type>\<open>thm -> bool\<close> can test whether a goal
state enjoys some desirable property --- such as having no subgoals. Tactics
that search for satisfactory goal states are easy to express. The main
search procedures, depth-first, breadth-first and best-first, are provided
@@ -701,13 +693,12 @@
@{index_ML CHANGED: "tactic -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML FILTER}~\<open>sat tac\<close> applies \<open>tac\<close> to the goal state and returns a
+ \<^descr> \<^ML>\<open>FILTER\<close>~\<open>sat tac\<close> applies \<open>tac\<close> to the goal state and returns a
sequence consisting of those result goal states that are satisfactory in the
sense of \<open>sat\<close>.
- \<^descr> @{ML CHANGED}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns precisely
- those states that differ from the original state (according to @{ML
- Thm.eq_thm}). Thus @{ML CHANGED}~\<open>tac\<close> always has some effect on the state.
+ \<^descr> \<^ML>\<open>CHANGED\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns precisely
+ those states that differ from the original state (according to \<^ML>\<open>Thm.eq_thm\<close>). Thus \<^ML>\<open>CHANGED\<close>~\<open>tac\<close> always has some effect on the state.
\<close>
@@ -720,15 +711,15 @@
@{index_ML DEPTH_SOLVE_1: "tactic -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML DEPTH_FIRST}~\<open>sat tac\<close> returns the goal state if \<open>sat\<close> returns true.
+ \<^descr> \<^ML>\<open>DEPTH_FIRST\<close>~\<open>sat tac\<close> returns the goal state if \<open>sat\<close> returns true.
Otherwise it applies \<open>tac\<close>, then recursively searches from each element of
the resulting sequence. The code uses a stack for efficiency, in effect
- applying \<open>tac\<close>~@{ML_op THEN}~@{ML DEPTH_FIRST}~\<open>sat tac\<close> to the state.
+ applying \<open>tac\<close>~\<^ML_op>\<open>THEN\<close>~\<^ML>\<open>DEPTH_FIRST\<close>~\<open>sat tac\<close> to the state.
- \<^descr> @{ML DEPTH_SOLVE}\<open>tac\<close> uses @{ML DEPTH_FIRST} to search for states having
+ \<^descr> \<^ML>\<open>DEPTH_SOLVE\<close>\<open>tac\<close> uses \<^ML>\<open>DEPTH_FIRST\<close> to search for states having
no subgoals.
- \<^descr> @{ML DEPTH_SOLVE_1}~\<open>tac\<close> uses @{ML DEPTH_FIRST} to search for states
+ \<^descr> \<^ML>\<open>DEPTH_SOLVE_1\<close>~\<open>tac\<close> uses \<^ML>\<open>DEPTH_FIRST\<close> to search for states
having fewer subgoals than the given state. Thus, it insists upon solving at
least one subgoal.
\<close>
@@ -747,21 +738,21 @@
not enumerate all solutions; they terminate after the first satisfactory
result from \<open>tac\<close>.
- \<^descr> @{ML BREADTH_FIRST}~\<open>sat tac\<close> uses breadth-first search to find states for
+ \<^descr> \<^ML>\<open>BREADTH_FIRST\<close>~\<open>sat tac\<close> uses breadth-first search to find states for
which \<open>sat\<close> is true. For most applications, it is too slow.
- \<^descr> @{ML BEST_FIRST}~\<open>(sat, dist) tac\<close> does a heuristic search, using \<open>dist\<close>
+ \<^descr> \<^ML>\<open>BEST_FIRST\<close>~\<open>(sat, dist) tac\<close> does a heuristic search, using \<open>dist\<close>
to estimate the distance from a satisfactory state (in the sense of \<open>sat\<close>).
It maintains a list of states ordered by distance. It applies \<open>tac\<close> to the
head of this list; if the result contains any satisfactory states, then it
- returns them. Otherwise, @{ML BEST_FIRST} adds the new states to the list,
+ returns them. Otherwise, \<^ML>\<open>BEST_FIRST\<close> adds the new states to the list,
and continues.
- The distance function is typically @{ML size_of_thm}, which computes the
+ The distance function is typically \<^ML>\<open>size_of_thm\<close>, which computes the
size of the state. The smaller the state, the fewer and simpler subgoals it
has.
- \<^descr> @{ML THEN_BEST_FIRST}~\<open>tac\<^sub>0 (sat, dist) tac\<close> is like @{ML BEST_FIRST},
+ \<^descr> \<^ML>\<open>THEN_BEST_FIRST\<close>~\<open>tac\<^sub>0 (sat, dist) tac\<close> is like \<^ML>\<open>BEST_FIRST\<close>,
except that the priority queue initially contains the result of applying
\<open>tac\<^sub>0\<close> to the goal state. This tactical permits separate tactics for
starting the search and continuing the search.
@@ -778,21 +769,21 @@
@{index_ML DETERM: "tactic -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML COND}~\<open>sat tac\<^sub>1 tac\<^sub>2\<close> applies \<open>tac\<^sub>1\<close> to the goal state if it
+ \<^descr> \<^ML>\<open>COND\<close>~\<open>sat tac\<^sub>1 tac\<^sub>2\<close> applies \<open>tac\<^sub>1\<close> to the goal state if it
satisfies predicate \<open>sat\<close>, and applies \<open>tac\<^sub>2\<close>. It is a conditional tactical
in that only one of \<open>tac\<^sub>1\<close> and \<open>tac\<^sub>2\<close> is applied to a goal state. However,
both \<open>tac\<^sub>1\<close> and \<open>tac\<^sub>2\<close> are evaluated because ML uses eager evaluation.
- \<^descr> @{ML IF_UNSOLVED}~\<open>tac\<close> applies \<open>tac\<close> to the goal state if it has any
+ \<^descr> \<^ML>\<open>IF_UNSOLVED\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state if it has any
subgoals, and simply returns the goal state otherwise. Many common tactics,
- such as @{ML resolve_tac}, fail if applied to a goal state that has no
+ such as \<^ML>\<open>resolve_tac\<close>, fail if applied to a goal state that has no
subgoals.
- \<^descr> @{ML SOLVE}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and then fails iff there
+ \<^descr> \<^ML>\<open>SOLVE\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and then fails iff there
are subgoals left.
- \<^descr> @{ML DETERM}~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns the head of
- the resulting sequence. @{ML DETERM} limits the search space by making its
+ \<^descr> \<^ML>\<open>DETERM\<close>~\<open>tac\<close> applies \<open>tac\<close> to the goal state and returns the head of
+ the resulting sequence. \<^ML>\<open>DETERM\<close> limits the search space by making its
argument deterministic.
\<close>
@@ -807,20 +798,20 @@
@{index_ML size_of_thm: "thm -> int"} \\
\end{mldecls}
- \<^descr> @{ML has_fewer_prems}~\<open>n thm\<close> reports whether \<open>thm\<close> has fewer than \<open>n\<close>
+ \<^descr> \<^ML>\<open>has_fewer_prems\<close>~\<open>n thm\<close> reports whether \<open>thm\<close> has fewer than \<open>n\<close>
premises.
- \<^descr> @{ML Thm.eq_thm}~\<open>(thm\<^sub>1, thm\<^sub>2)\<close> reports whether \<open>thm\<^sub>1\<close> and \<open>thm\<^sub>2\<close> are
+ \<^descr> \<^ML>\<open>Thm.eq_thm\<close>~\<open>(thm\<^sub>1, thm\<^sub>2)\<close> reports whether \<open>thm\<^sub>1\<close> and \<open>thm\<^sub>2\<close> are
equal. Both theorems must have the same conclusions, the same set of
hypotheses, and the same set of sort hypotheses. Names of bound variables
are ignored as usual.
- \<^descr> @{ML Thm.eq_thm_prop}~\<open>(thm\<^sub>1, thm\<^sub>2)\<close> reports whether the propositions of
+ \<^descr> \<^ML>\<open>Thm.eq_thm_prop\<close>~\<open>(thm\<^sub>1, thm\<^sub>2)\<close> reports whether the propositions of
\<open>thm\<^sub>1\<close> and \<open>thm\<^sub>2\<close> are equal. Names of bound variables are ignored.
- \<^descr> @{ML size_of_thm}~\<open>thm\<close> computes the size of \<open>thm\<close>, namely the number of
+ \<^descr> \<^ML>\<open>size_of_thm\<close>~\<open>thm\<close> computes the size of \<open>thm\<close>, namely the number of
variables, constants and abstractions in its conclusion. It may serve as a
- distance function for @{ML BEST_FIRST}.
+ distance function for \<^ML>\<open>BEST_FIRST\<close>.
\<close>
end
--- a/src/Doc/Isar_Ref/Document_Preparation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -47,12 +47,12 @@
Note that formal comments (\secref{sec:comments}) are similar to markup
commands, but have a different status within Isabelle/Isar syntax.
- @{rail \<open>
+ \<^rail>\<open>
(@@{command chapter} | @@{command section} | @@{command subsection} |
@@{command subsubsection} | @@{command paragraph} | @@{command subparagraph})
@{syntax text} ';'? |
(@@{command text} | @@{command txt} | @@{command text_raw}) @{syntax text}
- \<close>}
+ \<close>
\<^descr> @{command chapter}, @{command section}, @{command subsection} etc.\ mark
section headings within the theory source. This works in any context, even
@@ -147,7 +147,7 @@
\begingroup
\def\isasymcontrolstart{\isatt{\isacharbackslash\isacharless\isacharcircum}}
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def antiquotation}:
'@{' antiquotation_body '}' |
'\<controlstart>' @{syntax_ref name} '>' @{syntax_ref cartouche} |
@@ -157,14 +157,14 @@
;
option: @{syntax name} | @{syntax name} '=' @{syntax name}
;
- \<close>}
+ \<close>
\endgroup
Note that the syntax of antiquotations may \<^emph>\<open>not\<close> include source comments
\<^verbatim>\<open>(*\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>*)\<close> nor verbatim text \<^verbatim>\<open>{*\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>*}\<close>.
%% FIXME less monolithic presentation, move to individual sections!?
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def antiquotation_body}:
(@@{antiquotation text} | @@{antiquotation cartouche} | @@{antiquotation theory_text})
options @{syntax text} |
@@ -210,7 +210,7 @@
style: (@{syntax name} +)
;
@@{command print_antiquotations} ('!'?)
- \<close>}
+ \<close>
\<^descr> \<open>@{text s}\<close> prints uninterpreted source text \<open>s\<close>, i.e.\ inner syntax. This
is particularly useful to print portions of text according to the Isabelle
@@ -472,11 +472,11 @@
Each Isabelle/Isar command may be decorated by additional presentation tags,
to indicate some modification in the way it is printed in the document.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def tags}: ( tag * )
;
tag: '%' (@{syntax short_ident} | @{syntax string})
- \<close>}
+ \<close>
Some tags are pre-declared for certain classes of commands, serving as
default markup if no tags are given in the text:
@@ -523,9 +523,9 @@
@{antiquotation_def "rail"} & : & \<open>antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
'rail' @{syntax text}
- \<close>}
+ \<close>
The @{antiquotation rail} antiquotation allows to include syntax diagrams
into Isabelle documents. {\LaTeX} requires the style file
@@ -537,7 +537,7 @@
\begingroup
\def\isasymnewline{\isatt{\isacharbackslash\isacharless newline\isachargreater}}
- @{rail \<open>
+ \<^rail>\<open>
rule? + ';'
;
rule: ((identifier | @{syntax antiquotation}) ':')? body
@@ -549,7 +549,7 @@
atom: '(' body? ')' | identifier |
'@'? (string | @{syntax antiquotation}) |
'\<newline>'
- \<close>}
+ \<close>
\endgroup
The lexical syntax of \<open>identifier\<close> coincides with that of @{syntax
@@ -563,59 +563,59 @@
\<^item> Empty \<^verbatim>\<open>()\<close>
- @{rail \<open>()\<close>}
+ \<^rail>\<open>()\<close>
\<^item> Nonterminal \<^verbatim>\<open>A\<close>
- @{rail \<open>A\<close>}
+ \<^rail>\<open>A\<close>
\<^item> Nonterminal via Isabelle antiquotation \<^verbatim>\<open>@{syntax method}\<close>
- @{rail \<open>@{syntax method}\<close>}
+ \<^rail>\<open>@{syntax method}\<close>
\<^item> Terminal \<^verbatim>\<open>'xyz'\<close>
- @{rail \<open>'xyz'\<close>}
+ \<^rail>\<open>'xyz'\<close>
\<^item> Terminal in keyword style \<^verbatim>\<open>@'xyz'\<close>
- @{rail \<open>@'xyz'\<close>}
+ \<^rail>\<open>@'xyz'\<close>
\<^item> Terminal via Isabelle antiquotation \<^verbatim>\<open>@@{method rule}\<close>
- @{rail \<open>@@{method rule}\<close>}
+ \<^rail>\<open>@@{method rule}\<close>
\<^item> Concatenation \<^verbatim>\<open>A B C\<close>
- @{rail \<open>A B C\<close>}
+ \<^rail>\<open>A B C\<close>
\<^item> Newline inside concatenation \<^verbatim>\<open>A B C \<newline> D E F\<close>
- @{rail \<open>A B C \<newline> D E F\<close>}
+ \<^rail>\<open>A B C \<newline> D E F\<close>
\<^item> Variants \<^verbatim>\<open>A | B | C\<close>
- @{rail \<open>A | B | C\<close>}
+ \<^rail>\<open>A | B | C\<close>
\<^item> Option \<^verbatim>\<open>A ?\<close>
- @{rail \<open>A ?\<close>}
+ \<^rail>\<open>A ?\<close>
\<^item> Repetition \<^verbatim>\<open>A *\<close>
- @{rail \<open>A *\<close>}
+ \<^rail>\<open>A *\<close>
\<^item> Repetition with separator \<^verbatim>\<open>A * sep\<close>
- @{rail \<open>A * sep\<close>}
+ \<^rail>\<open>A * sep\<close>
\<^item> Strict repetition \<^verbatim>\<open>A +\<close>
- @{rail \<open>A +\<close>}
+ \<^rail>\<open>A +\<close>
\<^item> Strict repetition with separator \<^verbatim>\<open>A + sep\<close>
- @{rail \<open>A + sep\<close>}
+ \<^rail>\<open>A + sep\<close>
\<close>
end
--- a/src/Doc/Isar_Ref/Framework.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Framework.thy Sat Jan 05 17:24:33 2019 +0100
@@ -359,7 +359,7 @@
Multiple parameters and premises are represented by repeating these
connectives in a right-associative manner.
- Thanks to the Pure theorem @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"} the
+ Thanks to the Pure theorem \<^prop>\<open>(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)\<close> the
connectives \<open>\<And>\<close> and \<open>\<Longrightarrow>\<close> commute. So we may assume w.l.o.g.\ that rule
statements always observe the normal form where quantifiers are pulled in
front of implications at each level of nesting. This means that any Pure
@@ -372,14 +372,14 @@
For example, the \<open>\<inter>\<close>-introduction rule encountered before is represented as
a Pure theorem as follows:
\[
- \<open>IntI:\<close>~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
+ \<open>IntI:\<close>~\<^prop>\<open>x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B\<close>
\]
This is a plain Horn clause, since no further nesting on the left is
involved. The general \<open>\<Inter>\<close>-introduction corresponds to a Hereditary Harrop
Formula with one additional level of nesting:
\[
- \<open>InterI:\<close>~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
+ \<open>InterI:\<close>~\<^prop>\<open>(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>\<close>
\]
\<^medskip>
@@ -740,7 +740,7 @@
A simple statement consists of named propositions. The full form admits
local context elements followed by the actual conclusions, such as ``\<^theory_text>\<open>fixes
x assumes A x shows B x\<close>''. The final result emerges as a Pure rule after
- discharging the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
+ discharging the context: \<^prop>\<open>\<And>x. A x \<Longrightarrow> B x\<close>.
The \<^theory_text>\<open>obtains\<close> variant is another abbreviation defined below; unlike
\<^theory_text>\<open>obtain\<close> (cf.\ \secref{sec:framework-context}) there may be several
@@ -974,7 +974,7 @@
"Bauer-Wenzel:2001"}.
The generic calculational mechanism is based on the observation that rules
- such as \<open>trans:\<close>~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"} proceed from the premises
+ such as \<open>trans:\<close>~\<^prop>\<open>x = y \<Longrightarrow> y = z \<Longrightarrow> x = z\<close> proceed from the premises
towards the conclusion in a deterministic fashion. Thus we may reason in
forward mode, feeding intermediate results into rules selected from the
context. The course of reasoning is organized by maintaining a secondary
@@ -1021,7 +1021,7 @@
exact correspondence is dependent on the transitivity rules being involved.
\<^medskip>
- Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like transitivities with
+ Symmetry rules such as \<^prop>\<open>x = y \<Longrightarrow> y = x\<close> are like transitivities with
only one premise. Isar maintains a separate rule collection declared via the
@{attribute sym} attribute, to be used in fact expressions ``\<open>a
[symmetric]\<close>'', or single-step proofs ``\<^theory_text>\<open>assume "x = y" then have "y = x"
--- a/src/Doc/Isar_Ref/Generic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Generic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,8 +10,7 @@
text \<open>
Isabelle/Pure maintains a record of named configuration options within the
- theory or proof context, with values of type @{ML_type bool}, @{ML_type
- int}, @{ML_type real}, or @{ML_type string}. Tools may declare options in
+ theory or proof context, with values of type \<^ML_type>\<open>bool\<close>, \<^ML_type>\<open>int\<close>, \<^ML_type>\<open>real\<close>, or \<^ML_type>\<open>string\<close>. Tools may declare options in
ML, and then refer to these values (relative to the context). Thus global
reference variables are easily avoided. The user may change the value of a
configuration option by means of an associated attribute of the same name.
@@ -33,18 +32,18 @@
@{command_def "print_options"} & : & \<open>context \<rightarrow>\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command print_options} ('!'?)
;
@{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
- \<close>}
+ \<close>
\<^descr> @{command "print_options"} prints the available configuration options,
with names, types, and current values; the ``\<open>!\<close>'' option indicates extra
verbosity.
\<^descr> \<open>name = value\<close> as an attribute expression modifies the named option, with
- the syntax of the value depending on the option's type. For @{ML_type bool}
+ the syntax of the value depending on the option's type. For \<^ML_type>\<open>bool\<close>
the default value is \<open>true\<close>. Any attempt to change a global option in a
local context is ignored.
\<close>
@@ -69,7 +68,7 @@
@{method_def sleep} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thms}
;
(@@{method erule} | @@{method drule} | @@{method frule})
@@ -78,7 +77,7 @@
(@@{method intro} | @@{method elim}) @{syntax thms}?
;
@@{method sleep} @{syntax real}
- \<close>}
+ \<close>
\<^descr> @{method unfold}~\<open>a\<^sub>1 \<dots> a\<^sub>n\<close> and @{method fold}~\<open>a\<^sub>1 \<dots> a\<^sub>n\<close> expand (or
fold back) the given definitions throughout all goals; any chained facts
@@ -134,7 +133,7 @@
@{attribute_def no_vars}\<open>\<^sup>*\<close> & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute tagged} @{syntax name} @{syntax name}
;
@@{attribute untagged} @{syntax name}
@@ -144,7 +143,7 @@
(@@{attribute unfolded} | @@{attribute folded}) @{syntax thms}
;
@@{attribute rotated} @{syntax int}?
- \<close>}
+ \<close>
\<^descr> @{attribute tagged}~\<open>name value\<close> and @{attribute untagged}~\<open>name\<close> add and
remove \<^emph>\<open>tags\<close> of some theorem. Tags may be any list of string pairs that
@@ -154,21 +153,21 @@
\<^descr> @{attribute THEN}~\<open>a\<close> composes rules by resolution; it resolves with the
first premise of \<open>a\<close> (an alternative position may be also specified). See
- also @{ML_op "RS"} in @{cite "isabelle-implementation"}.
+ also \<^ML_op>\<open>RS\<close> in @{cite "isabelle-implementation"}.
\<^descr> @{attribute unfolded}~\<open>a\<^sub>1 \<dots> a\<^sub>n\<close> and @{attribute folded}~\<open>a\<^sub>1 \<dots> a\<^sub>n\<close>
expand and fold back again the given definitions throughout a rule.
- \<^descr> @{attribute abs_def} turns an equation of the form @{prop "f x y \<equiv> t"}
- into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method simp} steps always
+ \<^descr> @{attribute abs_def} turns an equation of the form \<^prop>\<open>f x y \<equiv> t\<close>
+ into \<^prop>\<open>f \<equiv> \<lambda>x y. t\<close>, which ensures that @{method simp} steps always
expand it. This also works for object-logic equality.
\<^descr> @{attribute rotated}~\<open>n\<close> rotate the premises of a theorem by \<open>n\<close> (default
1).
\<^descr> @{attribute (Pure) elim_format} turns a destruction rule into elimination
- rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow> (PROP A \<Longrightarrow> PROP B) \<Longrightarrow>
- PROP B"}.
+ rule format, by resolving with the rule \<^prop>\<open>PROP A \<Longrightarrow> (PROP A \<Longrightarrow> PROP B) \<Longrightarrow>
+ PROP B\<close>.
Note that the Classical Reasoner (\secref{sec:classical}) provides its own
version of this operation.
@@ -187,11 +186,11 @@
@{method_def split} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thm}
;
@@{method split} @{syntax thms}
- \<close>}
+ \<close>
These methods provide low-level facilities for equational reasoning that are
intended for specialized applications only. Normally, single step
@@ -269,7 +268,7 @@
\end{tabular}
\<^medskip>
- @{rail \<open>
+ \<^rail>\<open>
(@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
;
@@ -278,7 +277,7 @@
@{syntax_def simpmod}: ('add' | 'del' | 'flip' | 'only' |
'split' (() | '!' | 'del') | 'cong' (() | 'add' | 'del'))
':' @{syntax thms}
- \<close>}
+ \<close>
\<^descr> @{method simp} invokes the Simplifier on the first subgoal, after
inserting chained facts as additional goal premises; further rule
@@ -355,19 +354,19 @@
\hline
Isar method & ML tactic & behavior \\\hline
- \<open>(simp (no_asm))\<close> & @{ML simp_tac} & assumptions are ignored completely
+ \<open>(simp (no_asm))\<close> & \<^ML>\<open>simp_tac\<close> & assumptions are ignored completely
\\\hline
- \<open>(simp (no_asm_simp))\<close> & @{ML asm_simp_tac} & assumptions are used in the
+ \<open>(simp (no_asm_simp))\<close> & \<^ML>\<open>asm_simp_tac\<close> & assumptions are used in the
simplification of the conclusion but are not themselves simplified \\\hline
- \<open>(simp (no_asm_use))\<close> & @{ML full_simp_tac} & assumptions are simplified but
+ \<open>(simp (no_asm_use))\<close> & \<^ML>\<open>full_simp_tac\<close> & assumptions are simplified but
are not used in the simplification of each other or the conclusion \\\hline
- \<open>(simp)\<close> & @{ML asm_full_simp_tac} & assumptions are used in the
+ \<open>(simp)\<close> & \<^ML>\<open>asm_full_simp_tac\<close> & assumptions are used in the
simplification of the conclusion and to simplify other assumptions \\\hline
- \<open>(simp (asm_lr))\<close> & @{ML asm_lr_simp_tac} & compatibility mode: an
+ \<open>(simp (asm_lr))\<close> & \<^ML>\<open>asm_lr_simp_tac\<close> & compatibility mode: an
assumption is only used for simplifying assumptions which are to the right
of it \\\hline
@@ -377,7 +376,7 @@
\<^medskip>
In Isabelle/Pure, proof methods @{method (Pure) simp} and @{method (Pure)
simp_all} only know about meta-equality \<open>\<equiv>\<close>. Any new object-logic needs to
- re-define these methods via @{ML Simplifier.method_setup} in ML:
+ re-define these methods via \<^ML>\<open>Simplifier.method_setup\<close> in ML:
Isabelle/FOL or Isabelle/HOL may serve as blue-prints.
\<close>
@@ -386,14 +385,14 @@
text \<open>
We consider basic algebraic simplifications in Isabelle/HOL. The rather
- trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like a good candidate
+ trivial goal \<^prop>\<open>0 + (x + 0) = x + 0 + 0\<close> looks like a good candidate
to be solved by a single call of @{method simp}:
\<close>
lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
text \<open>
- The above attempt \<^emph>\<open>fails\<close>, because @{term "0"} and @{term "(+)"} in the
+ The above attempt \<^emph>\<open>fails\<close>, because \<^term>\<open>0\<close> and \<^term>\<open>(+)\<close> in the
HOL library are declared as generic type class operations, without stating
any algebraic laws yet. More specific types are required to get access to
certain standard simplifications of the theory context, e.g.\ like this:\<close>
@@ -418,8 +417,8 @@
the subsequent method invocation. Both too little or too much information
can make simplification fail, for different reasons.
- In the next example the malicious assumption @{prop "\<And>x::nat. f x = g (f (g
- x))"} does not contribute to solve the problem, but makes the default
+ In the next example the malicious assumption \<^prop>\<open>\<And>x::nat. f x = g (f (g
+ x))\<close> does not contribute to solve the problem, but makes the default
@{method simp} method loop: the rewrite rule \<open>f ?x \<equiv> g (f (g ?x))\<close> extracted
from the assumption does not terminate. The Simplifier notices certain
simple forms of nontermination, but not this one. The problem can be solved
@@ -453,14 +452,14 @@
\<^medskip>
Because assumptions may simplify each other, there can be very subtle cases
of nontermination. For example, the regular @{method simp} method applied to
- @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y \<Longrightarrow> Q"} gives rise to the infinite
+ \<^prop>\<open>P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y \<Longrightarrow> Q\<close> gives rise to the infinite
reduction sequence
\[
\<open>P (f x)\<close> \stackrel{\<open>f x \<equiv> f y\<close>}{\longmapsto}
\<open>P (f y)\<close> \stackrel{\<open>y \<equiv> x\<close>}{\longmapsto}
\<open>P (f x)\<close> \stackrel{\<open>f x \<equiv> f y\<close>}{\longmapsto} \cdots
\]
- whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"}
+ whereas applying the same to \<^prop>\<open>y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q\<close>
terminates (without solving the goal):
\<close>
@@ -484,12 +483,12 @@
@{command_def "print_simpset"}\<open>\<^sup>*\<close> & : & \<open>context \<rightarrow>\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{attribute simp} | @@{attribute cong}) (() | 'add' | 'del') |
@@{attribute split} (() | '!' | 'del')
;
@@{command print_simpset} ('!'?)
- \<close>}
+ \<close>
\<^descr> @{attribute simp} declares rewrite rules, by adding or deleting them from
the simpset within the theory or proof context. Rewrite rules are theorems
@@ -692,7 +691,7 @@
lemma "(b \<bullet> c) \<bullet> a = xxx"
apply (simp only: AC_rules)
- txt \<open>@{subgoals}\<close>
+ txt \<open>\<^subgoals>\<close>
oops
lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
@@ -732,14 +731,14 @@
\end{tabular}
\<^medskip>
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute simp_trace_new} ('interactive')? \<newline>
('mode' '=' ('full' | 'normal'))? \<newline>
('depth' '=' @{syntax nat})?
;
@@{attribute simp_break} (@{syntax term}*)
- \<close>}
+ \<close>
These attributes and configurations options control various aspects of
Simplifier tracing and debugging.
@@ -802,22 +801,21 @@
simproc & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
@{syntax text};
@@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
- \<close>}
+ \<close>
\<^descr> @{command "simproc_setup"} defines a named simplification procedure that
is invoked by the Simplifier whenever any of the given term patterns match
the current redex. The implementation, which is provided as ML source text,
needs to be of type
- @{ML_type "morphism -> Proof.context -> cterm -> thm option"}, where the
- @{ML_type cterm} represents the current redex \<open>r\<close> and the result is supposed
- to be some proven rewrite rule \<open>r \<equiv> r'\<close> (or a generalized version), or @{ML
- NONE} to indicate failure. The @{ML_type Proof.context} argument holds the
- full context of the current Simplifier invocation. The @{ML_type morphism}
+ \<^ML_type>\<open>morphism -> Proof.context -> cterm -> thm option\<close>, where the
+ \<^ML_type>\<open>cterm\<close> represents the current redex \<open>r\<close> and the result is supposed
+ to be some proven rewrite rule \<open>r \<equiv> r'\<close> (or a generalized version), or \<^ML>\<open>NONE\<close> to indicate failure. The \<^ML_type>\<open>Proof.context\<close> argument holds the
+ full context of the current Simplifier invocation. The \<^ML_type>\<open>morphism\<close>
informs about the difference of the original compilation context wrt.\ the
one of the actual application later on.
@@ -880,11 +878,11 @@
its conclusion, as in \<open>Suc ?m < ?n \<Longrightarrow> ?m < ?n\<close>, the default strategy could
loop. % FIXME !??
- \<^descr> @{ML Simplifier.set_subgoaler}~\<open>tac ctxt\<close> sets the subgoaler of the
+ \<^descr> \<^ML>\<open>Simplifier.set_subgoaler\<close>~\<open>tac ctxt\<close> sets the subgoaler of the
context to \<open>tac\<close>. The tactic will be applied to the context of the running
Simplifier instance.
- \<^descr> @{ML Simplifier.prems_of}~\<open>ctxt\<close> retrieves the current set of premises
+ \<^descr> \<^ML>\<open>Simplifier.prems_of\<close>~\<open>ctxt\<close> retrieves the current set of premises
from the context. This may be non-empty only if the Simplifier has been
told to utilize local assumptions in the first place (cf.\ the options in
\secref{sec:simp-meth}).
@@ -918,12 +916,11 @@
\end{mldecls}
A solver is a tactic that attempts to solve a subgoal after simplification.
- Its core functionality is to prove trivial subgoals such as @{prop "True"}
+ Its core functionality is to prove trivial subgoals such as \<^prop>\<open>True\<close>
and \<open>t = t\<close>, but object-logics might be more ambitious. For example,
Isabelle/HOL performs a restricted version of linear arithmetic here.
- Solvers are packaged up in abstract type @{ML_type solver}, with @{ML
- Simplifier.mk_solver} as the only operation to create a solver.
+ Solvers are packaged up in abstract type \<^ML_type>\<open>solver\<close>, with \<^ML>\<open>Simplifier.mk_solver\<close> as the only operation to create a solver.
\<^medskip>
Rewriting does not instantiate unknowns. For example, rewriting alone cannot
@@ -943,7 +940,7 @@
Note that in this way the overall tactic is not totally safe: it may
instantiate unknowns that appear also in other subgoals.
- \<^descr> @{ML Simplifier.mk_solver}~\<open>name tac\<close> turns \<open>tac\<close> into a solver; the
+ \<^descr> \<^ML>\<open>Simplifier.mk_solver\<close>~\<open>name tac\<close> turns \<open>tac\<close> into a solver; the
\<open>name\<close> is only attached as a comment and has no further significance.
\<^descr> \<open>ctxt setSSolver solver\<close> installs \<open>solver\<close> as the safe solver of \<open>ctxt\<close>.
@@ -960,11 +957,11 @@
\<^medskip>
The solver tactic is invoked with the context of the running Simplifier.
Further operations may be used to retrieve relevant information, such as the
- list of local Simplifier premises via @{ML Simplifier.prems_of} --- this
+ list of local Simplifier premises via \<^ML>\<open>Simplifier.prems_of\<close> --- this
list may be non-empty only if the Simplifier runs in a mode that utilizes
local assumptions (see also \secref{sec:simp-meth}). The solver is also
presented the full goal including its assumptions in any case. Thus it can
- use these (e.g.\ by calling @{ML assume_tac}), even if the Simplifier proper
+ use these (e.g.\ by calling \<^ML>\<open>assume_tac\<close>), even if the Simplifier proper
happens to ignore local premises at the moment.
\<^medskip>
@@ -1027,16 +1024,16 @@
\<^descr> \<open>ctxt delloop name\<close> deletes the looper tactic that was associated with
\<open>name\<close> from \<open>ctxt\<close>.
- \<^descr> @{ML Splitter.add_split}~\<open>thm ctxt\<close> adds split tactic
+ \<^descr> \<^ML>\<open>Splitter.add_split\<close>~\<open>thm ctxt\<close> adds split tactic
for \<open>thm\<close> as additional looper tactic of \<open>ctxt\<close>
(overwriting previous split tactic for the same constant).
- \<^descr> @{ML Splitter.add_split_bang}~\<open>thm ctxt\<close> adds aggressive
+ \<^descr> \<^ML>\<open>Splitter.add_split_bang\<close>~\<open>thm ctxt\<close> adds aggressive
(see \S\ref{sec:simp-meth})
split tactic for \<open>thm\<close> as additional looper tactic of \<open>ctxt\<close>
(overwriting previous split tactic for the same constant).
- \<^descr> @{ML Splitter.del_split}~\<open>thm ctxt\<close> deletes the split tactic
+ \<^descr> \<^ML>\<open>Splitter.del_split\<close>~\<open>thm ctxt\<close> deletes the split tactic
corresponding to \<open>thm\<close> from the looper tactics of \<open>ctxt\<close>.
The splitter replaces applications of a given function; the right-hand side
@@ -1046,17 +1043,16 @@
@{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
Another example is the elimination operator for Cartesian products (which
- happens to be called @{const case_prod} in Isabelle/HOL:
+ happens to be called \<^const>\<open>case_prod\<close> in Isabelle/HOL):
@{text [display] "?P (case_prod ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (f a b))"}
For technical reasons, there is a distinction between case splitting in the
- conclusion and in the premises of a subgoal. The former is done by @{ML
- Splitter.split_tac} with rules like @{thm [source] if_split} or @{thm
+ conclusion and in the premises of a subgoal. The former is done by \<^ML>\<open>Splitter.split_tac\<close> with rules like @{thm [source] if_split} or @{thm
[source] option.split}, which do not split the subgoal, while the latter is
- done by @{ML Splitter.split_asm_tac} with rules like @{thm [source]
+ done by \<^ML>\<open>Splitter.split_asm_tac\<close> with rules like @{thm [source]
if_split_asm} or @{thm [source] option.split_asm}, which split the subgoal.
- The function @{ML Splitter.add_split} automatically takes care of which
+ The function \<^ML>\<open>Splitter.add_split\<close> automatically takes care of which
tactic to call, analyzing the form of the rules given as argument; it is the
same operation behind \<open>split\<close> attribute or method modifier syntax in the
Isar source language.
@@ -1067,7 +1063,7 @@
Isabelle/FOL/ZF.
\begin{warn}
- With @{ML Splitter.split_asm_tac} as looper component, the Simplifier may
+ With \<^ML>\<open>Splitter.split_asm_tac\<close> as looper component, the Simplifier may
split subgoals! This might cause unexpected problems in tactic expressions
that silently assume 0 or 1 subgoals after simplification.
\end{warn}
@@ -1081,12 +1077,12 @@
@{attribute_def simplified} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute simplified} opt? @{syntax thms}?
;
opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
- \<close>}
+ \<close>
\<^descr> @{attribute simplified}~\<open>a\<^sub>1 \<dots> a\<^sub>n\<close> causes a theorem to be simplified,
either by exactly the specified rules \<open>a\<^sub>1, \<dots>, a\<^sub>n\<close>, or the implicit
@@ -1335,16 +1331,16 @@
@{attribute_def swapped} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
;
@@{attribute rule} 'del'
;
@@{attribute iff} (((() | 'add') '?'?) | 'del')
- \<close>}
+ \<close>
\<^descr> @{command "print_claset"} prints the collection of rules
- declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
+ declared to the Classical Reasoner, i.e.\ the \<^ML_type>\<open>claset\<close>
within the context.
\<^descr> @{attribute intro}, @{attribute elim}, and @{attribute dest}
@@ -1401,9 +1397,9 @@
@{method_def contradiction} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method rule} @{syntax thms}?
- \<close>}
+ \<close>
\<^descr> @{method rule} as offered by the Classical Reasoner is a
refinement over the Pure one (see \secref{sec:pure-meth-att}). Both
@@ -1438,7 +1434,7 @@
@{method_def deepen} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method blast} @{syntax nat}? (@{syntax clamod} * )
;
@@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
@@ -1460,7 +1456,7 @@
'split' (() | '!' | 'del') |
'iff' (((() | 'add') '?'?) | 'del') |
(('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thms}
- \<close>}
+ \<close>
\<^descr> @{method blast} is a separate classical tableau prover that
uses the same classical rule declarations as explained before.
@@ -1564,11 +1560,11 @@
@{method_def clarsimp} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
;
@@{method clarsimp} (@{syntax clasimpmod} * )
- \<close>}
+ \<close>
\<^descr> @{method safe} repeatedly performs safe steps on all subgoals.
It is deterministic, with at most one outcome.
@@ -1743,13 +1739,13 @@
Generic tools may refer to the information provided by object-logic
declarations internally.
- @{rail \<open>
+ \<^rail>\<open>
@@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
;
@@{attribute atomize} ('(' 'full' ')')?
;
@@{attribute rule_format} ('(' 'noasm' ')')?
- \<close>}
+ \<close>
\<^descr> @{command "judgment"}~\<open>c :: \<sigma> (mx)\<close> declares constant
\<open>c\<close> as the truth judgment of the current object-logic. Its
--- a/src/Doc/Isar_Ref/HOL_Specific.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/HOL_Specific.thy Sat Jan 05 17:24:33 2019 +0100
@@ -98,7 +98,7 @@
B\<close>, for each premise \<open>\<M> R t\<close> in an introduction rule. The default rule
declarations of Isabelle/HOL already take care of most common situations.
- @{rail \<open>
+ \<^rail>\<open>
(@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
@@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
@{syntax vars} @{syntax for_fixes} \<newline>
@@ -107,7 +107,7 @@
@@{command print_inductives} ('!'?)
;
@@{attribute (HOL) mono} (() | 'add' | 'del')
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "inductive"} and @{command (HOL) "coinductive"} define
(co)inductive predicates from the introduction rules.
@@ -188,15 +188,15 @@
\<^item> De Morgan style equations for reasoning about the ``polarity'' of
expressions, e.g.
\[
- @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
- @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
+ \<^prop>\<open>\<not> \<not> P \<longleftrightarrow> P\<close> \qquad\qquad
+ \<^prop>\<open>\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q\<close>
\]
\<^item> Equations for reducing complex operators to more primitive ones whose
monotonicity can easily be proved, e.g.
\[
- @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
- @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
+ \<^prop>\<open>(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q\<close> \qquad\qquad
+ \<^prop>\<open>Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x\<close>
\]
\<close>
@@ -255,7 +255,7 @@
@{command_def (HOL) "fun_cases"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) primrec} @{syntax specification}
;
(@@{command (HOL) fun} | @@{command (HOL) function}) opts? @{syntax specification}
@@ -265,7 +265,7 @@
@@{command (HOL) termination} @{syntax term}?
;
@@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "primrec"} defines primitive recursive functions over
datatypes (see also @{command_ref (HOL) datatype}). The given \<open>equations\<close>
@@ -380,13 +380,13 @@
text \<open>
Since the value of an expression depends on the value of its variables, the
- functions @{const evala} and @{const evalb} take an additional parameter, an
+ functions \<^const>\<open>evala\<close> and \<^const>\<open>evalb\<close> take an additional parameter, an
\<^emph>\<open>environment\<close> that maps variables to their values.
\<^medskip>
Substitution on expressions can be defined similarly. The mapping \<open>f\<close> of
- type @{typ "'a \<Rightarrow> 'a aexp"} given as a parameter is lifted canonically on the
- types @{typ "'a aexp"} and @{typ "'a bexp"}, respectively.
+ type \<^typ>\<open>'a \<Rightarrow> 'a aexp\<close> given as a parameter is lifted canonically on the
+ types \<^typ>\<open>'a aexp\<close> and \<^typ>\<open>'a bexp\<close>, respectively.
\<close>
primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
@@ -403,8 +403,8 @@
text \<open>
In textbooks about semantics one often finds substitution theorems, which
- express the relationship between substitution and evaluation. For @{typ "'a
- aexp"} and @{typ "'a bexp"}, we can prove such a theorem by mutual
+ express the relationship between substitution and evaluation. For \<^typ>\<open>'a
+ aexp\<close> and \<^typ>\<open>'a bexp\<close>, we can prove such a theorem by mutual
induction, followed by simplification.
\<close>
@@ -429,8 +429,8 @@
datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
text \<open>
- A substitution function on type @{typ "('a, 'b) term"} can be defined as
- follows, by working simultaneously on @{typ "('a, 'b) term list"}:
+ A substitution function on type \<^typ>\<open>('a, 'b) term\<close> can be defined as
+ follows, by working simultaneously on \<^typ>\<open>('a, 'b) term list\<close>:
\<close>
primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
@@ -443,7 +443,7 @@
text \<open>
The recursion scheme follows the structure of the unfolded definition of
- type @{typ "('a, 'b) term"}. To prove properties of this substitution
+ type \<^typ>\<open>('a, 'b) term\<close>. To prove properties of this substitution
function, mutual induction is needed:
\<close>
@@ -470,10 +470,10 @@
text \<open>
Note that all occurrences of functions such as \<open>ts\<close> above must be applied to
- an argument. In particular, @{term "map_tree f \<circ> ts"} is not allowed here.
+ an argument. In particular, \<^term>\<open>map_tree f \<circ> ts\<close> is not allowed here.
\<^medskip>
- Here is a simple composition lemma for @{term map_tree}:
+ Here is a simple composition lemma for \<^term>\<open>map_tree\<close>:
\<close>
lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
@@ -492,7 +492,7 @@
@{method_def (HOL) induction_schema} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) relation} @{syntax term}
;
@@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
@@ -502,7 +502,7 @@
@@{method (HOL) induction_schema}
;
orders: ( 'max' | 'min' | 'ms' ) *
- \<close>}
+ \<close>
\<^descr> @{method (HOL) pat_completeness} is a specialized method to solve goals
regarding the completeness of pattern matching, as required by the @{command
@@ -551,10 +551,10 @@
@{attribute_def (HOL) "partial_function_mono"} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) partial_function} '(' @{syntax name} ')'
@{syntax specification}
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "partial_function"}~\<open>(mode)\<close> defines recursive functions
based on fixpoints in complete partial orders. No termination proof is
@@ -576,22 +576,20 @@
command, which directly corresponds to a complete partial order on the
result type. By default, the following modes are defined:
- \<^descr> \<open>option\<close> defines functions that map into the @{type option} type. Here,
- the value @{term None} is used to model a non-terminating computation.
- Monotonicity requires that if @{term None} is returned by a recursive
- call, then the overall result must also be @{term None}. This is best
- achieved through the use of the monadic operator @{const "Option.bind"}.
+ \<^descr> \<open>option\<close> defines functions that map into the \<^type>\<open>option\<close> type. Here,
+ the value \<^term>\<open>None\<close> is used to model a non-terminating computation.
+ Monotonicity requires that if \<^term>\<open>None\<close> is returned by a recursive
+ call, then the overall result must also be \<^term>\<open>None\<close>. This is best
+ achieved through the use of the monadic operator \<^const>\<open>Option.bind\<close>.
\<^descr> \<open>tailrec\<close> defines functions with an arbitrary result type and uses the
- slightly degenerated partial order where @{term "undefined"} is the bottom
- element. Now, monotonicity requires that if @{term undefined} is returned
- by a recursive call, then the overall result must also be @{term
- undefined}. In practice, this is only satisfied when each recursive call
+ slightly degenerated partial order where \<^term>\<open>undefined\<close> is the bottom
+ element. Now, monotonicity requires that if \<^term>\<open>undefined\<close> is returned
+ by a recursive call, then the overall result must also be \<^term>\<open>undefined\<close>. In practice, this is only satisfied when each recursive call
is a tail call, whose result is directly returned. Thus, this mode of
operation allows the definition of arbitrary tail-recursive functions.
- Experienced users may define new modes by instantiating the locale @{const
- "partial_function_definitions"} appropriately.
+ Experienced users may define new modes by instantiating the locale \<^const>\<open>partial_function_definitions\<close> appropriately.
\<^descr> @{attribute (HOL) partial_function_mono} declares rules for use in the
internal monotonicity proofs of partial function definitions.
@@ -609,7 +607,7 @@
mostly obsolete; @{command (HOL) "function"} or @{command (HOL) "fun"}
should be used instead.
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
@{syntax name} @{syntax term} (@{syntax prop} +) hints?
;
@@ -617,7 +615,7 @@
;
recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
(() | 'add' | 'del') ':' @{syntax thms}) | @{syntax clasimpmod}
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "recdef"} defines general well-founded recursive functions
(using the TFL package). The ``\<open>(permissive)\<close>'' option tells TFL to recover
@@ -639,10 +637,10 @@
@{attribute_def (HOL) recdef_wf} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
@@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
- \<close>}
+ \<close>
\<close>
@@ -662,10 +660,10 @@
For examples see \<^file>\<open>~~/src/HOL/ex/Adhoc_Overloading_Examples.thy\<close> and
\<^file>\<open>~~/src/HOL/Library/Monad_Syntax.thy\<close>.
- @{rail \<open>
+ \<^rail>\<open>
(@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
(@{syntax name} (@{syntax term} + ) + @'and')
- \<close>}
+ \<close>
\<^descr> @{command "adhoc_overloading"}~\<open>c v\<^sub>1 ... v\<^sub>n\<close> associates variants with an
existing constant.
@@ -688,12 +686,12 @@
@{command_def (HOL) "specification"} & : & \<open>theory \<rightarrow> proof(prove)\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) specification} '(' (decl +) ')' \<newline>
(@{syntax thmdecl}? @{syntax prop} +)
;
decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "specification"}~\<open>decls \<phi>\<close> sets up a goal stating the
existence of terms with the properties specified to hold for the constants
@@ -715,14 +713,14 @@
@{command_def (HOL) "old_rep_datatype"} & : & \<open>theory \<rightarrow> proof(prove)\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) old_rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
;
spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
;
cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "old_rep_datatype"} represents existing types as
old-style datatypes.
@@ -744,7 +742,7 @@
text \<open>
We define a type of finite sequences, with slightly different names than the
- existing @{typ "'a list"} that is already in @{theory Main}:
+ existing \<^typ>\<open>'a list\<close> that is already in \<^theory>\<open>Main\<close>:
\<close>
(*<*)experiment begin(*>*)
@@ -851,7 +849,7 @@
@{command_def (HOL) "print_record"} & : & \<open>context \<rightarrow>\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) record} @{syntax "overloaded"}? @{syntax typespec_sorts} '=' \<newline>
(@{syntax type} '+')? (constdecl +)
;
@@ -860,7 +858,7 @@
@@{command (HOL) print_record} modes? @{syntax typespec_sorts}
;
modes: '(' (@{syntax name} +) ')'
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "record"}~\<open>(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1 \<dots> c\<^sub>n :: \<sigma>\<^sub>n\<close>
defines extensible record type \<open>(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t\<close>, derived from the optional
@@ -981,8 +979,8 @@
internal simplification procedure, which is also part of the standard
Simplifier setup.
- \<^enum> Inject equations of a form analogous to @{prop "(x, y) = (x', y') \<equiv> x = x'
- \<and> y = y'"} are declared to the Simplifier and Classical Reasoner as
+ \<^enum> Inject equations of a form analogous to \<^prop>\<open>(x, y) = (x', y') \<equiv> x = x'
+ \<and> y = y'\<close> are declared to the Simplifier and Classical Reasoner as
@{attribute iff} rules. These rules are available as \<open>t.iffs\<close>.
\<^enum> The introduction rule for record equality analogous to \<open>x r = x r' \<Longrightarrow> y r =
@@ -1019,14 +1017,14 @@
A type definition identifies a new type with a non-empty subset of an
existing type. More precisely, the new type is defined by exhibiting an
- existing type \<open>\<tau>\<close>, a set \<open>A :: \<tau> set\<close>, and proving @{prop "\<exists>x. x \<in> A"}. Thus
+ existing type \<open>\<tau>\<close>, a set \<open>A :: \<tau> set\<close>, and proving \<^prop>\<open>\<exists>x. x \<in> A\<close>. Thus
\<open>A\<close> is a non-empty subset of \<open>\<tau>\<close>, and the new type denotes this subset. New
functions are postulated that establish an isomorphism between the new type
and the subset. In general, the type \<open>\<tau>\<close> may involve type variables \<open>\<alpha>\<^sub>1, \<dots>,
\<alpha>\<^sub>n\<close> which means that the type definition produces a type constructor \<open>(\<alpha>\<^sub>1,
\<dots>, \<alpha>\<^sub>n) t\<close> depending on those type arguments.
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) typedef} @{syntax "overloaded"}? abs_type '=' rep_set
;
@{syntax_def "overloaded"}: ('(' @'overloaded' ')')
@@ -1034,7 +1032,7 @@
abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
;
rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
- \<close>}
+ \<close>
To understand the concept of type definition better, we need to recount its
somewhat complex history. The HOL logic goes back to the ``Simple Theory of
@@ -1098,7 +1096,7 @@
specification allows to provide alternative names.
The logical characterization of @{command typedef} uses the predicate of
- locale @{const type_definition} that is defined in Isabelle/HOL. Various
+ locale \<^const>\<open>type_definition\<close> that is defined in Isabelle/HOL. Various
basic consequences of that are instantiated accordingly, re-using the locale
facts with names derived from the new type constructor. Thus the generic
theorem @{thm type_definition.Rep} is turned into the specific \<open>Rep_t\<close>, for
@@ -1160,9 +1158,9 @@
@{command_def (HOL) "functor"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "functor"}~\<open>prefix: m\<close> allows to prove and register
properties about the functorial structure of type constructors. These
@@ -1205,7 +1203,7 @@
@{command_def (HOL) "quotient_type"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>\\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) quotient_type} @{syntax "overloaded"}? \<newline>
@{syntax typespec} @{syntax mixfix}? '=' quot_type \<newline>
quot_morphisms? quot_parametric?
@@ -1215,7 +1213,7 @@
quot_morphisms: @'morphisms' @{syntax name} @{syntax name}
;
quot_parametric: @'parametric' @{syntax thm}
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "quotient_type"} defines a new quotient type \<open>\<tau>\<close>. The
injection from a quotient type to a raw type is called \<open>rep_\<tau>\<close>, its inverse
@@ -1243,7 +1241,7 @@
The Lifting package allows users to lift terms of the raw type to the
abstract type, which is a necessary step in building a library for an
abstract type. Lifting defines a new constant by combining coercion
- functions (@{term Abs} and @{term Rep}) with the raw term. It also proves an
+ functions (\<^term>\<open>Abs\<close> and \<^term>\<open>Rep\<close>) with the raw term. It also proves an
appropriate transfer rule for the Transfer (\secref{sec:transfer}) package
and, if possible, an equation for the code generator.
@@ -1271,7 +1269,7 @@
@{attribute_def (HOL) "lifting_restore"} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) setup_lifting} @{syntax thm} @{syntax thm}? \<newline>
(@'parametric' @{syntax thm})?
;
@@ -1285,20 +1283,19 @@
;
@@{attribute (HOL) lifting_restore}
@{syntax thm} (@{syntax thm} @{syntax thm})?
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "setup_lifting"} Sets up the Lifting package to work with
a user-defined type. The command supports two modes.
\<^enum> The first one is a low-level mode when the user must provide as a first
- argument of @{command (HOL) "setup_lifting"} a quotient theorem @{term
- "Quotient R Abs Rep T"}. The package configures a transfer rule for
+ argument of @{command (HOL) "setup_lifting"} a quotient theorem \<^term>\<open>Quotient R Abs Rep T\<close>. The package configures a transfer rule for
equality, a domain transfer rules and sets up the @{command_def (HOL)
"lift_definition"} command to work with the abstract type. An optional
- theorem @{term "reflp R"}, which certifies that the equivalence relation R
+ theorem \<^term>\<open>reflp R\<close>, which certifies that the equivalence relation R
is total, can be provided as a second argument. This allows the package to
generate stronger transfer rules. And finally, the parametricity theorem
- for @{term R} can be provided as a third argument. This allows the package
+ for \<^term>\<open>R\<close> can be provided as a third argument. This allows the package
to generate a stronger transfer rule for equality.
Users generally will not prove the \<open>Quotient\<close> theorem manually for new
@@ -1306,9 +1303,9 @@
\<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command
(HOL) "lift_definition"} can be used in its second mode, where only the
- @{term type_definition} theorem @{term "type_definition Rep Abs A"} is
+ \<^term>\<open>type_definition\<close> theorem \<^term>\<open>type_definition Rep Abs A\<close> is
used as an argument of the command. The command internally proves the
- corresponding @{term Quotient} theorem and registers it with @{command
+ corresponding \<^term>\<open>Quotient\<close> theorem and registers it with @{command
(HOL) setup_lifting} using its first mode.
For quotients, the command @{command (HOL) quotient_type} can be used. The
@@ -1350,12 +1347,12 @@
\<^medskip>
Integration with [@{attribute code} abstract]: For subtypes (e.g.\
- corresponding to a datatype invariant, such as @{typ "'a dlist"}), @{command
+ corresponding to a datatype invariant, such as \<^typ>\<open>'a dlist\<close>), @{command
(HOL) "lift_definition"} uses a code certificate theorem \<open>f.rep_eq\<close> as a
code equation. Because of the limitation of the code generator, \<open>f.rep_eq\<close>
cannot be used as a code equation if the subtype occurs inside the result
- type rather than at the top level (e.g.\ function returning @{typ "'a dlist
- option"} vs. @{typ "'a dlist"}).
+ type rather than at the top level (e.g.\ function returning \<^typ>\<open>'a dlist
+ option\<close> vs. \<^typ>\<open>'a dlist\<close>).
In this case, an extension of @{command (HOL) "lift_definition"} can be
invoked by specifying the flag \<open>code_dt\<close>. This extension enables code
@@ -1365,8 +1362,8 @@
\<^descr> \<open>\<tau>\<close> is a type variable
\<^descr> \<open>\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>\<close>, where \<open>\<kappa>\<close> is an abstract type constructor and \<open>\<tau>\<^sub>1 \<dots>
- \<tau>\<^sub>n\<close> do not contain abstract types (i.e.\ @{typ "int dlist"} is allowed
- whereas @{typ "int dlist dlist"} not)
+ \<tau>\<^sub>n\<close> do not contain abstract types (i.e.\ \<^typ>\<open>int dlist\<close> is allowed
+ whereas \<^typ>\<open>int dlist dlist\<close> not)
\<^descr> \<open>\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>\<close>, \<open>\<kappa>\<close> is a type constructor that was defined as a
(co)datatype whose constructor argument types do not contain either
@@ -1403,23 +1400,22 @@
\<^descr> @{command (HOL) "print_quotients"} prints stored quotient theorems.
\<^descr> @{attribute (HOL) quot_map} registers a quotient map theorem, a theorem
- showing how to ``lift'' quotients over type constructors. E.g.\ @{term
- "Quotient R Abs Rep T \<Longrightarrow> Quotient (rel_set R) (image Abs) (image Rep)
- (rel_set T)"}. For examples see \<^file>\<open>~~/src/HOL/Lifting_Set.thy\<close> or
+ showing how to ``lift'' quotients over type constructors. E.g.\ \<^term>\<open>Quotient R Abs Rep T \<Longrightarrow> Quotient (rel_set R) (image Abs) (image Rep)
+ (rel_set T)\<close>. For examples see \<^file>\<open>~~/src/HOL/Lifting_Set.thy\<close> or
\<^file>\<open>~~/src/HOL/Lifting.thy\<close>. This property is proved automatically if the
involved type is BNF without dead variables.
\<^descr> @{attribute (HOL) relator_eq_onp} registers a theorem that shows that a
- relator applied to an equality restricted by a predicate @{term P} (i.e.\
- @{term "eq_onp P"}) is equal to a predicator applied to the @{term P}. The
- combinator @{const eq_onp} is used for internal encoding of proper subtypes.
+ relator applied to an equality restricted by a predicate \<^term>\<open>P\<close> (i.e.\
+ \<^term>\<open>eq_onp P\<close>) is equal to a predicator applied to the \<^term>\<open>P\<close>. The
+ combinator \<^const>\<open>eq_onp\<close> is used for internal encoding of proper subtypes.
Such theorems allows the package to hide \<open>eq_onp\<close> from a user in a
user-readable form of a respectfulness theorem. For examples see
\<^file>\<open>~~/src/HOL/Lifting_Set.thy\<close> or \<^file>\<open>~~/src/HOL/Lifting.thy\<close>. This property
is proved automatically if the involved type is BNF without dead variables.
\<^descr> @{attribute (HOL) "relator_mono"} registers a property describing a
- monotonicity of a relator. E.g.\ @{prop "A \<le> B \<Longrightarrow> rel_set A \<le> rel_set B"}.
+ monotonicity of a relator. E.g.\ \<^prop>\<open>A \<le> B \<Longrightarrow> rel_set A \<le> rel_set B\<close>.
This property is needed for proving a stronger transfer rule in
@{command_def (HOL) "lift_definition"} when a parametricity theorem for the
raw term is specified and also for the reflexivity prover. For examples see
@@ -1451,7 +1447,7 @@
and thus sets up lifting for an abstract type \<open>\<tau>\<close> (that is defined by
\<open>Quotient_thm\<close>). Optional theorems \<open>pcr_def\<close> and \<open>pcr_cr_eq_thm\<close> can be
specified to register the parametrized correspondence relation for \<open>\<tau>\<close>.
- E.g.\ for @{typ "'a dlist"}, \<open>pcr_def\<close> is \<open>pcr_dlist A \<equiv> list_all2 A \<circ>\<circ>
+ E.g.\ for \<^typ>\<open>'a dlist\<close>, \<open>pcr_def\<close> is \<open>pcr_dlist A \<equiv> list_all2 A \<circ>\<circ>
cr_dlist\<close> and \<open>pcr_cr_eq_thm\<close> is \<open>pcr_dlist (=) = (=)\<close>. This attribute
is rather used for low-level manipulation with set-up of the Lifting package
because using of the bundle \<open>\<tau>.lifting\<close> together with the commands @{command
@@ -1555,15 +1551,15 @@
quantifiers are transferred.
\<^descr> @{attribute (HOL) relator_eq} attribute collects identity laws for
- relators of various type constructors, e.g. @{term "rel_set (=) = (=)"}.
+ relators of various type constructors, e.g. \<^term>\<open>rel_set (=) = (=)\<close>.
The @{method (HOL) transfer} method uses these lemmas to infer
transfer rules for non-polymorphic constants on the fly. For examples see
\<^file>\<open>~~/src/HOL/Lifting_Set.thy\<close> or \<^file>\<open>~~/src/HOL/Lifting.thy\<close>. This property
is proved automatically if the involved type is BNF without dead variables.
\<^descr> @{attribute_def (HOL) "relator_domain"} attribute collects rules
- describing domains of relators by predicators. E.g.\ @{term "Domainp
- (rel_set T) = (\<lambda>A. Ball A (Domainp T))"}. This allows the package to lift
+ describing domains of relators by predicators. E.g.\ \<^term>\<open>Domainp
+ (rel_set T) = (\<lambda>A. Ball A (Domainp T))\<close>. This allows the package to lift
transfer domain rules through type constructors. For examples see
\<^file>\<open>~~/src/HOL/Lifting_Set.thy\<close> or \<^file>\<open>~~/src/HOL/Lifting.thy\<close>. This property
is proved automatically if the involved type is BNF without dead variables.
@@ -1597,7 +1593,7 @@
@{attribute_def (HOL) "quot_preserve"} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
@{syntax term} 'is' @{syntax term}
;
@@ -1606,7 +1602,7 @@
@@{method (HOL) lifting} @{syntax thms}?
;
@@{method (HOL) lifting_setup} @{syntax thms}?
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "quotient_definition"} defines a constant on the quotient
type.
@@ -1665,8 +1661,8 @@
container types. Given a polymorphic type that serves as a container, a map
function defined for this container using @{command (HOL) "functor"} and a
relation map defined for the container type, the quotient extension
- theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3 (rel_map R) (map
- Abs) (map Rep)"}. Quotient extension theorems are stored in a database and
+ theorem should be \<^term>\<open>Quotient3 R Abs Rep \<Longrightarrow> Quotient3 (rel_map R) (map
+ Abs) (map Rep)\<close>. Quotient extension theorems are stored in a database and
are used in all the steps of lifting theorems.
\<close>
@@ -1688,7 +1684,7 @@
@{command_def (HOL) "sledgehammer_params"} & : & \<open>theory \<rightarrow> theory\<close>
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) try}
;
@@ -1704,7 +1700,7 @@
args: ( @{syntax name} '=' value + ',' )
;
facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thms} ) + ) ? ')'
- \<close>} % FIXME check args "value"
+ \<close> % FIXME check args "value"
\<^descr> @{command (HOL) "solve_direct"} checks whether the current subgoals can be
solved directly by an existing theorem. Duplicate lemmas can be detected in
@@ -1747,7 +1743,7 @@
@{command_def (HOL) "find_unused_assms"} & : & \<open>context \<rightarrow>\<close>
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
;
@@ -1771,7 +1767,7 @@
modes: '(' (@{syntax name} +) ')'
;
args: ( @{syntax name} '=' value + ',' )
- \<close>} % FIXME check "value"
+ \<close> % FIXME check "value"
\<^descr> @{command (HOL) "value"}~\<open>t\<close> evaluates and prints a term; optionally
\<open>modes\<close> can be specified, which are appended to the current print mode; see
@@ -1859,62 +1855,54 @@
Using the following type classes, the testers generate values and convert
them back into Isabelle terms for displaying counterexamples.
- \<^descr>[\<open>exhaustive\<close>] The parameters of the type classes @{class exhaustive} and
- @{class full_exhaustive} implement the testing. They take a testing
- function as a parameter, which takes a value of type @{typ "'a"} and
+ \<^descr>[\<open>exhaustive\<close>] The parameters of the type classes \<^class>\<open>exhaustive\<close> and
+ \<^class>\<open>full_exhaustive\<close> implement the testing. They take a testing
+ function as a parameter, which takes a value of type \<^typ>\<open>'a\<close> and
optionally produces a counterexample, and a size parameter for the test
- values. In @{class full_exhaustive}, the testing function parameter
- additionally expects a lazy term reconstruction in the type @{typ
- Code_Evaluation.term} of the tested value.
+ values. In \<^class>\<open>full_exhaustive\<close>, the testing function parameter
+ additionally expects a lazy term reconstruction in the type \<^typ>\<open>Code_Evaluation.term\<close> of the tested value.
The canonical implementation for \<open>exhaustive\<close> testers calls the given
testing function on all values up to the given size and stops as soon as a
counterexample is found.
- \<^descr>[\<open>random\<close>] The operation @{const Quickcheck_Random.random} of the type
- class @{class random} generates a pseudo-random value of the given size
- and a lazy term reconstruction of the value in the type @{typ
- Code_Evaluation.term}. A pseudo-randomness generator is defined in theory
- @{theory HOL.Random}.
+ \<^descr>[\<open>random\<close>] The operation \<^const>\<open>Quickcheck_Random.random\<close> of the type
+ class \<^class>\<open>random\<close> generates a pseudo-random value of the given size
+ and a lazy term reconstruction of the value in the type \<^typ>\<open>Code_Evaluation.term\<close>. A pseudo-randomness generator is defined in theory
+ \<^theory>\<open>HOL.Random\<close>.
\<^descr>[\<open>narrowing\<close>] implements Haskell's Lazy Smallcheck @{cite
- "runciman-naylor-lindblad"} using the type classes @{class narrowing} and
- @{class partial_term_of}. Variables in the current goal are initially
+ "runciman-naylor-lindblad"} using the type classes \<^class>\<open>narrowing\<close> and
+ \<^class>\<open>partial_term_of\<close>. Variables in the current goal are initially
represented as symbolic variables. If the execution of the goal tries to
evaluate one of them, the test engine replaces it with refinements
- provided by @{const narrowing}. Narrowing views every value as a
- sum-of-products which is expressed using the operations @{const
- Quickcheck_Narrowing.cons} (embedding a value), @{const
- Quickcheck_Narrowing.apply} (product) and @{const
- Quickcheck_Narrowing.sum} (sum). The refinement should enable further
+ provided by \<^const>\<open>narrowing\<close>. Narrowing views every value as a
+ sum-of-products which is expressed using the operations \<^const>\<open>Quickcheck_Narrowing.cons\<close> (embedding a value), \<^const>\<open>Quickcheck_Narrowing.apply\<close> (product) and \<^const>\<open>Quickcheck_Narrowing.sum\<close> (sum). The refinement should enable further
evaluation of the goal.
- For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
+ For example, \<^const>\<open>narrowing\<close> for the list type \<^typ>\<open>'a :: narrowing list\<close>
can be recursively defined as
- @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
+ \<^term>\<open>Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
(Quickcheck_Narrowing.apply
(Quickcheck_Narrowing.apply
(Quickcheck_Narrowing.cons (#))
narrowing)
- narrowing)"}.
- If a symbolic variable of type @{typ "_ list"} is evaluated, it is
- replaced by (i)~the empty list @{term "[]"} and (ii)~by a non-empty list
+ narrowing)\<close>.
+ If a symbolic variable of type \<^typ>\<open>_ list\<close> is evaluated, it is
+ replaced by (i)~the empty list \<^term>\<open>[]\<close> and (ii)~by a non-empty list
whose head and tail can then be recursively refined if needed.
- To reconstruct counterexamples, the operation @{const partial_term_of}
- transforms \<open>narrowing\<close>'s deep representation of terms to the type @{typ
- Code_Evaluation.term}. The deep representation models symbolic variables
- as @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally
- converted to @{const Code_Evaluation.Free}, and refined values as @{term
- "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i ::
- integer"} denotes the index in the sum of refinements. In the above
- example for lists, @{term "0"} corresponds to @{term "[]"} and @{term "1"}
- to @{term "(#)"}.
-
- The command @{command (HOL) "code_datatype"} sets up @{const
- partial_term_of} such that the @{term "i"}-th refinement is interpreted as
- the @{term "i"}-th constructor, but it does not ensures consistency with
- @{const narrowing}.
+ To reconstruct counterexamples, the operation \<^const>\<open>partial_term_of\<close>
+ transforms \<open>narrowing\<close>'s deep representation of terms to the type \<^typ>\<open>Code_Evaluation.term\<close>. The deep representation models symbolic variables
+ as \<^const>\<open>Quickcheck_Narrowing.Narrowing_variable\<close>, which are normally
+ converted to \<^const>\<open>Code_Evaluation.Free\<close>, and refined values as \<^term>\<open>Quickcheck_Narrowing.Narrowing_constructor i args\<close>, where \<^term>\<open>i ::
+ integer\<close> denotes the index in the sum of refinements. In the above
+ example for lists, \<^term>\<open>0\<close> corresponds to \<^term>\<open>[]\<close> and \<^term>\<open>1\<close>
+ to \<^term>\<open>(#)\<close>.
+
+ The command @{command (HOL) "code_datatype"} sets up \<^const>\<open>partial_term_of\<close> such that the \<^term>\<open>i\<close>-th refinement is interpreted as
+ the \<^term>\<open>i\<close>-th constructor, but it does not ensure consistency with
+ \<^const>\<open>narrowing\<close>.
\<^descr> @{command (HOL) "quickcheck_params"} changes @{command (HOL) "quickcheck"}
configuration options persistently.
@@ -1953,7 +1941,7 @@
called \<^emph>\<open>coercions\<close>. Type inference will add them as necessary when parsing
a term. See @{cite "traytel-berghofer-nipkow-2011"} for details.
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute (HOL) coercion} (@{syntax term})
;
@@{attribute (HOL) coercion_delete} (@{syntax term})
@@ -1961,7 +1949,7 @@
@@{attribute (HOL) coercion_map} (@{syntax term})
;
@@{attribute (HOL) coercion_args} (@{syntax const}) (('+' | '0' | '-')+)
- \<close>}
+ \<close>
\<^descr> @{attribute (HOL) "coercion"}~\<open>f\<close> registers a new coercion function \<open>f ::
\<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2\<close> where \<open>\<sigma>\<^sub>1\<close> and \<open>\<sigma>\<^sub>2\<close> are type constructors without arguments.
@@ -1997,7 +1985,7 @@
the definition of syntactic constructs (usually extralogical, i.e., processed
and stripped during type inference), that should not be destroyed by the
insertion of coercions (see, for example, the setup for the case syntax in
- @{theory HOL.Ctr_Sugar}).
+ \<^theory>\<open>HOL.Ctr_Sugar\<close>).
\<^descr> @{attribute (HOL) "coercion_enabled"} enables the coercion inference
algorithm.
@@ -2036,9 +2024,9 @@
@{method_def (HOL) iprover} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) iprover} (@{syntax rulemod} *)
- \<close>}
+ \<close>
\<^descr> @{method (HOL) iprover} performs intuitionistic proof search, depending on
specifically declared rules from the context, or given as explicit
@@ -2063,13 +2051,13 @@
@{method_def (HOL) "metis"} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) meson} @{syntax thms}?
;
@@{method (HOL) metis}
('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
@{syntax thms}?
- \<close>}
+ \<close>
\<^descr> @{method (HOL) meson} implements Loveland's model elimination procedure
@{cite "loveland-78"}. See \<^file>\<open>~~/src/HOL/ex/Meson_Test.thy\<close> for examples.
@@ -2091,13 +2079,13 @@
@{attribute_def (HOL) algebra} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) algebra}
('add' ':' @{syntax thms})?
('del' ':' @{syntax thms})?
;
@@{attribute (HOL) algebra} (() | 'add' | 'del')
- \<close>}
+ \<close>
\<^descr> @{method (HOL) algebra} performs algebraic reasoning via Gr\"obner bases,
see also @{cite "Chaieb-Wenzel:2007"} and @{cite \<open>\S3.2\<close> "Chaieb-thesis"}.
@@ -2168,9 +2156,9 @@
@{method_def (HOL) "coherent"} & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) coherent} @{syntax thms}?
- \<close>}
+ \<close>
\<^descr> @{method (HOL) coherent} solves problems of \<^emph>\<open>Coherent Logic\<close> @{cite
"Bezem-Coquand:2005"}, which covers applications in confluence theory,
@@ -2193,7 +2181,7 @@
@{command_def (HOL) "inductive_cases"}\<open>\<^sup>*\<close> & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
;
@@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
@@ -2203,7 +2191,7 @@
@@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
;
rule: 'rule' ':' @{syntax thm}
- \<close>}
+ \<close>
\<^descr> @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit to reason
about inductive types. Rules are selected according to the declarations by
@@ -2219,7 +2207,7 @@
object-logic conclusion of the subgoal being addressed.
\<^descr> @{method (HOL) ind_cases} and @{command (HOL) "inductive_cases"} provide
- an interface to the internal @{ML_text mk_cases} operation. Rules are
+ an interface to the internal \<^ML_text>\<open>mk_cases\<close> operation. Rules are
simplified in an unrestricted forward manner.
While @{method (HOL) ind_cases} is a proof method to apply the result
@@ -2238,9 +2226,9 @@
@{attribute_def (HOL) split_format}\<open>\<^sup>*\<close> & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute (HOL) split_format} ('(' 'complete' ')')?
- \<close>}
+ \<close>
\<^descr> @{attribute (HOL) split_format}\ \<open>(complete)\<close> causes arguments in function
applications to be represented canonically according to their tuple type
@@ -2292,7 +2280,7 @@
@{command_def (HOL) "code_pred"} & : & \<open>theory \<rightarrow> proof(prove)\<close>
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
@@ -2381,7 +2369,7 @@
(@'and' ((const ':' modes @'and') +))?))
;
modes: mode @'as' const
- \<close>}
+ \<close>
\<^descr> @{command (HOL) "export_code"} generates code for a given list of
constants in the specified target language(s). If no serialization
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -46,7 +46,7 @@
These diagnostic commands assist interactive development by printing
internal logical entities in a human-readable fashion.
- @{rail \<open>
+ \<^rail>\<open>
@@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
;
@@{command term} @{syntax modes}? @{syntax term}
@@ -60,7 +60,7 @@
@@{command print_state} @{syntax modes}?
;
@{syntax_def modes}: '(' (@{syntax name} + ) ')'
- \<close>}
+ \<close>
\<^descr> @{command "typ"}~\<open>\<tau>\<close> reads and prints a type expression according to the
current context.
@@ -172,14 +172,14 @@
\<^descr> @{attribute eta_contract} controls \<open>\<eta>\<close>-contracted printing of terms.
- The \<open>\<eta>\<close>-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"}, provided \<open>x\<close> is not
- free in \<open>f\<close>. It asserts \<^emph>\<open>extensionality\<close> of functions: @{prop "f \<equiv> g"} if
- @{prop "f x \<equiv> g x"} for all \<open>x\<close>. Higher-order unification frequently puts
+ The \<open>\<eta>\<close>-contraction law asserts \<^prop>\<open>(\<lambda>x. f x) \<equiv> f\<close>, provided \<open>x\<close> is not
+ free in \<open>f\<close>. It asserts \<^emph>\<open>extensionality\<close> of functions: \<^prop>\<open>f \<equiv> g\<close> if
+ \<^prop>\<open>f x \<equiv> g x\<close> for all \<open>x\<close>. Higher-order unification frequently puts
terms into a fully \<open>\<eta>\<close>-expanded form. For example, if \<open>F\<close> has type \<open>(\<tau> \<Rightarrow> \<tau>)
- \<Rightarrow> \<tau>\<close> then its expanded form is @{term "\<lambda>h. F (\<lambda>x. h x)"}.
+ \<Rightarrow> \<tau>\<close> then its expanded form is \<^term>\<open>\<lambda>h. F (\<lambda>x. h x)\<close>.
Enabling @{attribute eta_contract} makes Isabelle perform \<open>\<eta>\<close>-contractions
- before printing, so that @{term "\<lambda>h. F (\<lambda>x. h x)"} appears simply as \<open>F\<close>.
+ before printing, so that \<^term>\<open>\<lambda>h. F (\<lambda>x. h x)\<close> appears simply as \<open>F\<close>.
Note that the distinction between a term and its \<open>\<eta>\<close>-expanded form
occasionally matters. While higher-order resolution and rewriting operate
@@ -229,12 +229,12 @@
\secref{sec:print-diag}) take additional print modes as optional argument.
The underlying ML operations are as follows.
- \<^descr> @{ML "print_mode_value ()"} yields the list of currently active print
+ \<^descr> \<^ML>\<open>print_mode_value ()\<close> yields the list of currently active print
mode names. This should be understood as symbolic representation of
certain individual features for printing (with precedence from left to
right).
- \<^descr> @{ML Print_Mode.with_modes}~\<open>modes f x\<close> evaluates \<open>f x\<close> in an execution
+ \<^descr> \<^ML>\<open>Print_Mode.with_modes\<close>~\<open>modes f x\<close> evaluates \<open>f x\<close> in an execution
context where the print mode is prepended by the given \<open>modes\<close>. This
provides a thread-safe way to augment print modes. It is also monotonic in
the set of mode names: it retains the default print mode that certain
@@ -277,7 +277,7 @@
specify any context-free priority grammar, which is more general than the
fixity declarations of ML and Prolog.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def mixfix}: '('
(@{syntax template} prios? @{syntax nat}? |
(@'infix' | @'infixl' | @'infixr') @{syntax template} @{syntax nat} |
@@ -289,7 +289,7 @@
prios: '[' (@{syntax nat} + ',') ']'
;
prio: '[' @{syntax nat} ']'
- \<close>}
+ \<close>
The mixfix \<open>template\<close> may include literal text, spacing, blocks, and
arguments (denoted by ``\<open>_\<close>''); the special symbol ``\<^verbatim>\<open>\<index>\<close>'' (printed as
@@ -383,13 +383,13 @@
Block properties allow more control over the details of pretty-printed
output. The concrete syntax is defined as follows.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def "mixfix_properties"}: (entry *)
;
entry: atom ('=' atom)?
;
atom: @{syntax short_ident} | @{syntax int} | @{syntax float} | @{syntax cartouche}
- \<close>}
+ \<close>
Each @{syntax entry} is a name-value pair: if the value is omitted, it
defaults to \<^verbatim>\<open>true\<close> (intended for Boolean properties). The following
@@ -503,7 +503,7 @@
for explicit notation. This allows to add or delete mixfix annotations for
of existing logical entities within the current context.
- @{rail \<open>
+ \<^rail>\<open>
(@@{command type_notation} | @@{command no_type_notation}) @{syntax mode}? \<newline>
(@{syntax name} @{syntax mixfix} + @'and')
;
@@ -511,7 +511,7 @@
(@{syntax name} @{syntax mixfix} + @'and')
;
@@{command write} @{syntax mode}? (@{syntax name} @{syntax mixfix} + @'and')
- \<close>}
+ \<close>
\<^descr> @{command "type_notation"}~\<open>c (mx)\<close> associates mixfix syntax with an
existing type constructor. The arity of the constructor is retrieved from
@@ -566,7 +566,7 @@
@{syntax_def (inner) float_token} & = & @{syntax_ref nat}\<^verbatim>\<open>.\<close>@{syntax_ref nat} \\
@{syntax_def (inner) str_token} & = & \<^verbatim>\<open>''\<close> \<open>\<dots>\<close> \<^verbatim>\<open>''\<close> \\
@{syntax_def (inner) string_token} & = & \<^verbatim>\<open>"\<close> \<open>\<dots>\<close> \<^verbatim>\<open>"\<close> \\
- @{syntax_def (inner) cartouche} & = & @{verbatim "\<open>"} \<open>\<dots>\<close> @{verbatim "\<close>"} \\
+ @{syntax_def (inner) cartouche} & = & \<^verbatim>\<open>\<open>\<close> \<open>\<dots>\<close> \<^verbatim>\<open>\<close>\<close> \\
\end{supertabular}
\end{center}
@@ -738,7 +738,7 @@
\<^descr> @{syntax_ref (inner) any} denotes any term.
\<^descr> @{syntax_ref (inner) prop} denotes meta-level propositions, which are
- terms of type @{typ prop}. The syntax of such formulae of the meta-logic is
+ terms of type \<^typ>\<open>prop\<close>. The syntax of such formulae of the meta-logic is
carefully distinguished from usual conventions for object-logics. In
particular, plain \<open>\<lambda>\<close>-term notation is \<^emph>\<open>not\<close> recognized as @{syntax (inner)
prop}.
@@ -747,19 +747,19 @@
embedded into regular @{syntax (inner) prop} by means of an explicit \<^verbatim>\<open>PROP\<close>
token.
- Terms of type @{typ prop} with non-constant head, e.g.\ a plain variable,
- are printed in this form. Constants that yield type @{typ prop} are expected
+ Terms of type \<^typ>\<open>prop\<close> with non-constant head, e.g.\ a plain variable,
+ are printed in this form. Constants that yield type \<^typ>\<open>prop\<close> are expected
to provide their own concrete syntax; otherwise the printed version will
appear like @{syntax (inner) logic} and cannot be parsed again as @{syntax
(inner) prop}.
\<^descr> @{syntax_ref (inner) logic} denotes arbitrary terms of a logical type,
- excluding type @{typ prop}. This is the main syntactic category of
+ excluding type \<^typ>\<open>prop\<close>. This is the main syntactic category of
object-logic entities, covering plain \<open>\<lambda>\<close>-term notation (variables,
abstraction, application), plus anything defined by the user.
When specifying notation for logical entities, all logical types (excluding
- @{typ prop}) are \<^emph>\<open>collapsed\<close> to this single category of @{syntax (inner)
+ \<^typ>\<open>prop\<close>) are \<^emph>\<open>collapsed\<close> to this single category of @{syntax (inner)
logic}.
\<^descr> @{syntax_ref (inner) index} denotes an optional index term for indexed
@@ -811,7 +811,7 @@
by the type-checking phase.
\<^descr> A bound ``\<open>_\<close>'' refers to a vacuous abstraction, where the body does not
- refer to the binding introduced here. As in the term @{term "\<lambda>x _. x"},
+ refer to the binding introduced here. As in the term \<^term>\<open>\<lambda>x _. x\<close>,
which is \<open>\<alpha>\<close>-equivalent to \<open>\<lambda>x y. x\<close>.
\<^descr> A free ``\<open>_\<close>'' refers to an implicit outer binding. Higher definitional
@@ -969,7 +969,7 @@
subsection \<open>Abstract syntax trees \label{sec:ast}\<close>
text \<open>
- The ML datatype @{ML_type Ast.ast} explicitly represents the intermediate
+ The ML datatype \<^ML_type>\<open>Ast.ast\<close> explicitly represents the intermediate
AST format that is used for syntax rewriting (\secref{sec:syn-trans}). It is
defined in ML as follows:
@{verbatim [display]
@@ -995,7 +995,7 @@
Nested application like \<^verbatim>\<open>(("_abs" x t) u)\<close> is also possible, but ASTs are
definitely first-order: the syntax constant \<^verbatim>\<open>"_abs"\<close> does not bind the \<^verbatim>\<open>x\<close>
in any way. Proper bindings are introduced in later stages of the term
- syntax, where \<^verbatim>\<open>("_abs" x t)\<close> becomes an @{ML Abs} node and occurrences of
+ syntax, where \<^verbatim>\<open>("_abs" x t)\<close> becomes an \<^ML>\<open>Abs\<close> node and occurrences of
\<^verbatim>\<open>x\<close> in \<^verbatim>\<open>t\<close> are replaced by bound variables (represented as de-Bruijn
indices).
\<close>
@@ -1005,21 +1005,20 @@
text \<open>
Depending on the situation --- input syntax, output syntax, translation
- patterns --- the distinction of atomic ASTs as @{ML Ast.Constant} versus
- @{ML Ast.Variable} serves slightly different purposes.
+ patterns --- the distinction of atomic ASTs as \<^ML>\<open>Ast.Constant\<close> versus
+ \<^ML>\<open>Ast.Variable\<close> serves slightly different purposes.
Input syntax of a term such as \<open>f a b = c\<close> does not yet indicate the scopes
of atomic entities \<open>f, a, b, c\<close>: they could be global constants or local
- variables, even bound ones depending on the context of the term. @{ML
- Ast.Variable} leaves this choice still open: later syntax layers (or
+ variables, even bound ones depending on the context of the term. \<^ML>\<open>Ast.Variable\<close> leaves this choice still open: later syntax layers (or
translation functions) may capture such a variable to determine its role
specifically, to make it a constant, bound variable, free variable etc. In
contrast, syntax translations that introduce already known constants would
- rather do it via @{ML Ast.Constant} to prevent accidental re-interpretation
+ rather do it via \<^ML>\<open>Ast.Constant\<close> to prevent accidental re-interpretation
later on.
- Output syntax turns term constants into @{ML Ast.Constant} and variables
- (free or schematic) into @{ML Ast.Variable}. This information is precise
+ Output syntax turns term constants into \<^ML>\<open>Ast.Constant\<close> and variables
+ (free or schematic) into \<^ML>\<open>Ast.Variable\<close>. This information is precise
when printing fully formal \<open>\<lambda>\<close>-terms.
\<^medskip>
@@ -1049,7 +1048,7 @@
not know about this later name resolution, there can be surprises in
boundary cases.
- \<^emph>\<open>Authentic syntax names\<close> for @{ML Ast.Constant} avoid this problem: the
+ \<^emph>\<open>Authentic syntax names\<close> for \<^ML>\<open>Ast.Constant\<close> avoid this problem: the
fully-qualified constant name with a special prefix for its formal category
(\<open>class\<close>, \<open>type\<close>, \<open>const\<close>, \<open>fixed\<close>) represents the information faithfully
within the untyped AST format. Accidental overlap with free or bound
@@ -1095,7 +1094,7 @@
@{command translations}) are required to turn resulting parse trees into
proper representations of formal entities again.
- @{rail \<open>
+ \<^rail>\<open>
@@{command nonterminal} (@{syntax name} + @'and')
;
(@@{command syntax} | @@{command no_syntax}) @{syntax mode}? (constdecl +)
@@ -1109,7 +1108,7 @@
mode: ('(' ( @{syntax name} | @'output' | @{syntax name} @'output' ) ')')
;
transpat: ('(' @{syntax name} ')')? @{syntax string}
- \<close>}
+ \<close>
\<^descr> @{command "nonterminal"}~\<open>c\<close> declares a type constructor \<open>c\<close> (without
arguments) to act as purely syntactic type: a nonterminal symbol of the
@@ -1179,10 +1178,8 @@
applications within the term syntax, independently of the corresponding
concrete syntax.
- Atomic ASTs are distinguished as @{ML Ast.Constant} versus @{ML
- Ast.Variable} as follows: a qualified name or syntax constant declared via
- @{command syntax}, or parse tree head of concrete notation becomes @{ML
- Ast.Constant}, anything else @{ML Ast.Variable}. Note that \<open>CONST\<close> and
+ Atomic ASTs are distinguished as \<^ML>\<open>Ast.Constant\<close> versus \<^ML>\<open>Ast.Variable\<close> as follows: a qualified name or syntax constant declared via
+ @{command syntax}, or parse tree head of concrete notation becomes \<^ML>\<open>Ast.Constant\<close>, anything else \<^ML>\<open>Ast.Variable\<close>. Note that \<open>CONST\<close> and
\<open>XCONST\<close> within the term language (\secref{sec:pure-grammar}) allow to
enforce treatment as constants.
@@ -1212,13 +1209,13 @@
syntax} and @{command translations} are really needed are as follows:
\<^item> Iterated replacement via recursive @{command translations}. For example,
- consider list enumeration @{term "[a, b, c, d]"} as defined in theory
- @{theory HOL.List}.
+ consider list enumeration \<^term>\<open>[a, b, c, d]\<close> as defined in theory
+ \<^theory>\<open>HOL.List\<close>.
\<^item> Change of binding status of variables: anything beyond the built-in
@{keyword "binder"} mixfix annotation requires explicit syntax translations.
- For example, consider the set comprehension syntax @{term "{x. P}"} as
- defined in theory @{theory HOL.Set}.
+ For example, consider the set comprehension syntax \<^term>\<open>{x. P}\<close> as
+ defined in theory \<^theory>\<open>HOL.Set\<close>.
\<close>
@@ -1235,22 +1232,22 @@
instance of \<open>lhs\<close>; in this case the pattern \<open>lhs\<close> is said to match the
object \<open>u\<close>. A redex matched by \<open>lhs\<close> may be replaced by the corresponding
instance of \<open>rhs\<close>, thus \<^emph>\<open>rewriting\<close> the AST \<open>t\<close>. Matching requires some
- notion of \<^emph>\<open>place-holders\<close> in rule patterns: @{ML Ast.Variable} serves this
+ notion of \<^emph>\<open>place-holders\<close> in rule patterns: \<^ML>\<open>Ast.Variable\<close> serves this
purpose.
More precisely, the matching of the object \<open>u\<close> against the pattern \<open>lhs\<close> is
performed as follows:
- \<^item> Objects of the form @{ML Ast.Variable}~\<open>x\<close> or @{ML Ast.Constant}~\<open>x\<close> are
- matched by pattern @{ML Ast.Constant}~\<open>x\<close>. Thus all atomic ASTs in the
+ \<^item> Objects of the form \<^ML>\<open>Ast.Variable\<close>~\<open>x\<close> or \<^ML>\<open>Ast.Constant\<close>~\<open>x\<close> are
+ matched by pattern \<^ML>\<open>Ast.Constant\<close>~\<open>x\<close>. Thus all atomic ASTs in the
object are treated as (potential) constants, and a successful match makes
them actual constants even before name space resolution (see also
\secref{sec:ast}).
- \<^item> Object \<open>u\<close> is matched by pattern @{ML Ast.Variable}~\<open>x\<close>, binding \<open>x\<close> to
+ \<^item> Object \<open>u\<close> is matched by pattern \<^ML>\<open>Ast.Variable\<close>~\<open>x\<close>, binding \<open>x\<close> to
\<open>u\<close>.
- \<^item> Object @{ML Ast.Appl}~\<open>us\<close> is matched by @{ML Ast.Appl}~\<open>ts\<close> if \<open>us\<close> and
+ \<^item> Object \<^ML>\<open>Ast.Appl\<close>~\<open>us\<close> is matched by \<^ML>\<open>Ast.Appl\<close>~\<open>ts\<close> if \<open>us\<close> and
\<open>ts\<close> have the same length and each corresponding subtree matches.
\<^item> In every other case, matching fails.
@@ -1305,7 +1302,7 @@
manipulations of inner syntax, at the expense of some complexity and
obscurity in the implementation.
- @{rail \<open>
+ \<^rail>\<open>
( @@{command parse_ast_translation} | @@{command parse_translation} |
@@{command print_translation} | @@{command typed_print_translation} |
@@{command print_ast_translation}) @{syntax text}
@@ -1314,7 +1311,7 @@
@@{ML_antiquotation type_syntax} |
@@{ML_antiquotation const_syntax} |
@@{ML_antiquotation syntax_const}) embedded
- \<close>}
+ \<close>
\<^descr> @{command parse_translation} etc. declare syntax translation functions to
the theory. Any of these commands have a single @{syntax text} argument that
@@ -1324,15 +1321,15 @@
{\footnotesize
\begin{tabular}{l}
@{command parse_ast_translation} : \\
- \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
+ \quad \<^ML_type>\<open>(string * (Proof.context -> Ast.ast list -> Ast.ast)) list\<close> \\
@{command parse_translation} : \\
- \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
+ \quad \<^ML_type>\<open>(string * (Proof.context -> term list -> term)) list\<close> \\
@{command print_translation} : \\
- \quad @{ML_type "(string * (Proof.context -> term list -> term)) list"} \\
+ \quad \<^ML_type>\<open>(string * (Proof.context -> term list -> term)) list\<close> \\
@{command typed_print_translation} : \\
- \quad @{ML_type "(string * (Proof.context -> typ -> term list -> term)) list"} \\
+ \quad \<^ML_type>\<open>(string * (Proof.context -> typ -> term list -> term)) list\<close> \\
@{command print_ast_translation} : \\
- \quad @{ML_type "(string * (Proof.context -> Ast.ast list -> Ast.ast)) list"} \\
+ \quad \<^ML_type>\<open>(string * (Proof.context -> Ast.ast list -> Ast.ast)) list\<close> \\
\end{tabular}}
\<^medskip>
@@ -1372,10 +1369,8 @@
in ML.
For AST translations, the arguments \<open>x\<^sub>1, \<dots>, x\<^sub>n\<close> are ASTs. A combination
- has the form @{ML "Ast.Constant"}~\<open>c\<close> or @{ML "Ast.Appl"}~\<open>[\<close>@{ML
- Ast.Constant}~\<open>c, x\<^sub>1, \<dots>, x\<^sub>n]\<close>. For term translations, the arguments are
- terms and a combination has the form @{ML Const}~\<open>(c, \<tau>)\<close> or @{ML
- Const}~\<open>(c, \<tau>) $ x\<^sub>1 $ \<dots> $ x\<^sub>n\<close>. Terms allow more sophisticated
+ has the form \<^ML>\<open>Ast.Constant\<close>~\<open>c\<close> or \<^ML>\<open>Ast.Appl\<close>~\<open>[\<close>\<^ML>\<open>Ast.Constant\<close>~\<open>c, x\<^sub>1, \<dots>, x\<^sub>n]\<close>. For term translations, the arguments are
+ terms and a combination has the form \<^ML>\<open>Const\<close>~\<open>(c, \<tau>)\<close> or \<^ML>\<open>Const\<close>~\<open>(c, \<tau>) $ x\<^sub>1 $ \<dots> $ x\<^sub>n\<close>. Terms allow more sophisticated
transformations than ASTs do, typically involving abstractions and bound
variables. \<^emph>\<open>Typed\<close> print translations may even peek at the type \<open>\<tau>\<close> of the
constant they are invoked on, although some information might have been
@@ -1394,12 +1389,11 @@
arguments that are partly still in internal form. The result again
undergoes translation; therefore a print translation should not introduce
as head the very constant that invoked it. The function may raise
- exception @{ML Match} to indicate failure; in this event it has no effect.
+ exception \<^ML>\<open>Match\<close> to indicate failure; in this event it has no effect.
Multiple functions associated with some syntactic name are tried in the
order of declaration in the theory.
- Only constant atoms --- constructor @{ML Ast.Constant} for ASTs and @{ML
- Const} for terms --- can invoke translation functions. This means that parse
+ Only constant atoms --- constructor \<^ML>\<open>Ast.Constant\<close> for ASTs and \<^ML>\<open>Const\<close> for terms --- can invoke translation functions. This means that parse
translations can only be associated with parse tree heads of concrete
syntax, or syntactic constants introduced via other translations. For plain
identifiers within the term language, the status of constant versus variable
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -41,9 +41,9 @@
@{command_def "help"}\<open>\<^sup>*\<close> & : & \<open>any \<rightarrow>\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command help} (@{syntax name} * )
- \<close>}
+ \<close>
\<^descr> @{command "print_commands"} prints all outer syntax keywords
and commands.
@@ -107,7 +107,7 @@
@{syntax_def type_var} & = & \<^verbatim>\<open>?\<close>\<open>type_ident |\<close>~~\<^verbatim>\<open>?\<close>\<open>type_ident\<close>\<^verbatim>\<open>.\<close>\<open>nat\<close> \\
@{syntax_def string} & = & \<^verbatim>\<open>"\<close> \<open>\<dots>\<close> \<^verbatim>\<open>"\<close> \\
@{syntax_def altstring} & = & \<^verbatim>\<open>`\<close> \<open>\<dots>\<close> \<^verbatim>\<open>`\<close> \\
- @{syntax_def cartouche} & = & @{verbatim "\<open>"} \<open>\<dots>\<close> @{verbatim "\<close>"} \\
+ @{syntax_def cartouche} & = & \<^verbatim>\<open>\<open>\<close> \<open>\<dots>\<close> \<^verbatim>\<open>\<close>\<close> \\
@{syntax_def verbatim} & = & \<^verbatim>\<open>{*\<close> \<open>\<dots>\<close> \<^verbatim>\<open>*}\<close> \\[1ex]
\<open>letter\<close> & = & \<open>latin |\<close>~~\<^verbatim>\<open>\\<close>\<^verbatim>\<open><\<close>\<open>latin\<close>\<^verbatim>\<open>>\<close>~~\<open>|\<close>~~\<^verbatim>\<open>\\<close>\<^verbatim>\<open><\<close>\<open>latin latin\<close>\<^verbatim>\<open>>\<close>~~\<open>| greek |\<close> \\
@@ -129,8 +129,7 @@
\end{center}
A @{syntax_ref term_var} or @{syntax_ref type_var} describes an unknown,
- which is internally a pair of base name and index (ML type @{ML_type
- indexname}). These components are either separated by a dot as in \<open>?x.1\<close> or
+ which is internally a pair of base name and index (ML type \<^ML_type>\<open>indexname\<close>). These components are either separated by a dot as in \<open>?x.1\<close> or
\<open>?x7.3\<close> or run together as in \<open>?x1\<close>. The latter form is possible if the base
name does not end with digits. If the index is 0, it may be dropped
altogether: \<open>?x\<close> and \<open>?x0\<close> and \<open>?x.0\<close> all refer to the same unknown, with
@@ -147,7 +146,7 @@
is no way to escape ``\<^verbatim>\<open>*}\<close>''. Cartouches do not have this limitation.
A @{syntax_ref cartouche} consists of arbitrary text, with properly balanced
- blocks of ``@{verbatim "\<open>"}~\<open>\<dots>\<close>~@{verbatim "\<close>"}''. Note that the rendering
+ blocks of ``\<^verbatim>\<open>\<open>\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>\<close>\<close>''. Note that the rendering
of cartouche delimiters is usually like this: ``\<open>\<open> \<dots> \<close>\<close>''.
Source comments take the form \<^verbatim>\<open>(*\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>*)\<close> and may be nested: the text is
@@ -181,12 +180,12 @@
theorems etc.\ Quoted strings provide an escape for non-identifier names or
those ruled out by outer syntax keywords (e.g.\ quoted \<^verbatim>\<open>"let"\<close>).
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def name}: @{syntax short_ident} | @{syntax long_ident} |
@{syntax sym_ident} | @{syntax nat} | @{syntax string}
;
@{syntax_def par_name}: '(' @{syntax name} ')'
- \<close>}
+ \<close>
A @{syntax_def system_name} is like @{syntax name}, but it excludes
white-space characters and needs to conform to file-name notation. Name
@@ -202,11 +201,11 @@
floating point numbers. These are combined as @{syntax int} and @{syntax
real} as follows.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def int}: @{syntax nat} | '-' @{syntax nat}
;
@{syntax_def real}: @{syntax float} | @{syntax int}
- \<close>}
+ \<close>
Note that there is an overlap with the category @{syntax name}, which also
includes @{syntax nat}.
@@ -223,11 +222,11 @@
plain identifiers in the outer language may be used as inner language
content without delimiters.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def embedded}: @{syntax cartouche} | @{syntax string} |
@{syntax short_ident} | @{syntax long_ident} | @{syntax sym_ident} |
@{syntax term_var} | @{syntax type_ident} | @{syntax type_var} | @{syntax nat}
- \<close>}
+ \<close>
\<close>
@@ -239,9 +238,9 @@
convenience, any of the smaller text unit that conforms to @{syntax name} is
admitted as well.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def text}: @{syntax embedded} | @{syntax verbatim}
- \<close>}
+ \<close>
Typical uses are document markup commands, like \<^theory_text>\<open>chapter\<close>, \<^theory_text>\<open>section\<close> etc.
(\secref{sec:markup}).
@@ -284,13 +283,13 @@
to the intersection of these classes. The syntax of type arities is given
directly at the outer level.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def classdecl}: @{syntax name} (('<' | '\<subseteq>') (@{syntax name} + ','))?
;
@{syntax_def sort}: @{syntax embedded}
;
@{syntax_def arity}: ('(' (@{syntax sort} + ',') ')')? @{syntax sort}
- \<close>}
+ \<close>
\<close>
@@ -308,49 +307,49 @@
these have not been superseded by commands or other keywords already (such
as \<^verbatim>\<open>=\<close> or \<^verbatim>\<open>+\<close>).
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def type}: @{syntax embedded}
;
@{syntax_def term}: @{syntax embedded}
;
@{syntax_def prop}: @{syntax embedded}
- \<close>}
+ \<close>
Positional instantiations are specified as a sequence of terms, or the
placeholder ``\<open>_\<close>'' (underscore), which means to skip a position.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def inst}: '_' | @{syntax term}
;
@{syntax_def insts}: (@{syntax inst} *)
- \<close>}
+ \<close>
Named instantiations are specified as pairs of assignments \<open>v = t\<close>, which
refer to schematic variables in some theorem that is instantiated. Both type
and terms instantiations are admitted, and distinguished by the usual syntax
of variable names.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def named_inst}: variable '=' (type | term)
;
@{syntax_def named_insts}: (named_inst @'and' +)
;
variable: @{syntax name} | @{syntax term_var} | @{syntax type_ident} | @{syntax type_var}
- \<close>}
+ \<close>
Type declarations and definitions usually refer to @{syntax typespec} on the
left-hand side. This models basic type constructor application at the outer
syntax level. Note that only plain postfix notation is available here, but
no infixes.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def typespec}:
(() | @{syntax type_ident} | '(' ( @{syntax type_ident} + ',' ) ')') @{syntax name}
;
@{syntax_def typespec_sorts}:
(() | (@{syntax type_ident} ('::' @{syntax sort})?) |
'(' ( (@{syntax type_ident} ('::' @{syntax sort})?) + ',' ) ')') @{syntax name}
- \<close>}
+ \<close>
\<close>
@@ -362,11 +361,11 @@
patterns of the form ``\<^theory_text>\<open>(is p\<^sub>1 \<dots> p\<^sub>n)\<close>''. This works both for @{syntax
term} and @{syntax prop}.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def term_pat}: '(' (@'is' @{syntax term} +) ')'
;
@{syntax_def prop_pat}: '(' (@'is' @{syntax prop} +) ')'
- \<close>}
+ \<close>
\<^medskip>
Declarations of local variables \<open>x :: \<tau>\<close> and logical propositions \<open>a : \<phi>\<close>
@@ -376,7 +375,7 @@
references of current facts). In any case, Isar proof elements usually admit
to introduce multiple such items simultaneously.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def vars}:
(((@{syntax name} +) ('::' @{syntax type})? |
@{syntax name} ('::' @{syntax type})? @{syntax mixfix}) + @'and')
@@ -384,7 +383,7 @@
@{syntax_def props}: @{syntax thmdecl}? (@{syntax prop} @{syntax prop_pat}? +)
;
@{syntax_def props'}: (@{syntax prop} @{syntax prop_pat}? +)
- \<close>}
+ \<close>
The treatment of multiple declarations corresponds to the complementary
focus of @{syntax vars} versus @{syntax props}. In ``\<open>x\<^sub>1 \<dots> x\<^sub>n :: \<tau>\<close>'' the
@@ -407,7 +406,7 @@
@{syntax atom} refers to any atomic entity, including any @{syntax keyword}
conforming to @{syntax sym_ident}.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def atom}: @{syntax name} | @{syntax type_ident} |
@{syntax type_var} | @{syntax term_var} | @{syntax nat} | @{syntax float} |
@{syntax keyword} | @{syntax cartouche}
@@ -417,7 +416,7 @@
@{syntax_def args}: arg *
;
@{syntax_def attributes}: '[' (@{syntax name} @{syntax args} * ',') ']'
- \<close>}
+ \<close>
Theorem specifications come in several flavors: @{syntax axmdecl} and
@{syntax thmdecl} usually refer to axioms, assumptions or results of goal
@@ -447,7 +446,7 @@
context will persist. This form of in-place declarations is particularly
useful with commands like @{command "declare"} and @{command "using"}.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def axmdecl}: @{syntax name} @{syntax attributes}? ':'
;
@{syntax_def thmbind}:
@@ -465,7 +464,7 @@
@{syntax_def thms}: @{syntax thm} +
;
selection: '(' ((@{syntax nat} | @{syntax nat} '-' @{syntax nat}?) + ',') ')'
- \<close>}
+ \<close>
\<close>
@@ -481,7 +480,7 @@
cases: each with its own scope of inferred types for free variables.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def for_fixes}: (@'for' @{syntax vars})?
;
@{syntax_def multi_specs}: (@{syntax structured_spec} + '|')
@@ -492,7 +491,7 @@
@{syntax_def spec_prems}: (@'if' ((@{syntax prop}+) + @'and'))?
;
@{syntax_def specification}: @{syntax vars} @'where' @{syntax multi_specs}
- \<close>}
+ \<close>
\<close>
@@ -513,7 +512,7 @@
@{command_def "print_term_bindings"}\<open>\<^sup>*\<close> & : & \<open>context \<rightarrow>\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{command print_theory} |
@@{command print_definitions} |
@@{command print_methods} |
@@ -534,7 +533,7 @@
@@{command thm_deps} @{syntax thmrefs}
;
@@{command unused_thms} ((@{syntax name} +) '-' (@{syntax name} * ))?
- \<close>}
+ \<close>
These commands print certain parts of the theory and proof context. Note
that there are some further ones available, such as for the set of rules
--- a/src/Doc/Isar_Ref/Proof.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Proof.thy Sat Jan 05 17:24:33 2019 +0100
@@ -46,11 +46,11 @@
@{command_def "notepad"} & : & \<open>local_theory \<rightarrow> proof(state)\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command notepad} @'begin'
;
@@{command end}
- \<close>}
+ \<close>
\<^descr> @{command "notepad"}~@{keyword "begin"} opens a proof state without any
goal statement. This allows to experiment with Isar, without producing any
@@ -157,7 +157,7 @@
reflexivity rule. Thus, exporting some result \<open>x \<equiv> t \<turnstile> \<phi>[x]\<close> yields \<open>\<turnstile>
\<phi>[t]\<close>.
- @{rail \<open>
+ \<^rail>\<open>
@@{command fix} @{syntax vars}
;
(@@{command assume} | @@{command presume}) concl prems @{syntax for_fixes}
@@ -168,7 +168,7 @@
;
@@{command define} @{syntax vars} @'where'
(@{syntax props} + @'and') @{syntax for_fixes}
- \<close>}
+ \<close>
\<^descr> @{command "fix"}~\<open>x\<close> introduces a local variable \<open>x\<close> that is \<^emph>\<open>arbitrary,
but fixed\<close>.
@@ -236,9 +236,9 @@
during the input process just after type checking. Also note that @{command
"define"} does not support polymorphism.
- @{rail \<open>
+ \<^rail>\<open>
@@{command let} ((@{syntax term} + @'and') '=' @{syntax term} + @'and')
- \<close>}
+ \<close>
The syntax of @{keyword "is"} patterns follows @{syntax term_pat} or
@{syntax prop_pat} (see \secref{sec:term-decls}).
@@ -288,14 +288,14 @@
the most recently established facts, but only \<^emph>\<open>before\<close> issuing a follow-up
claim.
- @{rail \<open>
+ \<^rail>\<open>
@@{command note} (@{syntax thmdef}? @{syntax thms} + @'and')
;
(@@{command from} | @@{command with} | @@{command using} | @@{command unfolding})
(@{syntax thms} + @'and')
;
@{method use} @{syntax thms} @'in' @{syntax method}
- \<close>}
+ \<close>
\<^descr> @{command "note"}~\<open>a = b\<^sub>1 \<dots> b\<^sub>n\<close> recalls existing facts \<open>b\<^sub>1, \<dots>, b\<^sub>n\<close>,
binding the result as \<open>a\<close>. Note that attributes may be involved as well,
@@ -395,7 +395,7 @@
disjunction of eliminated parameters and assumptions, cf.\
\secref{sec:obtain}).
- @{rail \<open>
+ \<^rail>\<open>
(@@{command lemma} | @@{command theorem} | @@{command corollary} |
@@{command proposition} | @@{command schematic_goal})
(long_statement | short_statement)
@@ -422,7 +422,7 @@
;
@{syntax_def obtain_case}: @{syntax vars} @'where'
(@{syntax thmdecl}? (@{syntax prop}+) + @'and')
- \<close>}
+ \<close>
\<^descr> @{command "lemma"}~\<open>a: \<phi>\<close> enters proof mode with \<open>\<phi>\<close> as main goal,
eventually resulting in some fact \<open>\<turnstile> \<phi>\<close> to be put back into the target
@@ -541,11 +541,11 @@
@{command "ultimately"} & \equiv & @{command "moreover"}~@{command "from"}~\<open>calculation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{command also} | @@{command finally}) ('(' @{syntax thms} ')')?
;
@@{attribute trans} (() | 'add' | 'del')
- \<close>}
+ \<close>
\<^descr> @{command "also"}~\<open>(a\<^sub>1 \<dots> a\<^sub>n)\<close> maintains the auxiliary @{fact
calculation} register as follows. The first occurrence of @{command "also"}
@@ -605,12 +605,12 @@
precedence of method combinators is \<^verbatim>\<open>|\<close> \<^verbatim>\<open>;\<close> \<^verbatim>\<open>,\<close> \<^verbatim>\<open>[]\<close> \<^verbatim>\<open>+\<close> \<^verbatim>\<open>?\<close> (from low
to high).
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def method}:
(@{syntax name} | '(' methods ')') (() | '?' | '+' | '[' @{syntax nat}? ']')
;
methods: (@{syntax name} @{syntax args} | @{syntax method}) + (',' | ';' | '|')
- \<close>}
+ \<close>
Regular Isar proof methods do \<^emph>\<open>not\<close> admit direct goal addressing, but refer
to the first subgoal or to all subgoals uniformly. Nonetheless, the
@@ -626,7 +626,7 @@
Structural composition ``\<open>m\<^sub>1\<close>\<^verbatim>\<open>;\<close>~\<open>m\<^sub>2\<close>'' means that method \<open>m\<^sub>1\<close> is
applied with restriction to the first subgoal, then \<open>m\<^sub>2\<close> is applied
consecutively with restriction to each subgoal that has newly emerged due to
- \<open>m\<^sub>1\<close>. This is analogous to the tactic combinator @{ML_op THEN_ALL_NEW} in
+ \<open>m\<^sub>1\<close>. This is analogous to the tactic combinator \<^ML_op>\<open>THEN_ALL_NEW\<close> in
Isabelle/ML, see also @{cite "isabelle-implementation"}. For example, \<open>(rule
r; blast)\<close> applies rule \<open>r\<close> and then solves all new subgoals by \<open>blast\<close>.
@@ -641,10 +641,10 @@
as explicit argument to the individual tactic being involved. Here ``\<open>[!]\<close>''
refers to all goals, and ``\<open>[n-]\<close>'' to all goals starting from \<open>n\<close>.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def goal_spec}:
'[' (@{syntax nat} '-' @{syntax nat} | @{syntax nat} '-' | @{syntax nat} | '!' ) ']'
- \<close>}
+ \<close>
\<close>
@@ -693,7 +693,7 @@
connective involved. There is no separate default terminal method. Any
remaining goals are always solved by assumption in the very last step.
- @{rail \<open>
+ \<^rail>\<open>
@@{command proof} method?
;
@@{command qed} method?
@@ -701,7 +701,7 @@
@@{command "by"} method method?
;
(@@{command "."} | @@{command ".."} | @@{command sorry})
- \<close>}
+ \<close>
\<^descr> @{command "proof"}~\<open>m\<^sub>1\<close> refines the goal by proof method \<open>m\<^sub>1\<close>; facts for
forward chaining are passed if so indicated by \<open>proof(chain)\<close> mode.
@@ -779,7 +779,7 @@
@{attribute_def "where"} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{method goal_cases} (@{syntax name}*)
;
@@{method fact} @{syntax thms}?
@@ -799,7 +799,7 @@
@@{attribute of} @{syntax insts} ('concl' ':' @{syntax insts})? @{syntax for_fixes}
;
@@{attribute "where"} @{syntax named_insts} @{syntax for_fixes}
- \<close>}
+ \<close>
\<^descr> @{command "print_rules"} prints rules declared via attributes @{attribute
(Pure) intro}, @{attribute (Pure) elim}, @{attribute (Pure) dest} of
@@ -908,16 +908,14 @@
@{command_def "method_setup"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command method_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
- \<close>}
+ \<close>
\<^descr> @{command "method_setup"}~\<open>name = text description\<close> defines a proof method
in the current context. The given \<open>text\<close> has to be an ML expression of type
- @{ML_type "(Proof.context -> Proof.method) context_parser"}, cf.\ basic
- parsers defined in structure @{ML_structure Args} and @{ML_structure
- Attrib}. There are also combinators like @{ML METHOD} and @{ML
- SIMPLE_METHOD} to turn certain tactic forms into official proof methods; the
+ \<^ML_type>\<open>(Proof.context -> Proof.method) context_parser\<close>, cf.\ basic
+ parsers defined in structure \<^ML_structure>\<open>Args\<close> and \<^ML_structure>\<open>Attrib\<close>. There are also combinators like \<^ML>\<open>METHOD\<close> and \<^ML>\<open>SIMPLE_METHOD\<close> to turn certain tactic forms into official proof methods; the
primed versions refer to tactics with explicit goal addressing.
Here are some example method definitions:
@@ -996,7 +994,7 @@
versions of rules that have been derived manually become ready to use in
advanced case analysis later.
- @{rail \<open>
+ \<^rail>\<open>
@@{command case} @{syntax thmdecl}? (name | '(' name (('_' | @{syntax name}) *) ')')
;
@@{attribute case_names} ((@{syntax name} ( '[' (('_' | @{syntax name}) *) ']' ) ? ) +)
@@ -1006,7 +1004,7 @@
@@{attribute params} ((@{syntax name} * ) + @'and')
;
@@{attribute consumes} @{syntax int}?
- \<close>}
+ \<close>
\<^descr> @{command "case"}~\<open>a: (c x\<^sub>1 \<dots> x\<^sub>m)\<close> invokes a named local context \<open>c:
x\<^sub>1, \<dots>, x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>m\<close>, as provided by an appropriate proof method (such
@@ -1095,7 +1093,7 @@
Method @{method induction} differs from @{method induct} only in the names
of the facts in the local context invoked by the @{command "case"} command.
- @{rail \<open>
+ \<^rail>\<open>
@@{method cases} ('(' 'no_simp' ')')? \<newline>
(@{syntax insts} * @'and') rule?
;
@@ -1114,7 +1112,7 @@
arbitrary: 'arbitrary' ':' ((@{syntax term} * ) @'and' +)
;
taking: 'taking' ':' @{syntax insts}
- \<close>}
+ \<close>
\<^descr> @{method cases}~\<open>insts R\<close> applies method @{method rule} with an
appropriate case distinction theorem, instantiated to the subjects \<open>insts\<close>.
@@ -1274,7 +1272,7 @@
@{attribute_def coinduct} & : & \<open>attribute\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{attribute cases} spec
;
@@{attribute induct} spec
@@ -1283,7 +1281,7 @@
;
spec: (('type' | 'pred' | 'set') ':' @{syntax name}) | 'del'
- \<close>}
+ \<close>
\<^descr> @{command "print_induct_rules"} prints cases and induct rules for
predicates (or sets) and types of the current context.
@@ -1335,7 +1333,7 @@
below. In particular, the logic of elimination and case splitting is
delegated to an Isar proof, which often involves automated tools.
- @{rail \<open>
+ \<^rail>\<open>
@@{command consider} @{syntax obtain_clauses}
;
@@{command obtain} @{syntax par_name}? @{syntax vars} \<newline>
@@ -1346,7 +1344,7 @@
prems: (@'if' (@{syntax props'} + @'and'))?
;
@@{command guess} @{syntax vars}
- \<close>}
+ \<close>
\<^descr> @{command consider}~\<open>(a) \<^vec>x \<WHERE> \<^vec>A \<^vec>x | (b)
\<^vec>y \<WHERE> \<^vec>B \<^vec>y | \<dots>\<close> states a rule for case splitting
--- a/src/Doc/Isar_Ref/Proof_Script.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Proof_Script.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,7 +36,7 @@
@{command_def "back"}\<open>\<^sup>*\<close> & : & \<open>proof \<rightarrow> proof\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command supply} (@{syntax thmdef}? @{syntax thms} + @'and')
;
( @@{command apply} | @@{command apply_end} ) @{syntax method}
@@ -44,7 +44,7 @@
@@{command defer} @{syntax nat}?
;
@@{command prefer} @{syntax nat}
- \<close>}
+ \<close>
\<^descr> @{command "supply"} supports fact definitions during goal refinement: it
is similar to @{command "note"}, but it operates in backwards mode and does
@@ -92,13 +92,13 @@
@{command_def "subgoal"}\<open>\<^sup>*\<close> & : & \<open>proof \<rightarrow> proof\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command subgoal} @{syntax thmbind}? prems? params?
;
prems: @'premises' @{syntax thmbind}?
;
params: @'for' '\<dots>'? (('_' | @{syntax name})+)
- \<close>}
+ \<close>
\<^descr> @{command "subgoal"} allows to impose some structure on backward
refinements, to avoid proof scripts degenerating into long of @{command
@@ -219,7 +219,7 @@
@{method_def raw_tactic}\<open>\<^sup>*\<close> & : & \<open>method\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
@@{method frule_tac} | @@{method cut_tac}) @{syntax goal_spec}? \<newline>
(@{syntax named_insts} @{syntax for_fixes} @'in' @{syntax thm} | @{syntax thms} )
@@ -233,14 +233,13 @@
@@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
;
(@@{method tactic} | @@{method raw_tactic}) @{syntax text}
- \<close>}
+ \<close>
\<^descr> @{method rule_tac} etc. do resolution of rules with explicit
- instantiation. This works the same way as the ML tactics @{ML
- Rule_Insts.res_inst_tac} etc.\ (see @{cite "isabelle-implementation"}).
+ instantiation. This works the same way as the ML tactics \<^ML>\<open>Rule_Insts.res_inst_tac\<close> etc.\ (see @{cite "isabelle-implementation"}).
Multiple rules may be only given if there is no instantiation; then @{method
- rule_tac} is the same as @{ML resolve_tac} in ML (see @{cite
+ rule_tac} is the same as \<^ML>\<open>resolve_tac\<close> in ML (see @{cite
"isabelle-implementation"}).
\<^descr> @{method cut_tac} inserts facts into the proof state as assumption of a
@@ -267,8 +266,8 @@
\<open>n\<close> is negative; the default value is 1.
\<^descr> @{method tactic}~\<open>text\<close> produces a proof method from any ML text of type
- @{ML_type tactic}. Apart from the usual ML environment and the current proof
- context, the ML code may refer to the locally bound values @{ML_text facts},
+ \<^ML_type>\<open>tactic\<close>. Apart from the usual ML environment and the current proof
+ context, the ML code may refer to the locally bound values \<^ML_text>\<open>facts\<close>,
which indicates any current facts used for forward-chaining.
\<^descr> @{method raw_tactic} is similar to @{method tactic}, but presents the goal
--- a/src/Doc/Isar_Ref/Spec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Spec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -57,7 +57,7 @@
does not belong to a local theory target. No further commands may follow
such a global @{command (global) "end"}.
- @{rail \<open>
+ \<^rail>\<open>
@@{command theory} @{syntax system_name}
@'imports' (@{syntax system_name} +) \<newline>
keywords? abbrevs? @'begin'
@@ -71,7 +71,7 @@
@@{command thy_deps} (thy_bounds thy_bounds?)?
;
thy_bounds: @{syntax name} | '(' (@{syntax name} + @'|') ')'
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>theory A imports B\<^sub>1 \<dots> B\<^sub>n begin\<close> starts a new theory \<open>A\<close> based on the
merge of existing theories \<open>B\<^sub>1 \<dots> B\<^sub>n\<close>. Due to the possibility to import
@@ -82,9 +82,9 @@
processed.
Empty imports are only allowed in the bootstrap process of the special
- theory @{theory Pure}, which is the start of any other formal development
+ theory \<^theory>\<open>Pure\<close>, which is the start of any other formal development
based on Isabelle. Regular user theories usually refer to some more complex
- entry point, such as theory @{theory Main} in Isabelle/HOL.
+ entry point, such as theory \<^theory>\<open>Main\<close> in Isabelle/HOL.
The @{keyword_def "keywords"} specification declares outer syntax
(\chref{ch:outer-syntax}) that is introduced in this theory later on (rare
@@ -138,13 +138,13 @@
contexts may be nested within other targets, like \<^theory_text>\<open>locale\<close>, \<^theory_text>\<open>class\<close>,
\<^theory_text>\<open>instantiation\<close>, \<^theory_text>\<open>overloading\<close>.
- @{rail \<open>
+ \<^rail>\<open>
@@{command context} @{syntax name} @'begin'
;
@@{command context} @{syntax_ref "includes"}? (@{syntax context_elem} * ) @'begin'
;
@{syntax_def target}: '(' @'in' @{syntax name} ')'
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>context c begin\<close> opens a named context, by recommencing an existing
locale or class \<open>c\<close>. Note that locale and class definitions allow to include
@@ -228,7 +228,7 @@
which is in contrast to locales and locale interpretation
(\secref{sec:locale}).
- @{rail \<open>
+ \<^rail>\<open>
@@{command bundle} @{syntax name}
( '=' @{syntax thms} @{syntax for_fixes} | @'begin')
;
@@ -237,7 +237,7 @@
(@@{command include} | @@{command including}) (@{syntax name}+)
;
@{syntax_def "includes"}: @'includes' (@{syntax name}+)
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>bundle b = decls\<close> defines a bundle of declarations in the current
context. The RHS is similar to the one of the \<^theory_text>\<open>declare\<close> command. Bundles
@@ -299,7 +299,7 @@
rewrite system on abstract syntax. The second form is called
``abbreviation''.
- @{rail \<open>
+ \<^rail>\<open>
@@{command definition} decl? definition
;
@@{command abbreviation} @{syntax mode}? decl? abbreviation
@@ -312,7 +312,7 @@
@{syntax spec_prems} @{syntax for_fixes}
;
abbreviation: @{syntax prop} @{syntax for_fixes}
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>definition c where eq\<close> produces an internal definition \<open>c \<equiv> t\<close> according
to the specification given as \<open>eq\<close>, which is then turned into a proven fact.
@@ -355,12 +355,12 @@
@{command_def "axiomatization"} & : & \<open>theory \<rightarrow> theory\<close> & (axiomatic!) \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command axiomatization} @{syntax vars}? (@'where' axiomatization)?
;
axiomatization: (@{syntax thmdecl} @{syntax prop} + @'and')
@{syntax spec_prems} @{syntax for_fixes}
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>axiomatization c\<^sub>1 \<dots> c\<^sub>m where \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n\<close> introduces several constants
simultaneously and states axiomatic properties for these. The constants are
@@ -399,15 +399,14 @@
important special case: it consists of a theorem which is applied to the
context by means of an attribute.
- @{rail \<open>
+ \<^rail>\<open>
(@@{command declaration} | @@{command syntax_declaration})
('(' @'pervasive' ')')? \<newline> @{syntax text}
;
@@{command declare} (@{syntax thms} + @'and')
- \<close>}
+ \<close>
- \<^descr> \<^theory_text>\<open>declaration d\<close> adds the declaration function \<open>d\<close> of ML type @{ML_type
- declaration}, to the current local theory under construction. In later
+ \<^descr> \<^theory_text>\<open>declaration d\<close> adds the declaration function \<open>d\<close> of ML type \<^ML_type>\<open>declaration\<close>, to the current local theory under construction. In later
application contexts, the function is transformed according to the morphisms
being involved in the interpretation hierarchy.
@@ -457,7 +456,7 @@
locales. The context consists of the declaration elements from the locale
instances. Redundant locale instances are omitted according to roundup.
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def locale_expr}: (instance + '+') @{syntax for_fixes}
;
instance: (qualifier ':')? @{syntax name} (pos_insts | named_insts) \<newline>
@@ -470,7 +469,7 @@
named_insts: @'where' (@{syntax name} '=' @{syntax term} + @'and')
;
rewrites: @'rewrites' (@{syntax thmdecl}? @{syntax prop} + @'and')
- \<close>}
+ \<close>
A locale instance consists of a reference to a locale and either positional
or named parameter instantiations optionally followed by rewrites clauses.
@@ -514,7 +513,7 @@
\indexisarelem{fixes}\indexisarelem{constrains}\indexisarelem{assumes}
\indexisarelem{defines}\indexisarelem{notes}
- @{rail \<open>
+ \<^rail>\<open>
@@{command locale} @{syntax name} ('=' @{syntax locale})? @'begin'?
;
@@{command experiment} (@{syntax context_elem}*) @'begin'
@@ -532,7 +531,7 @@
@'assumes' (@{syntax props} + @'and') |
@'defines' (@{syntax thmdecl}? @{syntax prop} @{syntax prop_pat}? + @'and') |
@'notes' (@{syntax thmdef}? @{syntax thms} + @'and')
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>locale loc = import + body\<close> defines a new locale \<open>loc\<close> as a context
consisting of a certain view of existing locales (\<open>import\<close>) plus some
@@ -650,7 +649,7 @@
bodies (\<^theory_text>\<open>interpret\<close>), into global theories (\<^theory_text>\<open>global_interpretation\<close>) and
into locales (\<^theory_text>\<open>sublocale\<close>).
- @{rail \<open>
+ \<^rail>\<open>
@@{command interpretation} @{syntax locale_expr}
;
@@{command interpret} @{syntax locale_expr}
@@ -667,7 +666,7 @@
definitions: @'defines' (@{syntax thmdecl}? @{syntax name} \<newline>
@{syntax mixfix}? @'=' @{syntax term} + @'and');
- \<close>}
+ \<close>
The core of each interpretation command is a locale expression \<open>expr\<close>; the
command generates proof obligations for the instantiated specifications.
@@ -819,7 +818,7 @@
classes (notably type-inference). See @{cite "isabelle-classes"} for a short
tutorial.
- @{rail \<open>
+ \<^rail>\<open>
@@{command class} class_spec @'begin'?
;
class_spec: @{syntax name} '='
@@ -836,7 +835,7 @@
@@{command class_deps} (class_bounds class_bounds?)?
;
class_bounds: @{syntax sort} | '(' (@{syntax sort} + @'|') ')'
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>class c = superclasses + body\<close> defines a new class \<open>c\<close>, inheriting from
\<open>superclasses\<close>. This introduces a locale \<open>c\<close> with import of all locales
@@ -997,13 +996,13 @@
@{command_def "overloading"} & : & \<open>theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command consts} ((@{syntax name} '::' @{syntax type} @{syntax mixfix}?) +)
;
@@{command overloading} ( spec + ) @'begin'
;
spec: @{syntax name} ( '\<equiv>' | '==' ) @{syntax term} ( '(' @'unchecked' ')' )?
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>consts c :: \<sigma>\<close> declares constant \<open>c\<close> to have any instance of type scheme
\<open>\<sigma>\<close>. The optional mixfix annotations may attach concrete syntax to the
@@ -1074,7 +1073,7 @@
@{attribute_def ML_exception_debugger} & : & \<open>attribute\<close> & default \<open>false\<close> \\
\end{tabular}
- @{rail \<open>
+ \<^rail>\<open>
(@@{command SML_file} |
@@{command SML_file_debug} |
@@{command SML_file_no_debug} |
@@ -1087,7 +1086,7 @@
@@{command local_setup}) @{syntax text}
;
@@{command attribute_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>SML_file name\<close> reads and evaluates the given Standard ML file. Top-level
SML bindings are stored within the (global or local) theory context; the
@@ -1097,8 +1096,7 @@
the regular Isabelle/ML environment.
\<^descr> \<^theory_text>\<open>ML_file name\<close> reads and evaluates the given ML file. The current theory
- context is passed down to the ML toplevel and may be modified, using @{ML
- "Context.>>"} or derived ML commands. Top-level ML bindings are stored
+ context is passed down to the ML toplevel and may be modified, using \<^ML>\<open>Context.>>\<close> or derived ML commands. Top-level ML bindings are stored
within the (global or local) theory context.
\<^descr> \<^theory_text>\<open>SML_file_debug\<close>, \<^theory_text>\<open>SML_file_no_debug\<close>, \<^theory_text>\<open>ML_file_debug\<close>, and
@@ -1125,19 +1123,19 @@
at the ML toplevel, but \<^theory_text>\<open>ML_command\<close> is silent.
\<^descr> \<^theory_text>\<open>setup "text"\<close> changes the current theory context by applying \<open>text\<close>,
- which refers to an ML expression of type @{ML_type "theory -> theory"}. This
+ which refers to an ML expression of type \<^ML_type>\<open>theory -> theory\<close>. This
enables to initialize any object-logic specific tools and packages written
in ML, for example.
\<^descr> \<^theory_text>\<open>local_setup\<close> is similar to \<^theory_text>\<open>setup\<close> for a local theory context, and an
- ML expression of type @{ML_type "local_theory -> local_theory"}. This allows
+ ML expression of type \<^ML_type>\<open>local_theory -> local_theory\<close>. This allows
to invoke local theory specification packages without going through concrete
outer syntax, for example.
\<^descr> \<^theory_text>\<open>attribute_setup name = "text" description\<close> defines an attribute in the
current context. The given \<open>text\<close> has to be an ML expression of type
- @{ML_type "attribute context_parser"}, cf.\ basic parsers defined in
- structure @{ML_structure Args} and @{ML_structure Attrib}.
+ \<^ML_type>\<open>attribute context_parser\<close>, cf.\ basic parsers defined in
+ structure \<^ML_structure>\<open>Args\<close> and \<^ML_structure>\<open>Attrib\<close>.
In principle, attributes can operate both on a given theorem and the
implicit context, although in practice only one is modified and the other
@@ -1179,7 +1177,7 @@
should print a detailed stack trace on exceptions. The result is dependent
on various ML compiler optimizations. The boundary for the exception trace
is the current Isar command transactions: it is occasionally better to
- insert the combinator @{ML Runtime.exn_trace} into ML code for debugging
+ insert the combinator \<^ML>\<open>Runtime.exn_trace\<close> into ML code for debugging
@{cite "isabelle-implementation"}, closer to the point where it actually
happens.
@@ -1197,12 +1195,11 @@
@{command_def "external_file"} & : & \<open>any \<rightarrow> any\<close> \\
\end{matharray}
- @{rail \<open>@@{command external_file} @{syntax name} ';'?\<close>}
+ \<^rail>\<open>@@{command external_file} @{syntax name} ';'?\<close>
\<^descr> \<^theory_text>\<open>external_file name\<close> declares the formal dependency on the given file
name, such that the Isabelle build process knows about it (see also @{cite
- "isabelle-system"}). The file can be read e.g.\ in Isabelle/ML via @{ML
- File.read}, without specific management by the Prover IDE.
+ "isabelle-system"}). The file can be read e.g.\ in Isabelle/ML via \<^ML>\<open>File.read\<close>, without specific management by the Prover IDE.
\<close>
@@ -1216,9 +1213,9 @@
@{command_def "default_sort"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command default_sort} @{syntax sort}
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>default_sort s\<close> makes sort \<open>s\<close> the new default sort for any type
variable that is given explicitly in the text, but lacks a sort constraint
@@ -1226,7 +1223,7 @@
not affected.
Usually the default sort is only changed when defining a new object-logic.
- For example, the default sort in Isabelle/HOL is @{class type}, the class of
+ For example, the default sort in Isabelle/HOL is \<^class>\<open>type\<close>, the class of
all HOL types.
When merging theories, the default sorts of the parents are logically
@@ -1242,11 +1239,11 @@
@{command_def "typedecl"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command type_synonym} (@{syntax typespec} '=' @{syntax type} @{syntax mixfix}?)
;
@@{command typedecl} @{syntax typespec} @{syntax mixfix}?
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>type_synonym (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>\<close> introduces a \<^emph>\<open>type synonym\<close> \<open>(\<alpha>\<^sub>1, \<dots>,
\<alpha>\<^sub>n) t\<close> for the existing type \<open>\<tau>\<close>. Unlike the semantic type definitions in
@@ -1280,12 +1277,12 @@
@{command_def "named_theorems"} & : & \<open>local_theory \<rightarrow> local_theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{command lemmas} (@{syntax thmdef}? @{syntax thms} + @'and')
@{syntax for_fixes}
;
@@{command named_theorems} (@{syntax name} @{syntax text}? + @'and')
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>lemmas a = b\<^sub>1 \<dots> b\<^sub>n\<close>~@{keyword_def "for"}~\<open>x\<^sub>1 \<dots> x\<^sub>m\<close> evaluates given
facts (with attributes) in the current context, which may be augmented by
@@ -1321,13 +1318,13 @@
asserted, and records within the internal derivation object how presumed
theorems depend on unproven suppositions.
- @{rail \<open>
+ \<^rail>\<open>
@@{command oracle} @{syntax name} '=' @{syntax text}
- \<close>}
+ \<close>
\<^descr> \<^theory_text>\<open>oracle name = "text"\<close> turns the given ML expression \<open>text\<close> of type
- @{ML_text "'a -> cterm"} into an ML function of type @{ML_text "'a -> thm"},
- which is bound to the global identifier @{ML_text name}. This acts like an
+ \<^ML_text>\<open>'a -> cterm\<close> into an ML function of type \<^ML_text>\<open>'a -> thm\<close>,
+ which is bound to the global identifier \<^ML_text>\<open>name\<close>. This acts like an
infinitary specification of axioms! Invoking the oracle only works within
the scope of the resulting theory.
@@ -1349,12 +1346,12 @@
@{command_def "hide_fact"} & : & \<open>theory \<rightarrow> theory\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@{command alias} | @{command type_alias}) @{syntax name} '=' @{syntax name}
;
(@{command hide_class} | @{command hide_type} |
@{command hide_const} | @{command hide_fact}) ('(' @'open' ')')? (@{syntax name} + )
- \<close>}
+ \<close>
Isabelle organizes any kind of name declarations (of types, constants,
theorems etc.) by separate hierarchically structured name spaces. Normally
--- a/src/Doc/Isar_Ref/Synopsis.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Isar_Ref/Synopsis.thy Sat Jan 05 17:24:33 2019 +0100
@@ -552,7 +552,7 @@
text \<open>
The object-logic is embedded into the Pure framework via an implicit
- derivability judgment @{term "Trueprop :: bool \<Rightarrow> prop"}.
+ derivability judgment \<^term>\<open>Trueprop :: bool \<Rightarrow> prop\<close>.
Thus any HOL formulae appears atomic to the Pure framework, while
the rule structure outlines the corresponding proof pattern.
@@ -754,7 +754,7 @@
show "B\<^sub>2 x" \<proof>
qed
- txt \<open>The compound rule premise @{prop "\<And>x. B\<^sub>1 x \<Longrightarrow> B\<^sub>2 x"} is better
+ txt \<open>The compound rule premise \<^prop>\<open>\<And>x. B\<^sub>1 x \<Longrightarrow> B\<^sub>2 x\<close> is better
addressed via @{command fix}~/ @{command assume}~/ @{command show}
in the nested proof body.\<close>
end
@@ -1080,8 +1080,8 @@
subsection \<open>Obtaining local contexts\<close>
text \<open>A single ``case'' branch may be inlined into Isar proof text
- via @{command obtain}. This proves @{prop "(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
- thesis"} on the spot, and augments the context afterwards.\<close>
+ via @{command obtain}. This proves \<^prop>\<open>(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
+ thesis\<close> on the spot, and augments the context afterwards.\<close>
notepad
begin
@@ -1090,7 +1090,7 @@
obtain x where "B x" \<proof>
note \<open>B x\<close>
- txt \<open>Conclusions from this context may not mention @{term x} again!\<close>
+ txt \<open>Conclusions from this context may not mention \<^term>\<open>x\<close> again!\<close>
{
obtain x where "B x" \<proof>
from \<open>B x\<close> have C \<proof>
--- a/src/Doc/JEdit/JEdit.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/JEdit/JEdit.thy Sat Jan 05 17:24:33 2019 +0100
@@ -164,11 +164,10 @@
Regular jEdit options are accessible via the dialogs \<^emph>\<open>Utilities~/ Global
Options\<close> or \<^emph>\<open>Plugins~/ Plugin Options\<close>, with a second chance to flip the
- two within the central options dialog. Changes are stored in @{path
- "$JEDIT_SETTINGS/properties"} and @{path "$JEDIT_SETTINGS/keymaps"}.
+ two within the central options dialog. Changes are stored in \<^path>\<open>$JEDIT_SETTINGS/properties\<close> and \<^path>\<open>$JEDIT_SETTINGS/keymaps\<close>.
Isabelle system options are managed by Isabelle/Scala and changes are stored
- in @{path "$ISABELLE_HOME_USER/etc/preferences"}, independently of
+ in \<^path>\<open>$ISABELLE_HOME_USER/etc/preferences\<close>, independently of
other jEdit properties. See also @{cite "isabelle-system"}, especially the
coverage of sessions and command-line tools like @{tool build} or @{tool
options}.
@@ -190,8 +189,8 @@
\<^medskip>
Options are usually loaded on startup and saved on shutdown of
- Isabelle/jEdit. Editing the generated @{path "$JEDIT_SETTINGS/properties"}
- or @{path "$ISABELLE_HOME_USER/etc/preferences"} manually while the
+ Isabelle/jEdit. Editing the generated \<^path>\<open>$JEDIT_SETTINGS/properties\<close>
+ or \<^path>\<open>$ISABELLE_HOME_USER/etc/preferences\<close> manually while the
application is running may cause surprise due to lost updates!
\<close>
@@ -487,7 +486,7 @@
physically via Unicode glyphs, in order to show ``\<^verbatim>\<open>\<alpha>\<close>'' as ``\<open>\<alpha>\<close>'', for
example. This symbol interpretation is specified by the Isabelle system
distribution in \<^file>\<open>$ISABELLE_HOME/etc/symbols\<close> and may be augmented by the
- user in @{path "$ISABELLE_HOME_USER/etc/symbols"}.
+ user in \<^path>\<open>$ISABELLE_HOME_USER/etc/symbols\<close>.
The appendix of @{cite "isabelle-isar-ref"} gives an overview of the
standard interpretation of finitely many symbols from the infinite
@@ -1151,10 +1150,10 @@
or proof context matching all of given criteria in the \<^emph>\<open>Find\<close> text field. A
single criterion has the following syntax:
- @{rail \<open>
+ \<^rail>\<open>
('-'?) ('name' ':' @{syntax name} | 'intro' | 'elim' | 'dest' |
'solves' | 'simp' ':' @{syntax term} | @{syntax term})
- \<close>}
+ \<close>
See also the Isar command @{command_ref find_theorems} in @{cite
"isabelle-isar-ref"}.
@@ -1168,10 +1167,10 @@
meets all of the given criteria in the \<^emph>\<open>Find\<close> text field. A single
criterion has the following syntax:
- @{rail \<open>
+ \<^rail>\<open>
('-'?)
('name' ':' @{syntax name} | 'strict' ':' @{syntax type} | @{syntax type})
- \<close>}
+ \<close>
See also the Isar command @{command_ref find_consts} in @{cite
"isabelle-isar-ref"}.
@@ -1359,8 +1358,7 @@
text \<open>
The completion tables for Isabelle symbols (\secref{sec:symbols}) are
- determined statically from \<^file>\<open>$ISABELLE_HOME/etc/symbols\<close> and @{path
- "$ISABELLE_HOME_USER/etc/symbols"} for each symbol specification as follows:
+ determined statically from \<^file>\<open>$ISABELLE_HOME/etc/symbols\<close> and \<^path>\<open>$ISABELLE_HOME_USER/etc/symbols\<close> for each symbol specification as follows:
\<^medskip>
\begin{tabular}{ll}
@@ -1671,8 +1669,7 @@
dictionary, taken from the colon-separated list in the settings variable
@{setting_def JORTHO_DICTIONARIES}. There are jEdit actions to specify local
updates to a dictionary, by including or excluding words. The result of
- permanent dictionary updates is stored in the directory @{path
- "$ISABELLE_HOME_USER/dictionaries"}, in a separate file for each dictionary.
+ permanent dictionary updates is stored in the directory \<^path>\<open>$ISABELLE_HOME_USER/dictionaries\<close>, in a separate file for each dictionary.
\<^item> @{system_option_def spell_checker_include} specifies a comma-separated
list of markup elements that delimit words in the source that is subject to
@@ -2008,7 +2005,7 @@
(like @{command SML_file}).
The context for Isabelle/ML is optional, it may evaluate to a value of type
- @{ML_type theory}, @{ML_type Proof.context}, or @{ML_type Context.generic}.
+ \<^ML_type>\<open>theory\<close>, \<^ML_type>\<open>Proof.context\<close>, or \<^ML_type>\<open>Context.generic\<close>.
Thus the given ML expression (with its antiquotations) may be subject to the
intended dynamic run-time context, instead of the static compile-time
context.
@@ -2088,15 +2085,13 @@
compliant.
Under normal circumstances, prover output always works via managed message
- channels (corresponding to @{ML writeln}, @{ML warning}, @{ML
- Output.error_message} in Isabelle/ML), which are displayed by regular means
+ channels (corresponding to \<^ML>\<open>writeln\<close>, \<^ML>\<open>warning\<close>, \<^ML>\<open>Output.error_message\<close> in Isabelle/ML), which are displayed by regular means
within the document model (\secref{sec:output}). Unhandled Isabelle/ML
- exceptions are printed by the system via @{ML Output.error_message}.
+ exceptions are printed by the system via \<^ML>\<open>Output.error_message\<close>.
\<^item> \<^emph>\<open>Syslog\<close> shows system messages that might be relevant to diagnose
problems with the startup or shutdown phase of the prover process; this also
- includes raw output on \<^verbatim>\<open>stderr\<close>. Isabelle/ML also provides an explicit @{ML
- Output.system_message} operation, which is occasionally useful for
+ includes raw output on \<^verbatim>\<open>stderr\<close>. Isabelle/ML also provides an explicit \<^ML>\<open>Output.system_message\<close> operation, which is occasionally useful for
diagnostic purposes within the system infrastructure itself.
A limited amount of syslog messages are buffered, independently of the
--- a/src/Doc/Locales/Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Locales/Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -62,8 +62,8 @@
Isabelle recognises unbound names as free variables. In locale
assumptions, these are implicitly universally quantified. That is,
- @{term "\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"} in fact means
- @{term "\<And>x y z. \<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"}.
+ \<^term>\<open>\<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z\<close> in fact means
+ \<^term>\<open>\<And>x y z. \<lbrakk> x \<sqsubseteq> y; y \<sqsubseteq> z \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z\<close>.
Two commands are provided to inspect locales:
\isakeyword{print\_locales} lists the names of all locales of the
@@ -71,7 +71,7 @@
and assumptions of locale $n$; the variation \isakeyword{print\_locale!}~$n$
additionally outputs the conclusions that are stored in the locale.
We may inspect the new locale
- by issuing \isakeyword{print\_locale!} @{term partial_order}. The output
+ by issuing \isakeyword{print\_locale!} \<^term>\<open>partial_order\<close>. The output
is the following list of context elements.
\begin{small}
\begin{alltt}
@@ -88,9 +88,8 @@
This differs from the declaration. The assumptions have turned into
conclusions, denoted by the keyword \isakeyword{notes}. Also,
- there is only one assumption, namely @{term "partial_order le"}.
- The locale declaration has introduced the predicate @{term
- partial_order} to the theory. This predicate is the
+ there is only one assumption, namely \<^term>\<open>partial_order le\<close>.
+ The locale declaration has introduced the predicate \<^term>\<open>partial_order\<close> to the theory. This predicate is the
\emph{locale predicate}. Its definition may be inspected by
issuing \isakeyword{thm} @{thm [source] partial_order_def}.
@{thm [display, indent=2] partial_order_def}
@@ -150,7 +149,7 @@
defined in terms of the locale parameter~\<open>le\<close> and the general
equality of the object logic we work in. The definition generates a
\emph{foundational constant}
- @{term partial_order.less} with definition @{thm [source]
+ \<^term>\<open>partial_order.less\<close> with definition @{thm [source]
partial_order.less_def}:
@{thm [display, indent=2] partial_order.less_def}
At the same time, the locale is extended by syntax transformations
@@ -659,7 +658,7 @@
Consider the locale hierarchy from Figure~\ref{fig:lattices}(a).
Total orders are lattices, although this is not reflected here, and
definitions, theorems and other conclusions
- from @{term lattice} are not available in @{term total_order}. To
+ from \<^term>\<open>lattice\<close> are not available in \<^term>\<open>total_order\<close>. To
obtain the situation in Figure~\ref{fig:lattices}(b), it is
sufficient to add the conclusions of the latter locale to the former.
The \isakeyword{sublocale} command does exactly this.
--- a/src/Doc/Locales/Examples1.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Locales/Examples1.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,9 +18,9 @@
interpretations in proofs, in
Section~\ref{sec:local-interpretation}.
- As an example, consider the type of integers @{typ int}. The
- relation @{term "(\<le>)"} is a total order over @{typ int}. We start
- with the interpretation that @{term "(\<le>)"} is a partial order. The
+ As an example, consider the type of integers \<^typ>\<open>int\<close>. The
+ relation \<^term>\<open>(\<le>)\<close> is a total order over \<^typ>\<open>int\<close>. We start
+ with the interpretation that \<^term>\<open>(\<le>)\<close> is a partial order. The
facilities of the interpretation command are explored gradually in
three versions.
\<close>
@@ -32,8 +32,8 @@
text \<open>
The command \isakeyword{interpretation} is for the interpretation of
locale in theories. In the following example, the parameter of locale
- \<open>partial_order\<close> is replaced by @{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow>
- bool"} and the locale instance is interpreted in the current
+ \<open>partial_order\<close> is replaced by \<^term>\<open>(\<le>) :: int \<Rightarrow> int \<Rightarrow>
+ bool\<close> and the locale instance is interpreted in the current
theory.\<close>
interpretation %visible int: partial_order "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"
@@ -55,8 +55,7 @@
text \<open>The effect of the command is that instances of all
conclusions of the locale are available in the theory, where names
- are prefixed by the qualifier. For example, transitivity for @{typ
- int} is named @{thm [source] int.trans} and is the following
+ are prefixed by the qualifier. For example, transitivity for \<^typ>\<open>int\<close> is named @{thm [source] int.trans} and is the following
theorem:
@{thm [display, indent=2] int.trans}
It is not possible to reference this theorem simply as \<open>trans\<close>. This prevents unwanted hiding of existing theorems of the
@@ -72,14 +71,14 @@
the interpretation of the definition, which is \<open>partial_order.less (\<le>)\<close>.
Qualified name and expanded form may be used almost
interchangeably.%
-\footnote{Since @{term "(\<le>)"} is polymorphic, for \<open>partial_order.less (\<le>)\<close> a
+\footnote{Since \<^term>\<open>(\<le>)\<close> is polymorphic, for \<open>partial_order.less (\<le>)\<close> a
more general type will be inferred than for \<open>int.less\<close> which
- is over type @{typ int}.}
+ is over type \<^typ>\<open>int\<close>.}
The former is preferred on output, as for example in the theorem
@{thm [source] int.less_le_trans}: @{thm [display, indent=2]
int.less_le_trans}
Both notations for the strict order are not satisfactory. The
- constant @{term "(<)"} is the strict order for @{typ int}.
+ constant \<^term>\<open>(<)\<close> is the strict order for \<^typ>\<open>int\<close>.
In order to allow for the desired replacement, interpretation
accepts \emph{equations} in addition to the parameter instantiation.
These follow the locale expression and are indicated with the
--- a/src/Doc/Locales/Examples2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Locales/Examples2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
show "partial_order ((\<le>) :: int \<Rightarrow> int \<Rightarrow> bool)"
by unfold_locales auto
txt \<open>\normalsize The second goal is shown by unfolding the
- definition of @{term "partial_order.less"}.\<close>
+ definition of \<^term>\<open>partial_order.less\<close>.\<close>
show "partial_order.less (\<le>) x y = (x < y)"
unfolding partial_order.less_def [OF \<open>partial_order (\<le>)\<close>]
by auto
--- a/src/Doc/Locales/Examples3.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Locales/Examples3.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,7 +5,7 @@
subsection \<open>Third Version: Local Interpretation
\label{sec:local-interpretation}\<close>
-text \<open>In the above example, the fact that @{term "(\<le>)"} is a partial
+text \<open>In the above example, the fact that \<^term>\<open>(\<le>)\<close> is a partial
order for the integers was used in the second goal to
discharge the premise in the definition of \<open>(\<sqsubset>)\<close>. In
general, proofs of the equations not only may involve definitions
@@ -40,8 +40,8 @@
text \<open>Further interpretations are necessary for
the other locales. In \<open>lattice\<close> the operations~\<open>\<sqinter>\<close>
- and~\<open>\<squnion>\<close> are substituted by @{term "min :: int \<Rightarrow> int \<Rightarrow> int"}
- and @{term "max :: int \<Rightarrow> int \<Rightarrow> int"}. The entire proof for the
+ and~\<open>\<squnion>\<close> are substituted by \<^term>\<open>min :: int \<Rightarrow> int \<Rightarrow> int\<close>
+ and \<^term>\<open>max :: int \<Rightarrow> int \<Rightarrow> int\<close>. The entire proof for the
interpretation is reproduced to give an example of a more
elaborate interpretation proof. Note that the equations are named
so they can be used in a later example.\<close>
@@ -115,7 +115,7 @@
the \isakeyword{sublocale} command. Existing interpretations are
skipped avoiding duplicate work.
\item
- The predicate @{term "(<)"} appears in theorem @{thm [source]
+ The predicate \<^term>\<open>(<)\<close> appears in theorem @{thm [source]
int.less_total}
although an equation for the replacement of \<open>(\<sqsubset>)\<close> was only
given in the interpretation of \<open>partial_order\<close>. The
@@ -128,7 +128,7 @@
text \<open>The interpretations for a locale $n$ within the current
theory may be inspected with \isakeyword{print\_interps}~$n$. This
prints the list of instances of $n$, for which interpretations exist.
- For example, \isakeyword{print\_interps} @{term partial_order}
+ For example, \isakeyword{print\_interps} \<^term>\<open>partial_order\<close>
outputs the following:
\begin{small}
\begin{alltt}
@@ -148,7 +148,7 @@
section \<open>Locale Expressions \label{sec:expressions}\<close>
text \<open>
- A map~@{term \<phi>} between partial orders~\<open>\<sqsubseteq>\<close> and~\<open>\<preceq>\<close>
+ A map~\<^term>\<open>\<phi>\<close> between partial orders~\<open>\<sqsubseteq>\<close> and~\<open>\<preceq>\<close>
is called order preserving if \<open>x \<sqsubseteq> y\<close> implies \<open>\<phi> x \<preceq>
\<phi> y\<close>. This situation is more complex than those encountered so
far: it involves two partial orders, and it is desirable to use the
@@ -218,8 +218,7 @@
\hspace*{1em}@{thm [source] le'.less_le_trans}:
@{thm [display, indent=4] le'.less_le_trans}
While there is infix syntax for the strict operation associated with
- @{term "(\<sqsubseteq>)"}, there is none for the strict version of @{term
- "(\<preceq>)"}. The syntax \<open>\<sqsubset>\<close> for \<open>less\<close> is only
+ \<^term>\<open>(\<sqsubseteq>)\<close>, there is none for the strict version of \<^term>\<open>(\<preceq>)\<close>. The syntax \<open>\<sqsubset>\<close> for \<open>less\<close> is only
available for the original instance it was declared for. We may
introduce infix syntax for \<open>le'.less\<close> with the following declaration:\<close>
@@ -285,8 +284,7 @@
assumes hom_meet: "\<phi> (x \<sqinter> y) = le'.meet (\<phi> x) (\<phi> y)"
and hom_join: "\<phi> (x \<squnion> y) = le'.join (\<phi> x) (\<phi> y)"
-text \<open>The parameter instantiation in the first instance of @{term
- lattice} is omitted. This causes the parameter~\<open>le\<close> to be
+text \<open>The parameter instantiation in the first instance of \<^term>\<open>lattice\<close> is omitted. This causes the parameter~\<open>le\<close> to be
added to the \isakeyword{for} clause, and the locale has
parameters~\<open>le\<close>,~\<open>le'\<close> and, of course,~\<open>\<phi>\<close>.
@@ -383,10 +381,10 @@
certain conditions are fulfilled. Take, for example, the function
\<open>\<lambda>i. n * i\<close> that scales its argument by a constant factor.
This function is order preserving (and even a lattice endomorphism)
- with respect to @{term "(\<le>)"} provided \<open>n \<ge> 0\<close>.
+ with respect to \<^term>\<open>(\<le>)\<close> provided \<open>n \<ge> 0\<close>.
It is not possible to express this using a global interpretation,
- because it is in general unspecified whether~@{term n} is
+ because it is in general unspecified whether~\<^term>\<open>n\<close> is
non-negative, but one may make an interpretation in an inner context
of a proof where full information is available.
This is not fully satisfactory either, since potentially
@@ -409,7 +407,7 @@
using non_neg by unfold_locales (rule mult_left_mono)
text \<open>While the proof of the previous interpretation
- is straightforward from monotonicity lemmas for~@{term "(*)"}, the
+ is straightforward from monotonicity lemmas for~\<^term>\<open>(*)\<close>, the
second proof follows a useful pattern.\<close>
sublocale %visible non_negative \<subseteq> lattice_end "(\<le>)" "\<lambda>i. n * i"
@@ -418,9 +416,8 @@
interpretation equations immediately yields two subgoals that
reflect the core conjecture.
@{subgoals [display]}
- It is now necessary to show, in the context of @{term
- non_negative}, that multiplication by~@{term n} commutes with
- @{term min} and @{term max}.\<close>
+ It is now necessary to show, in the context of \<^term>\<open>non_negative\<close>, that multiplication by~\<^term>\<open>n\<close> commutes with
+ \<^term>\<open>min\<close> and \<^term>\<open>max\<close>.\<close>
qed (auto simp: hom_le)
text (in order_preserving) \<open>The lemma @{thm [source] hom_le}
@@ -452,7 +449,7 @@
and the interpretation is rejected.
Instead it is necessary to declare a locale that is logically
- equivalent to @{term partial_order} but serves to collect facts
+ equivalent to \<^term>\<open>partial_order\<close> but serves to collect facts
about functions spaces where the co-domain is a partial order, and
to make the interpretation in its context:\<close>
--- a/src/Doc/Main/Main_Doc.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Main/Main_Doc.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4,42 +4,42 @@
begin
setup \<open>
- Thy_Output.antiquotation_pretty_source @{binding term_type_only} (Args.term -- Args.typ_abbrev)
+ Thy_Output.antiquotation_pretty_source \<^binding>\<open>term_type_only\<close> (Args.term -- Args.typ_abbrev)
(fn ctxt => fn (t, T) =>
(if fastype_of t = Sign.certify_typ (Proof_Context.theory_of ctxt) T then ()
else error "term_type_only: type mismatch";
Syntax.pretty_typ ctxt T))
\<close>
setup \<open>
- Thy_Output.antiquotation_pretty_source @{binding expanded_typ} Args.typ
+ Thy_Output.antiquotation_pretty_source \<^binding>\<open>expanded_typ\<close> Args.typ
Syntax.pretty_typ
\<close>
(*>*)
text\<open>
\begin{abstract}
-This document lists the main types, functions and syntax provided by theory @{theory Main}. It is meant as a quick overview of what is available. For infix operators and their precedences see the final section. The sophisticated class structure is only hinted at. For details see \<^url>\<open>https://isabelle.in.tum.de/library/HOL\<close>.
+This document lists the main types, functions and syntax provided by theory \<^theory>\<open>Main\<close>. It is meant as a quick overview of what is available. For infix operators and their precedences see the final section. The sophisticated class structure is only hinted at. For details see \<^url>\<open>https://isabelle.in.tum.de/library/HOL\<close>.
\end{abstract}
\section*{HOL}
-The basic logic: @{prop "x = y"}, @{const True}, @{const False}, @{prop "\<not> P"}, @{prop"P \<and> Q"},
-@{prop "P \<or> Q"}, @{prop "P \<longrightarrow> Q"}, @{prop "\<forall>x. P"}, @{prop "\<exists>x. P"}, @{prop"\<exists>! x. P"},
-@{term"THE x. P"}.
+The basic logic: \<^prop>\<open>x = y\<close>, \<^const>\<open>True\<close>, \<^const>\<open>False\<close>, \<^prop>\<open>\<not> P\<close>, \<^prop>\<open>P \<and> Q\<close>,
+\<^prop>\<open>P \<or> Q\<close>, \<^prop>\<open>P \<longrightarrow> Q\<close>, \<^prop>\<open>\<forall>x. P\<close>, \<^prop>\<open>\<exists>x. P\<close>, \<^prop>\<open>\<exists>! x. P\<close>,
+\<^term>\<open>THE x. P\<close>.
\<^smallskip>
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const HOL.undefined} & @{typeof HOL.undefined}\\
-@{const HOL.default} & @{typeof HOL.default}\\
+\<^const>\<open>HOL.undefined\<close> & \<^typeof>\<open>HOL.undefined\<close>\\
+\<^const>\<open>HOL.default\<close> & \<^typeof>\<open>HOL.default\<close>\\
\end{tabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term"\<not> (x = y)"} & @{term[source]"\<not> (x = y)"} & (\<^verbatim>\<open>~=\<close>)\\
-@{term[source]"P \<longleftrightarrow> Q"} & @{term"P \<longleftrightarrow> Q"} \\
-@{term"If x y z"} & @{term[source]"If x y z"}\\
-@{term"Let e\<^sub>1 (\<lambda>x. e\<^sub>2)"} & @{term[source]"Let e\<^sub>1 (\<lambda>x. e\<^sub>2)"}\\
+\<^term>\<open>\<not> (x = y)\<close> & @{term[source]"\<not> (x = y)"} & (\<^verbatim>\<open>~=\<close>)\\
+@{term[source]"P \<longleftrightarrow> Q"} & \<^term>\<open>P \<longleftrightarrow> Q\<close> \\
+\<^term>\<open>If x y z\<close> & @{term[source]"If x y z"}\\
+\<^term>\<open>Let e\<^sub>1 (\<lambda>x. e\<^sub>2)\<close> & @{term[source]"Let e\<^sub>1 (\<lambda>x. e\<^sub>2)"}\\
\end{supertabular}
@@ -50,41 +50,41 @@
\<^smallskip>
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-@{const Orderings.less_eq} & @{typeof Orderings.less_eq} & (\<^verbatim>\<open><=\<close>)\\
-@{const Orderings.less} & @{typeof Orderings.less}\\
-@{const Orderings.Least} & @{typeof Orderings.Least}\\
-@{const Orderings.Greatest} & @{typeof Orderings.Greatest}\\
-@{const Orderings.min} & @{typeof Orderings.min}\\
-@{const Orderings.max} & @{typeof Orderings.max}\\
-@{const[source] top} & @{typeof Orderings.top}\\
-@{const[source] bot} & @{typeof Orderings.bot}\\
-@{const Orderings.mono} & @{typeof Orderings.mono}\\
-@{const Orderings.strict_mono} & @{typeof Orderings.strict_mono}\\
+\<^const>\<open>Orderings.less_eq\<close> & \<^typeof>\<open>Orderings.less_eq\<close> & (\<^verbatim>\<open><=\<close>)\\
+\<^const>\<open>Orderings.less\<close> & \<^typeof>\<open>Orderings.less\<close>\\
+\<^const>\<open>Orderings.Least\<close> & \<^typeof>\<open>Orderings.Least\<close>\\
+\<^const>\<open>Orderings.Greatest\<close> & \<^typeof>\<open>Orderings.Greatest\<close>\\
+\<^const>\<open>Orderings.min\<close> & \<^typeof>\<open>Orderings.min\<close>\\
+\<^const>\<open>Orderings.max\<close> & \<^typeof>\<open>Orderings.max\<close>\\
+@{const[source] top} & \<^typeof>\<open>Orderings.top\<close>\\
+@{const[source] bot} & \<^typeof>\<open>Orderings.bot\<close>\\
+\<^const>\<open>Orderings.mono\<close> & \<^typeof>\<open>Orderings.mono\<close>\\
+\<^const>\<open>Orderings.strict_mono\<close> & \<^typeof>\<open>Orderings.strict_mono\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term[source]"x \<ge> y"} & @{term"x \<ge> y"} & (\<^verbatim>\<open>>=\<close>)\\
-@{term[source]"x > y"} & @{term"x > y"}\\
-@{term "\<forall>x\<le>y. P"} & @{term[source]"\<forall>x. x \<le> y \<longrightarrow> P"}\\
-@{term "\<exists>x\<le>y. P"} & @{term[source]"\<exists>x. x \<le> y \<and> P"}\\
+@{term[source]"x \<ge> y"} & \<^term>\<open>x \<ge> y\<close> & (\<^verbatim>\<open>>=\<close>)\\
+@{term[source]"x > y"} & \<^term>\<open>x > y\<close>\\
+\<^term>\<open>\<forall>x\<le>y. P\<close> & @{term[source]"\<forall>x. x \<le> y \<longrightarrow> P"}\\
+\<^term>\<open>\<exists>x\<le>y. P\<close> & @{term[source]"\<exists>x. x \<le> y \<and> P"}\\
\multicolumn{2}{@ {}l@ {}}{Similarly for $<$, $\ge$ and $>$}\\
-@{term "LEAST x. P"} & @{term[source]"Least (\<lambda>x. P)"}\\
-@{term "GREATEST x. P"} & @{term[source]"Greatest (\<lambda>x. P)"}\\
+\<^term>\<open>LEAST x. P\<close> & @{term[source]"Least (\<lambda>x. P)"}\\
+\<^term>\<open>GREATEST x. P\<close> & @{term[source]"Greatest (\<lambda>x. P)"}\\
\end{supertabular}
\section*{Lattices}
Classes semilattice, lattice, distributive lattice and complete lattice (the
-latter in theory @{theory HOL.Set}).
+latter in theory \<^theory>\<open>HOL.Set\<close>).
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Lattices.inf} & @{typeof Lattices.inf}\\
-@{const Lattices.sup} & @{typeof Lattices.sup}\\
-@{const Complete_Lattices.Inf} & @{term_type_only Complete_Lattices.Inf "'a set \<Rightarrow> 'a::Inf"}\\
-@{const Complete_Lattices.Sup} & @{term_type_only Complete_Lattices.Sup "'a set \<Rightarrow> 'a::Sup"}\\
+\<^const>\<open>Lattices.inf\<close> & \<^typeof>\<open>Lattices.inf\<close>\\
+\<^const>\<open>Lattices.sup\<close> & \<^typeof>\<open>Lattices.sup\<close>\\
+\<^const>\<open>Complete_Lattices.Inf\<close> & @{term_type_only Complete_Lattices.Inf "'a set \<Rightarrow> 'a::Inf"}\\
+\<^const>\<open>Complete_Lattices.Sup\<close> & @{term_type_only Complete_Lattices.Sup "'a set \<Rightarrow> 'a::Sup"}\\
\end{tabular}
\subsubsection*{Syntax}
@@ -92,12 +92,12 @@
Available by loading theory \<open>Lattice_Syntax\<close> in directory \<open>Library\<close>.
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{text[source]"x \<sqsubseteq> y"} & @{term"x \<le> y"}\\
-@{text[source]"x \<sqsubset> y"} & @{term"x < y"}\\
-@{text[source]"x \<sqinter> y"} & @{term"inf x y"}\\
-@{text[source]"x \<squnion> y"} & @{term"sup x y"}\\
-@{text[source]"\<Sqinter>A"} & @{term"Inf A"}\\
-@{text[source]"\<Squnion>A"} & @{term"Sup A"}\\
+@{text[source]"x \<sqsubseteq> y"} & \<^term>\<open>x \<le> y\<close>\\
+@{text[source]"x \<sqsubset> y"} & \<^term>\<open>x < y\<close>\\
+@{text[source]"x \<sqinter> y"} & \<^term>\<open>inf x y\<close>\\
+@{text[source]"x \<squnion> y"} & \<^term>\<open>sup x y\<close>\\
+@{text[source]"\<Sqinter>A"} & \<^term>\<open>Inf A\<close>\\
+@{text[source]"\<Squnion>A"} & \<^term>\<open>Sup A\<close>\\
@{text[source]"\<top>"} & @{term[source] top}\\
@{text[source]"\<bottom>"} & @{term[source] bot}\\
\end{supertabular}
@@ -106,335 +106,334 @@
\section*{Set}
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-@{const Set.empty} & @{term_type_only "Set.empty" "'a set"}\\
-@{const Set.insert} & @{term_type_only insert "'a\<Rightarrow>'a set\<Rightarrow>'a set"}\\
-@{const Collect} & @{term_type_only Collect "('a\<Rightarrow>bool)\<Rightarrow>'a set"}\\
-@{const Set.member} & @{term_type_only Set.member "'a\<Rightarrow>'a set\<Rightarrow>bool"} & (\<^verbatim>\<open>:\<close>)\\
-@{const Set.union} & @{term_type_only Set.union "'a set\<Rightarrow>'a set \<Rightarrow> 'a set"} & (\<^verbatim>\<open>Un\<close>)\\
-@{const Set.inter} & @{term_type_only Set.inter "'a set\<Rightarrow>'a set \<Rightarrow> 'a set"} & (\<^verbatim>\<open>Int\<close>)\\
-@{const Union} & @{term_type_only Union "'a set set\<Rightarrow>'a set"}\\
-@{const Inter} & @{term_type_only Inter "'a set set\<Rightarrow>'a set"}\\
-@{const Pow} & @{term_type_only Pow "'a set \<Rightarrow>'a set set"}\\
-@{const UNIV} & @{term_type_only UNIV "'a set"}\\
-@{const image} & @{term_type_only image "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>'b set"}\\
-@{const Ball} & @{term_type_only Ball "'a set\<Rightarrow>('a\<Rightarrow>bool)\<Rightarrow>bool"}\\
-@{const Bex} & @{term_type_only Bex "'a set\<Rightarrow>('a\<Rightarrow>bool)\<Rightarrow>bool"}\\
+\<^const>\<open>Set.empty\<close> & @{term_type_only "Set.empty" "'a set"}\\
+\<^const>\<open>Set.insert\<close> & @{term_type_only insert "'a\<Rightarrow>'a set\<Rightarrow>'a set"}\\
+\<^const>\<open>Collect\<close> & @{term_type_only Collect "('a\<Rightarrow>bool)\<Rightarrow>'a set"}\\
+\<^const>\<open>Set.member\<close> & @{term_type_only Set.member "'a\<Rightarrow>'a set\<Rightarrow>bool"} & (\<^verbatim>\<open>:\<close>)\\
+\<^const>\<open>Set.union\<close> & @{term_type_only Set.union "'a set\<Rightarrow>'a set \<Rightarrow> 'a set"} & (\<^verbatim>\<open>Un\<close>)\\
+\<^const>\<open>Set.inter\<close> & @{term_type_only Set.inter "'a set\<Rightarrow>'a set \<Rightarrow> 'a set"} & (\<^verbatim>\<open>Int\<close>)\\
+\<^const>\<open>Union\<close> & @{term_type_only Union "'a set set\<Rightarrow>'a set"}\\
+\<^const>\<open>Inter\<close> & @{term_type_only Inter "'a set set\<Rightarrow>'a set"}\\
+\<^const>\<open>Pow\<close> & @{term_type_only Pow "'a set \<Rightarrow>'a set set"}\\
+\<^const>\<open>UNIV\<close> & @{term_type_only UNIV "'a set"}\\
+\<^const>\<open>image\<close> & @{term_type_only image "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>'b set"}\\
+\<^const>\<open>Ball\<close> & @{term_type_only Ball "'a set\<Rightarrow>('a\<Rightarrow>bool)\<Rightarrow>bool"}\\
+\<^const>\<open>Bex\<close> & @{term_type_only Bex "'a set\<Rightarrow>('a\<Rightarrow>bool)\<Rightarrow>bool"}\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
\<open>{a\<^sub>1,\<dots>,a\<^sub>n}\<close> & \<open>insert a\<^sub>1 (\<dots> (insert a\<^sub>n {})\<dots>)\<close>\\
-@{term "a \<notin> A"} & @{term[source]"\<not>(x \<in> A)"}\\
-@{term "A \<subseteq> B"} & @{term[source]"A \<le> B"}\\
-@{term "A \<subset> B"} & @{term[source]"A < B"}\\
+\<^term>\<open>a \<notin> A\<close> & @{term[source]"\<not>(a \<in> A)"}\\
+\<^term>\<open>A \<subseteq> B\<close> & @{term[source]"A \<le> B"}\\
+\<^term>\<open>A \<subset> B\<close> & @{term[source]"A < B"}\\
@{term[source]"A \<supseteq> B"} & @{term[source]"B \<le> A"}\\
@{term[source]"A \<supset> B"} & @{term[source]"B < A"}\\
-@{term "{x. P}"} & @{term[source]"Collect (\<lambda>x. P)"}\\
+\<^term>\<open>{x. P}\<close> & @{term[source]"Collect (\<lambda>x. P)"}\\
\<open>{t | x\<^sub>1 \<dots> x\<^sub>n. P}\<close> & \<open>{v. \<exists>x\<^sub>1 \<dots> x\<^sub>n. v = t \<and> P}\<close>\\
@{term[source]"\<Union>x\<in>I. A"} & @{term[source]"\<Union>((\<lambda>x. A) ` I)"} & (\texttt{UN})\\
@{term[source]"\<Union>x. A"} & @{term[source]"\<Union>((\<lambda>x. A) ` UNIV)"}\\
@{term[source]"\<Inter>x\<in>I. A"} & @{term[source]"\<Inter>((\<lambda>x. A) ` I)"} & (\texttt{INT})\\
@{term[source]"\<Inter>x. A"} & @{term[source]"\<Inter>((\<lambda>x. A) ` UNIV)"}\\
-@{term "\<forall>x\<in>A. P"} & @{term[source]"Ball A (\<lambda>x. P)"}\\
-@{term "\<exists>x\<in>A. P"} & @{term[source]"Bex A (\<lambda>x. P)"}\\
-@{term "range f"} & @{term[source]"f ` UNIV"}\\
+\<^term>\<open>\<forall>x\<in>A. P\<close> & @{term[source]"Ball A (\<lambda>x. P)"}\\
+\<^term>\<open>\<exists>x\<in>A. P\<close> & @{term[source]"Bex A (\<lambda>x. P)"}\\
+\<^term>\<open>range f\<close> & @{term[source]"f ` UNIV"}\\
\end{supertabular}
\section*{Fun}
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-@{const "Fun.id"} & @{typeof Fun.id}\\
-@{const "Fun.comp"} & @{typeof Fun.comp} & (\texttt{o})\\
-@{const "Fun.inj_on"} & @{term_type_only Fun.inj_on "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>bool"}\\
-@{const "Fun.inj"} & @{typeof Fun.inj}\\
-@{const "Fun.surj"} & @{typeof Fun.surj}\\
-@{const "Fun.bij"} & @{typeof Fun.bij}\\
-@{const "Fun.bij_betw"} & @{term_type_only Fun.bij_betw "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>'b set\<Rightarrow>bool"}\\
-@{const "Fun.fun_upd"} & @{typeof Fun.fun_upd}\\
+\<^const>\<open>Fun.id\<close> & \<^typeof>\<open>Fun.id\<close>\\
+\<^const>\<open>Fun.comp\<close> & \<^typeof>\<open>Fun.comp\<close> & (\texttt{o})\\
+\<^const>\<open>Fun.inj_on\<close> & @{term_type_only Fun.inj_on "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>bool"}\\
+\<^const>\<open>Fun.inj\<close> & \<^typeof>\<open>Fun.inj\<close>\\
+\<^const>\<open>Fun.surj\<close> & \<^typeof>\<open>Fun.surj\<close>\\
+\<^const>\<open>Fun.bij\<close> & \<^typeof>\<open>Fun.bij\<close>\\
+\<^const>\<open>Fun.bij_betw\<close> & @{term_type_only Fun.bij_betw "('a\<Rightarrow>'b)\<Rightarrow>'a set\<Rightarrow>'b set\<Rightarrow>bool"}\\
+\<^const>\<open>Fun.fun_upd\<close> & \<^typeof>\<open>Fun.fun_upd\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term"fun_upd f x y"} & @{term[source]"fun_upd f x y"}\\
+\<^term>\<open>fun_upd f x y\<close> & @{term[source]"fun_upd f x y"}\\
\<open>f(x\<^sub>1:=y\<^sub>1,\<dots>,x\<^sub>n:=y\<^sub>n)\<close> & \<open>f(x\<^sub>1:=y\<^sub>1)\<dots>(x\<^sub>n:=y\<^sub>n)\<close>\\
\end{tabular}
\section*{Hilbert\_Choice}
-Hilbert's selection ($\varepsilon$) operator: @{term"SOME x. P"}.
+Hilbert's selection ($\varepsilon$) operator: \<^term>\<open>SOME x. P\<close>.
\<^smallskip>
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Hilbert_Choice.inv_into} & @{term_type_only Hilbert_Choice.inv_into "'a set \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a)"}
+\<^const>\<open>Hilbert_Choice.inv_into\<close> & @{term_type_only Hilbert_Choice.inv_into "'a set \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a)"}
\end{tabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term inv} & @{term[source]"inv_into UNIV"}
+\<^term>\<open>inv\<close> & @{term[source]"inv_into UNIV"}
\end{tabular}
\section*{Fixed Points}
-Theory: @{theory HOL.Inductive}.
+Theory: \<^theory>\<open>HOL.Inductive\<close>.
-Least and greatest fixed points in a complete lattice @{typ 'a}:
+Least and greatest fixed points in a complete lattice \<^typ>\<open>'a\<close>:
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Inductive.lfp} & @{typeof Inductive.lfp}\\
-@{const Inductive.gfp} & @{typeof Inductive.gfp}\\
+\<^const>\<open>Inductive.lfp\<close> & \<^typeof>\<open>Inductive.lfp\<close>\\
+\<^const>\<open>Inductive.gfp\<close> & \<^typeof>\<open>Inductive.gfp\<close>\\
\end{tabular}
-Note that in particular sets (@{typ"'a \<Rightarrow> bool"}) are complete lattices.
+Note that in particular sets (\<^typ>\<open>'a \<Rightarrow> bool\<close>) are complete lattices.
\section*{Sum\_Type}
Type constructor \<open>+\<close>.
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Sum_Type.Inl} & @{typeof Sum_Type.Inl}\\
-@{const Sum_Type.Inr} & @{typeof Sum_Type.Inr}\\
-@{const Sum_Type.Plus} & @{term_type_only Sum_Type.Plus "'a set\<Rightarrow>'b set\<Rightarrow>('a+'b)set"}
+\<^const>\<open>Sum_Type.Inl\<close> & \<^typeof>\<open>Sum_Type.Inl\<close>\\
+\<^const>\<open>Sum_Type.Inr\<close> & \<^typeof>\<open>Sum_Type.Inr\<close>\\
+\<^const>\<open>Sum_Type.Plus\<close> & @{term_type_only Sum_Type.Plus "'a set\<Rightarrow>'b set\<Rightarrow>('a+'b)set"}
\end{tabular}
\section*{Product\_Type}
-Types @{typ unit} and \<open>\<times>\<close>.
+Types \<^typ>\<open>unit\<close> and \<open>\<times>\<close>.
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Product_Type.Unity} & @{typeof Product_Type.Unity}\\
-@{const Pair} & @{typeof Pair}\\
-@{const fst} & @{typeof fst}\\
-@{const snd} & @{typeof snd}\\
-@{const case_prod} & @{typeof case_prod}\\
-@{const curry} & @{typeof curry}\\
-@{const Product_Type.Sigma} & @{term_type_only Product_Type.Sigma "'a set\<Rightarrow>('a\<Rightarrow>'b set)\<Rightarrow>('a*'b)set"}\\
+\<^const>\<open>Product_Type.Unity\<close> & \<^typeof>\<open>Product_Type.Unity\<close>\\
+\<^const>\<open>Pair\<close> & \<^typeof>\<open>Pair\<close>\\
+\<^const>\<open>fst\<close> & \<^typeof>\<open>fst\<close>\\
+\<^const>\<open>snd\<close> & \<^typeof>\<open>snd\<close>\\
+\<^const>\<open>case_prod\<close> & \<^typeof>\<open>case_prod\<close>\\
+\<^const>\<open>curry\<close> & \<^typeof>\<open>curry\<close>\\
+\<^const>\<open>Product_Type.Sigma\<close> & @{term_type_only Product_Type.Sigma "'a set\<Rightarrow>('a\<Rightarrow>'b set)\<Rightarrow>('a*'b)set"}\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} ll @ {}}
-@{term "Pair a b"} & @{term[source]"Pair a b"}\\
-@{term "case_prod (\<lambda>x y. t)"} & @{term[source]"case_prod (\<lambda>x y. t)"}\\
-@{term "A \<times> B"} & \<open>Sigma A (\<lambda>\<^latex>\<open>\_\<close>. B)\<close>
+\<^term>\<open>Pair a b\<close> & @{term[source]"Pair a b"}\\
+\<^term>\<open>case_prod (\<lambda>x y. t)\<close> & @{term[source]"case_prod (\<lambda>x y. t)"}\\
+\<^term>\<open>A \<times> B\<close> & \<open>Sigma A (\<lambda>\<^latex>\<open>\_\<close>. B)\<close>
\end{tabular}
Pairs may be nested. Nesting to the right is printed as a tuple,
-e.g.\ \mbox{@{term "(a,b,c)"}} is really \mbox{\<open>(a, (b, c))\<close>.}
+e.g.\ \mbox{\<^term>\<open>(a,b,c)\<close>} is really \mbox{\<open>(a, (b, c))\<close>.}
Pattern matching with pairs and tuples extends to all binders,
-e.g.\ \mbox{@{prop "\<forall>(x,y)\<in>A. P"},} @{term "{(x,y). P}"}, etc.
+e.g.\ \mbox{\<^prop>\<open>\<forall>(x,y)\<in>A. P\<close>,} \<^term>\<open>{(x,y). P}\<close>, etc.
\section*{Relation}
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Relation.converse} & @{term_type_only Relation.converse "('a * 'b)set \<Rightarrow> ('b*'a)set"}\\
-@{const Relation.relcomp} & @{term_type_only Relation.relcomp "('a*'b)set\<Rightarrow>('b*'c)set\<Rightarrow>('a*'c)set"}\\
-@{const Relation.Image} & @{term_type_only Relation.Image "('a*'b)set\<Rightarrow>'a set\<Rightarrow>'b set"}\\
-@{const Relation.inv_image} & @{term_type_only Relation.inv_image "('a*'a)set\<Rightarrow>('b\<Rightarrow>'a)\<Rightarrow>('b*'b)set"}\\
-@{const Relation.Id_on} & @{term_type_only Relation.Id_on "'a set\<Rightarrow>('a*'a)set"}\\
-@{const Relation.Id} & @{term_type_only Relation.Id "('a*'a)set"}\\
-@{const Relation.Domain} & @{term_type_only Relation.Domain "('a*'b)set\<Rightarrow>'a set"}\\
-@{const Relation.Range} & @{term_type_only Relation.Range "('a*'b)set\<Rightarrow>'b set"}\\
-@{const Relation.Field} & @{term_type_only Relation.Field "('a*'a)set\<Rightarrow>'a set"}\\
-@{const Relation.refl_on} & @{term_type_only Relation.refl_on "'a set\<Rightarrow>('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.refl} & @{term_type_only Relation.refl "('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.sym} & @{term_type_only Relation.sym "('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.antisym} & @{term_type_only Relation.antisym "('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.trans} & @{term_type_only Relation.trans "('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.irrefl} & @{term_type_only Relation.irrefl "('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.total_on} & @{term_type_only Relation.total_on "'a set\<Rightarrow>('a*'a)set\<Rightarrow>bool"}\\
-@{const Relation.total} & @{term_type_only Relation.total "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.converse\<close> & @{term_type_only Relation.converse "('a * 'b)set \<Rightarrow> ('b*'a)set"}\\
+\<^const>\<open>Relation.relcomp\<close> & @{term_type_only Relation.relcomp "('a*'b)set\<Rightarrow>('b*'c)set\<Rightarrow>('a*'c)set"}\\
+\<^const>\<open>Relation.Image\<close> & @{term_type_only Relation.Image "('a*'b)set\<Rightarrow>'a set\<Rightarrow>'b set"}\\
+\<^const>\<open>Relation.inv_image\<close> & @{term_type_only Relation.inv_image "('a*'a)set\<Rightarrow>('b\<Rightarrow>'a)\<Rightarrow>('b*'b)set"}\\
+\<^const>\<open>Relation.Id_on\<close> & @{term_type_only Relation.Id_on "'a set\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Relation.Id\<close> & @{term_type_only Relation.Id "('a*'a)set"}\\
+\<^const>\<open>Relation.Domain\<close> & @{term_type_only Relation.Domain "('a*'b)set\<Rightarrow>'a set"}\\
+\<^const>\<open>Relation.Range\<close> & @{term_type_only Relation.Range "('a*'b)set\<Rightarrow>'b set"}\\
+\<^const>\<open>Relation.Field\<close> & @{term_type_only Relation.Field "('a*'a)set\<Rightarrow>'a set"}\\
+\<^const>\<open>Relation.refl_on\<close> & @{term_type_only Relation.refl_on "'a set\<Rightarrow>('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.refl\<close> & @{term_type_only Relation.refl "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.sym\<close> & @{term_type_only Relation.sym "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.antisym\<close> & @{term_type_only Relation.antisym "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.trans\<close> & @{term_type_only Relation.trans "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.irrefl\<close> & @{term_type_only Relation.irrefl "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.total_on\<close> & @{term_type_only Relation.total_on "'a set\<Rightarrow>('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Relation.total\<close> & @{term_type_only Relation.total "('a*'a)set\<Rightarrow>bool"}\\
\end{tabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term"converse r"} & @{term[source]"converse r"} & (\<^verbatim>\<open>^-1\<close>)
+\<^term>\<open>converse r\<close> & @{term[source]"converse r"} & (\<^verbatim>\<open>^-1\<close>)
\end{tabular}
\<^medskip>
\noindent
-Type synonym \ @{typ"'a rel"} \<open>=\<close> @{expanded_typ "'a rel"}
+Type synonym \ \<^typ>\<open>'a rel\<close> \<open>=\<close> @{expanded_typ "'a rel"}
\section*{Equiv\_Relations}
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Equiv_Relations.equiv} & @{term_type_only Equiv_Relations.equiv "'a set \<Rightarrow> ('a*'a)set\<Rightarrow>bool"}\\
-@{const Equiv_Relations.quotient} & @{term_type_only Equiv_Relations.quotient "'a set \<Rightarrow> ('a \<times> 'a) set \<Rightarrow> 'a set set"}\\
-@{const Equiv_Relations.congruent} & @{term_type_only Equiv_Relations.congruent "('a*'a)set\<Rightarrow>('a\<Rightarrow>'b)\<Rightarrow>bool"}\\
-@{const Equiv_Relations.congruent2} & @{term_type_only Equiv_Relations.congruent2 "('a*'a)set\<Rightarrow>('b*'b)set\<Rightarrow>('a\<Rightarrow>'b\<Rightarrow>'c)\<Rightarrow>bool"}\\
+\<^const>\<open>Equiv_Relations.equiv\<close> & @{term_type_only Equiv_Relations.equiv "'a set \<Rightarrow> ('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Equiv_Relations.quotient\<close> & @{term_type_only Equiv_Relations.quotient "'a set \<Rightarrow> ('a \<times> 'a) set \<Rightarrow> 'a set set"}\\
+\<^const>\<open>Equiv_Relations.congruent\<close> & @{term_type_only Equiv_Relations.congruent "('a*'a)set\<Rightarrow>('a\<Rightarrow>'b)\<Rightarrow>bool"}\\
+\<^const>\<open>Equiv_Relations.congruent2\<close> & @{term_type_only Equiv_Relations.congruent2 "('a*'a)set\<Rightarrow>('b*'b)set\<Rightarrow>('a\<Rightarrow>'b\<Rightarrow>'c)\<Rightarrow>bool"}\\
%@ {const Equiv_Relations.} & @ {term_type_only Equiv_Relations. ""}\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term"congruent r f"} & @{term[source]"congruent r f"}\\
-@{term"congruent2 r r f"} & @{term[source]"congruent2 r r f"}\\
+\<^term>\<open>congruent r f\<close> & @{term[source]"congruent r f"}\\
+\<^term>\<open>congruent2 r r f\<close> & @{term[source]"congruent2 r r f"}\\
\end{tabular}
\section*{Transitive\_Closure}
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Transitive_Closure.rtrancl} & @{term_type_only Transitive_Closure.rtrancl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
-@{const Transitive_Closure.trancl} & @{term_type_only Transitive_Closure.trancl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
-@{const Transitive_Closure.reflcl} & @{term_type_only Transitive_Closure.reflcl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
-@{const Transitive_Closure.acyclic} & @{term_type_only Transitive_Closure.acyclic "('a*'a)set\<Rightarrow>bool"}\\
-@{const compower} & @{term_type_only "(^^) :: ('a*'a)set\<Rightarrow>nat\<Rightarrow>('a*'a)set" "('a*'a)set\<Rightarrow>nat\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Transitive_Closure.rtrancl\<close> & @{term_type_only Transitive_Closure.rtrancl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Transitive_Closure.trancl\<close> & @{term_type_only Transitive_Closure.trancl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Transitive_Closure.reflcl\<close> & @{term_type_only Transitive_Closure.reflcl "('a*'a)set\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Transitive_Closure.acyclic\<close> & @{term_type_only Transitive_Closure.acyclic "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>compower\<close> & @{term_type_only "(^^) :: ('a*'a)set\<Rightarrow>nat\<Rightarrow>('a*'a)set" "('a*'a)set\<Rightarrow>nat\<Rightarrow>('a*'a)set"}\\
\end{tabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term"rtrancl r"} & @{term[source]"rtrancl r"} & (\<^verbatim>\<open>^*\<close>)\\
-@{term"trancl r"} & @{term[source]"trancl r"} & (\<^verbatim>\<open>^+\<close>)\\
-@{term"reflcl r"} & @{term[source]"reflcl r"} & (\<^verbatim>\<open>^=\<close>)
+\<^term>\<open>rtrancl r\<close> & @{term[source]"rtrancl r"} & (\<^verbatim>\<open>^*\<close>)\\
+\<^term>\<open>trancl r\<close> & @{term[source]"trancl r"} & (\<^verbatim>\<open>^+\<close>)\\
+\<^term>\<open>reflcl r\<close> & @{term[source]"reflcl r"} & (\<^verbatim>\<open>^=\<close>)
\end{tabular}
\section*{Algebra}
-Theories @{theory HOL.Groups}, @{theory HOL.Rings}, @{theory HOL.Fields} and @{theory
-HOL.Divides} define a large collection of classes describing common algebraic
+Theories \<^theory>\<open>HOL.Groups\<close>, \<^theory>\<open>HOL.Rings\<close>, \<^theory>\<open>HOL.Fields\<close> and \<^theory>\<open>HOL.Divides\<close> define a large collection of classes describing common algebraic
structures from semigroups up to fields. Everything is done in terms of
overloaded operators:
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-\<open>0\<close> & @{typeof zero}\\
-\<open>1\<close> & @{typeof one}\\
-@{const plus} & @{typeof plus}\\
-@{const minus} & @{typeof minus}\\
-@{const uminus} & @{typeof uminus} & (\<^verbatim>\<open>-\<close>)\\
-@{const times} & @{typeof times}\\
-@{const inverse} & @{typeof inverse}\\
-@{const divide} & @{typeof divide}\\
-@{const abs} & @{typeof abs}\\
-@{const sgn} & @{typeof sgn}\\
-@{const Rings.dvd} & @{typeof Rings.dvd}\\
-@{const divide} & @{typeof divide}\\
-@{const modulo} & @{typeof modulo}\\
+\<open>0\<close> & \<^typeof>\<open>zero\<close>\\
+\<open>1\<close> & \<^typeof>\<open>one\<close>\\
+\<^const>\<open>plus\<close> & \<^typeof>\<open>plus\<close>\\
+\<^const>\<open>minus\<close> & \<^typeof>\<open>minus\<close>\\
+\<^const>\<open>uminus\<close> & \<^typeof>\<open>uminus\<close> & (\<^verbatim>\<open>-\<close>)\\
+\<^const>\<open>times\<close> & \<^typeof>\<open>times\<close>\\
+\<^const>\<open>inverse\<close> & \<^typeof>\<open>inverse\<close>\\
+\<^const>\<open>divide\<close> & \<^typeof>\<open>divide\<close>\\
+\<^const>\<open>abs\<close> & \<^typeof>\<open>abs\<close>\\
+\<^const>\<open>sgn\<close> & \<^typeof>\<open>sgn\<close>\\
+\<^const>\<open>Rings.dvd\<close> & \<^typeof>\<open>Rings.dvd\<close>\\
+\<^const>\<open>divide\<close> & \<^typeof>\<open>divide\<close>\\
+\<^const>\<open>modulo\<close> & \<^typeof>\<open>modulo\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term "\<bar>x\<bar>"} & @{term[source] "abs x"}
+\<^term>\<open>\<bar>x\<bar>\<close> & @{term[source] "abs x"}
\end{tabular}
\section*{Nat}
-@{datatype nat}
+\<^datatype>\<open>nat\<close>
\<^bigskip>
\begin{tabular}{@ {} lllllll @ {}}
-@{term "(+) :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "(-) :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "(*) :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "(^) :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "(div) :: nat \<Rightarrow> nat \<Rightarrow> nat"}&
-@{term "(mod) :: nat \<Rightarrow> nat \<Rightarrow> nat"}&
-@{term "(dvd) :: nat \<Rightarrow> nat \<Rightarrow> bool"}\\
-@{term "(\<le>) :: nat \<Rightarrow> nat \<Rightarrow> bool"} &
-@{term "(<) :: nat \<Rightarrow> nat \<Rightarrow> bool"} &
-@{term "min :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "max :: nat \<Rightarrow> nat \<Rightarrow> nat"} &
-@{term "Min :: nat set \<Rightarrow> nat"} &
-@{term "Max :: nat set \<Rightarrow> nat"}\\
+\<^term>\<open>(+) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>(-) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>(*) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>(^) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>(div) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close>&
+\<^term>\<open>(mod) :: nat \<Rightarrow> nat \<Rightarrow> nat\<close>&
+\<^term>\<open>(dvd) :: nat \<Rightarrow> nat \<Rightarrow> bool\<close>\\
+\<^term>\<open>(\<le>) :: nat \<Rightarrow> nat \<Rightarrow> bool\<close> &
+\<^term>\<open>(<) :: nat \<Rightarrow> nat \<Rightarrow> bool\<close> &
+\<^term>\<open>min :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>max :: nat \<Rightarrow> nat \<Rightarrow> nat\<close> &
+\<^term>\<open>Min :: nat set \<Rightarrow> nat\<close> &
+\<^term>\<open>Max :: nat set \<Rightarrow> nat\<close>\\
\end{tabular}
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Nat.of_nat} & @{typeof Nat.of_nat}\\
-@{term "(^^) :: ('a \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a"} &
+\<^const>\<open>Nat.of_nat\<close> & \<^typeof>\<open>Nat.of_nat\<close>\\
+\<^term>\<open>(^^) :: ('a \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a\<close> &
@{term_type_only "(^^) :: ('a \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a" "('a \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a"}
\end{tabular}
\section*{Int}
-Type @{typ int}
+Type \<^typ>\<open>int\<close>
\<^bigskip>
\begin{tabular}{@ {} llllllll @ {}}
-@{term "(+) :: int \<Rightarrow> int \<Rightarrow> int"} &
-@{term "(-) :: int \<Rightarrow> int \<Rightarrow> int"} &
-@{term "uminus :: int \<Rightarrow> int"} &
-@{term "(*) :: int \<Rightarrow> int \<Rightarrow> int"} &
-@{term "(^) :: int \<Rightarrow> nat \<Rightarrow> int"} &
-@{term "(div) :: int \<Rightarrow> int \<Rightarrow> int"}&
-@{term "(mod) :: int \<Rightarrow> int \<Rightarrow> int"}&
-@{term "(dvd) :: int \<Rightarrow> int \<Rightarrow> bool"}\\
-@{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"} &
-@{term "(<) :: int \<Rightarrow> int \<Rightarrow> bool"} &
-@{term "min :: int \<Rightarrow> int \<Rightarrow> int"} &
-@{term "max :: int \<Rightarrow> int \<Rightarrow> int"} &
-@{term "Min :: int set \<Rightarrow> int"} &
-@{term "Max :: int set \<Rightarrow> int"}\\
-@{term "abs :: int \<Rightarrow> int"} &
-@{term "sgn :: int \<Rightarrow> int"}\\
+\<^term>\<open>(+) :: int \<Rightarrow> int \<Rightarrow> int\<close> &
+\<^term>\<open>(-) :: int \<Rightarrow> int \<Rightarrow> int\<close> &
+\<^term>\<open>uminus :: int \<Rightarrow> int\<close> &
+\<^term>\<open>(*) :: int \<Rightarrow> int \<Rightarrow> int\<close> &
+\<^term>\<open>(^) :: int \<Rightarrow> nat \<Rightarrow> int\<close> &
+\<^term>\<open>(div) :: int \<Rightarrow> int \<Rightarrow> int\<close>&
+\<^term>\<open>(mod) :: int \<Rightarrow> int \<Rightarrow> int\<close>&
+\<^term>\<open>(dvd) :: int \<Rightarrow> int \<Rightarrow> bool\<close>\\
+\<^term>\<open>(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool\<close> &
+\<^term>\<open>(<) :: int \<Rightarrow> int \<Rightarrow> bool\<close> &
+\<^term>\<open>min :: int \<Rightarrow> int \<Rightarrow> int\<close> &
+\<^term>\<open>max :: int \<Rightarrow> int \<Rightarrow> int\<close> &
+\<^term>\<open>Min :: int set \<Rightarrow> int\<close> &
+\<^term>\<open>Max :: int set \<Rightarrow> int\<close>\\
+\<^term>\<open>abs :: int \<Rightarrow> int\<close> &
+\<^term>\<open>sgn :: int \<Rightarrow> int\<close>\\
\end{tabular}
\begin{tabular}{@ {} l @ {~::~} l l @ {}}
-@{const Int.nat} & @{typeof Int.nat}\\
-@{const Int.of_int} & @{typeof Int.of_int}\\
-@{const Int.Ints} & @{term_type_only Int.Ints "'a::ring_1 set"} & (\<^verbatim>\<open>Ints\<close>)
+\<^const>\<open>Int.nat\<close> & \<^typeof>\<open>Int.nat\<close>\\
+\<^const>\<open>Int.of_int\<close> & \<^typeof>\<open>Int.of_int\<close>\\
+\<^const>\<open>Int.Ints\<close> & @{term_type_only Int.Ints "'a::ring_1 set"} & (\<^verbatim>\<open>Ints\<close>)
\end{tabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term"of_nat::nat\<Rightarrow>int"} & @{term[source]"of_nat"}\\
+\<^term>\<open>of_nat::nat\<Rightarrow>int\<close> & @{term[source]"of_nat"}\\
\end{tabular}
\section*{Finite\_Set}
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Finite_Set.finite} & @{term_type_only Finite_Set.finite "'a set\<Rightarrow>bool"}\\
-@{const Finite_Set.card} & @{term_type_only Finite_Set.card "'a set \<Rightarrow> nat"}\\
-@{const Finite_Set.fold} & @{term_type_only Finite_Set.fold "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a set \<Rightarrow> 'b"}\\
+\<^const>\<open>Finite_Set.finite\<close> & @{term_type_only Finite_Set.finite "'a set\<Rightarrow>bool"}\\
+\<^const>\<open>Finite_Set.card\<close> & @{term_type_only Finite_Set.card "'a set \<Rightarrow> nat"}\\
+\<^const>\<open>Finite_Set.fold\<close> & @{term_type_only Finite_Set.fold "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a set \<Rightarrow> 'b"}\\
\end{supertabular}
\section*{Lattices\_Big}
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-@{const Lattices_Big.Min} & @{typeof Lattices_Big.Min}\\
-@{const Lattices_Big.Max} & @{typeof Lattices_Big.Max}\\
-@{const Lattices_Big.arg_min} & @{typeof Lattices_Big.arg_min}\\
-@{const Lattices_Big.is_arg_min} & @{typeof Lattices_Big.is_arg_min}\\
-@{const Lattices_Big.arg_max} & @{typeof Lattices_Big.arg_max}\\
-@{const Lattices_Big.is_arg_max} & @{typeof Lattices_Big.is_arg_max}\\
+\<^const>\<open>Lattices_Big.Min\<close> & \<^typeof>\<open>Lattices_Big.Min\<close>\\
+\<^const>\<open>Lattices_Big.Max\<close> & \<^typeof>\<open>Lattices_Big.Max\<close>\\
+\<^const>\<open>Lattices_Big.arg_min\<close> & \<^typeof>\<open>Lattices_Big.arg_min\<close>\\
+\<^const>\<open>Lattices_Big.is_arg_min\<close> & \<^typeof>\<open>Lattices_Big.is_arg_min\<close>\\
+\<^const>\<open>Lattices_Big.arg_max\<close> & \<^typeof>\<open>Lattices_Big.arg_max\<close>\\
+\<^const>\<open>Lattices_Big.is_arg_max\<close> & \<^typeof>\<open>Lattices_Big.is_arg_max\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term "ARG_MIN f x. P"} & @{term[source]"arg_min f (\<lambda>x. P)"}\\
-@{term "ARG_MAX f x. P"} & @{term[source]"arg_max f (\<lambda>x. P)"}\\
+\<^term>\<open>ARG_MIN f x. P\<close> & @{term[source]"arg_min f (\<lambda>x. P)"}\\
+\<^term>\<open>ARG_MAX f x. P\<close> & @{term[source]"arg_max f (\<lambda>x. P)"}\\
\end{supertabular}
\section*{Groups\_Big}
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Groups_Big.sum} & @{term_type_only Groups_Big.sum "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b::comm_monoid_add"}\\
-@{const Groups_Big.prod} & @{term_type_only Groups_Big.prod "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b::comm_monoid_mult"}\\
+\<^const>\<open>Groups_Big.sum\<close> & @{term_type_only Groups_Big.sum "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b::comm_monoid_add"}\\
+\<^const>\<open>Groups_Big.prod\<close> & @{term_type_only Groups_Big.prod "('a \<Rightarrow> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b::comm_monoid_mult"}\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l l @ {}}
-@{term "sum (\<lambda>x. x) A"} & @{term[source]"sum (\<lambda>x. x) A"} & (\<^verbatim>\<open>SUM\<close>)\\
-@{term "sum (\<lambda>x. t) A"} & @{term[source]"sum (\<lambda>x. t) A"}\\
-@{term[source] "\<Sum>x|P. t"} & @{term"\<Sum>x|P. t"}\\
+\<^term>\<open>sum (\<lambda>x. x) A\<close> & @{term[source]"sum (\<lambda>x. x) A"} & (\<^verbatim>\<open>SUM\<close>)\\
+\<^term>\<open>sum (\<lambda>x. t) A\<close> & @{term[source]"sum (\<lambda>x. t) A"}\\
+@{term[source] "\<Sum>x|P. t"} & \<^term>\<open>\<Sum>x|P. t\<close>\\
\multicolumn{2}{@ {}l@ {}}{Similarly for \<open>\<Prod>\<close> instead of \<open>\<Sum>\<close>} & (\<^verbatim>\<open>PROD\<close>)\\
\end{supertabular}
@@ -442,47 +441,47 @@
\section*{Wellfounded}
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Wellfounded.wf} & @{term_type_only Wellfounded.wf "('a*'a)set\<Rightarrow>bool"}\\
-@{const Wellfounded.acc} & @{term_type_only Wellfounded.acc "('a*'a)set\<Rightarrow>'a set"}\\
-@{const Wellfounded.measure} & @{term_type_only Wellfounded.measure "('a\<Rightarrow>nat)\<Rightarrow>('a*'a)set"}\\
-@{const Wellfounded.lex_prod} & @{term_type_only Wellfounded.lex_prod "('a*'a)set\<Rightarrow>('b*'b)set\<Rightarrow>(('a*'b)*('a*'b))set"}\\
-@{const Wellfounded.mlex_prod} & @{term_type_only Wellfounded.mlex_prod "('a\<Rightarrow>nat)\<Rightarrow>('a*'a)set\<Rightarrow>('a*'a)set"}\\
-@{const Wellfounded.less_than} & @{term_type_only Wellfounded.less_than "(nat*nat)set"}\\
-@{const Wellfounded.pred_nat} & @{term_type_only Wellfounded.pred_nat "(nat*nat)set"}\\
+\<^const>\<open>Wellfounded.wf\<close> & @{term_type_only Wellfounded.wf "('a*'a)set\<Rightarrow>bool"}\\
+\<^const>\<open>Wellfounded.acc\<close> & @{term_type_only Wellfounded.acc "('a*'a)set\<Rightarrow>'a set"}\\
+\<^const>\<open>Wellfounded.measure\<close> & @{term_type_only Wellfounded.measure "('a\<Rightarrow>nat)\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Wellfounded.lex_prod\<close> & @{term_type_only Wellfounded.lex_prod "('a*'a)set\<Rightarrow>('b*'b)set\<Rightarrow>(('a*'b)*('a*'b))set"}\\
+\<^const>\<open>Wellfounded.mlex_prod\<close> & @{term_type_only Wellfounded.mlex_prod "('a\<Rightarrow>nat)\<Rightarrow>('a*'a)set\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>Wellfounded.less_than\<close> & @{term_type_only Wellfounded.less_than "(nat*nat)set"}\\
+\<^const>\<open>Wellfounded.pred_nat\<close> & @{term_type_only Wellfounded.pred_nat "(nat*nat)set"}\\
\end{supertabular}
-\section*{Set\_Interval} % @{theory HOL.Set_Interval}
+\section*{Set\_Interval} % \<^theory>\<open>HOL.Set_Interval\<close>
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const lessThan} & @{term_type_only lessThan "'a::ord \<Rightarrow> 'a set"}\\
-@{const atMost} & @{term_type_only atMost "'a::ord \<Rightarrow> 'a set"}\\
-@{const greaterThan} & @{term_type_only greaterThan "'a::ord \<Rightarrow> 'a set"}\\
-@{const atLeast} & @{term_type_only atLeast "'a::ord \<Rightarrow> 'a set"}\\
-@{const greaterThanLessThan} & @{term_type_only greaterThanLessThan "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
-@{const atLeastLessThan} & @{term_type_only atLeastLessThan "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
-@{const greaterThanAtMost} & @{term_type_only greaterThanAtMost "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
-@{const atLeastAtMost} & @{term_type_only atLeastAtMost "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
+\<^const>\<open>lessThan\<close> & @{term_type_only lessThan "'a::ord \<Rightarrow> 'a set"}\\
+\<^const>\<open>atMost\<close> & @{term_type_only atMost "'a::ord \<Rightarrow> 'a set"}\\
+\<^const>\<open>greaterThan\<close> & @{term_type_only greaterThan "'a::ord \<Rightarrow> 'a set"}\\
+\<^const>\<open>atLeast\<close> & @{term_type_only atLeast "'a::ord \<Rightarrow> 'a set"}\\
+\<^const>\<open>greaterThanLessThan\<close> & @{term_type_only greaterThanLessThan "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
+\<^const>\<open>atLeastLessThan\<close> & @{term_type_only atLeastLessThan "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
+\<^const>\<open>greaterThanAtMost\<close> & @{term_type_only greaterThanAtMost "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
+\<^const>\<open>atLeastAtMost\<close> & @{term_type_only atLeastAtMost "'a::ord \<Rightarrow> 'a \<Rightarrow> 'a set"}\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term "lessThan y"} & @{term[source] "lessThan y"}\\
-@{term "atMost y"} & @{term[source] "atMost y"}\\
-@{term "greaterThan x"} & @{term[source] "greaterThan x"}\\
-@{term "atLeast x"} & @{term[source] "atLeast x"}\\
-@{term "greaterThanLessThan x y"} & @{term[source] "greaterThanLessThan x y"}\\
-@{term "atLeastLessThan x y"} & @{term[source] "atLeastLessThan x y"}\\
-@{term "greaterThanAtMost x y"} & @{term[source] "greaterThanAtMost x y"}\\
-@{term "atLeastAtMost x y"} & @{term[source] "atLeastAtMost x y"}\\
+\<^term>\<open>lessThan y\<close> & @{term[source] "lessThan y"}\\
+\<^term>\<open>atMost y\<close> & @{term[source] "atMost y"}\\
+\<^term>\<open>greaterThan x\<close> & @{term[source] "greaterThan x"}\\
+\<^term>\<open>atLeast x\<close> & @{term[source] "atLeast x"}\\
+\<^term>\<open>greaterThanLessThan x y\<close> & @{term[source] "greaterThanLessThan x y"}\\
+\<^term>\<open>atLeastLessThan x y\<close> & @{term[source] "atLeastLessThan x y"}\\
+\<^term>\<open>greaterThanAtMost x y\<close> & @{term[source] "greaterThanAtMost x y"}\\
+\<^term>\<open>atLeastAtMost x y\<close> & @{term[source] "atLeastAtMost x y"}\\
@{term[source] "\<Union>i\<le>n. A"} & @{term[source] "\<Union>i \<in> {..n}. A"}\\
@{term[source] "\<Union>i<n. A"} & @{term[source] "\<Union>i \<in> {..<n}. A"}\\
\multicolumn{2}{@ {}l@ {}}{Similarly for \<open>\<Inter>\<close> instead of \<open>\<Union>\<close>}\\
-@{term "sum (\<lambda>x. t) {a..b}"} & @{term[source] "sum (\<lambda>x. t) {a..b}"}\\
-@{term "sum (\<lambda>x. t) {a..<b}"} & @{term[source] "sum (\<lambda>x. t) {a..<b}"}\\
-@{term "sum (\<lambda>x. t) {..b}"} & @{term[source] "sum (\<lambda>x. t) {..b}"}\\
-@{term "sum (\<lambda>x. t) {..<b}"} & @{term[source] "sum (\<lambda>x. t) {..<b}"}\\
+\<^term>\<open>sum (\<lambda>x. t) {a..b}\<close> & @{term[source] "sum (\<lambda>x. t) {a..b}"}\\
+\<^term>\<open>sum (\<lambda>x. t) {a..<b}\<close> & @{term[source] "sum (\<lambda>x. t) {a..<b}"}\\
+\<^term>\<open>sum (\<lambda>x. t) {..b}\<close> & @{term[source] "sum (\<lambda>x. t) {..b}"}\\
+\<^term>\<open>sum (\<lambda>x. t) {..<b}\<close> & @{term[source] "sum (\<lambda>x. t) {..<b}"}\\
\multicolumn{2}{@ {}l@ {}}{Similarly for \<open>\<Prod>\<close> instead of \<open>\<Sum>\<close>}\\
\end{supertabular}
@@ -490,92 +489,92 @@
\section*{Power}
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Power.power} & @{typeof Power.power}
+\<^const>\<open>Power.power\<close> & \<^typeof>\<open>Power.power\<close>
\end{tabular}
\section*{Option}
-@{datatype option}
+\<^datatype>\<open>option\<close>
\<^bigskip>
\begin{tabular}{@ {} l @ {~::~} l @ {}}
-@{const Option.the} & @{typeof Option.the}\\
-@{const map_option} & @{typ[source]"('a \<Rightarrow> 'b) \<Rightarrow> 'a option \<Rightarrow> 'b option"}\\
-@{const set_option} & @{term_type_only set_option "'a option \<Rightarrow> 'a set"}\\
-@{const Option.bind} & @{term_type_only Option.bind "'a option \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> 'b option"}
+\<^const>\<open>Option.the\<close> & \<^typeof>\<open>Option.the\<close>\\
+\<^const>\<open>map_option\<close> & @{typ[source]"('a \<Rightarrow> 'b) \<Rightarrow> 'a option \<Rightarrow> 'b option"}\\
+\<^const>\<open>set_option\<close> & @{term_type_only set_option "'a option \<Rightarrow> 'a set"}\\
+\<^const>\<open>Option.bind\<close> & @{term_type_only Option.bind "'a option \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> 'b option"}
\end{tabular}
\section*{List}
-@{datatype list}
+\<^datatype>\<open>list\<close>
\<^bigskip>
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const List.append} & @{typeof List.append}\\
-@{const List.butlast} & @{typeof List.butlast}\\
-@{const List.concat} & @{typeof List.concat}\\
-@{const List.distinct} & @{typeof List.distinct}\\
-@{const List.drop} & @{typeof List.drop}\\
-@{const List.dropWhile} & @{typeof List.dropWhile}\\
-@{const List.filter} & @{typeof List.filter}\\
-@{const List.find} & @{typeof List.find}\\
-@{const List.fold} & @{typeof List.fold}\\
-@{const List.foldr} & @{typeof List.foldr}\\
-@{const List.foldl} & @{typeof List.foldl}\\
-@{const List.hd} & @{typeof List.hd}\\
-@{const List.last} & @{typeof List.last}\\
-@{const List.length} & @{typeof List.length}\\
-@{const List.lenlex} & @{term_type_only List.lenlex "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
-@{const List.lex} & @{term_type_only List.lex "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
-@{const List.lexn} & @{term_type_only List.lexn "('a*'a)set\<Rightarrow>nat\<Rightarrow>('a list * 'a list)set"}\\
-@{const List.lexord} & @{term_type_only List.lexord "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
-@{const List.listrel} & @{term_type_only List.listrel "('a*'b)set\<Rightarrow>('a list * 'b list)set"}\\
-@{const List.listrel1} & @{term_type_only List.listrel1 "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
-@{const List.lists} & @{term_type_only List.lists "'a set\<Rightarrow>'a list set"}\\
-@{const List.listset} & @{term_type_only List.listset "'a set list \<Rightarrow> 'a list set"}\\
-@{const Groups_List.sum_list} & @{typeof Groups_List.sum_list}\\
-@{const Groups_List.prod_list} & @{typeof Groups_List.prod_list}\\
-@{const List.list_all2} & @{typeof List.list_all2}\\
-@{const List.list_update} & @{typeof List.list_update}\\
-@{const List.map} & @{typeof List.map}\\
-@{const List.measures} & @{term_type_only List.measures "('a\<Rightarrow>nat)list\<Rightarrow>('a*'a)set"}\\
-@{const List.nth} & @{typeof List.nth}\\
-@{const List.nths} & @{typeof List.nths}\\
-@{const List.remdups} & @{typeof List.remdups}\\
-@{const List.removeAll} & @{typeof List.removeAll}\\
-@{const List.remove1} & @{typeof List.remove1}\\
-@{const List.replicate} & @{typeof List.replicate}\\
-@{const List.rev} & @{typeof List.rev}\\
-@{const List.rotate} & @{typeof List.rotate}\\
-@{const List.rotate1} & @{typeof List.rotate1}\\
-@{const List.set} & @{term_type_only List.set "'a list \<Rightarrow> 'a set"}\\
-@{const List.shuffles} & @{typeof List.shuffles}\\
-@{const List.sort} & @{typeof List.sort}\\
-@{const List.sorted} & @{typeof List.sorted}\\
-@{const List.sorted_wrt} & @{typeof List.sorted_wrt}\\
-@{const List.splice} & @{typeof List.splice}\\
-@{const List.take} & @{typeof List.take}\\
-@{const List.takeWhile} & @{typeof List.takeWhile}\\
-@{const List.tl} & @{typeof List.tl}\\
-@{const List.upt} & @{typeof List.upt}\\
-@{const List.upto} & @{typeof List.upto}\\
-@{const List.zip} & @{typeof List.zip}\\
+\<^const>\<open>List.append\<close> & \<^typeof>\<open>List.append\<close>\\
+\<^const>\<open>List.butlast\<close> & \<^typeof>\<open>List.butlast\<close>\\
+\<^const>\<open>List.concat\<close> & \<^typeof>\<open>List.concat\<close>\\
+\<^const>\<open>List.distinct\<close> & \<^typeof>\<open>List.distinct\<close>\\
+\<^const>\<open>List.drop\<close> & \<^typeof>\<open>List.drop\<close>\\
+\<^const>\<open>List.dropWhile\<close> & \<^typeof>\<open>List.dropWhile\<close>\\
+\<^const>\<open>List.filter\<close> & \<^typeof>\<open>List.filter\<close>\\
+\<^const>\<open>List.find\<close> & \<^typeof>\<open>List.find\<close>\\
+\<^const>\<open>List.fold\<close> & \<^typeof>\<open>List.fold\<close>\\
+\<^const>\<open>List.foldr\<close> & \<^typeof>\<open>List.foldr\<close>\\
+\<^const>\<open>List.foldl\<close> & \<^typeof>\<open>List.foldl\<close>\\
+\<^const>\<open>List.hd\<close> & \<^typeof>\<open>List.hd\<close>\\
+\<^const>\<open>List.last\<close> & \<^typeof>\<open>List.last\<close>\\
+\<^const>\<open>List.length\<close> & \<^typeof>\<open>List.length\<close>\\
+\<^const>\<open>List.lenlex\<close> & @{term_type_only List.lenlex "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
+\<^const>\<open>List.lex\<close> & @{term_type_only List.lex "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
+\<^const>\<open>List.lexn\<close> & @{term_type_only List.lexn "('a*'a)set\<Rightarrow>nat\<Rightarrow>('a list * 'a list)set"}\\
+\<^const>\<open>List.lexord\<close> & @{term_type_only List.lexord "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
+\<^const>\<open>List.listrel\<close> & @{term_type_only List.listrel "('a*'b)set\<Rightarrow>('a list * 'b list)set"}\\
+\<^const>\<open>List.listrel1\<close> & @{term_type_only List.listrel1 "('a*'a)set\<Rightarrow>('a list * 'a list)set"}\\
+\<^const>\<open>List.lists\<close> & @{term_type_only List.lists "'a set\<Rightarrow>'a list set"}\\
+\<^const>\<open>List.listset\<close> & @{term_type_only List.listset "'a set list \<Rightarrow> 'a list set"}\\
+\<^const>\<open>Groups_List.sum_list\<close> & \<^typeof>\<open>Groups_List.sum_list\<close>\\
+\<^const>\<open>Groups_List.prod_list\<close> & \<^typeof>\<open>Groups_List.prod_list\<close>\\
+\<^const>\<open>List.list_all2\<close> & \<^typeof>\<open>List.list_all2\<close>\\
+\<^const>\<open>List.list_update\<close> & \<^typeof>\<open>List.list_update\<close>\\
+\<^const>\<open>List.map\<close> & \<^typeof>\<open>List.map\<close>\\
+\<^const>\<open>List.measures\<close> & @{term_type_only List.measures "('a\<Rightarrow>nat)list\<Rightarrow>('a*'a)set"}\\
+\<^const>\<open>List.nth\<close> & \<^typeof>\<open>List.nth\<close>\\
+\<^const>\<open>List.nths\<close> & \<^typeof>\<open>List.nths\<close>\\
+\<^const>\<open>List.remdups\<close> & \<^typeof>\<open>List.remdups\<close>\\
+\<^const>\<open>List.removeAll\<close> & \<^typeof>\<open>List.removeAll\<close>\\
+\<^const>\<open>List.remove1\<close> & \<^typeof>\<open>List.remove1\<close>\\
+\<^const>\<open>List.replicate\<close> & \<^typeof>\<open>List.replicate\<close>\\
+\<^const>\<open>List.rev\<close> & \<^typeof>\<open>List.rev\<close>\\
+\<^const>\<open>List.rotate\<close> & \<^typeof>\<open>List.rotate\<close>\\
+\<^const>\<open>List.rotate1\<close> & \<^typeof>\<open>List.rotate1\<close>\\
+\<^const>\<open>List.set\<close> & @{term_type_only List.set "'a list \<Rightarrow> 'a set"}\\
+\<^const>\<open>List.shuffles\<close> & \<^typeof>\<open>List.shuffles\<close>\\
+\<^const>\<open>List.sort\<close> & \<^typeof>\<open>List.sort\<close>\\
+\<^const>\<open>List.sorted\<close> & \<^typeof>\<open>List.sorted\<close>\\
+\<^const>\<open>List.sorted_wrt\<close> & \<^typeof>\<open>List.sorted_wrt\<close>\\
+\<^const>\<open>List.splice\<close> & \<^typeof>\<open>List.splice\<close>\\
+\<^const>\<open>List.take\<close> & \<^typeof>\<open>List.take\<close>\\
+\<^const>\<open>List.takeWhile\<close> & \<^typeof>\<open>List.takeWhile\<close>\\
+\<^const>\<open>List.tl\<close> & \<^typeof>\<open>List.tl\<close>\\
+\<^const>\<open>List.upt\<close> & \<^typeof>\<open>List.upt\<close>\\
+\<^const>\<open>List.upto\<close> & \<^typeof>\<open>List.upto\<close>\\
+\<^const>\<open>List.zip\<close> & \<^typeof>\<open>List.zip\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{supertabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
\<open>[x\<^sub>1,\<dots>,x\<^sub>n]\<close> & \<open>x\<^sub>1 # \<dots> # x\<^sub>n # []\<close>\\
-@{term"[m..<n]"} & @{term[source]"upt m n"}\\
-@{term"[i..j]"} & @{term[source]"upto i j"}\\
-@{term"xs[n := x]"} & @{term[source]"list_update xs n x"}\\
-@{term"\<Sum>x\<leftarrow>xs. e"} & @{term[source]"listsum (map (\<lambda>x. e) xs)"}\\
+\<^term>\<open>[m..<n]\<close> & @{term[source]"upt m n"}\\
+\<^term>\<open>[i..j]\<close> & @{term[source]"upto i j"}\\
+\<^term>\<open>xs[n := x]\<close> & @{term[source]"list_update xs n x"}\\
+\<^term>\<open>\<Sum>x\<leftarrow>xs. e\<close> & @{term[source]"sum_list (map (\<lambda>x. e) xs)"}\\
\end{supertabular}
\<^medskip>
Filter input syntax \<open>[pat \<leftarrow> e. b]\<close>, where
-\<open>pat\<close> is a tuple pattern, which stands for @{term "filter (\<lambda>pat. b) e"}.
+\<open>pat\<close> is a tuple pattern, which stands for \<^term>\<open>filter (\<lambda>pat. b) e\<close>.
List comprehension input syntax: \<open>[e. q\<^sub>1, \<dots>, q\<^sub>n]\<close> where each
qualifier \<open>q\<^sub>i\<close> is either a generator \mbox{\<open>pat \<leftarrow> e\<close>} or a
@@ -587,28 +586,28 @@
the domain of a map may be infinite.
\begin{supertabular}{@ {} l @ {~::~} l @ {}}
-@{const Map.empty} & @{typeof Map.empty}\\
-@{const Map.map_add} & @{typeof Map.map_add}\\
-@{const Map.map_comp} & @{typeof Map.map_comp}\\
-@{const Map.restrict_map} & @{term_type_only Map.restrict_map "('a\<Rightarrow>'b option)\<Rightarrow>'a set\<Rightarrow>('a\<Rightarrow>'b option)"}\\
-@{const Map.dom} & @{term_type_only Map.dom "('a\<Rightarrow>'b option)\<Rightarrow>'a set"}\\
-@{const Map.ran} & @{term_type_only Map.ran "('a\<Rightarrow>'b option)\<Rightarrow>'b set"}\\
-@{const Map.map_le} & @{typeof Map.map_le}\\
-@{const Map.map_of} & @{typeof Map.map_of}\\
-@{const Map.map_upds} & @{typeof Map.map_upds}\\
+\<^const>\<open>Map.empty\<close> & \<^typeof>\<open>Map.empty\<close>\\
+\<^const>\<open>Map.map_add\<close> & \<^typeof>\<open>Map.map_add\<close>\\
+\<^const>\<open>Map.map_comp\<close> & \<^typeof>\<open>Map.map_comp\<close>\\
+\<^const>\<open>Map.restrict_map\<close> & @{term_type_only Map.restrict_map "('a\<Rightarrow>'b option)\<Rightarrow>'a set\<Rightarrow>('a\<Rightarrow>'b option)"}\\
+\<^const>\<open>Map.dom\<close> & @{term_type_only Map.dom "('a\<Rightarrow>'b option)\<Rightarrow>'a set"}\\
+\<^const>\<open>Map.ran\<close> & @{term_type_only Map.ran "('a\<Rightarrow>'b option)\<Rightarrow>'b set"}\\
+\<^const>\<open>Map.map_le\<close> & \<^typeof>\<open>Map.map_le\<close>\\
+\<^const>\<open>Map.map_of\<close> & \<^typeof>\<open>Map.map_of\<close>\\
+\<^const>\<open>Map.map_upds\<close> & \<^typeof>\<open>Map.map_upds\<close>\\
\end{supertabular}
\subsubsection*{Syntax}
\begin{tabular}{@ {} l @ {\quad$\equiv$\quad} l @ {}}
-@{term"Map.empty"} & @{term"\<lambda>x. None"}\\
-@{term"m(x:=Some y)"} & @{term[source]"m(x:=Some y)"}\\
+\<^term>\<open>Map.empty\<close> & \<^term>\<open>\<lambda>x. None\<close>\\
+\<^term>\<open>m(x:=Some y)\<close> & @{term[source]"m(x:=Some y)"}\\
\<open>m(x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n)\<close> & @{text[source]"m(x\<^sub>1\<mapsto>y\<^sub>1)\<dots>(x\<^sub>n\<mapsto>y\<^sub>n)"}\\
\<open>[x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n]\<close> & @{text[source]"Map.empty(x\<^sub>1\<mapsto>y\<^sub>1,\<dots>,x\<^sub>n\<mapsto>y\<^sub>n)"}\\
-@{term"map_upds m xs ys"} & @{term[source]"map_upds m xs ys"}\\
+\<^term>\<open>map_upds m xs ys\<close> & @{term[source]"map_upds m xs ys"}\\
\end{tabular}
-\section*{Infix operators in Main} % @{theory Main}
+\section*{Infix operators in Main} % \<^theory>\<open>Main\<close>
\begin{center}
\begin{tabular}{llll}
--- a/src/Doc/Prog_Prove/Basics.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Basics.thy Sat Jan 05 17:24:33 2019 +0100
@@ -16,32 +16,32 @@
programming languages. Thus there are
\begin{description}
\item[base types,]
-in particular @{typ bool}, the type of truth values,
-@{typ nat}, the type of natural numbers ($\mathbb{N}$), and \indexed{@{typ int}}{int},
+in particular \<^typ>\<open>bool\<close>, the type of truth values,
+\<^typ>\<open>nat\<close>, the type of natural numbers ($\mathbb{N}$), and \indexed{\<^typ>\<open>int\<close>}{int},
the type of mathematical integers ($\mathbb{Z}$).
\item[type constructors,]
in particular \<open>list\<close>, the type of
lists, and \<open>set\<close>, the type of sets. Type constructors are written
postfix, i.e., after their arguments. For example,
-@{typ "nat list"} is the type of lists whose elements are natural numbers.
+\<^typ>\<open>nat list\<close> is the type of lists whose elements are natural numbers.
\item[function types,]
denoted by \<open>\<Rightarrow>\<close>.
\item[type variables,]
- denoted by @{typ 'a}, @{typ 'b}, etc., like in ML\@.
+ denoted by \<^typ>\<open>'a\<close>, \<^typ>\<open>'b\<close>, etc., like in ML\@.
\end{description}
-Note that @{typ"'a \<Rightarrow> 'b list"} means \noquotes{@{typ[source]"'a \<Rightarrow> ('b list)"}},
-not @{typ"('a \<Rightarrow> 'b) list"}: postfix type constructors have precedence
+Note that \<^typ>\<open>'a \<Rightarrow> 'b list\<close> means \noquotes{@{typ[source]"'a \<Rightarrow> ('b list)"}},
+not \<^typ>\<open>('a \<Rightarrow> 'b) list\<close>: postfix type constructors have precedence
over \<open>\<Rightarrow>\<close>.
\conceptidx{Terms}{term} are formed as in functional programming by
applying functions to arguments. If \<open>f\<close> is a function of type
\<open>\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2\<close> and \<open>t\<close> is a term of type
-\<open>\<tau>\<^sub>1\<close> then @{term"f t"} is a term of type \<open>\<tau>\<^sub>2\<close>. We write \<open>t :: \<tau>\<close> to mean that term \<open>t\<close> has type \<open>\<tau>\<close>.
+\<open>\<tau>\<^sub>1\<close> then \<^term>\<open>f t\<close> is a term of type \<open>\<tau>\<^sub>2\<close>. We write \<open>t :: \<tau>\<close> to mean that term \<open>t\<close> has type \<open>\<tau>\<close>.
\begin{warn}
There are many predefined infix symbols like \<open>+\<close> and \<open>\<le>\<close>.
-The name of the corresponding binary function is @{term"(+)"},
-not just \<open>+\<close>. That is, @{term"x + y"} is nice surface syntax
+The name of the corresponding binary function is \<^term>\<open>(+)\<close>,
+not just \<open>+\<close>. That is, \<^term>\<open>x + y\<close> is nice surface syntax
(``syntactic sugar'') for \noquotes{@{term[source]"(+) x y"}}.
\end{warn}
@@ -56,18 +56,18 @@
if they occur inside other constructs.
\end{warn}
Terms may also contain \<open>\<lambda>\<close>-abstractions. For example,
-@{term "\<lambda>x. x"} is the identity function.
+\<^term>\<open>\<lambda>x. x\<close> is the identity function.
\conceptidx{Formulas}{formula} are terms of type \<open>bool\<close>.
-There are the basic constants @{term True} and @{term False} and
+There are the basic constants \<^term>\<open>True\<close> and \<^term>\<open>False\<close> and
the usual logical connectives (in decreasing order of precedence):
\<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>.
\conceptidx{Equality}{equality} is available in the form of the infix function \<open>=\<close>
-of type @{typ "'a \<Rightarrow> 'a \<Rightarrow> bool"}. It also works for formulas, where
+of type \<^typ>\<open>'a \<Rightarrow> 'a \<Rightarrow> bool\<close>. It also works for formulas, where
it means ``if and only if''.
-\conceptidx{Quantifiers}{quantifier} are written @{prop"\<forall>x. P"} and @{prop"\<exists>x. P"}.
+\conceptidx{Quantifiers}{quantifier} are written \<^prop>\<open>\<forall>x. P\<close> and \<^prop>\<open>\<exists>x. P\<close>.
Isabelle automatically computes the type of each variable in a term. This is
called \concept{type inference}. Despite type inference, it is sometimes
@@ -117,7 +117,7 @@
reside in a \concept{theory file} named \<open>T.thy\<close>.
\begin{warn}
-HOL contains a theory @{theory Main}\index{Main@@{theory Main}}, the union of all the basic
+HOL contains a theory \<^theory>\<open>Main\<close>\index{Main@\<^theory>\<open>Main\<close>}, the union of all the basic
predefined theories like arithmetic, lists, sets, etc.
Unless you know what you are doing, always include \<open>Main\<close>
as a direct or indirect parent of all your theories.
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy Sat Jan 05 17:24:33 2019 +0100
@@ -6,17 +6,17 @@
text\<open>
\vspace{-4ex}
-\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and \<open>list\<close>}{Types bool, nat and list}}
+\section{\texorpdfstring{Types \<^typ>\<open>bool\<close>, \<^typ>\<open>nat\<close> and \<open>list\<close>}{Types bool, nat and list}}
These are the most important predefined types. We go through them one by one.
Based on examples we learn how to define (possibly recursive) functions and
prove theorems about them by induction and simplification.
-\subsection{Type \indexed{@{typ bool}}{bool}}
+\subsection{Type \indexed{\<^typ>\<open>bool\<close>}{bool}}
The type of boolean values is a predefined datatype
@{datatype[display] bool}
-with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
+with the two values \indexed{\<^const>\<open>True\<close>}{True} and \indexed{\<^const>\<open>False\<close>}{False} and
with many predefined functions: \<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, etc. Here is how conjunction could be defined by pattern matching:
\<close>
@@ -27,13 +27,13 @@
text\<open>Both the datatype and function definitions roughly follow the syntax
of functional programming languages.
-\subsection{Type \indexed{@{typ nat}}{nat}}
+\subsection{Type \indexed{\<^typ>\<open>nat\<close>}{nat}}
Natural numbers are another predefined datatype:
-@{datatype[display] nat}\index{Suc@@{const Suc}}
-All values of type @{typ nat} are generated by the constructors
-\<open>0\<close> and @{const Suc}. Thus the values of type @{typ nat} are
-\<open>0\<close>, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
+@{datatype[display] nat}\index{Suc@\<^const>\<open>Suc\<close>}
+All values of type \<^typ>\<open>nat\<close> are generated by the constructors
+\<open>0\<close> and \<^const>\<open>Suc\<close>. Thus the values of type \<^typ>\<open>nat\<close> are
+\<open>0\<close>, \<^term>\<open>Suc 0\<close>, \<^term>\<open>Suc(Suc 0)\<close>, etc.
There are many predefined functions: \<open>+\<close>, \<open>*\<close>, \<open>\<le>\<close>, etc. Here is how you could define your own addition:
\<close>
@@ -41,7 +41,7 @@
"add 0 n = n" |
"add (Suc m) n = Suc(add m n)"
-text\<open>And here is a proof of the fact that @{prop"add m 0 = m"}:\<close>
+text\<open>And here is a proof of the fact that \<^prop>\<open>add m 0 = m\<close>:\<close>
lemma add_02: "add m 0 = m"
apply(induction m)
@@ -65,10 +65,10 @@
The command \isacom{apply}\<open>(auto)\<close> instructs Isabelle to try
and prove all subgoals automatically, essentially by simplifying them.
Because both subgoals are easy, Isabelle can do it.
-The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
+The base case \<^prop>\<open>add 0 0 = 0\<close> holds by definition of \<^const>\<open>add\<close>,
and the induction step is almost as simple:
\<open>add\<^latex>\<open>~\<close>(Suc m) 0 = Suc(add m 0) = Suc m\<close>
-using first the definition of @{const add} and then the induction hypothesis.
+using first the definition of \<^const>\<open>add\<close> and then the induction hypothesis.
In summary, both subproofs rely on simplification with function definitions and
the induction hypothesis.
As a result of that final \isacom{done}, Isabelle associates the lemma
@@ -97,46 +97,46 @@
not just for natural numbers but for other types as well.
For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
that you are talking about natural numbers. Hence Isabelle can only infer
- that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
+ that \<^term>\<open>x\<close> is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
exist. As a consequence, you will be unable to prove the goal.
% To alert you to such pitfalls, Isabelle flags numerals without a
% fixed type in its output: @ {prop"x+0 = x"}.
In this particular example, you need to include
an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
- is enough contextual information this may not be necessary: @{prop"Suc x =
- x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
+ is enough contextual information this may not be necessary: \<^prop>\<open>Suc x =
+ x\<close> automatically implies \<open>x::nat\<close> because \<^term>\<open>Suc\<close> is not
overloaded.
\end{warn}
\subsubsection{An Informal Proof}
Above we gave some terse informal explanation of the proof of
-@{prop"add m 0 = m"}. A more detailed informal exposition of the lemma
+\<^prop>\<open>add m 0 = m\<close>. A more detailed informal exposition of the lemma
might look like this:
\bigskip
\noindent
-\textbf{Lemma} @{prop"add m 0 = m"}
+\textbf{Lemma} \<^prop>\<open>add m 0 = m\<close>
\noindent
\textbf{Proof} by induction on \<open>m\<close>.
\begin{itemize}
-\item Case \<open>0\<close> (the base case): @{prop"add 0 0 = 0"}
- holds by definition of @{const add}.
-\item Case @{term"Suc m"} (the induction step):
- We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
+\item Case \<open>0\<close> (the base case): \<^prop>\<open>add 0 0 = 0\<close>
+ holds by definition of \<^const>\<open>add\<close>.
+\item Case \<^term>\<open>Suc m\<close> (the induction step):
+ We assume \<^prop>\<open>add m 0 = m\<close>, the induction hypothesis (IH),
and we need to show \<open>add (Suc m) 0 = Suc m\<close>.
The proof is as follows:\smallskip
\begin{tabular}{@ {}rcl@ {\quad}l@ {}}
- @{term "add (Suc m) 0"} &\<open>=\<close>& @{term"Suc(add m 0)"}
+ \<^term>\<open>add (Suc m) 0\<close> &\<open>=\<close>& \<^term>\<open>Suc(add m 0)\<close>
& by definition of \<open>add\<close>\\
- &\<open>=\<close>& @{term "Suc m"} & by IH
+ &\<open>=\<close>& \<^term>\<open>Suc m\<close> & by IH
\end{tabular}
\end{itemize}
Throughout this book, \concept{IH} will stand for ``induction hypothesis''.
-We have now seen three proofs of @{prop"add m 0 = 0"}: the Isabelle one, the
+We have now seen three proofs of \<^prop>\<open>add m 0 = m\<close>: the Isabelle one, the
terse four lines explaining the base case and the induction step, and just now a
model of a traditional inductive proof. The three proofs differ in the level
of detail given and the intended reader: the Isabelle proof is for the
@@ -164,14 +164,14 @@
text\<open>
\begin{itemize}
-\item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
-\item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
-Hence all lists are of the form @{const Nil}, or @{term"Cons x Nil"},
-or @{term"Cons x (Cons y Nil)"}, etc.
+\item Type \<^typ>\<open>'a list\<close> is the type of lists over elements of type \<^typ>\<open>'a\<close>. Because \<^typ>\<open>'a\<close> is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
+\item Lists have two constructors: \<^const>\<open>Nil\<close>, the empty list, and \<^const>\<open>Cons\<close>, which puts an element (of type \<^typ>\<open>'a\<close>) in front of a list (of type \<^typ>\<open>'a list\<close>).
+Hence all lists are of the form \<^const>\<open>Nil\<close>, or \<^term>\<open>Cons x Nil\<close>,
+or \<^term>\<open>Cons x (Cons y Nil)\<close>, etc.
\item \isacom{datatype} requires no quotation marks on the
left-hand side, but on the right-hand side each of the argument
types of a constructor needs to be enclosed in quotation marks, unless
-it is just an identifier (e.g., @{typ nat} or @{typ 'a}).
+it is just an identifier (e.g., \<^typ>\<open>nat\<close> or \<^typ>\<open>'a\<close>).
\end{itemize}
We also define two standard functions, append and reverse:\<close>
@@ -190,17 +190,17 @@
value "rev(Cons True (Cons False Nil))"
-text\<open>yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too:\<close>
+text\<open>yields the result \<^value>\<open>rev(Cons True (Cons False Nil))\<close>. This works symbolically, too:\<close>
value "rev(Cons a (Cons b Nil))"
-text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
+text\<open>yields \<^value>\<open>rev(Cons a (Cons b Nil))\<close>.
\medskip
Figure~\ref{fig:MyList} shows the theory created so far.
-Because \<open>list\<close>, @{const Nil}, @{const Cons}, etc.\ are already predefined,
+Because \<open>list\<close>, \<^const>\<open>Nil\<close>, \<^const>\<open>Cons\<close>, etc.\ are already predefined,
Isabelle prints qualified (long) names when executing this theory, for example, \<open>MyList.Nil\<close>
- instead of @{const Nil}.
+ instead of \<^const>\<open>Nil\<close>.
To suppress the qualified names you can insert the command
\texttt{declare [[names\_short]]}.
This is not recommended in general but is convenient for this unusual example.
@@ -223,11 +223,11 @@
Just as for natural numbers, there is a proof principle of induction for
lists. Induction over a list is essentially induction over the length of
the list, although the length remains implicit. To prove that some property
-\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{@{prop"P(xs)"}},
+\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{\<^prop>\<open>P(xs)\<close>},
you need to prove
\begin{enumerate}
-\item the base case @{prop"P(Nil)"} and
-\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
+\item the base case \<^prop>\<open>P(Nil)\<close> and
+\item the inductive case \<^prop>\<open>P(Cons x xs)\<close> under the assumption \<^prop>\<open>P(xs)\<close>, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
\end{enumerate}
This is often called \concept{structural induction} for lists.
@@ -244,13 +244,13 @@
interchangeable and merely indicate the importance we attach to a
proposition. Via the bracketed attribute \<open>simp\<close> we also tell Isabelle
to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
-involving simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}. The proof is by induction:\<close>
+involving simplification will replace occurrences of \<^term>\<open>rev(rev xs)\<close> by
+\<^term>\<open>xs\<close>. The proof is by induction:\<close>
apply(induction xs)
txt\<open>
-As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
+As explained above, we obtain two subgoals, namely the base case (\<^const>\<open>Nil\<close>) and the induction step (\<^const>\<open>Cons\<close>):
@{subgoals[display,indent=0,margin=65]}
Let us try to solve both goals automatically:
\<close>
@@ -272,7 +272,7 @@
lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
txt\<open>There are two variables that we could induct on: \<open>xs\<close> and
-\<open>ys\<close>. Because @{const app} is defined by recursion on
+\<open>ys\<close>. Because \<^const>\<open>app\<close> is defined by recursion on
the first argument, \<open>xs\<close> is the correct one:
\<close>
@@ -311,10 +311,10 @@
We find that this time \<open>auto\<close> solves the base case, but the
induction step merely simplifies to
@{subgoals[display,indent=0,goals_limit=1]}
-The missing lemma is associativity of @{const app},
+The missing lemma is associativity of \<^const>\<open>app\<close>,
which we insert in front of the failed lemma \<open>rev_app\<close>.
-\subsubsection{Associativity of @{const app}}
+\subsubsection{Associativity of \<^const>\<open>app\<close>}
The canonical proof procedure succeeds without further ado:
\<close>
@@ -340,27 +340,27 @@
\subsubsection{Another Informal Proof}
-Here is the informal proof of associativity of @{const app}
+Here is the informal proof of associativity of \<^const>\<open>app\<close>
corresponding to the Isabelle proof above.
\bigskip
\noindent
-\textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}
+\textbf{Lemma} \<^prop>\<open>app (app xs ys) zs = app xs (app ys zs)\<close>
\noindent
\textbf{Proof} by induction on \<open>xs\<close>.
\begin{itemize}
-\item Case \<open>Nil\<close>: \ @{prop"app (app Nil ys) zs = app ys zs"} \<open>=\<close>
- \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of \<open>app\<close>.
+\item Case \<open>Nil\<close>: \ \<^prop>\<open>app (app Nil ys) zs = app ys zs\<close> \<open>=\<close>
+ \mbox{\<^term>\<open>app Nil (app ys zs)\<close>} \ holds by definition of \<open>app\<close>.
\item Case \<open>Cons x xs\<close>: We assume
- \begin{center} \hfill @{term"app (app xs ys) zs"} \<open>=\<close>
- @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
+ \begin{center} \hfill \<^term>\<open>app (app xs ys) zs\<close> \<open>=\<close>
+ \<^term>\<open>app xs (app ys zs)\<close> \hfill (IH) \end{center}
and we need to show
- \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
+ \begin{center} \<^prop>\<open>app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)\<close>.\end{center}
The proof is as follows:\smallskip
\begin{tabular}{@ {}l@ {\quad}l@ {}}
- @{term"app (app (Cons x xs) ys) zs"}\\
+ \<^term>\<open>app (app (Cons x xs) ys) zs\<close>\\
\<open>= app (Cons x (app xs ys)) zs\<close> & by definition of \<open>app\<close>\\
\<open>= Cons x (app (app xs ys) zs)\<close> & by definition of \<open>app\<close>\\
\<open>= Cons x (app xs (app ys zs))\<close> & by IH\\
@@ -371,14 +371,14 @@
\noindent Didn't we say earlier that all proofs are by simplification? But
in both cases, going from left to right, the last equality step is not a
-simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
-ys zs)"}. It appears almost mysterious because we suddenly complicate the
+simplification at all! In the base case it is \<^prop>\<open>app ys zs = app Nil (app
+ys zs)\<close>. It appears almost mysterious because we suddenly complicate the
term by appending \<open>Nil\<close> on the left. What is really going on is this:
-when proving some equality \mbox{@{prop"s = t"}}, both \<open>s\<close> and \<open>t\<close> are
+when proving some equality \mbox{\<^prop>\<open>s = t\<close>}, both \<open>s\<close> and \<open>t\<close> are
simplified until they ``meet in the middle''. This heuristic for equality proofs
works well for a functional programming context like ours. In the base case
-both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
-ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle.
+both \<^term>\<open>app (app Nil ys) zs\<close> and \<^term>\<open>app Nil (app
+ys zs)\<close> are simplified to \<^term>\<open>app ys zs\<close>, the term in the middle.
\subsection{Predefined Lists}
\label{sec:predeflists}
@@ -386,17 +386,17 @@
Isabelle's predefined lists are the same as the ones above, but with
more syntactic sugar:
\begin{itemize}
-\item \<open>[]\<close> is \indexed{@{const Nil}}{Nil},
-\item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
+\item \<open>[]\<close> is \indexed{\<^const>\<open>Nil\<close>}{Nil},
+\item \<^term>\<open>x # xs\<close> is \<^term>\<open>Cons x xs\<close>\index{Cons@\<^const>\<open>Cons\<close>},
\item \<open>[x\<^sub>1, \<dots>, x\<^sub>n]\<close> is \<open>x\<^sub>1 # \<dots> # x\<^sub>n # []\<close>, and
-\item @{term "xs @ ys"} is @{term"app xs ys"}.
+\item \<^term>\<open>xs @ ys\<close> is \<^term>\<open>app xs ys\<close>.
\end{itemize}
There is also a large library of predefined functions.
The most important ones are the length function
-\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@@{const length}} (with the obvious definition),
-and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
+\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@\<^const>\<open>length\<close>} (with the obvious definition),
+and the \indexed{\<^const>\<open>map\<close>}{map} function that applies a function to all elements of a list:
\begin{isabelle}
-\isacom{fun} @{const map} \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
+\isacom{fun} \<^const>\<open>map\<close> \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
\<open>"\<close>@{thm list.map(1) [of f]}\<open>" |\<close>\\
\<open>"\<close>@{thm list.map(2) [of f x xs]}\<open>"\<close>
\end{isabelle}
@@ -404,17 +404,17 @@
\ifsem
Also useful are the \concept{head} of a list, its first element,
and the \concept{tail}, the rest of the list:
-\begin{isabelle}\index{hd@@{const hd}}
+\begin{isabelle}\index{hd@\<^const>\<open>hd\<close>}
\isacom{fun} \<open>hd :: 'a list \<Rightarrow> 'a\<close>\\
-@{prop"hd(x#xs) = x"}
+\<^prop>\<open>hd(x#xs) = x\<close>
\end{isabelle}
-\begin{isabelle}\index{tl@@{const tl}}
+\begin{isabelle}\index{tl@\<^const>\<open>tl\<close>}
\isacom{fun} \<open>tl :: 'a list \<Rightarrow> 'a list\<close>\\
-@{prop"tl [] = []"} \<open>|\<close>\\
-@{prop"tl(x#xs) = xs"}
+\<^prop>\<open>tl [] = []\<close> \<open>|\<close>\\
+\<^prop>\<open>tl(x#xs) = xs\<close>
\end{isabelle}
-Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
-but we do not know what the result is. That is, @{term"hd []"} is not undefined
+Note that since HOL is a logic of total functions, \<^term>\<open>hd []\<close> is defined,
+but we do not know what the result is. That is, \<^term>\<open>hd []\<close> is not undefined
but underdefined.
\fi
%
@@ -422,44 +422,44 @@
From now on lists are always the predefined lists.
\ifsem\else
-\subsection{Types @{typ int} and @{typ real}}
+\subsection{Types \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>}
-In addition to @{typ nat} there are also the types @{typ int} and @{typ real}, the mathematical integers
+In addition to \<^typ>\<open>nat\<close> there are also the types \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>, the mathematical integers
and real numbers. As mentioned above, numerals and most of the standard arithmetic operations are overloaded.
-In particular they are defined on @{typ int} and @{typ real}.
+In particular they are defined on \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>.
\begin{warn}
There are two infix exponentiation operators:
-@{term "(^)"} for @{typ nat} and @{typ int} (with exponent of type @{typ nat} in both cases)
-and @{term "(powr)"} for @{typ real}.
+\<^term>\<open>(^)\<close> for \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close> (with exponent of type \<^typ>\<open>nat\<close> in both cases)
+and \<^term>\<open>(powr)\<close> for \<^typ>\<open>real\<close>.
\end{warn}
\begin{warn}
-Type @{typ int} is already part of theory @{theory Main}, but in order to use @{typ real} as well, you have to import
-theory @{theory Complex_Main} instead of @{theory Main}.
+Type \<^typ>\<open>int\<close> is already part of theory \<^theory>\<open>Main\<close>, but in order to use \<^typ>\<open>real\<close> as well, you have to import
+theory \<^theory>\<open>Complex_Main\<close> instead of \<^theory>\<open>Main\<close>.
\end{warn}
There are three conversion functions, meaning inclusions:
\begin{quote}
\begin{tabular}{rcl}
-@{const int} &\<open>::\<close>& @{typ "nat \<Rightarrow> int"}\\
-@{const real} &\<open>::\<close>& @{typ "nat \<Rightarrow> real"}\\
-@{const real_of_int} &\<open>::\<close>& @{typ "int \<Rightarrow> real"}\\
+\<^const>\<open>int\<close> &\<open>::\<close>& \<^typ>\<open>nat \<Rightarrow> int\<close>\\
+\<^const>\<open>real\<close> &\<open>::\<close>& \<^typ>\<open>nat \<Rightarrow> real\<close>\\
+\<^const>\<open>real_of_int\<close> &\<open>::\<close>& \<^typ>\<open>int \<Rightarrow> real\<close>\\
\end{tabular}
\end{quote}
Isabelle inserts these conversion functions automatically once you import \<open>Complex_Main\<close>.
If there are multiple type-correct completions, Isabelle chooses an arbitrary one.
For example, the input \noquotes{@{term[source] "(i::int) + (n::nat)"}} has the unique
-type-correct completion @{term"(i::int) + int(n::nat)"}. In contrast,
+type-correct completion \<^term>\<open>(i::int) + int(n::nat)\<close>. In contrast,
\noquotes{@{term[source] "((n::nat) + n) :: real"}} has two type-correct completions,
\noquotes{@{term[source]"real(n+n)"}} and \noquotes{@{term[source]"real n + real n"}}.
There are also the coercion functions in the other direction:
\begin{quote}
\begin{tabular}{rcl}
-@{const nat} &\<open>::\<close>& @{typ "int \<Rightarrow> nat"}\\
-@{const floor} &\<open>::\<close>& @{typ "real \<Rightarrow> int"}\\
-@{const ceiling} &\<open>::\<close>& @{typ "real \<Rightarrow> int"}\\
+\<^const>\<open>nat\<close> &\<open>::\<close>& \<^typ>\<open>int \<Rightarrow> nat\<close>\\
+\<^const>\<open>floor\<close> &\<open>::\<close>& \<^typ>\<open>real \<Rightarrow> int\<close>\\
+\<^const>\<open>ceiling\<close> &\<open>::\<close>& \<^typ>\<open>real \<Rightarrow> int\<close>\\
\end{tabular}
\end{quote}
\fi
@@ -473,29 +473,29 @@
\end{exercise}
\begin{exercise}
-Start from the definition of @{const add} given above.
-Prove that @{const add} is associative and commutative.
-Define a recursive function \<open>double\<close> \<open>::\<close> @{typ"nat \<Rightarrow> nat"}
-and prove @{prop"double m = add m m"}.
+Start from the definition of \<^const>\<open>add\<close> given above.
+Prove that \<^const>\<open>add\<close> is associative and commutative.
+Define a recursive function \<open>double\<close> \<open>::\<close> \<^typ>\<open>nat \<Rightarrow> nat\<close>
+and prove \<^prop>\<open>double m = add m m\<close>.
\end{exercise}
\begin{exercise}
-Define a function \<open>count ::\<close> @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
+Define a function \<open>count ::\<close> \<^typ>\<open>'a \<Rightarrow> 'a list \<Rightarrow> nat\<close>
that counts the number of occurrences of an element in a list. Prove
-@{prop"count x xs \<le> length xs"}.
+\<^prop>\<open>count x xs \<le> length xs\<close>.
\end{exercise}
\begin{exercise}
-Define a recursive function \<open>snoc ::\<close> @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
+Define a recursive function \<open>snoc ::\<close> \<^typ>\<open>'a list \<Rightarrow> 'a \<Rightarrow> 'a list\<close>
that appends an element to the end of a list. With the help of \<open>snoc\<close>
-define a recursive function \<open>reverse ::\<close> @{typ"'a list \<Rightarrow> 'a list"}
-that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
+define a recursive function \<open>reverse ::\<close> \<^typ>\<open>'a list \<Rightarrow> 'a list\<close>
+that reverses a list. Prove \<^prop>\<open>reverse(reverse xs) = xs\<close>.
\end{exercise}
\begin{exercise}
-Define a recursive function \<open>sum_upto ::\<close> @{typ"nat \<Rightarrow> nat"} such that
+Define a recursive function \<open>sum_upto ::\<close> \<^typ>\<open>nat \<Rightarrow> nat\<close> such that
\mbox{\<open>sum_upto n\<close>} \<open>=\<close> \<open>0 + ... + n\<close> and prove
-@{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
+\<^prop>\<open> sum_upto (n::nat) = n * (n+1) div 2\<close>.
\end{exercise}
\<close>
(*<*)
--- a/src/Doc/Prog_Prove/Isar.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Isar.thy Sat Jan 05 17:24:33 2019 +0100
@@ -87,7 +87,7 @@
We show a number of proofs of Cantor's theorem that a function from a set to
its powerset cannot be surjective, illustrating various features of Isar. The
-constant @{const surj} is predefined.
+constant \<^const>\<open>surj\<close> is predefined.
\<close>
lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
@@ -107,13 +107,13 @@
\mbox{@{thm (prem 1) notI}}}
{\mbox{@{thm (concl) notI}}}
\]
-In order to prove @{prop"~ P"}, assume \<open>P\<close> and show \<open>False\<close>.
+In order to prove \<^prop>\<open>~ P\<close>, assume \<open>P\<close> and show \<open>False\<close>.
Thus we may assume \mbox{\noquotes{@{prop [source] "surj f"}}}. The proof shows that names of propositions
may be (single!) digits --- meaningful names are hard to invent and are often
not necessary. Both \isacom{have} steps are obvious. The second one introduces
-the diagonal set @{term"{x. x \<notin> f x}"}, the key idea in the proof.
+the diagonal set \<^term>\<open>{x. x \<notin> f x}\<close>, the key idea in the proof.
If you wonder why \<open>2\<close> directly implies \<open>False\<close>: from \<open>2\<close>
-it follows that @{prop"a \<notin> f a \<longleftrightarrow> a \<in> f a"}.
+it follows that \<^prop>\<open>a \<notin> f a \<longleftrightarrow> a \<in> f a\<close>.
\subsection{\indexed{\<open>this\<close>}{this}, \indexed{\isacom{then}}{then}, \indexed{\isacom{hence}}{hence} and \indexed{\isacom{thus}}{thus}}
@@ -206,7 +206,7 @@
\begin{warn}
Note the hyphen after the \isacom{proof} command.
It is the null method that does nothing to the goal. Leaving it out would be asking
-Isabelle to try some suitable introduction rule on the goal @{const False} --- but
+Isabelle to try some suitable introduction rule on the goal \<^const>\<open>False\<close> --- but
there is no such rule and \isacom{proof} would fail.
\end{warn}
In the \isacom{have} step the assumption \noquotes{@{prop[source]"surj f"}} is now
@@ -232,7 +232,7 @@
We start with two forms of \concept{case analysis}:
starting from a formula \<open>P\<close> we have the two cases \<open>P\<close> and
-@{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
+\<^prop>\<open>~P\<close>, and starting from a fact \<^prop>\<open>P \<or> Q\<close>
we have the two cases \<open>P\<close> and \<open>Q\<close>:
\<close>text_raw\<open>
\begin{tabular}{@ {}ll@ {}}
@@ -458,7 +458,7 @@
\begin{description}
\item[``\<open>...\<close>''] is literally three dots. It is the name of an unknown that Isar
automatically instantiates with the right-hand side of the previous equation.
-In general, if \<open>this\<close> is the theorem @{term "p t\<^sub>1 t\<^sub>2"} then ``\<open>...\<close>''
+In general, if \<open>this\<close> is the theorem \<^term>\<open>p t\<^sub>1 t\<^sub>2\<close> then ``\<open>...\<close>''
stands for \<open>t\<^sub>2\<close>.
\item[``\<open>.\<close>''] (a single dot) is a proof method that solves a goal by one of the
assumptions. This works here because the result of \isakeyword{finally}
@@ -490,7 +490,7 @@
\<open>calculation := this\<close>. In each subsequent \isakeyword{also} step,
Isabelle composes the theorems \<open>calculation\<close> and \<open>this\<close> (i.e.\ the two previous
(in)equalities) using some predefined set of rules including transitivity
-of \<open>=\<close>, \<open>\<le>\<close> and \<open><\<close> but also mixed rules like @{prop"\<lbrakk> x \<le> y; y < z \<rbrakk> \<Longrightarrow> x < z"}.
+of \<open>=\<close>, \<open>\<le>\<close> and \<open><\<close> but also mixed rules like \<^prop>\<open>\<lbrakk> x \<le> y; y < z \<rbrakk> \<Longrightarrow> x < z\<close>.
The result of this composition is assigned to \<open>calculation\<close>. Consider
\begin{quote}
\isakeyword{have} \<open>"t\<^sub>1 \<le> t\<^sub>2"\<close> \isasymproof\\
@@ -678,7 +678,7 @@
Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
such that \<open>take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]\<close> and
\<open>drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]\<close>. Let sledgehammer find and apply
-the relevant @{const take} and @{const drop} lemmas for you.
+the relevant \<^const>\<open>take\<close> and \<^const>\<open>drop\<close> lemmas for you.
\endexercise
@@ -688,8 +688,8 @@
\index{case analysis|(}
We have seen case analysis on formulas. Now we want to distinguish
-which form some term takes: is it \<open>0\<close> or of the form @{term"Suc n"},
-is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
+which form some term takes: is it \<open>0\<close> or of the form \<^term>\<open>Suc n\<close>,
+is it \<^term>\<open>[]\<close> or of the form \<^term>\<open>x#xs\<close>, etc. Here is a typical example
proof by case analysis on the form of \<open>xs\<close>:
\<close>
@@ -703,8 +703,8 @@
qed
text\<open>\index{cases@\<open>cases\<close>|(}Function \<open>tl\<close> (''tail'') is defined by @{thm list.sel(2)} and
-@{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
-and @{prop"0 - 1 = (0::nat)"}.
+@{thm list.sel(3)}. Note that the result type of \<^const>\<open>length\<close> is \<^typ>\<open>nat\<close>
+and \<^prop>\<open>0 - 1 = (0::nat)\<close>.
This proof pattern works for any term \<open>t\<close> whose type is a datatype.
The goal has to be proved for each constructor \<open>C\<close>:
@@ -742,7 +742,7 @@
We illustrate structural induction with an example based on natural numbers:
the sum (\<open>\<Sum>\<close>) of the first \<open>n\<close> natural numbers
-(\<open>{0..n::nat}\<close>) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
+(\<open>{0..n::nat}\<close>) is equal to \mbox{\<^term>\<open>n*(n+1) div 2::nat\<close>}.
Never mind the details, just focus on the pattern:
\<close>
@@ -769,7 +769,7 @@
text\<open>The first line introduces an abbreviation \<open>?P n\<close> for the goal.
Pattern matching \<open>?P n\<close> with the goal instantiates \<open>?P\<close> to the
-function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}. Now the proposition to
+function \<^term>\<open>\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2\<close>. Now the proposition to
be proved in the base case can be written as \<open>?P 0\<close>, the induction
hypothesis as \<open>?P n\<close>, and the conclusion of the induction step as
\<open>?P(Suc n)\<close>.
@@ -790,7 +790,7 @@
The unknown \<open>?case\<close>\index{case?@\<open>?case\<close>|(} is set in each case to the required
claim, i.e., \<open>?P 0\<close> and \mbox{\<open>?P(Suc n)\<close>} in the above proof,
without requiring the user to define a \<open>?P\<close>. The general
-pattern for induction over @{typ nat} is shown on the left-hand side:
+pattern for induction over \<^typ>\<open>nat\<close> is shown on the left-hand side:
\<close>text_raw\<open>
\begin{tabular}{@ {}ll@ {}}
\begin{minipage}[t]{.4\textwidth}
@@ -920,7 +920,7 @@
"evn (Suc 0) = False" |
"evn (Suc(Suc n)) = evn n"
-text\<open>We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
+text\<open>We recast the proof of \<^prop>\<open>ev n \<Longrightarrow> evn n\<close> in Isar. The
left column shows the actual proof text, the right column shows
the implicit effect of the two \isacom{case} commands:\<close>text_raw\<open>
\begin{tabular}{@ {}l@ {\qquad}l@ {}}
@@ -962,8 +962,8 @@
explicitly and the names of the cases are the names of the rules in the
inductive definition.
Let us examine the two assumptions named @{thm[source]evSS}:
-@{prop "ev n"} is the premise of rule @{thm[source]evSS}, which we may assume
-because we are in the case where that rule was used; @{prop"evn n"}
+\<^prop>\<open>ev n\<close> is the premise of rule @{thm[source]evSS}, which we may assume
+because we are in the case where that rule was used; \<^prop>\<open>evn n\<close>
is the induction hypothesis.
\begin{warn}
Because each \isacom{case} command introduces a list of assumptions
@@ -1063,11 +1063,11 @@
Rule inversion is case analysis of which rule could have been used to
derive some fact. The name \conceptnoidx{rule inversion} emphasizes that we are
reasoning backwards: by which rules could some given fact have been proved?
-For the inductive definition of @{const ev}, rule inversion can be summarized
+For the inductive definition of \<^const>\<open>ev\<close>, rule inversion can be summarized
like this:
@{prop[display]"ev n \<Longrightarrow> n = 0 \<or> (\<exists>k. n = Suc(Suc k) \<and> ev k)"}
The realisation in Isabelle is a case analysis.
-A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
+A simple example is the proof that \<^prop>\<open>ev n \<Longrightarrow> ev (n - 2)\<close>. We
already went through the details informally in \autoref{sec:Logic:even}. This
is the Isar proof:
\<close>
@@ -1090,16 +1090,16 @@
defined predicate is triggered by piping the given fact
(here: \isacom{from}~\<open>this\<close>) into a proof by \<open>cases\<close>.
Let us examine the assumptions available in each case. In case \<open>ev0\<close>
-we have \<open>n = 0\<close> and in case \<open>evSS\<close> we have @{prop"n = Suc(Suc k)"}
-and @{prop"ev k"}. In each case the assumptions are available under the name
+we have \<open>n = 0\<close> and in case \<open>evSS\<close> we have \<^prop>\<open>n = Suc(Suc k)\<close>
+and \<^prop>\<open>ev k\<close>. In each case the assumptions are available under the name
of the case; there is no fine-grained naming schema like there is for induction.
Sometimes some rules could not have been used to derive the given fact
because constructors clash. As an extreme example consider
-rule inversion applied to @{prop"ev(Suc 0)"}: neither rule \<open>ev0\<close> nor
-rule \<open>evSS\<close> can yield @{prop"ev(Suc 0)"} because \<open>Suc 0\<close> unifies
-neither with \<open>0\<close> nor with @{term"Suc(Suc n)"}. Impossible cases do not
-have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
+rule inversion applied to \<^prop>\<open>ev(Suc 0)\<close>: neither rule \<open>ev0\<close> nor
+rule \<open>evSS\<close> can yield \<^prop>\<open>ev(Suc 0)\<close> because \<open>Suc 0\<close> unifies
+neither with \<open>0\<close> nor with \<^term>\<open>Suc(Suc n)\<close>. Impossible cases do not
+have to be proved. Hence we can prove anything from \<^prop>\<open>ev(Suc 0)\<close>:
\<close>
(*<*)
notepad begin fix P
@@ -1109,7 +1109,7 @@
end
(*>*)
-text\<open>That is, @{prop"ev(Suc 0)"} is simply not provable:\<close>
+text\<open>That is, \<^prop>\<open>ev(Suc 0)\<close> is simply not provable:\<close>
lemma "\<not> ev(Suc 0)"
proof
@@ -1117,7 +1117,7 @@
qed
text\<open>Normally not all cases will be impossible. As a simple exercise,
-prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
+prove that \mbox{\<^prop>\<open>\<not> ev(Suc(Suc(Suc 0)))\<close>.}
\subsection{Advanced Rule Induction}
\label{sec:advanced-rule-induction}
@@ -1174,10 +1174,10 @@
The form of the \<open>IH\<close> shows us that internally the lemma was expanded as explained
above: \noquotes{@{prop[source]"ev x \<Longrightarrow> x = Suc m \<Longrightarrow> \<not> ev m"}}.
\item
-The goal @{prop"\<not> ev (Suc n)"} may surprise. The expanded version of the lemma
-would suggest that we have a \isacom{fix} \<open>m\<close> \isacom{assume} @{prop"Suc(Suc n) = Suc m"}
-and need to show @{prop"\<not> ev m"}. What happened is that Isabelle immediately
-simplified @{prop"Suc(Suc n) = Suc m"} to @{prop"Suc n = m"} and could then eliminate
+The goal \<^prop>\<open>\<not> ev (Suc n)\<close> may surprise. The expanded version of the lemma
+would suggest that we have a \isacom{fix} \<open>m\<close> \isacom{assume} \<^prop>\<open>Suc(Suc n) = Suc m\<close>
+and need to show \<^prop>\<open>\<not> ev m\<close>. What happened is that Isabelle immediately
+simplified \<^prop>\<open>Suc(Suc n) = Suc m\<close> to \<^prop>\<open>Suc n = m\<close> and could then eliminate
\<open>m\<close>. Beware of such nice surprises with this advanced form of induction.
\end{itemize}
\begin{warn}
@@ -1208,32 +1208,32 @@
\endexercise
\begin{exercise}
-Give a structured proof of @{prop "\<not> ev(Suc(Suc(Suc 0)))"}
+Give a structured proof of \<^prop>\<open>\<not> ev(Suc(Suc(Suc 0)))\<close>
by rule inversions. If there are no cases to be proved you can close
a proof immediately with \isacom{qed}.
\end{exercise}
\begin{exercise}
Recall predicate \<open>star\<close> from \autoref{sec:star} and \<open>iter\<close>
-from Exercise~\ref{exe:iter}. Prove @{prop "iter r n x y \<Longrightarrow> star r x y"}
+from Exercise~\ref{exe:iter}. Prove \<^prop>\<open>iter r n x y \<Longrightarrow> star r x y\<close>
in a structured style; do not just sledgehammer each case of the
required induction.
\end{exercise}
\begin{exercise}
-Define a recursive function \<open>elems ::\<close> @{typ"'a list \<Rightarrow> 'a set"}
-and prove @{prop "x \<in> elems xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> elems ys"}.
+Define a recursive function \<open>elems ::\<close> \<^typ>\<open>'a list \<Rightarrow> 'a set\<close>
+and prove \<^prop>\<open>x \<in> elems xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> elems ys\<close>.
\end{exercise}
\begin{exercise}
Extend Exercise~\ref{exe:cfg} with a function that checks if some
\mbox{\<open>alpha list\<close>} is a balanced
string of parentheses. More precisely, define a \mbox{recursive} function
-\<open>balanced :: nat \<Rightarrow> alpha list \<Rightarrow> bool\<close> such that @{term"balanced n w"}
+\<open>balanced :: nat \<Rightarrow> alpha list \<Rightarrow> bool\<close> such that \<^term>\<open>balanced n w\<close>
is true iff (informally) \<open>S (a\<^sup>n @ w)\<close>. Formally, prove that
-@{prop "balanced n w \<longleftrightarrow> S (replicate n a @ w)"} where
-@{const replicate} \<open>::\<close> @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
-and @{term"replicate n x"} yields the list \<open>[x, \<dots>, x]\<close> of length \<open>n\<close>.
+\<^prop>\<open>balanced n w \<longleftrightarrow> S (replicate n a @ w)\<close> where
+\<^const>\<open>replicate\<close> \<open>::\<close> \<^typ>\<open>nat \<Rightarrow> 'a \<Rightarrow> 'a list\<close> is predefined
+and \<^term>\<open>replicate n x\<close> yields the list \<open>[x, \<dots>, x]\<close> of length \<open>n\<close>.
\end{exercise}
\<close>
--- a/src/Doc/Prog_Prove/Logic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Logic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,14 +14,14 @@
\mathit{form} & ::= &
\<open>(form)\<close> ~\mid~
- @{const True} ~\mid~
- @{const False} ~\mid~
- @{prop "term = term"}\\
- &\mid& @{prop"\<not> form"}\index{$HOL4@\isasymnot} ~\mid~
- @{prop "form \<and> form"}\index{$HOL0@\isasymand} ~\mid~
- @{prop "form \<or> form"}\index{$HOL1@\isasymor} ~\mid~
- @{prop "form \<longrightarrow> form"}\index{$HOL2@\isasymlongrightarrow}\\
- &\mid& @{prop"\<forall>x. form"}\index{$HOL6@\isasymforall} ~\mid~ @{prop"\<exists>x. form"}\index{$HOL7@\isasymexists}
+ \<^const>\<open>True\<close> ~\mid~
+ \<^const>\<open>False\<close> ~\mid~
+ \<^prop>\<open>term = term\<close>\\
+ &\mid& \<^prop>\<open>\<not> form\<close>\index{$HOL4@\isasymnot} ~\mid~
+ \<^prop>\<open>form \<and> form\<close>\index{$HOL0@\isasymand} ~\mid~
+ \<^prop>\<open>form \<or> form\<close>\index{$HOL1@\isasymor} ~\mid~
+ \<^prop>\<open>form \<longrightarrow> form\<close>\index{$HOL2@\isasymlongrightarrow}\\
+ &\mid& \<^prop>\<open>\<forall>x. form\<close>\index{$HOL6@\isasymforall} ~\mid~ \<^prop>\<open>\<exists>x. form\<close>\index{$HOL7@\isasymexists}
\end{array}
\]
Terms are the ones we have seen all along, built from constants, variables,
@@ -30,8 +30,8 @@
\begin{warn}
Remember that formulas are simply terms of type \<open>bool\<close>. Hence
\<open>=\<close> also works for formulas. Beware that \<open>=\<close> has a higher
-precedence than the other logical operators. Hence @{prop"s = t \<and> A"} means
-\<open>(s = t) \<and> A\<close>, and @{prop"A\<and>B = B\<and>A"} means \<open>A \<and> (B = B) \<and> A\<close>.
+precedence than the other logical operators. Hence \<^prop>\<open>s = t \<and> A\<close> means
+\<open>(s = t) \<and> A\<close>, and \<^prop>\<open>A\<and>B = B\<and>A\<close> means \<open>A \<and> (B = B) \<and> A\<close>.
Logical equivalence can also be written with
\<open>\<longleftrightarrow>\<close> instead of \<open>=\<close>, where \<open>\<longleftrightarrow>\<close> has the same low
precedence as \<open>\<longrightarrow>\<close>. Hence \<open>A \<and> B \<longleftrightarrow> B \<and> A\<close> really means
@@ -80,25 +80,25 @@
\section{Sets}
\label{sec:Sets}
-Sets of elements of type @{typ 'a} have type @{typ"'a set"}\index{set@\<open>set\<close>}.
+Sets of elements of type \<^typ>\<open>'a\<close> have type \<^typ>\<open>'a set\<close>\index{set@\<open>set\<close>}.
They can be finite or infinite. Sets come with the usual notation:
\begin{itemize}
-\item \indexed{@{term"{}"}}{$IMP042},\quad \<open>{e\<^sub>1,\<dots>,e\<^sub>n}\<close>
-\item @{prop"e \<in> A"}\index{$HOLSet0@\isasymin},\quad @{prop"A \<subseteq> B"}\index{$HOLSet2@\isasymsubseteq}
-\item @{term"A \<union> B"}\index{$HOLSet4@\isasymunion},\quad @{term"A \<inter> B"}\index{$HOLSet5@\isasyminter},\quad @{term"A - B"},\quad @{term"-A"}
+\item \indexed{\<^term>\<open>{}\<close>}{$IMP042},\quad \<open>{e\<^sub>1,\<dots>,e\<^sub>n}\<close>
+\item \<^prop>\<open>e \<in> A\<close>\index{$HOLSet0@\isasymin},\quad \<^prop>\<open>A \<subseteq> B\<close>\index{$HOLSet2@\isasymsubseteq}
+\item \<^term>\<open>A \<union> B\<close>\index{$HOLSet4@\isasymunion},\quad \<^term>\<open>A \<inter> B\<close>\index{$HOLSet5@\isasyminter},\quad \<^term>\<open>A - B\<close>,\quad \<^term>\<open>-A\<close>
\end{itemize}
-(where @{term"A-B"} and \<open>-A\<close> are set difference and complement)
-and much more. @{const UNIV} is the set of all elements of some type.
+(where \<^term>\<open>A-B\<close> and \<open>-A\<close> are set difference and complement)
+and much more. \<^const>\<open>UNIV\<close> is the set of all elements of some type.
Set comprehension\index{set comprehension} is written
-@{term"{x. P}"}\index{$IMP042@@{term"{x. P}"}} rather than \<open>{x | P}\<close>.
+\<^term>\<open>{x. P}\<close>\index{$IMP042@\<^term>\<open>{x. P}\<close>} rather than \<open>{x | P}\<close>.
\begin{warn}
-In @{term"{x. P}"} the \<open>x\<close> must be a variable. Set comprehension
+In \<^term>\<open>{x. P}\<close> the \<open>x\<close> must be a variable. Set comprehension
involving a proper term \<open>t\<close> must be written
\noquotes{@{term[source] "{t | x y. P}"}}\index{$IMP042@\<open>{t |x. P}\<close>},
where \<open>x y\<close> are those free variables in \<open>t\<close>
that occur in \<open>P\<close>.
-This is just a shorthand for @{term"{v. \<exists>x y. v = t \<and> P}"}, where
-\<open>v\<close> is a new variable. For example, @{term"{x+y|x. x \<in> A}"}
+This is just a shorthand for \<^term>\<open>{v. \<exists>x y. v = t \<and> P}\<close>, where
+\<open>v\<close> is a new variable. For example, \<^term>\<open>{x+y|x. x \<in> A}\<close>
is short for \noquotes{@{term[source]"{v. \<exists>x. v = x+y \<and> x \<in> A}"}}.
\end{warn}
@@ -111,8 +111,8 @@
\<open>\<inter>\<close> & \texttt{\char`\\\char`\<inter>} & \texttt{Int}
\end{tabular}
\end{center}
-Sets also allow bounded quantifications @{prop"\<forall>x \<in> A. P"} and
-@{prop"\<exists>x \<in> A. P"}.
+Sets also allow bounded quantifications \<^prop>\<open>\<forall>x \<in> A. P\<close> and
+\<^prop>\<open>\<exists>x \<in> A. P\<close>.
For the more ambitious, there are also \<open>\<Union>\<close>\index{$HOLSet6@\isasymUnion}
and \<open>\<Inter>\<close>\index{$HOLSet7@\isasymInter}:
@@ -132,15 +132,15 @@
Some other frequently useful functions on sets are the following:
\begin{center}
\begin{tabular}{l@ {\quad}l}
-@{const_typ set}\index{set@@{const set}} & converts a list to the set of its elements\\
-@{const_typ finite}\index{finite@@{const finite}} & is true iff its argument is finite\\
-\noquotes{@{term[source] "card :: 'a set \<Rightarrow> nat"}}\index{card@@{const card}} & is the cardinality of a finite set\\
+@{const_typ set}\index{set@\<^const>\<open>set\<close>} & converts a list to the set of its elements\\
+@{const_typ finite}\index{finite@\<^const>\<open>finite\<close>} & is true iff its argument is finite\\
+\noquotes{@{term[source] "card :: 'a set \<Rightarrow> nat"}}\index{card@\<^const>\<open>card\<close>} & is the cardinality of a finite set\\
& and is \<open>0\<close> for all infinite sets\\
-@{thm image_def}\index{$IMP042@@{term"f ` A"}} & is the image of a function over a set
+@{thm image_def}\index{$IMP042@\<^term>\<open>f ` A\<close>} & is the image of a function over a set
\end{tabular}
\end{center}
See @{cite "Nipkow-Main"} for the wealth of further predefined functions in theory
-@{theory Main}.
+\<^theory>\<open>Main\<close>.
\subsection*{Exercises}
@@ -152,15 +152,15 @@
datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
text\<open>
-Define a function \<open>set ::\<close> @{typ "'a tree \<Rightarrow> 'a set"}
+Define a function \<open>set ::\<close> \<^typ>\<open>'a tree \<Rightarrow> 'a set\<close>
that returns the elements in a tree and a function
-\<open>ord ::\<close> @{typ "int tree \<Rightarrow> bool"}
-that tests if an @{typ "int tree"} is ordered.
+\<open>ord ::\<close> \<^typ>\<open>int tree \<Rightarrow> bool\<close>
+that tests if an \<^typ>\<open>int tree\<close> is ordered.
-Define a function \<open>ins\<close> that inserts an element into an ordered @{typ "int tree"}
+Define a function \<open>ins\<close> that inserts an element into an ordered \<^typ>\<open>int tree\<close>
while maintaining the order of the tree. If the element is already in the tree, the
same tree should be returned. Prove correctness of \<open>ins\<close>:
-@{prop "set(ins x t) = {x} \<union> set t"} and @{prop "ord t \<Longrightarrow> ord(ins i t)"}.
+\<^prop>\<open>set(ins x t) = {x} \<union> set t\<close> and \<^prop>\<open>ord t \<Longrightarrow> ord(ins i t)\<close>.
\endexercise
@@ -266,8 +266,8 @@
In this case lemma @{thm[source]append_eq_conv_conj} alone suffices:
@{thm[display] append_eq_conv_conj}
We leave it to the reader to figure out why this lemma suffices to prove
-the above lemma, even without any knowledge of what the functions @{const take}
-and @{const drop} do. Keep in mind that the variables in the two lemmas
+the above lemma, even without any knowledge of what the functions \<^const>\<open>take\<close>
+and \<^const>\<open>drop\<close> do. Keep in mind that the variables in the two lemmas
are independent of each other, despite the same names, and that you can
substitute arbitrary values for the free variables in a lemma.
@@ -294,10 +294,10 @@
version of \<open>arith\<close>. Hence it is usually not necessary to invoke
\<open>arith\<close> explicitly.
-The above example involves natural numbers, but integers (type @{typ int})
+The above example involves natural numbers, but integers (type \<^typ>\<open>int\<close>)
and real numbers (type \<open>real\<close>) are supported as well. As are a number
-of further operators like @{const min} and @{const max}. On @{typ nat} and
-@{typ int}, \<open>arith\<close> can even prove theorems with quantifiers in them,
+of further operators like \<^const>\<open>min\<close> and \<^const>\<open>max\<close>. On \<^typ>\<open>nat\<close> and
+\<^typ>\<open>int\<close>, \<open>arith\<close> can even prove theorems with quantifiers in them,
but we will not enlarge on that here.
@@ -320,7 +320,7 @@
to find out why. When \<open>fastforce\<close> or \<open>blast\<close> simply fail, you have
no clue why. At this point, the stepwise
application of proof rules may be necessary. For example, if \<open>blast\<close>
-fails on @{prop"A \<and> B"}, you want to attack the two
+fails on \<^prop>\<open>A \<and> B\<close>, you want to attack the two
conjuncts \<open>A\<close> and \<open>B\<close> separately. This can
be achieved by applying \emph{conjunction introduction}
\[ @{thm[mode=Rule,show_question_marks]conjI}\ \<open>conjI\<close>
@@ -346,8 +346,8 @@
\item By unification. \conceptidx{Unification}{unification} is the process of making two
terms syntactically equal by suitable instantiations of unknowns. For example,
-unifying \<open>?P \<and> ?Q\<close> with \mbox{@{prop"a=b \<and> False"}} instantiates
-\<open>?P\<close> with @{prop "a=b"} and \<open>?Q\<close> with @{prop False}.
+unifying \<open>?P \<and> ?Q\<close> with \mbox{\<^prop>\<open>a=b \<and> False\<close>} instantiates
+\<open>?P\<close> with \<^prop>\<open>a=b\<close> and \<open>?Q\<close> with \<^prop>\<open>False\<close>.
\end{itemize}
We need not instantiate all unknowns. If we want to skip a particular one we
can write \<open>_\<close> instead, for example \<open>conjI[of _ "False"]\<close>.
@@ -419,7 +419,7 @@
attribute should be used with care because it increases the search space and
can lead to nontermination. Sometimes it is better to use it only in
specific calls of \<open>blast\<close> and friends. For example,
-@{thm[source] le_trans}, transitivity of \<open>\<le>\<close> on type @{typ nat},
+@{thm[source] le_trans}, transitivity of \<open>\<le>\<close> on type \<^typ>\<open>nat\<close>,
is not an introduction rule by default because of the disastrous effect
on the search space, but can be useful in specific situations:
\<close>
@@ -436,7 +436,7 @@
Forward proof means deriving new theorems from old theorems. We have already
seen a very simple form of forward proof: the \<open>of\<close> operator for
instantiating unknowns in a theorem. The big brother of \<open>of\<close> is
-\indexed{\<open>OF\<close>}{OF} for applying one theorem to others. Given a theorem @{prop"A \<Longrightarrow> B"} called
+\indexed{\<open>OF\<close>}{OF} for applying one theorem to others. Given a theorem \<^prop>\<open>A \<Longrightarrow> B\<close> called
\<open>r\<close> and a theorem \<open>A'\<close> called \<open>r'\<close>, the theorem \<open>r[OF r']\<close> is the result of applying \<open>r\<close> to \<open>r'\<close>, where \<open>r\<close> should be viewed as a function taking a theorem \<open>A\<close> and returning
\<open>B\<close>. More precisely, \<open>A\<close> and \<open>A'\<close> are unified, thus
instantiating the unknowns in \<open>B\<close>, and the result is the instantiated
@@ -519,9 +519,9 @@
text_raw\<open>@{prop[source]"ev n \<Longrightarrow> ev (n + 2)"}\<close>
text\<open>To get used to inductive definitions, we will first prove a few
-properties of @{const ev} informally before we descend to the Isabelle level.
+properties of \<^const>\<open>ev\<close> informally before we descend to the Isabelle level.
-How do we prove that some number is even, e.g., @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
+How do we prove that some number is even, e.g., \<^prop>\<open>ev 4\<close>? Simply by combining the defining rules for \<^const>\<open>ev\<close>:
\begin{quote}
\<open>ev 0 \<Longrightarrow> ev (0 + 2) \<Longrightarrow> ev((0 + 2) + 2) = ev 4\<close>
\end{quote}
@@ -537,54 +537,54 @@
"evn (Suc 0) = False" |
"evn (Suc(Suc n)) = evn n"
-text\<open>We prove @{prop"ev m \<Longrightarrow> evn m"}. That is, we
-assume @{prop"ev m"} and by induction on the form of its derivation
-prove @{prop"evn m"}. There are two cases corresponding to the two rules
-for @{const ev}:
+text\<open>We prove \<^prop>\<open>ev m \<Longrightarrow> evn m\<close>. That is, we
+assume \<^prop>\<open>ev m\<close> and by induction on the form of its derivation
+prove \<^prop>\<open>evn m\<close>. There are two cases corresponding to the two rules
+for \<^const>\<open>ev\<close>:
\begin{description}
\item[Case @{thm[source]ev0}:]
- @{prop"ev m"} was derived by rule @{prop "ev 0"}: \\
- \<open>\<Longrightarrow>\<close> @{prop"m=(0::nat)"} \<open>\<Longrightarrow>\<close> \<open>evn m = evn 0 = True\<close>
+ \<^prop>\<open>ev m\<close> was derived by rule \<^prop>\<open>ev 0\<close>: \\
+ \<open>\<Longrightarrow>\<close> \<^prop>\<open>m=(0::nat)\<close> \<open>\<Longrightarrow>\<close> \<open>evn m = evn 0 = True\<close>
\item[Case @{thm[source]evSS}:]
- @{prop"ev m"} was derived by rule @{prop "ev n \<Longrightarrow> ev(n+2)"}: \\
-\<open>\<Longrightarrow>\<close> @{prop"m=n+(2::nat)"} and by induction hypothesis @{prop"evn n"}\\
+ \<^prop>\<open>ev m\<close> was derived by rule \<^prop>\<open>ev n \<Longrightarrow> ev(n+2)\<close>: \\
+\<open>\<Longrightarrow>\<close> \<^prop>\<open>m=n+(2::nat)\<close> and by induction hypothesis \<^prop>\<open>evn n\<close>\\
\<open>\<Longrightarrow>\<close> \<open>evn m = evn(n + 2) = evn n = True\<close>
\end{description}
What we have just seen is a special case of \concept{rule induction}.
Rule induction applies to propositions of this form
\begin{quote}
-@{prop "ev n \<Longrightarrow> P n"}
+\<^prop>\<open>ev n \<Longrightarrow> P n\<close>
\end{quote}
-That is, we want to prove a property @{prop"P n"}
-for all even \<open>n\<close>. But if we assume @{prop"ev n"}, then there must be
+That is, we want to prove a property \<^prop>\<open>P n\<close>
+for all even \<open>n\<close>. But if we assume \<^prop>\<open>ev n\<close>, then there must be
some derivation of this assumption using the two defining rules for
-@{const ev}. That is, we must prove
+\<^const>\<open>ev\<close>. That is, we must prove
\begin{description}
-\item[Case @{thm[source]ev0}:] @{prop"P(0::nat)"}
-\item[Case @{thm[source]evSS}:] @{prop"\<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n + 2::nat)"}
+\item[Case @{thm[source]ev0}:] \<^prop>\<open>P(0::nat)\<close>
+\item[Case @{thm[source]evSS}:] \<^prop>\<open>\<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n + 2::nat)\<close>
\end{description}
The corresponding rule is called @{thm[source] ev.induct} and looks like this:
\[
\inferrule{
\mbox{@{thm (prem 1) ev.induct[of "n"]}}\\
\mbox{@{thm (prem 2) ev.induct}}\\
-\mbox{@{prop"!!n. \<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n+2)"}}}
+\mbox{\<^prop>\<open>!!n. \<lbrakk> ev n; P n \<rbrakk> \<Longrightarrow> P(n+2)\<close>}}
{\mbox{@{thm (concl) ev.induct[of "n"]}}}
\]
-The first premise @{prop"ev n"} enforces that this rule can only be applied
+The first premise \<^prop>\<open>ev n\<close> enforces that this rule can only be applied
in situations where we know that \<open>n\<close> is even.
-Note that in the induction step we may not just assume @{prop"P n"} but also
-\mbox{@{prop"ev n"}}, which is simply the premise of rule @{thm[source]
-evSS}. Here is an example where the local assumption @{prop"ev n"} comes in
-handy: we prove @{prop"ev m \<Longrightarrow> ev(m - 2)"} by induction on @{prop"ev m"}.
-Case @{thm[source]ev0} requires us to prove @{prop"ev(0 - 2)"}, which follows
-from @{prop"ev 0"} because @{prop"0 - 2 = (0::nat)"} on type @{typ nat}. In
-case @{thm[source]evSS} we have \mbox{@{prop"m = n+(2::nat)"}} and may assume
-@{prop"ev n"}, which implies @{prop"ev (m - 2)"} because \<open>m - 2 = (n +
+Note that in the induction step we may not just assume \<^prop>\<open>P n\<close> but also
+\mbox{\<^prop>\<open>ev n\<close>}, which is simply the premise of rule @{thm[source]
+evSS}. Here is an example where the local assumption \<^prop>\<open>ev n\<close> comes in
+handy: we prove \<^prop>\<open>ev m \<Longrightarrow> ev(m - 2)\<close> by induction on \<^prop>\<open>ev m\<close>.
+Case @{thm[source]ev0} requires us to prove \<^prop>\<open>ev(0 - 2)\<close>, which follows
+from \<^prop>\<open>ev 0\<close> because \<^prop>\<open>0 - 2 = (0::nat)\<close> on type \<^typ>\<open>nat\<close>. In
+case @{thm[source]evSS} we have \mbox{\<^prop>\<open>m = n+(2::nat)\<close>} and may assume
+\<^prop>\<open>ev n\<close>, which implies \<^prop>\<open>ev (m - 2)\<close> because \<open>m - 2 = (n +
2) - 2 = n\<close>. We did not need the induction hypothesis at all for this proof (it
-is just a case analysis of which rule was used) but having @{prop"ev n"}
+is just a case analysis of which rule was used) but having \<^prop>\<open>ev n\<close>
at our disposal in case @{thm[source]evSS} was essential.
This case analysis of rules is also called ``rule inversion''
and is discussed in more detail in \autoref{ch:Isar}.
@@ -592,12 +592,12 @@
\subsubsection{In Isabelle}
Let us now recast the above informal proofs in Isabelle. For a start,
-we use @{const Suc} terms instead of numerals in rule @{thm[source]evSS}:
+we use \<^const>\<open>Suc\<close> terms instead of numerals in rule @{thm[source]evSS}:
@{thm[display] evSS}
This avoids the difficulty of unifying \<open>n+2\<close> with some numeral,
which is not automatic.
-The simplest way to prove @{prop"ev(Suc(Suc(Suc(Suc 0))))"} is in a forward
+The simplest way to prove \<^prop>\<open>ev(Suc(Suc(Suc(Suc 0))))\<close> is in a forward
direction: \<open>evSS[OF evSS[OF ev0]]\<close> yields the theorem @{thm evSS[OF
evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
fashion. Although this is more verbose, it allows us to demonstrate how each
@@ -627,11 +627,11 @@
by(simp_all)
text\<open>Both cases are automatic. Note that if there are multiple assumptions
-of the form @{prop"ev t"}, method \<open>induction\<close> will induct on the leftmost
+of the form \<^prop>\<open>ev t\<close>, method \<open>induction\<close> will induct on the leftmost
one.
As a bonus, we also prove the remaining direction of the equivalence of
-@{const ev} and @{const evn}:
+\<^const>\<open>ev\<close> and \<^const>\<open>evn\<close>:
\<close>
lemma "evn n \<Longrightarrow> ev n"
@@ -639,14 +639,14 @@
txt\<open>This is a proof by computation induction on \<open>n\<close> (see
\autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
-the three equations for @{const evn}:
+the three equations for \<^const>\<open>evn\<close>:
@{subgoals[display,indent=0]}
-The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"evn(Suc 0)"} is @{const False}:
+The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because \<^prop>\<open>evn(Suc 0)\<close> is \<^const>\<open>False\<close>:
\<close>
by (simp_all add: ev0 evSS)
-text\<open>The rules for @{const ev} make perfect simplification and introduction
+text\<open>The rules for \<^const>\<open>ev\<close> make perfect simplification and introduction
rules because their premises are always smaller than the conclusion. It
makes sense to turn them into simplification and introduction rules
permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
@@ -668,8 +668,8 @@
definition only expresses the positive information directly. The negative
information, for example, that \<open>1\<close> is not even, has to be proved from
it (by induction or rule inversion). On the other hand, rule induction is
-tailor-made for proving \mbox{@{prop"ev n \<Longrightarrow> P n"}} because it only asks you
-to prove the positive cases. In the proof of @{prop"evn n \<Longrightarrow> P n"} by
+tailor-made for proving \mbox{\<^prop>\<open>ev n \<Longrightarrow> P n\<close>} because it only asks you
+to prove the positive cases. In the proof of \<^prop>\<open>evn n \<Longrightarrow> P n\<close> by
computation induction via @{thm[source]evn.induct}, we are also presented
with the trivial negative cases. If you want the convenience of both
rewriting and rule induction, you can make two definitions and show their
@@ -696,11 +696,11 @@
The reflexive transitive closure, called \<open>star\<close> below, is a function
that maps a binary predicate to another binary predicate: if \<open>r\<close> is of
-type \<open>\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool\<close> then @{term "star r"} is again of type \<open>\<tau> \<Rightarrow>
-\<tau> \<Rightarrow> bool\<close>, and @{prop"star r x y"} means that \<open>x\<close> and \<open>y\<close> are in
-the relation @{term"star r"}. Think @{term"r\<^sup>*"} when you see @{term"star
-r"}, because \<open>star r\<close> is meant to be the reflexive transitive closure.
-That is, @{prop"star r x y"} is meant to be true if from \<open>x\<close> we can
+type \<open>\<tau> \<Rightarrow> \<tau> \<Rightarrow> bool\<close> then \<^term>\<open>star r\<close> is again of type \<open>\<tau> \<Rightarrow>
+\<tau> \<Rightarrow> bool\<close>, and \<^prop>\<open>star r x y\<close> means that \<open>x\<close> and \<open>y\<close> are in
+the relation \<^term>\<open>star r\<close>. Think \<^term>\<open>r\<^sup>*\<close> when you see \<^term>\<open>star
+r\<close>, because \<open>star r\<close> is meant to be the reflexive transitive closure.
+That is, \<^prop>\<open>star r x y\<close> is meant to be true if from \<open>x\<close> we can
reach \<open>y\<close> in finitely many \<open>r\<close> steps. This concept is naturally
defined inductively:\<close>
@@ -708,16 +708,16 @@
refl: "star r x x" |
step: "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
-text\<open>The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
+text\<open>The base case @{thm[source] refl} is reflexivity: \<^term>\<open>x=y\<close>. The
step case @{thm[source]step} combines an \<open>r\<close> step (from \<open>x\<close> to
-\<open>y\<close>) and a @{term"star r"} step (from \<open>y\<close> to \<open>z\<close>) into a
-@{term"star r"} step (from \<open>x\<close> to \<open>z\<close>).
+\<open>y\<close>) and a \<^term>\<open>star r\<close> step (from \<open>y\<close> to \<open>z\<close>) into a
+\<^term>\<open>star r\<close> step (from \<open>x\<close> to \<open>z\<close>).
The ``\isacom{for}~\<open>r\<close>'' in the header is merely a hint to Isabelle
-that \<open>r\<close> is a fixed parameter of @{const star}, in contrast to the
-further parameters of @{const star}, which change. As a result, Isabelle
+that \<open>r\<close> is a fixed parameter of \<^const>\<open>star\<close>, in contrast to the
+further parameters of \<^const>\<open>star\<close>, which change. As a result, Isabelle
generates a simpler induction rule.
-By definition @{term"star r"} is reflexive. It is also transitive, but we
+By definition \<^term>\<open>star r\<close> is reflexive. It is also transitive, but we
need rule induction to prove that:\<close>
lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
@@ -727,23 +727,23 @@
apply(rename_tac u x y)
defer
(*>*)
-txt\<open>The induction is over @{prop"star r x y"} (the first matching assumption)
-and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
-which we abbreviate by @{prop"P x y"}. These are our two subgoals:
+txt\<open>The induction is over \<^prop>\<open>star r x y\<close> (the first matching assumption)
+and we try to prove \mbox{\<^prop>\<open>star r y z \<Longrightarrow> star r x z\<close>},
+which we abbreviate by \<^prop>\<open>P x y\<close>. These are our two subgoals:
@{subgoals[display,indent=0]}
-The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
+The first one is \<^prop>\<open>P x x\<close>, the result of case @{thm[source]refl},
and it is trivial:\index{assumption@\<open>assumption\<close>}
\<close>
apply(assumption)
txt\<open>Let us examine subgoal \<open>2\<close>, case @{thm[source] step}.
-Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
+Assumptions \<^prop>\<open>r u x\<close> and \mbox{\<^prop>\<open>star r x y\<close>}
are the premises of rule @{thm[source]step}.
-Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
-the IH coming from @{prop"star r x y"}. We have to prove @{prop"P u y"},
-which we do by assuming @{prop"star r y z"} and proving @{prop"star r u z"}.
-The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
-leads to @{prop"star r x z"} which, together with @{prop"r u x"},
-leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
+Assumption \<^prop>\<open>star r y z \<Longrightarrow> star r x z\<close> is \mbox{\<^prop>\<open>P x y\<close>},
+the IH coming from \<^prop>\<open>star r x y\<close>. We have to prove \<^prop>\<open>P u y\<close>,
+which we do by assuming \<^prop>\<open>star r y z\<close> and proving \<^prop>\<open>star r u z\<close>.
+The proof itself is straightforward: from \mbox{\<^prop>\<open>star r y z\<close>} the IH
+leads to \<^prop>\<open>star r x z\<close> which, together with \<^prop>\<open>r u x\<close>,
+leads to \mbox{\<^prop>\<open>star r u z\<close>} via rule @{thm[source]step}:
\<close>
apply(metis step)
done
@@ -764,14 +764,14 @@
The corresponding rule induction principle
\<open>I.induct\<close> applies to propositions of the form
\begin{quote}
-@{prop "I x \<Longrightarrow> P x"}
+\<^prop>\<open>I x \<Longrightarrow> P x\<close>
\end{quote}
where \<open>P\<close> may itself be a chain of implications.
\begin{warn}
Rule induction is always on the leftmost premise of the goal.
Hence \<open>I x\<close> must be the first premise.
\end{warn}
-Proving @{prop "I x \<Longrightarrow> P x"} by rule induction means proving
+Proving \<^prop>\<open>I x \<Longrightarrow> P x\<close> by rule induction means proving
for every rule of \<open>I\<close> that \<open>P\<close> is invariant:
\begin{quote}
\<open>\<lbrakk> I a\<^sub>1; P a\<^sub>1; \<dots>; I a\<^sub>n; P a\<^sub>n \<rbrakk> \<Longrightarrow> P a\<close>
@@ -791,14 +791,14 @@
Formalize the following definition of palindromes
\begin{itemize}
\item The empty list and a singleton list are palindromes.
-\item If \<open>xs\<close> is a palindrome, so is @{term "a # xs @ [a]"}.
+\item If \<open>xs\<close> is a palindrome, so is \<^term>\<open>a # xs @ [a]\<close>.
\end{itemize}
-as an inductive predicate \<open>palindrome ::\<close> @{typ "'a list \<Rightarrow> bool"}
-and prove that @{prop "rev xs = xs"} if \<open>xs\<close> is a palindrome.
+as an inductive predicate \<open>palindrome ::\<close> \<^typ>\<open>'a list \<Rightarrow> bool\<close>
+and prove that \<^prop>\<open>rev xs = xs\<close> if \<open>xs\<close> is a palindrome.
\end{exercise}
\exercise
-We could also have defined @{const star} as follows:
+We could also have defined \<^const>\<open>star\<close> as follows:
\<close>
inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
@@ -807,18 +807,18 @@
text\<open>
The single \<open>r\<close> step is performed after rather than before the \<open>star'\<close>
-steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
-@{prop "star r x y \<Longrightarrow> star' r x y"}. You may need lemmas.
+steps. Prove \<^prop>\<open>star' r x y \<Longrightarrow> star r x y\<close> and
+\<^prop>\<open>star r x y \<Longrightarrow> star' r x y\<close>. You may need lemmas.
Note that rule induction fails
if the assumption about the inductive predicate is not the first assumption.
\endexercise
\begin{exercise}\label{exe:iter}
-Analogous to @{const star}, give an inductive definition of the \<open>n\<close>-fold iteration
-of a relation \<open>r\<close>: @{term "iter r n x y"} should hold if there are \<open>x\<^sub>0\<close>, \dots, \<open>x\<^sub>n\<close>
-such that @{prop"x = x\<^sub>0"}, @{prop"x\<^sub>n = y"} and \<open>r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>\<close> for
-all @{prop"i < n"}. Correct and prove the following claim:
-@{prop"star r x y \<Longrightarrow> iter r n x y"}.
+Analogous to \<^const>\<open>star\<close>, give an inductive definition of the \<open>n\<close>-fold iteration
+of a relation \<open>r\<close>: \<^term>\<open>iter r n x y\<close> should hold if there are \<open>x\<^sub>0\<close>, \dots, \<open>x\<^sub>n\<close>
+such that \<^prop>\<open>x = x\<^sub>0\<close>, \<^prop>\<open>x\<^sub>n = y\<close> and \<open>r x\<^bsub>i\<^esub> x\<^bsub>i+1\<^esub>\<close> for
+all \<^prop>\<open>i < n\<close>. Correct and prove the following claim:
+\<^prop>\<open>star r x y \<Longrightarrow> iter r n x y\<close>.
\end{exercise}
\begin{exercise}\label{exe:cfg}
@@ -826,7 +826,7 @@
nonterminal $A$ is an inductively defined predicate on lists of terminal
symbols: $A(w)$ means that $w$ is in the language generated by $A$.
For example, the production $S \to a S b$ can be viewed as the implication
-@{prop"S w \<Longrightarrow> S (a # w @ [b])"} where \<open>a\<close> and \<open>b\<close> are terminal symbols,
+\<^prop>\<open>S w \<Longrightarrow> S (a # w @ [b])\<close> where \<open>a\<close> and \<open>b\<close> are terminal symbols,
i.e., elements of some alphabet. The alphabet can be defined like this:
\isacom{datatype} \<open>alpha = a | b | \<dots>\<close>
@@ -840,8 +840,8 @@
as two inductive predicates.
If you think of \<open>a\<close> and \<open>b\<close> as ``\<open>(\<close>'' and ``\<open>)\<close>'',
the grammar defines strings of balanced parentheses.
-Prove @{prop"T w \<Longrightarrow> S w"} and \mbox{@{prop "S w \<Longrightarrow> T w"}} separately and conclude
-@{prop "S w = T w"}.
+Prove \<^prop>\<open>T w \<Longrightarrow> S w\<close> and \mbox{\<^prop>\<open>S w \<Longrightarrow> T w\<close>} separately and conclude
+\<^prop>\<open>S w = T w\<close>.
\end{exercise}
\ifsem
@@ -851,8 +851,8 @@
Define an inductive evaluation predicate
\<open>aval_rel :: aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool\<close>
and prove that it agrees with the recursive function:
-@{prop "aval_rel a s v \<Longrightarrow> aval a s = v"},
-@{prop "aval a s = v \<Longrightarrow> aval_rel a s v"} and thus
+\<^prop>\<open>aval_rel a s v \<Longrightarrow> aval a s = v\<close>,
+\<^prop>\<open>aval a s = v \<Longrightarrow> aval_rel a s v\<close> and thus
\noquotes{@{prop [source] "aval_rel a s v \<longleftrightarrow> aval a s = v"}}.
\end{exercise}
--- a/src/Doc/Prog_Prove/Types_and_funs.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Types_and_funs.thy Sat Jan 05 17:24:33 2019 +0100
@@ -53,7 +53,7 @@
\end{quote}
Case expressions must be enclosed in parentheses.
-As an example of a datatype beyond @{typ nat} and \<open>list\<close>, consider binary trees:
+As an example of a datatype beyond \<^typ>\<open>nat\<close> and \<open>list\<close>, consider binary trees:
\<close>
datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
@@ -75,11 +75,11 @@
An application of \<open>auto\<close> finishes the proof.
A very simple but also very useful datatype is the predefined
-@{datatype[display] option}\index{option@\<open>option\<close>}\index{None@@{const None}}\index{Some@@{const Some}}
-Its sole purpose is to add a new element @{const None} to an existing
-type @{typ 'a}. To make sure that @{const None} is distinct from all the
-elements of @{typ 'a}, you wrap them up in @{const Some} and call
-the new type @{typ"'a option"}. A typical application is a lookup function
+@{datatype[display] option}\index{option@\<open>option\<close>}\index{None@\<^const>\<open>None\<close>}\index{Some@\<^const>\<open>Some\<close>}
+Its sole purpose is to add a new element \<^const>\<open>None\<close> to an existing
+type \<^typ>\<open>'a\<close>. To make sure that \<^const>\<open>None\<close> is distinct from all the
+elements of \<^typ>\<open>'a\<close>, you wrap them up in \<^const>\<open>Some\<close> and call
+the new type \<^typ>\<open>'a option\<close>. A typical application is a lookup function
on a list of key-value pairs, often called an association list:
\<close>
(*<*)
@@ -93,8 +93,8 @@
text\<open>
Note that \<open>\<tau>\<^sub>1 * \<tau>\<^sub>2\<close> is the type of pairs, also written \<open>\<tau>\<^sub>1 \<times> \<tau>\<^sub>2\<close>.
Pairs can be taken apart either by pattern matching (as above) or with the
-projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
-Tuples are simulated by pairs nested to the right: @{term"(a,b,c)"}
+projection functions \<^const>\<open>fst\<close> and \<^const>\<open>snd\<close>: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
+Tuples are simulated by pairs nested to the right: \<^term>\<open>(a,b,c)\<close>
is short for \<open>(a, (b, c))\<close> and \<open>\<tau>\<^sub>1 \<times> \<tau>\<^sub>2 \<times> \<tau>\<^sub>3\<close> is short for
\<open>\<tau>\<^sub>1 \<times> (\<tau>\<^sub>2 \<times> \<tau>\<^sub>3)\<close>.
@@ -117,10 +117,10 @@
abbreviation sq' :: "nat \<Rightarrow> nat" where
"sq' n \<equiv> n * n"
-text\<open>The key difference is that @{const sq'} is only syntactic sugar:
-after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}};
-before printing, every occurrence of @{term"u*u"} is replaced by
-\mbox{@{term"sq' u"}}. Internally, @{const sq'} does not exist.
+text\<open>The key difference is that \<^const>\<open>sq'\<close> is only syntactic sugar:
+after parsing, \<^term>\<open>sq' t\<close> is replaced by \mbox{\<^term>\<open>t*t\<close>};
+before printing, every occurrence of \<^term>\<open>u*u\<close> is replaced by
+\mbox{\<^term>\<open>sq' u\<close>}. Internally, \<^const>\<open>sq'\<close> does not exist.
This is the
advantage of abbreviations over definitions: definitions need to be expanded
explicitly (\autoref{sec:rewr-defs}) whereas abbreviations are already
@@ -138,8 +138,8 @@
functional programming languages. However, all HOL functions must be
total. This simplifies the logic --- terms are always defined --- but means
that recursive functions must terminate. Otherwise one could define a
-function @{prop"f n = f n + (1::nat)"} and conclude \mbox{@{prop"(0::nat) = 1"}} by
-subtracting @{term"f n"} on both sides.
+function \<^prop>\<open>f n = f n + (1::nat)\<close> and conclude \mbox{\<^prop>\<open>(0::nat) = 1\<close>} by
+subtracting \<^term>\<open>f n\<close> on both sides.
Isabelle's automatic termination checker requires that the arguments of
recursive calls on the right-hand side must be strictly smaller than the
@@ -159,7 +159,7 @@
"div2 (Suc 0) = 0" |
"div2 (Suc(Suc n)) = Suc(div2 n)"
-text\<open>does not just define @{const div2} but also proves a
+text\<open>does not just define \<^const>\<open>div2\<close> but also proves a
customized induction rule:
\[
\inferrule{
@@ -214,28 +214,28 @@
\begin{exercise}
Starting from the type \<open>'a tree\<close> defined in the text, define
-a function \<open>contents ::\<close> @{typ "'a tree \<Rightarrow> 'a list"}
+a function \<open>contents ::\<close> \<^typ>\<open>'a tree \<Rightarrow> 'a list\<close>
that collects all values in a tree in a list, in any order,
without removing duplicates.
-Then define a function \<open>sum_tree ::\<close> @{typ "nat tree \<Rightarrow> nat"}
+Then define a function \<open>sum_tree ::\<close> \<^typ>\<open>nat tree \<Rightarrow> nat\<close>
that sums up all values in a tree of natural numbers
-and prove @{prop "sum_tree t = sum_list(contents t)"}
-(where @{const sum_list} is predefined).
+and prove \<^prop>\<open>sum_tree t = sum_list(contents t)\<close>
+(where \<^const>\<open>sum_list\<close> is predefined).
\end{exercise}
\begin{exercise}
Define a new type \<open>'a tree2\<close> of binary trees where values are also
stored in the leaves of the tree. Also reformulate the
-@{const mirror} function accordingly. Define two functions
+\<^const>\<open>mirror\<close> function accordingly. Define two functions
\<open>pre_order\<close> and \<open>post_order\<close> of type \<open>'a tree2 \<Rightarrow> 'a list\<close>
that traverse a tree and collect all stored values in the respective order in
-a list. Prove @{prop "pre_order (mirror t) = rev (post_order t)"}.
+a list. Prove \<^prop>\<open>pre_order (mirror t) = rev (post_order t)\<close>.
\end{exercise}
\begin{exercise}
-Define a function \<open>intersperse ::\<close> @{typ "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"}
+Define a function \<open>intersperse ::\<close> \<^typ>\<open>'a \<Rightarrow> 'a list \<Rightarrow> 'a list\<close>
such that \<open>intersperse a [x\<^sub>1, ..., x\<^sub>n] = [x\<^sub>1, a, x\<^sub>2, a, ..., a, x\<^sub>n]\<close>.
-Now prove that @{prop "map f (intersperse a xs) = intersperse (f a) (map f xs)"}.
+Now prove that \<^prop>\<open>map f (intersperse a xs) = intersperse (f a) (map f xs)\<close>.
\end{exercise}
@@ -254,10 +254,10 @@
too specific, the induction hypothesis is too weak to allow the induction
step to go through. Let us illustrate the idea with an example.
-Function @{const rev} has quadratic worst-case running time
+Function \<^const>\<open>rev\<close> has quadratic worst-case running time
because it calls append for each element of the list and
append is linear in its first argument. A linear time version of
-@{const rev} requires an extra argument where the result is accumulated
+\<^const>\<open>rev\<close> requires an extra argument where the result is accumulated
gradually, using only~\<open>#\<close>:
\<close>
(*<*)
@@ -268,13 +268,13 @@
"itrev [] ys = ys" |
"itrev (x#xs) ys = itrev xs (x#ys)"
-text\<open>The behaviour of @{const itrev} is simple: it reverses
+text\<open>The behaviour of \<^const>\<open>itrev\<close> is simple: it reverses
its first argument by stacking its elements onto the second argument,
and it returns that second argument when the first one becomes
-empty. Note that @{const itrev} is tail-recursive: it can be
+empty. Note that \<^const>\<open>itrev\<close> is tail-recursive: it can be
compiled into a loop; no stack is necessary for executing it.
-Naturally, we would like to show that @{const itrev} does indeed reverse
+Naturally, we would like to show that \<^const>\<open>itrev\<close> does indeed reverse
its first argument provided the second one is empty:
\<close>
@@ -291,20 +291,20 @@
the induction step:
@{subgoals[display,margin=70]}
The induction hypothesis is too weak. The fixed
-argument,~@{term"[]"}, prevents it from rewriting the conclusion.
+argument,~\<^term>\<open>[]\<close>, prevents it from rewriting the conclusion.
This example suggests a heuristic:
\begin{quote}
\emph{Generalize goals for induction by replacing constants by variables.}
\end{quote}
-Of course one cannot do this naively: @{prop"itrev xs ys = rev xs"} is
+Of course one cannot do this naively: \<^prop>\<open>itrev xs ys = rev xs\<close> is
just not true. The correct generalization is
\<close>
(*<*)oops(*>*)
lemma "itrev xs ys = rev xs @ ys"
(*<*)apply(induction xs, auto)(*>*)
txt\<open>
-If \<open>ys\<close> is replaced by @{term"[]"}, the right-hand side simplifies to
-@{term"rev xs"}, as required.
+If \<open>ys\<close> is replaced by \<^term>\<open>[]\<close>, the right-hand side simplifies to
+\<^term>\<open>rev xs\<close>, as required.
In this instance it was easy to guess the right generalization.
Other situations can require a good deal of creativity.
@@ -316,7 +316,7 @@
intuition to generalize: the problem is that the \<open>ys\<close>
in the induction hypothesis is fixed,
but the induction hypothesis needs to be applied with
-@{term"a # ys"} instead of \<open>ys\<close>. Hence we prove the theorem
+\<^term>\<open>a # ys\<close> instead of \<open>ys\<close>. Hence we prove the theorem
for all \<open>ys\<close> instead of a fixed one. We can instruct induction
to perform this generalization for us by adding \<open>arbitrary: ys\<close>\index{arbitrary@\<open>arbitrary:\<close>}.
\<close>
@@ -350,11 +350,11 @@
\subsection*{Exercises}
\begin{exercise}
-Write a tail-recursive variant of the \<open>add\<close> function on @{typ nat}:
-@{term "itadd :: nat \<Rightarrow> nat \<Rightarrow> nat"}.
+Write a tail-recursive variant of the \<open>add\<close> function on \<^typ>\<open>nat\<close>:
+\<^term>\<open>itadd :: nat \<Rightarrow> nat \<Rightarrow> nat\<close>.
Tail-recursive means that in the recursive case, \<open>itadd\<close> needs to call
-itself directly: \mbox{@{term"itadd (Suc m) n"}} \<open>= itadd \<dots>\<close>.
-Prove @{prop "itadd m n = add m n"}.
+itself directly: \mbox{\<^term>\<open>itadd (Suc m) n\<close>} \<open>= itadd \<dots>\<close>.
+Prove \<^prop>\<open>itadd m n = add m n\<close>.
\end{exercise}
@@ -377,13 +377,13 @@
simplification rules
\[
\begin{array}{rcl@ {\quad}l}
-@{term"0 + n::nat"} &\<open>=\<close>& \<open>n\<close> & (1) \\
-@{term"(Suc m) + n"} &\<open>=\<close>& @{term"Suc (m + n)"} & (2) \\
+\<^term>\<open>0 + n::nat\<close> &\<open>=\<close>& \<open>n\<close> & (1) \\
+\<^term>\<open>(Suc m) + n\<close> &\<open>=\<close>& \<^term>\<open>Suc (m + n)\<close> & (2) \\
\<open>(Suc m \<le> Suc n)\<close> &\<open>=\<close>& \<open>(m \<le> n)\<close> & (3)\\
-\<open>(0 \<le> m)\<close> &\<open>=\<close>& @{const True} & (4)
+\<open>(0 \<le> m)\<close> &\<open>=\<close>& \<^const>\<open>True\<close> & (4)
\end{array}
\]
-the formula @{prop"0 + Suc 0 \<le> Suc 0 + x"} is simplified to @{const True}
+the formula \<^prop>\<open>0 + Suc 0 \<le> Suc 0 + x\<close> is simplified to \<^const>\<open>True\<close>
as follows:
\[
\begin{array}{r@ {}c@ {}l@ {\quad}l}
@@ -391,7 +391,7 @@
\<open>(Suc 0\<close> & \leq & \<open>Suc 0 + x)\<close> & \stackrel{(2)}{=} \\
\<open>(Suc 0\<close> & \leq & \<open>Suc (0 + x))\<close> & \stackrel{(3)}{=} \\
\<open>(0\<close> & \leq & \<open>0 + x)\<close> & \stackrel{(4)}{=} \\[1ex]
- & @{const True}
+ & \<^const>\<open>True\<close>
\end{array}
\]
Simplification is often also called \concept{rewriting}
@@ -406,10 +406,10 @@
rules, \isacom{fun} the defining equations. Definitions are not declared
as simplification rules automatically! Nearly any theorem can become a
simplification rule. The simplifier will try to transform it into an
-equation. For example, the theorem @{prop"\<not> P"} is turned into @{prop"P = False"}.
+equation. For example, the theorem \<^prop>\<open>\<not> P\<close> is turned into \<^prop>\<open>P = False\<close>.
-Only equations that really simplify, like @{prop"rev (rev xs) = xs"} and
-@{prop"xs @ [] = xs"}, should be declared as simplification
+Only equations that really simplify, like \<^prop>\<open>rev (rev xs) = xs\<close> and
+\<^prop>\<open>xs @ [] = xs\<close>, should be declared as simplification
rules. Equations that may be counterproductive as simplification rules
should only be used in specific proof steps (see \autoref{sec:simp} below).
Distributivity laws, for example, alter the structure of terms
@@ -421,17 +421,17 @@
simplifier will first try to prove the preconditions, again by
simplification. For example, given the simplification rules
\begin{quote}
-@{prop"p(0::nat) = True"}\\
-@{prop"p(x) \<Longrightarrow> f(x) = g(x)"},
+\<^prop>\<open>p(0::nat) = True\<close>\\
+\<^prop>\<open>p(x) \<Longrightarrow> f(x) = g(x)\<close>,
\end{quote}
-the term @{term"f(0::nat)"} simplifies to @{term"g(0::nat)"}
-but @{term"f(1::nat)"} does not simplify because @{prop"p(1::nat)"}
+the term \<^term>\<open>f(0::nat)\<close> simplifies to \<^term>\<open>g(0::nat)\<close>
+but \<^term>\<open>f(1::nat)\<close> does not simplify because \<^prop>\<open>p(1::nat)\<close>
is not provable.
\subsection{Termination}
-Simplification can run forever, for example if both @{prop"f x = g x"} and
-@{prop"g(x) = f(x)"} are simplification rules. It is the user's
+Simplification can run forever, for example if both \<^prop>\<open>f x = g x\<close> and
+\<^prop>\<open>g(x) = f(x)\<close> are simplification rules. It is the user's
responsibility not to include simplification rules that can lead to
nontermination, either on their own or in combination with other
simplification rules. The right-hand side of a simplification rule should
@@ -443,15 +443,15 @@
proved first. Hence all preconditions need to be
simpler than the left-hand side of the conclusion. For example
\begin{quote}
-@{prop "n < m \<Longrightarrow> (n < Suc m) = True"}
+\<^prop>\<open>n < m \<Longrightarrow> (n < Suc m) = True\<close>
\end{quote}
-is suitable as a simplification rule: both @{prop"n<m"} and @{const True}
-are simpler than \mbox{@{prop"n < Suc m"}}. But
+is suitable as a simplification rule: both \<^prop>\<open>n<m\<close> and \<^const>\<open>True\<close>
+are simpler than \mbox{\<^prop>\<open>n < Suc m\<close>}. But
\begin{quote}
-@{prop "Suc n < m \<Longrightarrow> (n < m) = True"}
+\<^prop>\<open>Suc n < m \<Longrightarrow> (n < m) = True\<close>
\end{quote}
-leads to nontermination: when trying to rewrite @{prop"n<m"} to @{const True}
-one first has to prove \mbox{@{prop"Suc n < m"}}, which can be rewritten to @{const True} provided @{prop"Suc(Suc n) < m"}, \emph{ad infinitum}.
+leads to nontermination: when trying to rewrite \<^prop>\<open>n<m\<close> to \<^const>\<open>True\<close>
+one first has to prove \mbox{\<^prop>\<open>Suc n < m\<close>}, which can be rewritten to \<^const>\<open>True\<close> provided \<^prop>\<open>Suc(Suc n) < m\<close>, \emph{ad infinitum}.
\subsection{The \indexed{\<open>simp\<close>}{simp} Proof Method}
\label{sec:simp}
@@ -514,14 +514,14 @@
Goals containing if-expressions are automatically split into two cases by
\<open>simp\<close> using the rule
\begin{quote}
-@{prop"P(if A then s else t) = ((A \<longrightarrow> P(s)) \<and> (~A \<longrightarrow> P(t)))"}
+\<^prop>\<open>P(if A then s else t) = ((A \<longrightarrow> P(s)) \<and> (~A \<longrightarrow> P(t)))\<close>
\end{quote}
For example, \<open>simp\<close> can prove
\begin{quote}
-@{prop"(A \<and> B) = (if A then B else False)"}
+\<^prop>\<open>(A \<and> B) = (if A then B else False)\<close>
\end{quote}
-because both @{prop"A \<longrightarrow> (A & B) = B"} and @{prop"~A \<longrightarrow> (A & B) = False"}
-simplify to @{const True}.
+because both \<^prop>\<open>A \<longrightarrow> (A & B) = B\<close> and \<^prop>\<open>~A \<longrightarrow> (A & B) = False\<close>
+simplify to \<^const>\<open>True\<close>.
We can split case-expressions similarly. For \<open>nat\<close> the rule looks like this:
@{prop[display,margin=65,indent=4]"P(case e of 0 \<Rightarrow> a | Suc n \<Rightarrow> b n) = ((e = 0 \<longrightarrow> P a) \<and> (\<forall>n. e = Suc n \<longrightarrow> P(b n)))"}
@@ -540,14 +540,14 @@
\ifsem\else
\subsection{Rewriting \<open>let\<close> and Numerals}
-Let-expressions (@{term "let x = s in t"}) can be expanded explicitly with the simplification rule
+Let-expressions (\<^term>\<open>let x = s in t\<close>) can be expanded explicitly with the simplification rule
@{thm[source] Let_def}. The simplifier will not expand \<open>let\<close>s automatically in many cases.
-Numerals of type @{typ nat} can be converted to @{const Suc} terms with the simplification rule
+Numerals of type \<^typ>\<open>nat\<close> can be converted to \<^const>\<open>Suc\<close> terms with the simplification rule
@{thm[source] numeral_eq_Suc}. This is required, for example, when a function that is defined
-by pattern matching with @{const Suc} is applied to a numeral: if \<open>f\<close> is defined by
+by pattern matching with \<^const>\<open>Suc\<close> is applied to a numeral: if \<open>f\<close> is defined by
\<open>f 0 = ...\<close> and \<open>f (Suc n) = ...\<close>, the simplifier cannot simplify \<open>f 2\<close> unless
-\<open>2\<close> is converted to @{term "Suc(Suc 0)"} (via @{thm[source] numeral_eq_Suc}).
+\<open>2\<close> is converted to \<^term>\<open>Suc(Suc 0)\<close> (via @{thm[source] numeral_eq_Suc}).
\fi
\subsection*{Exercises}
@@ -569,7 +569,7 @@
text \<open>
Find an equation expressing the size of a tree after exploding it
(\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
-of @{term "nodes t"} and \<open>n\<close>. Prove your equation.
+of \<^term>\<open>nodes t\<close> and \<open>n\<close>. Prove your equation.
You may use the usual arithmetic operators, including the exponentiation
operator ``\<open>^\<close>''. For example, \noquotes{@{prop [source] "2 ^ 2 = 4"}}.
@@ -578,7 +578,7 @@
\endexercise
\exercise
-Define arithmetic expressions in one variable over integers (type @{typ int})
+Define arithmetic expressions in one variable over integers (type \<^typ>\<open>int\<close>)
as a data type:
\<close>
@@ -586,18 +586,18 @@
text\<open>
Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
-such that @{term"eval e x"} evaluates \<open>e\<close> at the value
+such that \<^term>\<open>eval e x\<close> evaluates \<open>e\<close> at the value
\<open>x\<close>.
A polynomial can be represented as a list of coefficients, starting with
-the constant. For example, @{term "[4, 2, -1, 3::int]"} represents the
+the constant. For example, \<^term>\<open>[4, 2, -1, 3::int]\<close> represents the
polynomial $4 + 2x - x^2 + 3x^3$.
Define a function \noquotes{@{term [source] "evalp :: int list \<Rightarrow> int \<Rightarrow> int"}}
that evaluates a polynomial at the given value.
Define a function \noquotes{@{term[source] "coeffs :: exp \<Rightarrow> int list"}}
that transforms an expression into a polynomial. This may require auxiliary
functions. Prove that \<open>coeffs\<close> preserves the value of the expression:
-\mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
+\mbox{\<^prop>\<open>evalp (coeffs e) x = eval e x\<close>.}
Hint: consider the hint in Exercise~\ref{exe:tree0}.
\endexercise
\<close>
--- a/src/Doc/Sugar/Sugar.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Sugar/Sugar.thy Sat Jan 05 17:24:33 2019 +0100
@@ -55,15 +55,15 @@
\subsection{Logic}
-The formula @{prop[source]"\<not>(\<exists>x. P x)"} is typeset as @{prop"\<not>(\<exists>x. P x)"}.
+The formula @{prop[source]"\<not>(\<exists>x. P x)"} is typeset as \<^prop>\<open>\<not>(\<exists>x. P x)\<close>.
The predefined constructs \<open>if\<close>, \<open>let\<close> and
\<open>case\<close> are set in sans serif font to distinguish them from
other functions. This improves readability:
\begin{itemize}
-\item @{term"if b then e\<^sub>1 else e\<^sub>2"} instead of \<open>if b then e\<^sub>1 else e\<^sub>2\<close>.
-\item @{term"let x = e\<^sub>1 in e\<^sub>2"} instead of \<open>let x = e\<^sub>1 in e\<^sub>2\<close>.
-\item @{term"case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2"} instead of\\
+\item \<^term>\<open>if b then e\<^sub>1 else e\<^sub>2\<close> instead of \<open>if b then e\<^sub>1 else e\<^sub>2\<close>.
+\item \<^term>\<open>let x = e\<^sub>1 in e\<^sub>2\<close> instead of \<open>let x = e\<^sub>1 in e\<^sub>2\<close>.
+\item \<^term>\<open>case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2\<close> instead of\\
\<open>case x of True \<Rightarrow> e\<^sub>1 | False \<Rightarrow> e\<^sub>2\<close>.
\end{itemize}
@@ -72,11 +72,11 @@
Although set syntax in HOL is already close to
standard, we provide a few further improvements:
\begin{itemize}
-\item @{term"{x. P}"} instead of \<open>{x. P}\<close>.
-\item @{term"{}"} instead of \<open>{}\<close>, where
- @{term"{}"} is also input syntax.
-\item @{term"insert a (insert b (insert c M))"} instead of \<open>insert a (insert b (insert c M))\<close>.
-\item @{term"card A"} instead of \<open>card A\<close>.
+\item \<^term>\<open>{x. P}\<close> instead of \<open>{x. P}\<close>.
+\item \<^term>\<open>{}\<close> instead of \<open>{}\<close>, where
+ \<^term>\<open>{}\<close> is also input syntax.
+\item \<^term>\<open>insert a (insert b (insert c M))\<close> instead of \<open>insert a (insert b (insert c M))\<close>.
+\item \<^term>\<open>card A\<close> instead of \<open>card A\<close>.
\end{itemize}
@@ -84,16 +84,16 @@
If lists are used heavily, the following notations increase readability:
\begin{itemize}
-\item @{term"x # xs"} instead of \<open>x # xs\<close>,
- where @{term"x # xs"} is also input syntax.
-\item @{term"length xs"} instead of \<open>length xs\<close>.
-\item @{term"nth xs n"} instead of \<open>nth xs n\<close>,
+\item \<^term>\<open>x # xs\<close> instead of \<open>x # xs\<close>,
+ where \<^term>\<open>x # xs\<close> is also input syntax.
+\item \<^term>\<open>length xs\<close> instead of \<open>length xs\<close>.
+\item \<^term>\<open>nth xs n\<close> instead of \<open>nth xs n\<close>,
the $n$th element of \<open>xs\<close>.
\item Human readers are good at converting automatically from lists to
sets. Hence \texttt{OptionalSugar} contains syntax for suppressing the
-conversion function @{const set}: for example, @{prop[source]"x \<in> set xs"}
-becomes @{prop"x \<in> set xs"}.
+conversion function \<^const>\<open>set\<close>: for example, @{prop[source]"x \<in> set xs"}
+becomes \<^prop>\<open>x \<in> set xs\<close>.
\item The \<open>@\<close> operation associates implicitly to the right,
which leads to unpleasant line breaks if the term is too long for one
@@ -108,12 +108,12 @@
\subsection{Numbers}
Coercions between numeric types are alien to mathematicians who
-consider, for example, @{typ nat} as a subset of @{typ int}.
+consider, for example, \<^typ>\<open>nat\<close> as a subset of \<^typ>\<open>int\<close>.
\texttt{OptionalSugar} contains syntax for suppressing numeric coercions such
-as @{const int} \<open>::\<close> @{typ"nat \<Rightarrow> int"}. For example,
-@{term[source]"int 5"} is printed as @{term "int 5"}. Embeddings of types
-@{typ nat}, @{typ int}, @{typ real} are covered; non-injective coercions such
-as @{const nat} \<open>::\<close> @{typ"int \<Rightarrow> nat"} are not and should not be
+as \<^const>\<open>int\<close> \<open>::\<close> \<^typ>\<open>nat \<Rightarrow> int\<close>. For example,
+@{term[source]"int 5"} is printed as \<^term>\<open>int 5\<close>. Embeddings of types
+\<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close>, \<^typ>\<open>real\<close> are covered; non-injective coercions such
+as \<^const>\<open>nat\<close> \<open>::\<close> \<^typ>\<open>int \<Rightarrow> nat\<close> are not and should not be
hidden.
@@ -123,7 +123,7 @@
\verb!@!\verb!{const myconst}! \verb!@!\verb!{text "::"}! \verb!@!\verb!{typeof myconst}!,
you can write \verb!@!\verb!{const_typ myconst}! using the new antiquotation
\texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
-\verb!@!\verb!{const_typ length}! produces @{const_typ length} (see below for how to suppress
+\verb!@!\verb!{const_typ length}! produces \<^const_typ>\<open>length\<close> (see below for how to suppress
the question mark).
This works both for genuine constants and for variables fixed in some context,
especially in a locale.
@@ -131,9 +131,9 @@
\section{Printing theorems}
-The @{prop "P \<Longrightarrow> Q \<Longrightarrow> R"} syntax is a bit idiosyncratic. If you would like
+The \<^prop>\<open>P \<Longrightarrow> Q \<Longrightarrow> R\<close> syntax is a bit idiosyncratic. If you would like
to avoid it, you can easily print the premises as a conjunction:
-@{prop "P \<and> Q \<Longrightarrow> R"}. See \texttt{OptionalSugar} for the required ``code''.
+\<^prop>\<open>P \<and> Q \<Longrightarrow> R\<close>. See \texttt{OptionalSugar} for the required ``code''.
\subsection{Question marks}
@@ -372,18 +372,18 @@
\subsection{Patterns}
-In \S\ref{sec:varnames} we shows how to create patterns containing ``@{term DUMMY}''.
+In \S\ref{sec:varnames} we show how to create patterns containing ``\<^term>\<open>DUMMY\<close>''.
You can drive this game even further and extend the syntax of let
-bindings such that certain functions like @{term fst}, @{term hd},
+bindings such that certain functions like \<^term>\<open>fst\<close>, \<^term>\<open>hd\<close>,
etc.\ are printed as patterns. \texttt{OptionalSugar} provides the following:
\begin{center}
\begin{tabular}{l@ {~~produced by~~}l}
-@{term "let x = fst p in t"} & \verb!@!\verb!{term "let x = fst p in t"}!\\
-@{term "let x = snd p in t"} & \verb!@!\verb!{term "let x = snd p in t"}!\\
-@{term "let x = hd xs in t"} & \verb!@!\verb!{term "let x = hd xs in t"}!\\
-@{term "let x = tl xs in t"} & \verb!@!\verb!{term "let x = tl xs in t"}!\\
-@{term "let x = the y in t"} & \verb!@!\verb!{term "let x = the y in t"}!\\
+\<^term>\<open>let x = fst p in t\<close> & \verb!@!\verb!{term "let x = fst p in t"}!\\
+\<^term>\<open>let x = snd p in t\<close> & \verb!@!\verb!{term "let x = snd p in t"}!\\
+\<^term>\<open>let x = hd xs in t\<close> & \verb!@!\verb!{term "let x = hd xs in t"}!\\
+\<^term>\<open>let x = tl xs in t\<close> & \verb!@!\verb!{term "let x = tl xs in t"}!\\
+\<^term>\<open>let x = the y in t\<close> & \verb!@!\verb!{term "let x = the y in t"}!\\
\end{tabular}
\end{center}
@@ -449,7 +449,7 @@
Further use cases can be found in \S\ref{sec:yourself}.
If you are not afraid of ML, you may also define your own styles.
-Have a look at module @{ML_structure Term_Style}.
+Have a look at module \<^ML_structure>\<open>Term_Style\<close>.
\section {Proofs}
--- a/src/Doc/Tutorial/Advanced/simp2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Advanced/simp2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,16 +19,15 @@
of $P \Imp Q$, it is legal to use the assumption $P$.
For $\Imp$ this policy is hardwired, but
contextual information can also be made available for other
-operators. For example, @{prop"xs = [] --> xs@xs = xs"} simplifies to @{term
-True} because we may use @{prop"xs = []"} when simplifying @{prop"xs@xs =
-xs"}. The generation of contextual information during simplification is
+operators. For example, \<^prop>\<open>xs = [] --> xs@xs = xs\<close> simplifies to \<^term>\<open>True\<close> because we may use \<^prop>\<open>xs = []\<close> when simplifying \<^prop>\<open>xs@xs =
+xs\<close>. The generation of contextual information during simplification is
controlled by so-called \bfindex{congruence rules}. This is the one for
\<open>\<longrightarrow>\<close>:
@{thm[display]imp_cong[no_vars]}
It should be read as follows:
-In order to simplify @{prop"P-->Q"} to @{prop"P'-->Q'"},
-simplify @{prop P} to @{prop P'}
-and assume @{prop"P'"} when simplifying @{prop Q} to @{prop"Q'"}.
+In order to simplify \<^prop>\<open>P-->Q\<close> to \<^prop>\<open>P'-->Q'\<close>,
+simplify \<^prop>\<open>P\<close> to \<^prop>\<open>P'\<close>
+and assume \<^prop>\<open>P'\<close> when simplifying \<^prop>\<open>Q\<close> to \<^prop>\<open>Q'\<close>.
Here are some more examples. The congruence rules for bounded
quantifiers supply contextual information about the bound variable:
@@ -70,16 +69,16 @@
\index{rewrite rules!permutative|bold}%
An equation is a \textbf{permutative rewrite rule} if the left-hand
side and right-hand side are the same up to renaming of variables. The most
-common permutative rule is commutativity: @{prop"x+y = y+x"}. Other examples
-include @{prop"(x-y)-z = (x-z)-y"} in arithmetic and @{prop"insert x (insert
-y A) = insert y (insert x A)"} for sets. Such rules are problematic because
+common permutative rule is commutativity: \<^prop>\<open>x+y = y+x\<close>. Other examples
+include \<^prop>\<open>(x-y)-z = (x-z)-y\<close> in arithmetic and \<^prop>\<open>insert x (insert
+y A) = insert y (insert x A)\<close> for sets. Such rules are problematic because
once they apply, they can be used forever. The simplifier is aware of this
danger and treats permutative rules by means of a special strategy, called
\bfindex{ordered rewriting}: a permutative rewrite
rule is only applied if the term becomes smaller with respect to a fixed
lexicographic ordering on terms. For example, commutativity rewrites
-@{term"b+a"} to @{term"a+b"}, but then stops because @{term"a+b"} is strictly
-smaller than @{term"b+a"}. Permutative rewrite rules can be turned into
+\<^term>\<open>b+a\<close> to \<^term>\<open>a+b\<close>, but then stops because \<^term>\<open>a+b\<close> is strictly
+smaller than \<^term>\<open>b+a\<close>. Permutative rewrite rules can be turned into
simplification rules in the usual manner via the \<open>simp\<close> attribute; the
simplifier recognizes their special status automatically.
@@ -140,7 +139,7 @@
The simplifier will still try to apply the rule provided it
matches directly: without much $\lambda$-calculus hocus
pocus. For example, \<open>(?f ?x \<in> range ?f) = True\<close> rewrites
-@{term"g a \<in> range g"} to @{const True}, but will fail to match
+\<^term>\<open>g a \<in> range g\<close> to \<^const>\<open>True\<close>, but will fail to match
\<open>g(h b) \<in> range(\<lambda>x. g(h x))\<close>. However, you can
eliminate the offending subterms --- those that are not patterns ---
by adding new variables and conditions.
@@ -160,9 +159,9 @@
text\<open>\label{sec:simp-preprocessor}
When a theorem is declared a simplification rule, it need not be a
conditional equation already. The simplifier will turn it into a set of
-conditional equations automatically. For example, @{prop"f x =
-g x & h x = k x"} becomes the two separate
-simplification rules @{prop"f x = g x"} and @{prop"h x = k x"}. In
+conditional equations automatically. For example, \<^prop>\<open>f x =
+g x & h x = k x\<close> becomes the two separate
+simplification rules \<^prop>\<open>f x = g x\<close> and \<^prop>\<open>h x = k x\<close>. In
general, the input theorem is converted as follows:
\begin{eqnarray}
\neg P &\mapsto& P = \hbox{\isa{False}} \nonumber\\
@@ -176,10 +175,10 @@
Once this conversion process is finished, all remaining non-equations
$P$ are turned into trivial equations $P =\isa{True}$.
For example, the formula
-\begin{center}@{prop"(p \<longrightarrow> t=u \<and> ~r) \<and> s"}\end{center}
+\begin{center}\<^prop>\<open>(p \<longrightarrow> t=u \<and> ~r) \<and> s\<close>\end{center}
is converted into the three rules
\begin{center}
-@{prop"p \<Longrightarrow> t = u"},\quad @{prop"p \<Longrightarrow> r = False"},\quad @{prop"s = True"}.
+\<^prop>\<open>p \<Longrightarrow> t = u\<close>,\quad \<^prop>\<open>p \<Longrightarrow> r = False\<close>,\quad \<^prop>\<open>s = True\<close>.
\end{center}
\index{simplification rule|)}
\index{simplification|)}
--- a/src/Doc/Tutorial/CTL/Base.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/CTL/Base.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,9 +13,9 @@
logic (PDL)\@. We then proceed to the temporal logic CTL, which is
used in many real
model checkers. In each case we give both a traditional semantics (\<open>\<Turnstile>\<close>) and a
-recursive function @{term mc} that maps a formula into the set of all states of
+recursive function \<^term>\<open>mc\<close> that maps a formula into the set of all states of
the system where the formula is valid. If the system has a finite number of
-states, @{term mc} is directly executable: it is a model checker, albeit an
+states, \<^term>\<open>mc\<close> is directly executable: it is a model checker, albeit an
inefficient one. The main proof obligation is to show that the semantics
and the model checker agree.
@@ -62,9 +62,9 @@
Command \commdx{typedecl} merely declares a new type but without
defining it (see \S\ref{sec:typedecl}). Thus we know nothing
about the type other than its existence. That is exactly what we need
-because @{typ state} really is an implicit parameter of our model. Of
-course it would have been more generic to make @{typ state} a type
-parameter of everything but declaring @{typ state} globally as above
+because \<^typ>\<open>state\<close> really is an implicit parameter of our model. Of
+course it would have been more generic to make \<^typ>\<open>state\<close> a type
+parameter of everything but declaring \<^typ>\<open>state\<close> globally as above
reduces clutter. Similarly we declare an arbitrary but fixed
transition system, i.e.\ a relation between states:
\<close>
--- a/src/Doc/Tutorial/CTL/CTL.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/CTL/CTL.thy Sat Jan 05 17:24:33 2019 +0100
@@ -21,14 +21,14 @@
which stands for ``\emph{A}lways in the \emph{F}uture'':
on all infinite paths, at some point the formula holds.
Formalizing the notion of an infinite path is easy
-in HOL: it is simply a function from @{typ nat} to @{typ state}.
+in HOL: it is simply a function from \<^typ>\<open>nat\<close> to \<^typ>\<open>state\<close>.
\<close>
definition Paths :: "state \<Rightarrow> (nat \<Rightarrow> state)set" where
"Paths s \<equiv> {p. s = p 0 \<and> (\<forall>i. (p i, p(i+1)) \<in> M)}"
text\<open>\noindent
-This definition allows a succinct statement of the semantics of @{const AF}:
+This definition allows a succinct statement of the semantics of \<^const>\<open>AF\<close>:
\footnote{Do not be misled: neither datatypes nor recursive functions can be
extended by new constructors or equations. This is just a trick of the
presentation (see \S\ref{sec:doc-prep-suppress}). In reality one has to define
@@ -45,7 +45,7 @@
"s \<Turnstile> AF f = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"
text\<open>\noindent
-Model checking @{const AF} involves a function which
+Model checking \<^const>\<open>AF\<close> involves a function which
is just complicated enough to warrant a separate definition:
\<close>
@@ -53,8 +53,8 @@
"af A T \<equiv> A \<union> {s. \<forall>t. (s, t) \<in> M \<longrightarrow> t \<in> T}"
text\<open>\noindent
-Now we define @{term "mc(AF f)"} as the least set @{term T} that includes
-@{term"mc f"} and all states all of whose direct successors are in @{term T}:
+Now we define \<^term>\<open>mc(AF f)\<close> as the least set \<^term>\<open>T\<close> that includes
+\<^term>\<open>mc f\<close> and all states all of whose direct successors are in \<^term>\<open>T\<close>:
\<close>
(*<*)
primrec mc :: "formula \<Rightarrow> state set" where
@@ -66,8 +66,8 @@
"mc(AF f) = lfp(af(mc f))"
text\<open>\noindent
-Because @{const af} is monotone in its second argument (and also its first, but
-that is irrelevant), @{term"af A"} has a least fixed point:
+Because \<^const>\<open>af\<close> is monotone in its second argument (and also its first, but
+that is irrelevant), \<^term>\<open>af A\<close> has a least fixed point:
\<close>
lemma mono_af: "mono(af A)"
@@ -97,8 +97,8 @@
by(blast)
(*>*)
text\<open>
-All we need to prove now is @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
-that @{term mc} and \<open>\<Turnstile>\<close> agree for @{const AF}\@.
+All we need to prove now is \<^prop>\<open>mc(AF f) = {s. s \<Turnstile> AF f}\<close>, which states
+that \<^term>\<open>mc\<close> and \<open>\<Turnstile>\<close> agree for \<^const>\<open>AF\<close>\@.
This time we prove the two inclusions separately, starting
with the easy one:
\<close>
@@ -106,13 +106,13 @@
theorem AF_lemma1: "lfp(af A) \<subseteq> {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"
txt\<open>\noindent
-In contrast to the analogous proof for @{const EF}, and just
+In contrast to the analogous proof for \<^const>\<open>EF\<close>, and just
for a change, we do not use fixed point induction. Park-induction,
named after David Park, is weaker but sufficient for this proof:
\begin{center}
@{thm lfp_lowerbound[of _ "S",no_vars]} \hfill (@{thm[source]lfp_lowerbound})
\end{center}
-The instance of the premise @{prop"f S \<subseteq> S"} is proved pointwise,
+The instance of the premise \<^prop>\<open>f S \<subseteq> S\<close> is proved pointwise,
a decision that \isa{auto} takes for us:
\<close>
apply(rule lfp_lowerbound)
@@ -120,9 +120,9 @@
txt\<open>
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
-In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
+In this remaining case, we set \<^term>\<open>t\<close> to \<^term>\<open>p(1::nat)\<close>.
The rest is automatic, which is surprising because it involves
-finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
+finding the instantiation \<^term>\<open>\<lambda>i::nat. p(i+1)\<close>
for \<open>\<forall>p\<close>.
\<close>
@@ -133,13 +133,13 @@
text\<open>
The opposite inclusion is proved by contradiction: if some state
-@{term s} is not in @{term"lfp(af A)"}, then we can construct an
-infinite @{term A}-avoiding path starting from~@{term s}. The reason is
-that by unfolding @{const lfp} we find that if @{term s} is not in
-@{term"lfp(af A)"}, then @{term s} is not in @{term A} and there is a
-direct successor of @{term s} that is again not in \mbox{@{term"lfp(af
-A)"}}. Iterating this argument yields the promised infinite
-@{term A}-avoiding path. Let us formalize this sketch.
+\<^term>\<open>s\<close> is not in \<^term>\<open>lfp(af A)\<close>, then we can construct an
+infinite \<^term>\<open>A\<close>-avoiding path starting from~\<^term>\<open>s\<close>. The reason is
+that by unfolding \<^const>\<open>lfp\<close> we find that if \<^term>\<open>s\<close> is not in
+\<^term>\<open>lfp(af A)\<close>, then \<^term>\<open>s\<close> is not in \<^term>\<open>A\<close> and there is a
+direct successor of \<^term>\<open>s\<close> that is again not in \mbox{\<^term>\<open>lfp(af
+A)\<close>}. Iterating this argument yields the promised infinite
+\<^term>\<open>A\<close>-avoiding path. Let us formalize this sketch.
The one-step argument in the sketch above
is proved by a variant of contraposition:
@@ -153,12 +153,12 @@
done
text\<open>\noindent
-We assume the negation of the conclusion and prove @{term"s \<in> lfp(af A)"}.
-Unfolding @{const lfp} once and
-simplifying with the definition of @{const af} finishes the proof.
+We assume the negation of the conclusion and prove \<^term>\<open>s \<in> lfp(af A)\<close>.
+Unfolding \<^const>\<open>lfp\<close> once and
+simplifying with the definition of \<^const>\<open>af\<close> finishes the proof.
Now we iterate this process. The following construction of the desired
-path is parameterized by a predicate @{term Q} that should hold along the path:
+path is parameterized by a predicate \<^term>\<open>Q\<close> that should hold along the path:
\<close>
primrec path :: "state \<Rightarrow> (state \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> state)" where
@@ -166,15 +166,15 @@
"path s Q (Suc n) = (SOME t. (path s Q n,t) \<in> M \<and> Q t)"
text\<open>\noindent
-Element @{term"n+1::nat"} on this path is some arbitrary successor
-@{term t} of element @{term n} such that @{term"Q t"} holds. Remember that \<open>SOME t. R t\<close>
-is some arbitrary but fixed @{term t} such that @{prop"R t"} holds (see \S\ref{sec:SOME}). Of
-course, such a @{term t} need not exist, but that is of no
-concern to us since we will only use @{const path} when a
-suitable @{term t} does exist.
+Element \<^term>\<open>n+1::nat\<close> on this path is some arbitrary successor
+\<^term>\<open>t\<close> of element \<^term>\<open>n\<close> such that \<^term>\<open>Q t\<close> holds. Remember that \<open>SOME t. R t\<close>
+is some arbitrary but fixed \<^term>\<open>t\<close> such that \<^prop>\<open>R t\<close> holds (see \S\ref{sec:SOME}). Of
+course, such a \<^term>\<open>t\<close> need not exist, but that is of no
+concern to us since we will only use \<^const>\<open>path\<close> when a
+suitable \<^term>\<open>t\<close> does exist.
-Let us show that if each state @{term s} that satisfies @{term Q}
-has a successor that again satisfies @{term Q}, then there exists an infinite @{term Q}-path:
+Let us show that if each state \<^term>\<open>s\<close> that satisfies \<^term>\<open>Q\<close>
+has a successor that again satisfies \<^term>\<open>Q\<close>, then there exists an infinite \<^term>\<open>Q\<close>-path:
\<close>
lemma infinity_lemma:
@@ -183,7 +183,7 @@
txt\<open>\noindent
First we rephrase the conclusion slightly because we need to prove simultaneously
-both the path property and the fact that @{term Q} holds:
+both the path property and the fact that \<^term>\<open>Q\<close> holds:
\<close>
apply(subgoal_tac
@@ -196,7 +196,7 @@
apply(simp add: Paths_def, blast)
txt\<open>\noindent
-The new subgoal is proved by providing the witness @{term "path s Q"} for @{term p}:
+The new subgoal is proved by providing the witness \<^term>\<open>path s Q\<close> for \<^term>\<open>p\<close>:
\<close>
apply(rule_tac x = "path s Q" in exI)
@@ -205,7 +205,7 @@
txt\<open>\noindent
After simplification and clarification, the subgoal has the following form:
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
-It invites a proof by induction on @{term i}:
+It invites a proof by induction on \<^term>\<open>i\<close>:
\<close>
apply(induct_tac i)
@@ -214,14 +214,14 @@
txt\<open>\noindent
After simplification, the base case boils down to
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
-The conclusion looks exceedingly trivial: after all, @{term t} is chosen such that @{prop"(s,t)\<in>M"}
-holds. However, we first have to show that such a @{term t} actually exists! This reasoning
+The conclusion looks exceedingly trivial: after all, \<^term>\<open>t\<close> is chosen such that \<^prop>\<open>(s,t)\<in>M\<close>
+holds. However, we first have to show that such a \<^term>\<open>t\<close> actually exists! This reasoning
is embodied in the theorem @{thm[source]someI2_ex}:
@{thm[display,eta_contract=false]someI2_ex}
When we apply this theorem as an introduction rule, \<open>?P x\<close> becomes
-@{prop"(s, x) \<in> M \<and> Q x"} and \<open>?Q x\<close> becomes @{prop"(s,x) \<in> M"} and we have to prove
-two subgoals: @{prop"\<exists>a. (s, a) \<in> M \<and> Q a"}, which follows from the assumptions, and
-@{prop"(s, x) \<in> M \<and> Q x \<Longrightarrow> (s,x) \<in> M"}, which is trivial. Thus it is not surprising that
+\<^prop>\<open>(s, x) \<in> M \<and> Q x\<close> and \<open>?Q x\<close> becomes \<^prop>\<open>(s,x) \<in> M\<close> and we have to prove
+two subgoals: \<^prop>\<open>\<exists>a. (s, a) \<in> M \<and> Q a\<close>, which follows from the assumptions, and
+\<^prop>\<open>(s, x) \<in> M \<and> Q x \<Longrightarrow> (s,x) \<in> M\<close>, which is trivial. Thus it is not surprising that
\<open>fast\<close> can prove the base case quickly:
\<close>
@@ -253,14 +253,14 @@
done
text\<open>
-Function @{const path} has fulfilled its purpose now and can be forgotten.
+Function \<^const>\<open>path\<close> has fulfilled its purpose now and can be forgotten.
It was merely defined to provide the witness in the proof of the
@{thm[source]infinity_lemma}. Aficionados of minimal proofs might like to know
that we could have given the witness without having to define a new function:
the term
@{term[display]"rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)"}
-is extensionally equal to @{term"path s Q"},
-where @{term rec_nat} is the predefined primitive recursor on @{typ nat}.
+is extensionally equal to \<^term>\<open>path s Q\<close>,
+where \<^term>\<open>rec_nat\<close> is the predefined primitive recursor on \<^typ>\<open>nat\<close>.
\<close>
(*<*)
lemma
@@ -332,8 +332,8 @@
text\<open>
The language defined above is not quite CTL\@. The latter also includes an
-until-operator @{term"EU f g"} with semantics ``there \emph{E}xists a path
-where @{term f} is true \emph{U}ntil @{term g} becomes true''. We need
+until-operator \<^term>\<open>EU f g\<close> with semantics ``there \emph{E}xists a path
+where \<^term>\<open>f\<close> is true \emph{U}ntil \<^term>\<open>g\<close> becomes true''. We need
an auxiliary function:
\<close>
@@ -346,11 +346,11 @@
"eusem A B \<equiv> {s. \<exists>p. until A B s p}"(*>*)
text\<open>\noindent
-Expressing the semantics of @{term EU} is now straightforward:
+Expressing the semantics of \<^term>\<open>EU\<close> is now straightforward:
@{prop[display]"s \<Turnstile> EU f g = (\<exists>p. until {t. t \<Turnstile> f} {t. t \<Turnstile> g} s p)"}
-Note that @{term EU} is not definable in terms of the other operators!
+Note that \<^term>\<open>EU\<close> is not definable in terms of the other operators!
-Model checking @{term EU} is again a least fixed point construction:
+Model checking \<^term>\<open>EU\<close> is again a least fixed point construction:
@{text[display]"mc(EU f g) = lfp(\<lambda>T. mc g \<union> mc f \<inter> (M\<inverse> `` T))"}
\begin{exercise}
@@ -438,11 +438,11 @@
text\<open>Let us close this section with a few words about the executability of
our model checkers. It is clear that if all sets are finite, they can be
represented as lists and the usual set operations are easily
-implemented. Only @{const lfp} requires a little thought. Fortunately, theory
+implemented. Only \<^const>\<open>lfp\<close> requires a little thought. Fortunately, theory
\<open>While_Combinator\<close> in the Library~@{cite "HOL-Library"} provides a
theorem stating that in the case of finite sets and a monotone
-function~@{term F}, the value of \mbox{@{term"lfp F"}} can be computed by
-iterated application of @{term F} to~@{term"{}"} until a fixed point is
+function~\<^term>\<open>F\<close>, the value of \mbox{\<^term>\<open>lfp F\<close>} can be computed by
+iterated application of \<^term>\<open>F\<close> to~\<^term>\<open>{}\<close> until a fixed point is
reached. It is actually possible to generate executable functional programs
from HOL definitions, but that is beyond the scope of the tutorial.%
\index{CTL|)}\<close>
--- a/src/Doc/Tutorial/CTL/CTLind.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/CTL/CTLind.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,12 +14,11 @@
involved. Below we give a simpler proof of @{thm[source]AF_lemma2}
based on an auxiliary inductive definition.
-Let us call a (finite or infinite) path \emph{@{term A}-avoiding} if it does
-not touch any node in the set @{term A}. Then @{thm[source]AF_lemma2} says
-that if no infinite path from some state @{term s} is @{term A}-avoiding,
-then @{prop"s \<in> lfp(af A)"}. We prove this by inductively defining the set
-@{term"Avoid s A"} of states reachable from @{term s} by a finite @{term
-A}-avoiding path:
+Let us call a (finite or infinite) path \emph{\<^term>\<open>A\<close>-avoiding} if it does
+not touch any node in the set \<^term>\<open>A\<close>. Then @{thm[source]AF_lemma2} says
+that if no infinite path from some state \<^term>\<open>s\<close> is \<^term>\<open>A\<close>-avoiding,
+then \<^prop>\<open>s \<in> lfp(af A)\<close>. We prove this by inductively defining the set
+\<^term>\<open>Avoid s A\<close> of states reachable from \<^term>\<open>s\<close> by a finite \<^term>\<open>A\<close>-avoiding path:
% Second proof of opposite direction, directly by well-founded induction
% on the initial segment of M that avoids A.
\<close>
@@ -32,11 +31,11 @@
| "\<lbrakk> t \<in> Avoid s A; t \<notin> A; (t,u) \<in> M \<rbrakk> \<Longrightarrow> u \<in> Avoid s A"
text\<open>
-It is easy to see that for any infinite @{term A}-avoiding path @{term f}
-with @{prop"f(0::nat) \<in> Avoid s A"} there is an infinite @{term A}-avoiding path
-starting with @{term s} because (by definition of @{const Avoid}) there is a
-finite @{term A}-avoiding path from @{term s} to @{term"f(0::nat)"}.
-The proof is by induction on @{prop"f(0::nat) \<in> Avoid s A"}. However,
+It is easy to see that for any infinite \<^term>\<open>A\<close>-avoiding path \<^term>\<open>f\<close>
+with \<^prop>\<open>f(0::nat) \<in> Avoid s A\<close> there is an infinite \<^term>\<open>A\<close>-avoiding path
+starting with \<^term>\<open>s\<close> because (by definition of \<^const>\<open>Avoid\<close>) there is a
+finite \<^term>\<open>A\<close>-avoiding path from \<^term>\<open>s\<close> to \<^term>\<open>f(0::nat)\<close>.
+The proof is by induction on \<^prop>\<open>f(0::nat) \<in> Avoid s A\<close>. However,
this requires the following
reformulation, as explained in \S\ref{sec:ind-var-in-prems} above;
the \<open>rule_format\<close> directive undoes the reformulation after the proof.
@@ -53,37 +52,36 @@
done
text\<open>\noindent
-The base case (@{prop"t = s"}) is trivial and proved by \<open>blast\<close>.
-In the induction step, we have an infinite @{term A}-avoiding path @{term f}
-starting from @{term u}, a successor of @{term t}. Now we simply instantiate
+The base case (\<^prop>\<open>t = s\<close>) is trivial and proved by \<open>blast\<close>.
+In the induction step, we have an infinite \<^term>\<open>A\<close>-avoiding path \<^term>\<open>f\<close>
+starting from \<^term>\<open>u\<close>, a successor of \<^term>\<open>t\<close>. Now we simply instantiate
the \<open>\<forall>f\<in>Paths t\<close> in the induction hypothesis by the path starting with
-@{term t} and continuing with @{term f}. That is what the above $\lambda$-term
-expresses. Simplification shows that this is a path starting with @{term t}
+\<^term>\<open>t\<close> and continuing with \<^term>\<open>f\<close>. That is what the above $\lambda$-term
+expresses. Simplification shows that this is a path starting with \<^term>\<open>t\<close>
and that the instantiated induction hypothesis implies the conclusion.
-Now we come to the key lemma. Assuming that no infinite @{term A}-avoiding
-path starts from @{term s}, we want to show @{prop"s \<in> lfp(af A)"}. For the
-inductive proof this must be generalized to the statement that every point @{term t}
-``between'' @{term s} and @{term A}, in other words all of @{term"Avoid s A"},
-is contained in @{term"lfp(af A)"}:
+Now we come to the key lemma. Assuming that no infinite \<^term>\<open>A\<close>-avoiding
+path starts from \<^term>\<open>s\<close>, we want to show \<^prop>\<open>s \<in> lfp(af A)\<close>. For the
+inductive proof this must be generalized to the statement that every point \<^term>\<open>t\<close>
+``between'' \<^term>\<open>s\<close> and \<^term>\<open>A\<close>, in other words all of \<^term>\<open>Avoid s A\<close>,
+is contained in \<^term>\<open>lfp(af A)\<close>:
\<close>
lemma Avoid_in_lfp[rule_format(no_asm)]:
"\<forall>p\<in>Paths s. \<exists>i. p i \<in> A \<Longrightarrow> t \<in> Avoid s A \<longrightarrow> t \<in> lfp(af A)"
txt\<open>\noindent
-The proof is by induction on the ``distance'' between @{term t} and @{term
-A}. Remember that @{prop"lfp(af A) = A \<union> M\<inverse> `` lfp(af A)"}.
-If @{term t} is already in @{term A}, then @{prop"t \<in> lfp(af A)"} is
-trivial. If @{term t} is not in @{term A} but all successors are in
-@{term"lfp(af A)"} (induction hypothesis), then @{prop"t \<in> lfp(af A)"} is
+The proof is by induction on the ``distance'' between \<^term>\<open>t\<close> and \<^term>\<open>A\<close>. Remember that \<^prop>\<open>lfp(af A) = A \<union> M\<inverse> `` lfp(af A)\<close>.
+If \<^term>\<open>t\<close> is already in \<^term>\<open>A\<close>, then \<^prop>\<open>t \<in> lfp(af A)\<close> is
+trivial. If \<^term>\<open>t\<close> is not in \<^term>\<open>A\<close> but all successors are in
+\<^term>\<open>lfp(af A)\<close> (induction hypothesis), then \<^prop>\<open>t \<in> lfp(af A)\<close> is
again trivial.
The formal counterpart of this proof sketch is a well-founded induction
-on~@{term M} restricted to @{term"Avoid s A - A"}, roughly speaking:
+on~\<^term>\<open>M\<close> restricted to \<^term>\<open>Avoid s A - A\<close>, roughly speaking:
@{term[display]"{(y,x). (x,y) \<in> M \<and> x \<in> Avoid s A \<and> x \<notin> A}"}
-As we shall see presently, the absence of infinite @{term A}-avoiding paths
-starting from @{term s} implies well-foundedness of this relation. For the
+As we shall see presently, the absence of infinite \<^term>\<open>A\<close>-avoiding paths
+starting from \<^term>\<open>s\<close> implies well-foundedness of this relation. For the
moment we assume this and proceed with the induction:
\<close>
@@ -94,16 +92,16 @@
txt\<open>\noindent
@{subgoals[display,indent=0,margin=65]}
-Now the induction hypothesis states that if @{prop"t \<notin> A"}
-then all successors of @{term t} that are in @{term"Avoid s A"} are in
-@{term"lfp (af A)"}. Unfolding @{term lfp} in the conclusion of the first
-subgoal once, we have to prove that @{term t} is in @{term A} or all successors
-of @{term t} are in @{term"lfp (af A)"}. But if @{term t} is not in @{term A},
+Now the induction hypothesis states that if \<^prop>\<open>t \<notin> A\<close>
+then all successors of \<^term>\<open>t\<close> that are in \<^term>\<open>Avoid s A\<close> are in
+\<^term>\<open>lfp (af A)\<close>. Unfolding \<^term>\<open>lfp\<close> in the conclusion of the first
+subgoal once, we have to prove that \<^term>\<open>t\<close> is in \<^term>\<open>A\<close> or all successors
+of \<^term>\<open>t\<close> are in \<^term>\<open>lfp (af A)\<close>. But if \<^term>\<open>t\<close> is not in \<^term>\<open>A\<close>,
the second
-@{const Avoid}-rule implies that all successors of @{term t} are in
-@{term"Avoid s A"}, because we also assume @{prop"t \<in> Avoid s A"}.
-Hence, by the induction hypothesis, all successors of @{term t} are indeed in
-@{term"lfp(af A)"}. Mechanically:
+\<^const>\<open>Avoid\<close>-rule implies that all successors of \<^term>\<open>t\<close> are in
+\<^term>\<open>Avoid s A\<close>, because we also assume \<^prop>\<open>t \<in> Avoid s A\<close>.
+Hence, by the induction hypothesis, all successors of \<^term>\<open>t\<close> are indeed in
+\<^term>\<open>lfp(af A)\<close>. Mechanically:
\<close>
apply(subst lfp_unfold[OF mono_af])
@@ -113,12 +111,11 @@
txt\<open>
Having proved the main goal, we return to the proof obligation that the
relation used above is indeed well-founded. This is proved by contradiction: if
-the relation is not well-founded then there exists an infinite @{term
-A}-avoiding path all in @{term"Avoid s A"}, by theorem
+the relation is not well-founded then there exists an infinite \<^term>\<open>A\<close>-avoiding path all in \<^term>\<open>Avoid s A\<close>, by theorem
@{thm[source]wf_iff_no_infinite_down_chain}:
@{thm[display]wf_iff_no_infinite_down_chain[no_vars]}
From lemma @{thm[source]ex_infinite_path} the existence of an infinite
-@{term A}-avoiding path starting in @{term s} follows, contradiction.
+\<^term>\<open>A\<close>-avoiding path starting in \<^term>\<open>s\<close> follows, contradiction.
\<close>
apply(erule contrapos_pp)
@@ -136,9 +133,9 @@
into a \<open>\<And>p\<close>, which would complicate matters below. As it is,
@{thm[source]Avoid_in_lfp} is now
@{thm[display]Avoid_in_lfp[no_vars]}
-The main theorem is simply the corollary where @{prop"t = s"},
-when the assumption @{prop"t \<in> Avoid s A"} is trivially true
-by the first @{const Avoid}-rule. Isabelle confirms this:%
+The main theorem is simply the corollary where \<^prop>\<open>t = s\<close>,
+when the assumption \<^prop>\<open>t \<in> Avoid s A\<close> is trivially true
+by the first \<^const>\<open>Avoid\<close>-rule. Isabelle confirms this:%
\index{CTL|)}\<close>
theorem AF_lemma2: "{s. \<forall>p \<in> Paths s. \<exists> i. p i \<in> A} \<subseteq> lfp(af A)"
--- a/src/Doc/Tutorial/CTL/PDL.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/CTL/PDL.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,8 +36,8 @@
text\<open>\noindent
The first three equations should be self-explanatory. The temporal formula
-@{term"AX f"} means that @{term f} is true in \emph{A}ll ne\emph{X}t states whereas
-@{term"EF f"} means that there \emph{E}xists some \emph{F}uture state in which @{term f} is
+\<^term>\<open>AX f\<close> means that \<^term>\<open>f\<close> is true in \emph{A}ll ne\emph{X}t states whereas
+\<^term>\<open>EF f\<close> means that there \emph{E}xists some \emph{F}uture state in which \<^term>\<open>f\<close> is
true. The future is expressed via \<open>\<^sup>*\<close>, the reflexive transitive
closure. Because of reflexivity, the future includes the present.
@@ -53,17 +53,17 @@
"mc(EF f) = lfp(\<lambda>T. mc f \<union> (M\<inverse> `` T))"
text\<open>\noindent
-Only the equation for @{term EF} deserves some comments. Remember that the
+Only the equation for \<^term>\<open>EF\<close> deserves some comments. Remember that the
postfix \<open>\<inverse>\<close> and the infix \<open>``\<close> are predefined and denote the
converse of a relation and the image of a set under a relation. Thus
-@{term "M\<inverse> `` T"} is the set of all predecessors of @{term T} and the least
-fixed point (@{term lfp}) of @{term"\<lambda>T. mc f \<union> M\<inverse> `` T"} is the least set
-@{term T} containing @{term"mc f"} and all predecessors of @{term T}. If you
-find it hard to see that @{term"mc(EF f)"} contains exactly those states from
-which there is a path to a state where @{term f} is true, do not worry --- this
+\<^term>\<open>M\<inverse> `` T\<close> is the set of all predecessors of \<^term>\<open>T\<close> and the least
+fixed point (\<^term>\<open>lfp\<close>) of \<^term>\<open>\<lambda>T. mc f \<union> M\<inverse> `` T\<close> is the least set
+\<^term>\<open>T\<close> containing \<^term>\<open>mc f\<close> and all predecessors of \<^term>\<open>T\<close>. If you
+find it hard to see that \<^term>\<open>mc(EF f)\<close> contains exactly those states from
+which there is a path to a state where \<^term>\<open>f\<close> is true, do not worry --- this
will be proved in a moment.
-First we prove monotonicity of the function inside @{term lfp}
+First we prove monotonicity of the function inside \<^term>\<open>lfp\<close>
in order to make sure it really has a least fixed point.
\<close>
@@ -92,7 +92,7 @@
txt\<open>\noindent
Simplification leaves us with the following first subgoal
@{subgoals[display,indent=0,goals_limit=1]}
-which is proved by @{term lfp}-induction:
+which is proved by \<^term>\<open>lfp\<close>-induction:
\<close>
apply(erule lfp_induct_set)
@@ -123,15 +123,15 @@
txt\<open>\noindent
After simplification and clarification we are left with
@{subgoals[display,indent=0,goals_limit=1]}
-This goal is proved by induction on @{term"(s,t)\<in>M\<^sup>*"}. But since the model
-checker works backwards (from @{term t} to @{term s}), we cannot use the
+This goal is proved by induction on \<^term>\<open>(s,t)\<in>M\<^sup>*\<close>. But since the model
+checker works backwards (from \<^term>\<open>t\<close> to \<^term>\<open>s\<close>), we cannot use the
induction theorem @{thm[source]rtrancl_induct}: it works in the
forward direction. Fortunately the converse induction theorem
@{thm[source]converse_rtrancl_induct} already exists:
@{thm[display,margin=60]converse_rtrancl_induct[no_vars]}
-It says that if @{prop"(a,b)\<in>r\<^sup>*"} and we know @{prop"P b"} then we can infer
-@{prop"P a"} provided each step backwards from a predecessor @{term z} of
-@{term b} preserves @{term P}.
+It says that if \<^prop>\<open>(a,b)\<in>r\<^sup>*\<close> and we know \<^prop>\<open>P b\<close> then we can infer
+\<^prop>\<open>P a\<close> provided each step backwards from a predecessor \<^term>\<open>z\<close> of
+\<^term>\<open>b\<close> preserves \<^term>\<open>P\<close>.
\<close>
apply(erule converse_rtrancl_induct)
@@ -139,7 +139,7 @@
txt\<open>\noindent
The base case
@{subgoals[display,indent=0,goals_limit=1]}
-is solved by unrolling @{term lfp} once
+is solved by unrolling \<^term>\<open>lfp\<close> once
\<close>
apply(subst lfp_unfold[OF mono_ef])
@@ -171,15 +171,15 @@
text\<open>
\begin{exercise}
-@{term AX} has a dual operator @{term EN}
+\<^term>\<open>AX\<close> has a dual operator \<^term>\<open>EN\<close>
(``there exists a next state such that'')%
\footnote{We cannot use the customary \<open>EX\<close>: it is reserved
as the \textsc{ascii}-equivalent of \<open>\<exists>\<close>.}
with the intended semantics
@{prop[display]"(s \<Turnstile> EN f) = (\<exists>t. (s,t) \<in> M \<and> t \<Turnstile> f)"}
-Fortunately, @{term"EN f"} can already be expressed as a PDL formula. How?
+Fortunately, \<^term>\<open>EN f\<close> can already be expressed as a PDL formula. How?
-Show that the semantics for @{term EF} satisfies the following recursion equation:
+Show that the semantics for \<^term>\<open>EF\<close> satisfies the following recursion equation:
@{prop[display]"(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> EN(EF f))"}
\end{exercise}
\index{PDL|)}
--- a/src/Doc/Tutorial/CodeGen/CodeGen.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/CodeGen/CodeGen.thy Sat Jan 05 17:24:33 2019 +0100
@@ -62,12 +62,12 @@
| Apply f \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))"
text\<open>\noindent
-Recall that @{term"hd"} and @{term"tl"}
+Recall that \<^term>\<open>hd\<close> and \<^term>\<open>tl\<close>
return the first element and the remainder of a list.
Because all functions are total, \cdx{hd} is defined even for the empty
list, although we do not know what the result is. Thus our model of the
machine always terminates properly, although the definition above does not
-tell us much about the result in situations where @{term"Apply"} was executed
+tell us much about the result in situations where \<^term>\<open>Apply\<close> was executed
with fewer than two elements on the stack.
The compiler is a function from expressions to a list of instructions. Its
@@ -92,7 +92,7 @@
theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
txt\<open>\noindent
-It will be proved by induction on @{term"e"} followed by simplification.
+It will be proved by induction on \<^term>\<open>e\<close> followed by simplification.
First, we must prove a lemma about executing the concatenation of two
instruction sequences:
\<close>
@@ -101,7 +101,7 @@
"\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)"
txt\<open>\noindent
-This requires induction on @{term"xs"} and ordinary simplification for the
+This requires induction on \<^term>\<open>xs\<close> and ordinary simplification for the
base cases. In the induction step, simplification leaves us with a formula
that contains two \<open>case\<close>-expressions over instructions. Thus we add
automatic case splitting, which finishes the proof:
@@ -122,7 +122,7 @@
text\<open>\noindent
Although this is more compact, it is less clear for the reader of the proof.
-We could now go back and prove @{prop"exec (compile e) s [] = [value e s]"}
+We could now go back and prove \<^prop>\<open>exec (compile e) s [] = [value e s]\<close>
merely by simplification with the generalized version we just proved.
However, this is unnecessary because the generalized version fully subsumes
its instance.%
--- a/src/Doc/Tutorial/Datatype/ABexpr.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Datatype/ABexpr.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,8 +29,8 @@
text\<open>\noindent
Type \<open>aexp\<close> is similar to \<open>expr\<close> in \S\ref{sec:ExprCompiler},
except that we have added an \<open>IF\<close> constructor,
-fixed the values to be of type @{typ"nat"} and declared the two binary
-operations \<open>Sum\<close> and @{term"Diff"}. Boolean
+fixed the values to be of type \<^typ>\<open>nat\<close> and declared the two binary
+operations \<open>Sum\<close> and \<^term>\<open>Diff\<close>. Boolean
expressions can be arithmetic comparisons, conjunctions and negations.
The semantics is given by two evaluation functions:
\<close>
@@ -51,11 +51,11 @@
text\<open>\noindent
Both take an expression and an environment (a mapping from variables
-@{typ"'a"} to values @{typ"nat"}) and return its arithmetic/boolean
+\<^typ>\<open>'a\<close> to values \<^typ>\<open>nat\<close>) and return its arithmetic/boolean
value. Since the datatypes are mutually recursive, so are functions
that operate on them. Hence they need to be defined in a single
\isacommand{primrec} section. Notice the \isakeyword{and} separating
-the declarations of @{const evala} and @{const evalb}. Their defining
+the declarations of \<^const>\<open>evala\<close> and \<^const>\<open>evalb\<close>. Their defining
equations need not be split into two groups;
the empty line is purely for readability.
@@ -78,8 +78,8 @@
text\<open>\noindent
Their first argument is a function mapping variables to expressions, the
substitution. It is applied to all variables in the second argument. As a
-result, the type of variables in the expression may change from @{typ"'a"}
-to @{typ"'b"}. Note that there are only arithmetic and no boolean variables.
+result, the type of variables in the expression may change from \<^typ>\<open>'a\<close>
+to \<^typ>\<open>'b\<close>. Note that there are only arithmetic and no boolean variables.
Now we can prove a fundamental theorem about the interaction between
evaluation and substitution: applying a substitution $s$ to an expression $a$
@@ -111,13 +111,13 @@
\end{isabelle}
\begin{exercise}
- Define a function \<open>norma\<close> of type @{typ"'a aexp => 'a aexp"} that
- replaces @{term"IF"}s with complex boolean conditions by nested
- @{term"IF"}s; it should eliminate the constructors
- @{term"And"} and @{term"Neg"}, leaving only @{term"Less"}.
+ Define a function \<open>norma\<close> of type \<^typ>\<open>'a aexp => 'a aexp\<close> that
+ replaces \<^term>\<open>IF\<close>s with complex boolean conditions by nested
+ \<^term>\<open>IF\<close>s; it should eliminate the constructors
+ \<^term>\<open>And\<close> and \<^term>\<open>Neg\<close>, leaving only \<^term>\<open>Less\<close>.
Prove that \<open>norma\<close>
preserves the value of an expression and that the result of \<open>norma\<close>
- is really normal, i.e.\ no more @{term"And"}s and @{term"Neg"}s occur in
+ is really normal, i.e.\ no more \<^term>\<open>And\<close>s and \<^term>\<open>Neg\<close>s occur in
it. ({\em Hint:} proceed as in \S\ref{sec:boolex} and read the discussion
of type annotations following lemma \<open>subst_id\<close> below).
\end{exercise}
--- a/src/Doc/Tutorial/Datatype/Fundata.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Datatype/Fundata.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4,19 +4,19 @@
datatype (dead 'a,'i) bigtree = Tip | Br 'a "'i \<Rightarrow> ('a,'i)bigtree"
text\<open>\noindent
-Parameter @{typ"'a"} is the type of values stored in
-the @{term Br}anches of the tree, whereas @{typ"'i"} is the index
-type over which the tree branches. If @{typ"'i"} is instantiated to
-@{typ"bool"}, the result is a binary tree; if it is instantiated to
-@{typ"nat"}, we have an infinitely branching tree because each node
+Parameter \<^typ>\<open>'a\<close> is the type of values stored in
+the \<^term>\<open>Br\<close>anches of the tree, whereas \<^typ>\<open>'i\<close> is the index
+type over which the tree branches. If \<^typ>\<open>'i\<close> is instantiated to
+\<^typ>\<open>bool\<close>, the result is a binary tree; if it is instantiated to
+\<^typ>\<open>nat\<close>, we have an infinitely branching tree because each node
has as many subtrees as there are natural numbers. How can we possibly
write down such a tree? Using functional notation! For example, the term
@{term[display]"Br (0::nat) (\<lambda>i. Br i (\<lambda>n. Tip))"}
-of type @{typ"(nat,nat)bigtree"} is the tree whose
+of type \<^typ>\<open>(nat,nat)bigtree\<close> is the tree whose
root is labeled with 0 and whose $i$th subtree is labeled with $i$ and
-has merely @{term"Tip"}s as further subtrees.
+has merely \<^term>\<open>Tip\<close>s as further subtrees.
-Function @{term"map_bt"} applies a function to all labels in a \<open>bigtree\<close>:
+Function \<^term>\<open>map_bt\<close> applies a function to all labels in a \<open>bigtree\<close>:
\<close>
primrec map_bt :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a,'i)bigtree \<Rightarrow> ('b,'i)bigtree"
@@ -25,11 +25,11 @@
"map_bt f (Br a F) = Br (f a) (\<lambda>i. map_bt f (F i))"
text\<open>\noindent This is a valid \isacommand{primrec} definition because the
-recursive calls of @{term"map_bt"} involve only subtrees of
-@{term"F"}, which is itself a subterm of the left-hand side. Thus termination
+recursive calls of \<^term>\<open>map_bt\<close> involve only subtrees of
+\<^term>\<open>F\<close>, which is itself a subterm of the left-hand side. Thus termination
is assured. The seasoned functional programmer might try expressing
-@{term"%i. map_bt f (F i)"} as @{term"map_bt f o F"}, which Isabelle
-however will reject. Applying @{term"map_bt"} to only one of its arguments
+\<^term>\<open>%i. map_bt f (F i)\<close> as \<^term>\<open>map_bt f o F\<close>, which Isabelle
+however will reject. Applying \<^term>\<open>map_bt\<close> to only one of its arguments
makes the termination proof less obvious.
The following lemma has a simple proof by induction:\<close>
--- a/src/Doc/Tutorial/Datatype/Nested.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Datatype/Nested.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,10 +17,10 @@
text\<open>\noindent
Note that we need to quote \<open>term\<close> on the left to avoid confusion with
the Isabelle command \isacommand{term}.
-Parameter @{typ"'v"} is the type of variables and @{typ"'f"} the type of
+Parameter \<^typ>\<open>'v\<close> is the type of variables and \<^typ>\<open>'f\<close> the type of
function symbols.
-A mathematical term like $f(x,g(y))$ becomes @{term"App f [Var x, App g
- [Var y]]"}, where @{term f}, @{term g}, @{term x}, @{term y} are
+A mathematical term like $f(x,g(y))$ becomes \<^term>\<open>App f [Var x, App g
+ [Var y]]\<close>, where \<^term>\<open>f\<close>, \<^term>\<open>g\<close>, \<^term>\<open>x\<close>, \<^term>\<open>y\<close> are
suitable values, e.g.\ numbers or strings.
What complicates the definition of \<open>term\<close> is the nested occurrence of
@@ -71,8 +71,8 @@
done
text\<open>\noindent
-Note that @{term Var} is the identity substitution because by definition it
-leaves variables unchanged: @{prop"subst Var (Var x) = Var x"}. Note also
+Note that \<^term>\<open>Var\<close> is the identity substitution because by definition it
+leaves variables unchanged: \<^prop>\<open>subst Var (Var x) = Var x\<close>. Note also
that the type annotations are necessary because otherwise there is nothing in
the goal to enforce that both halves of the goal talk about the same type
parameters \<open>('v,'f)\<close>. As a result, induction would fail
@@ -87,16 +87,16 @@
its definition is found in theorem @{thm[source]o_def}).
\end{exercise}
\begin{exercise}\label{ex:trev-trev}
- Define a function @{term trev} of type @{typ"('v,'f)term => ('v,'f)term"}
+ Define a function \<^term>\<open>trev\<close> of type \<^typ>\<open>('v,'f)term => ('v,'f)term\<close>
that recursively reverses the order of arguments of all function symbols in a
- term. Prove that @{prop"trev(trev t) = t"}.
+ term. Prove that \<^prop>\<open>trev(trev t) = t\<close>.
\end{exercise}
The experienced functional programmer may feel that our definition of
-@{term subst} is too complicated in that @{const substs} is
-unnecessary. The @{term App}-case can be defined directly as
+\<^term>\<open>subst\<close> is too complicated in that \<^const>\<open>substs\<close> is
+unnecessary. The \<^term>\<open>App\<close>-case can be defined directly as
@{term[display]"subst s (App f ts) = App f (map (subst s) ts)"}
-where @{term"map"} is the standard list function such that
+where \<^term>\<open>map\<close> is the standard list function such that
\<open>map f [x1,...,xn] = [f x1,...,f xn]\<close>. This is true, but Isabelle
insists on the conjunctive format. Fortunately, we can easily \emph{prove}
that the suggested equation holds:
@@ -140,9 +140,8 @@
declare subst_App [simp del]
-text\<open>\noindent The advantage is that now we have replaced @{const
-substs} by @{const map}, we can profit from the large number of
-pre-proved lemmas about @{const map}. Unfortunately, inductive proofs
+text\<open>\noindent The advantage is that now we have replaced \<^const>\<open>substs\<close> by \<^const>\<open>map\<close>, we can profit from the large number of
+pre-proved lemmas about \<^const>\<open>map\<close>. Unfortunately, inductive proofs
about type \<open>term\<close> are still awkward because they expect a
conjunction. One could derive a new induction principle as well (see
\S\ref{sec:derive-ind}), but simpler is to stop using
--- a/src/Doc/Tutorial/Documents/Documents.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Documents/Documents.thy Sat Jan 05 17:24:33 2019 +0100
@@ -52,11 +52,11 @@
The keyword \isakeyword{infixl} seen above specifies an
infix operator that is nested to the \emph{left}: in iterated
applications the more complex expression appears on the left-hand
- side, and @{term "A [+] B [+] C"} stands for \<open>(A [+] B) [+]
+ side, and \<^term>\<open>A [+] B [+] C\<close> stands for \<open>(A [+] B) [+]
C\<close>. Similarly, \isakeyword{infixr} means nesting to the
- \emph{right}, reading @{term "A [+] B [+] C"} as \<open>A [+] (B
+ \emph{right}, reading \<^term>\<open>A [+] B [+] C\<close> as \<open>A [+] (B
[+] C)\<close>. A \emph{non-oriented} declaration via \isakeyword{infix}
- would render @{term "A [+] B [+] C"} illegal, but demand explicit
+ would render \<^term>\<open>A [+] B [+] C\<close> illegal, but demand explicit
parentheses to indicate the intended grouping.
The string @{text [source] "[+]"} in our annotation refers to the
@@ -127,7 +127,7 @@
{\small\noindent \<^verbatim>\<open>\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>\<close>}
\medskip
- \noindent is recognized as the term @{term "\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>"}
+ \noindent is recognized as the term \<^term>\<open>\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>\<close>
by Isabelle.
Replacing our previous definition of \<open>xor\<close> by the
@@ -195,9 +195,8 @@
\noindent Here the mixfix annotations on the rightmost column happen
to consist of a single Isabelle symbol each: \verb,\,\verb,<euro>,,
\verb,\,\verb,<pounds>,, \verb,\,\verb,<yen>,, and \verb,$,. Recall
- that a constructor like \<open>Euro\<close> actually is a function @{typ
- "nat \<Rightarrow> currency"}. The expression \<open>Euro 10\<close> will be
- printed as @{term "\<euro> 10"}; only the head of the application is
+ that a constructor like \<open>Euro\<close> actually is a function \<^typ>\<open>nat \<Rightarrow> currency\<close>. The expression \<open>Euro 10\<close> will be
+ printed as \<^term>\<open>\<euro> 10\<close>; only the head of the application is
subject to our concrete syntax. This rather simple form already
achieves conformance with notational standards of the European
Commission.
@@ -222,15 +221,15 @@
A typical use of abbreviations is to introduce relational notation for
membership in a set of pairs, replacing \<open>(x, y) \<in> sim\<close> by
\<open>x \<approx> y\<close>. We assume that a constant \<open>sim\<close> of type
-@{typ"('a \<times> 'a) set"} has been introduced at this point.\<close>
+\<^typ>\<open>('a \<times> 'a) set\<close> has been introduced at this point.\<close>
(*<*)consts sim :: "('a \<times> 'a) set"(*>*)
abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<approx>" 50)
where "x \<approx> y \<equiv> (x, y) \<in> sim"
text \<open>\noindent The given meta-equality is used as a rewrite rule
-after parsing (replacing \mbox{@{prop"x \<approx> y"}} by \<open>(x,y) \<in>
+after parsing (replacing \mbox{\<^prop>\<open>x \<approx> y\<close>} by \<open>(x,y) \<in>
sim\<close>) and before printing (turning \<open>(x,y) \<in> sim\<close> back into
-\mbox{@{prop"x \<approx> y"}}). The name of the dummy constant \<open>sim2\<close>
+\mbox{\<^prop>\<open>x \<approx> y\<close>}). The name of the dummy constant \<open>sim2\<close>
does not matter, as long as it is unique.
Another common application of abbreviations is to
@@ -284,8 +283,7 @@
text \<open>
The following datatype definition of \<open>'a bintree\<close> models
- binary trees with nodes being decorated by elements of type @{typ
- 'a}.
+ binary trees with nodes being decorated by elements of type \<^typ>\<open>'a\<close>.
\<close>
datatype 'a bintree =
@@ -529,7 +527,7 @@
for types, terms, or theorems as in the formal part of a theory.
\medskip This sentence demonstrates quotations and antiquotations:
- @{term "%x y. x"} is a well-typed term.
+ \<^term>\<open>%x y. x\<close> is a well-typed term.
\medskip\noindent The output above was produced as follows:
\begin{ttbox}
--- a/src/Doc/Tutorial/Fun/fun0.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Fun/fun0.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,7 +18,7 @@
This resembles ordinary functional programming languages. Note the obligatory
\isacommand{where} and \isa{|}. Command \isacommand{fun} declares and
defines the function in one go. Isabelle establishes termination automatically
-because @{const fib}'s argument decreases in every recursive call.
+because \<^const>\<open>fib\<close>'s argument decreases in every recursive call.
Slightly more interesting is the insertion of a fixed element
between any two elements of a list:
@@ -53,9 +53,9 @@
text\<open>\noindent
To guarantee that the second equation can only be applied if the first
one does not match, Isabelle internally replaces the second equation
-by the two possibilities that are left: @{prop"sep1 a [] = []"} and
-@{prop"sep1 a [x] = [x]"}. Thus the functions @{const sep} and
-@{const sep1} are identical.
+by the two possibilities that are left: \<^prop>\<open>sep1 a [] = []\<close> and
+\<^prop>\<open>sep1 a [x] = [x]\<close>. Thus the functions \<^const>\<open>sep\<close> and
+\<^const>\<open>sep1\<close> are identical.
Because of its pattern matching syntax, \isacommand{fun} is also useful
for the definition of non-recursive functions:
@@ -77,7 +77,7 @@
\subsection{Termination}
Isabelle's automatic termination prover for \isacommand{fun} has a
-fixed notion of the \emph{size} (of type @{typ nat}) of an
+fixed notion of the \emph{size} (of type \<^typ>\<open>nat\<close>) of an
argument. The size of a natural number is the number itself. The size
of a list is its length. For the general case see \S\ref{sec:general-datatype}.
A recursive function is accepted if \isacommand{fun} can
@@ -124,14 +124,14 @@
languages and our simplifier don't do that. Unfortunately the simplifier does
something else that leads to the same problem: it splits
each \<open>if\<close>-expression unless its
-condition simplifies to @{term True} or @{term False}. For
+condition simplifies to \<^term>\<open>True\<close> or \<^term>\<open>False\<close>. For
example, simplification reduces
@{prop[display]"gcd m n = k"}
in one step to
@{prop[display]"(if n=0 then m else gcd n (m mod n)) = k"}
where the condition cannot be reduced further, and splitting leads to
@{prop[display]"(n=0 --> m=k) & (n ~= 0 --> gcd n (m mod n)=k)"}
-Since the recursive call @{term"gcd n (m mod n)"} is no longer protected by
+Since the recursive call \<^term>\<open>gcd n (m mod n)\<close> is no longer protected by
an \<open>if\<close>, it is unfolded again, which leads to an infinite chain of
simplification steps. Fortunately, this problem can be avoided in many
different ways.
@@ -143,7 +143,7 @@
\<open>if\<close> is involved.
If possible, the definition should be given by pattern matching on the left
-rather than \<open>if\<close> on the right. In the case of @{term gcd} the
+rather than \<open>if\<close> on the right. In the case of \<^term>\<open>gcd\<close> the
following alternative definition suggests itself:
\<close>
@@ -153,11 +153,11 @@
text\<open>\noindent
The order of equations is important: it hides the side condition
-@{prop"n ~= (0::nat)"}. Unfortunately, not all conditionals can be
+\<^prop>\<open>n ~= (0::nat)\<close>. Unfortunately, not all conditionals can be
expressed by pattern matching.
A simple alternative is to replace \<open>if\<close> by \<open>case\<close>,
-which is also available for @{typ bool} and is not split automatically:
+which is also available for \<^typ>\<open>bool\<close> and is not split automatically:
\<close>
fun gcd2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
@@ -168,7 +168,7 @@
always available.
A final alternative is to replace the offending simplification rules by
-derived conditional ones. For @{term gcd} it means we have to prove
+derived conditional ones. For \<^term>\<open>gcd\<close> it means we have to prove
these lemmas:
\<close>
@@ -181,7 +181,7 @@
done
text\<open>\noindent
-Simplification terminates for these proofs because the condition of the \<open>if\<close> simplifies to @{term True} or @{term False}.
+Simplification terminates for these proofs because the condition of the \<open>if\<close> simplifies to \<^term>\<open>True\<close> or \<^term>\<open>False\<close>.
Now we can disable the original simplification rule:
\<close>
@@ -205,22 +205,22 @@
requires you to prove for each \isacommand{fun} equation that the property
you are trying to establish holds for the left-hand side provided it holds
for all recursive calls on the right-hand side. Here is a simple example
-involving the predefined @{term"map"} functional on lists:
+involving the predefined \<^term>\<open>map\<close> functional on lists:
\<close>
lemma "map f (sep x xs) = sep (f x) (map f xs)"
txt\<open>\noindent
-Note that @{term"map f xs"}
-is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
-this lemma by recursion induction over @{term"sep"}:
+Note that \<^term>\<open>map f xs\<close>
+is the result of applying \<^term>\<open>f\<close> to all elements of \<^term>\<open>xs\<close>. We prove
+this lemma by recursion induction over \<^term>\<open>sep\<close>:
\<close>
apply(induct_tac x xs rule: sep.induct)
txt\<open>\noindent
The resulting proof state has three subgoals corresponding to the three
-clauses for @{term"sep"}:
+clauses for \<^term>\<open>sep\<close>:
@{subgoals[display,indent=0]}
The rest is pure simplification:
\<close>
@@ -229,7 +229,7 @@
done
text\<open>\noindent The proof goes smoothly because the induction rule
-follows the recursion of @{const sep}. Try proving the above lemma by
+follows the recursion of \<^const>\<open>sep\<close>. Try proving the above lemma by
structural induction, and you find that you need an additional case
distinction.
@@ -247,10 +247,10 @@
~~{\isasymAnd}a~x~y~zs.~P~a~(y~\#~zs)~{\isasymLongrightarrow}~P~a~(x~\#~y~\#~zs){\isasymrbrakk}\isanewline
{\isasymLongrightarrow}~P~u~v%
\end{isabelle}
-It merely says that in order to prove a property @{term"P"} of @{term"u"} and
-@{term"v"} you need to prove it for the three cases where @{term"v"} is the
+It merely says that in order to prove a property \<^term>\<open>P\<close> of \<^term>\<open>u\<close> and
+\<^term>\<open>v\<close> you need to prove it for the three cases where \<^term>\<open>v\<close> is the
empty list, the singleton list, and the list with at least two elements.
-The final case has an induction hypothesis: you may assume that @{term"P"}
+The final case has an induction hypothesis: you may assume that \<^term>\<open>P\<close>
holds for the tail of that list.
\index{induction!recursion|)}
\index{recursion induction|)}
--- a/src/Doc/Tutorial/Ifexpr/Ifexpr.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Ifexpr/Ifexpr.thy Sat Jan 05 17:24:33 2019 +0100
@@ -22,17 +22,17 @@
| And boolex boolex
text\<open>\noindent
-The two constants are represented by @{term"Const True"} and
-@{term"Const False"}. Variables are represented by terms of the form
-@{term"Var n"}, where @{term"n"} is a natural number (type @{typ"nat"}).
+The two constants are represented by \<^term>\<open>Const True\<close> and
+\<^term>\<open>Const False\<close>. Variables are represented by terms of the form
+\<^term>\<open>Var n\<close>, where \<^term>\<open>n\<close> is a natural number (type \<^typ>\<open>nat\<close>).
For example, the formula $P@0 \land \neg P@1$ is represented by the term
-@{term"And (Var 0) (Neg(Var 1))"}.
+\<^term>\<open>And (Var 0) (Neg(Var 1))\<close>.
\subsubsection{The Value of a Boolean Expression}
The value of a boolean expression depends on the value of its variables.
Hence the function \<open>value\<close> takes an additional parameter, an
-\emph{environment} of type @{typ"nat => bool"}, which maps variables to their
+\emph{environment} of type \<^typ>\<open>nat => bool\<close>, which maps variables to their
values:
\<close>
@@ -47,14 +47,14 @@
An alternative and often more efficient (because in a certain sense
canonical) representation are so-called \emph{If-expressions} built up
-from constants (@{term"CIF"}), variables (@{term"VIF"}) and conditionals
-(@{term"IF"}):
+from constants (\<^term>\<open>CIF\<close>), variables (\<^term>\<open>VIF\<close>) and conditionals
+(\<^term>\<open>IF\<close>):
\<close>
datatype ifex = CIF bool | VIF nat | IF ifex ifex ifex
text\<open>\noindent
-The evaluation of If-expressions proceeds as for @{typ"boolex"}:
+The evaluation of If-expressions proceeds as for \<^typ>\<open>boolex\<close>:
\<close>
primrec valif :: "ifex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
@@ -66,9 +66,9 @@
text\<open>
\subsubsection{Converting Boolean and If-Expressions}
-The type @{typ"boolex"} is close to the customary representation of logical
-formulae, whereas @{typ"ifex"} is designed for efficiency. It is easy to
-translate from @{typ"boolex"} into @{typ"ifex"}:
+The type \<^typ>\<open>boolex\<close> is close to the customary representation of logical
+formulae, whereas \<^typ>\<open>ifex\<close> is designed for efficiency. It is easy to
+translate from \<^typ>\<open>boolex\<close> into \<^typ>\<open>ifex\<close>:
\<close>
primrec bool2if :: "boolex \<Rightarrow> ifex" where
@@ -78,7 +78,7 @@
"bool2if (And b c) = IF (bool2if b) (bool2if c) (CIF False)"
text\<open>\noindent
-At last, we have something we can verify: that @{term"bool2if"} preserves the
+At last, we have something we can verify: that \<^term>\<open>bool2if\<close> preserves the
value of its argument:
\<close>
@@ -97,10 +97,10 @@
not show them below.
More interesting is the transformation of If-expressions into a normal form
-where the first argument of @{term"IF"} cannot be another @{term"IF"} but
+where the first argument of \<^term>\<open>IF\<close> cannot be another \<^term>\<open>IF\<close> but
must be a constant or variable. Such a normal form can be computed by
-repeatedly replacing a subterm of the form @{term"IF (IF b x y) z u"} by
-@{term"IF b (IF x z u) (IF y z u)"}, which has the same value. The following
+repeatedly replacing a subterm of the form \<^term>\<open>IF (IF b x y) z u\<close> by
+\<^term>\<open>IF b (IF x z u) (IF y z u)\<close>, which has the same value. The following
primitive recursive functions perform this task:
\<close>
@@ -124,7 +124,7 @@
text\<open>\noindent
The proof is canonical, provided we first show the following simplification
-lemma, which also helps to understand what @{term"normif"} does:
+lemma, which also helps to understand what \<^term>\<open>normif\<close> does:
\<close>
lemma [simp]:
@@ -141,7 +141,7 @@
Note that the lemma does not have a name, but is implicitly used in the proof
of the theorem shown above because of the \<open>[simp]\<close> attribute.
-But how can we be sure that @{term"norm"} really produces a normal form in
+But how can we be sure that \<^term>\<open>norm\<close> really produces a normal form in
the above sense? We define a function that tests If-expressions for normality:
\<close>
@@ -152,8 +152,8 @@
(case b of CIF b \<Rightarrow> True | VIF x \<Rightarrow> True | IF x y z \<Rightarrow> False))"
text\<open>\noindent
-Now we prove @{term"normal(norm b)"}. Of course, this requires a lemma about
-normality of @{term"normif"}:
+Now we prove \<^term>\<open>normal(norm b)\<close>. Of course, this requires a lemma about
+normality of \<^term>\<open>normif\<close>:
\<close>
lemma [simp]: "\<forall>t e. normal(normif b t e) = (normal t \<and> normal e)"
@@ -174,8 +174,8 @@
\S\ref{sec:InductionHeuristics}
\begin{exercise}
- We strengthen the definition of a @{const normal} If-expression as follows:
- the first argument of all @{term IF}s must be a variable. Adapt the above
+ We strengthen the definition of a \<^const>\<open>normal\<close> If-expression as follows:
+ the first argument of all \<^term>\<open>IF\<close>s must be a variable. Adapt the above
development to this changed requirement. (Hint: you may need to formulate
some of the goals as implications (\<open>\<longrightarrow>\<close>) rather than
equalities (\<open>=\<close>).)
--- a/src/Doc/Tutorial/Inductive/AB.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Inductive/AB.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,8 +19,8 @@
At the end we say a few words about the relationship between
the original proof @{cite \<open>p.\ts81\<close> HopcroftUllman} and our formal version.
-We start by fixing the alphabet, which consists only of @{term a}'s
-and~@{term b}'s:
+We start by fixing the alphabet, which consists only of \<^term>\<open>a\<close>'s
+and~\<^term>\<open>b\<close>'s:
\<close>
datatype alfa = a | b
@@ -33,11 +33,11 @@
by (case_tac x, auto)
text\<open>\noindent
-Words over this alphabet are of type @{typ"alfa list"}, and
+Words over this alphabet are of type \<^typ>\<open>alfa list\<close>, and
the three nonterminals are declared as sets of such words.
The productions above are recast as a \emph{mutual} inductive
definition\index{inductive definition!simultaneous}
-of @{term S}, @{term A} and~@{term B}:
+of \<^term>\<open>S\<close>, \<^term>\<open>A\<close> and~\<^term>\<open>B\<close>:
\<close>
inductive_set
@@ -56,11 +56,9 @@
| "\<lbrakk> v \<in> B; w \<in> B \<rbrakk> \<Longrightarrow> a#v@w \<in> B"
text\<open>\noindent
-First we show that all words in @{term S} contain the same number of @{term
-a}'s and @{term b}'s. Since the definition of @{term S} is by mutual
+First we show that all words in \<^term>\<open>S\<close> contain the same number of \<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s. Since the definition of \<^term>\<open>S\<close> is by mutual
induction, so is the proof: we show at the same time that all words in
-@{term A} contain one more @{term a} than @{term b} and all words in @{term
-B} contain one more @{term b} than @{term a}.
+\<^term>\<open>A\<close> contain one more \<^term>\<open>a\<close> than \<^term>\<open>b\<close> and all words in \<^term>\<open>B\<close> contain one more \<^term>\<open>b\<close> than \<^term>\<open>a\<close>.
\<close>
lemma correctness:
@@ -69,9 +67,8 @@
(w \<in> B \<longrightarrow> size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1)"
txt\<open>\noindent
-These propositions are expressed with the help of the predefined @{term
-filter} function on lists, which has the convenient syntax \<open>[x\<leftarrow>xs. P
-x]\<close>, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
+These propositions are expressed with the help of the predefined \<^term>\<open>filter\<close> function on lists, which has the convenient syntax \<open>[x\<leftarrow>xs. P
+x]\<close>, the list of all elements \<^term>\<open>x\<close> in \<^term>\<open>xs\<close> such that \<^prop>\<open>P x\<close>
holds. Remember that on lists \<open>size\<close> and \<open>length\<close> are synonymous.
The proof itself is by rule induction and afterwards automatic:
@@ -87,28 +84,27 @@
than~$b$'s.
As usual, the correctness of syntactic descriptions is easy, but completeness
-is hard: does @{term S} contain \emph{all} words with an equal number of
-@{term a}'s and @{term b}'s? It turns out that this proof requires the
-following lemma: every string with two more @{term a}'s than @{term
-b}'s can be cut somewhere such that each half has one more @{term a} than
-@{term b}. This is best seen by imagining counting the difference between the
-number of @{term a}'s and @{term b}'s starting at the left end of the
+is hard: does \<^term>\<open>S\<close> contain \emph{all} words with an equal number of
+\<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s? It turns out that this proof requires the
+following lemma: every string with two more \<^term>\<open>a\<close>'s than \<^term>\<open>b\<close>'s can be cut somewhere such that each half has one more \<^term>\<open>a\<close> than
+\<^term>\<open>b\<close>. This is best seen by imagining counting the difference between the
+number of \<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s starting at the left end of the
word. We start with 0 and end (at the right end) with 2. Since each move to the
right increases or decreases the difference by 1, we must have passed through
1 on our way from 0 to 2. Formally, we appeal to the following discrete
intermediate value theorem @{thm[source]nat0_intermed_int_val}
@{thm[display,margin=60]nat0_intermed_int_val[no_vars]}
-where @{term f} is of type @{typ"nat \<Rightarrow> int"}, @{typ int} are the integers,
+where \<^term>\<open>f\<close> is of type \<^typ>\<open>nat \<Rightarrow> int\<close>, \<^typ>\<open>int\<close> are the integers,
\<open>\<bar>.\<bar>\<close> is the absolute value function\footnote{See
Table~\ref{tab:ascii} in the Appendix for the correct \textsc{ascii}
-syntax.}, and @{term"1::int"} is the integer 1 (see \S\ref{sec:numbers}).
+syntax.}, and \<^term>\<open>1::int\<close> is the integer 1 (see \S\ref{sec:numbers}).
First we show that our specific function, the difference between the
-numbers of @{term a}'s and @{term b}'s, does indeed only change by 1 in every
-move to the right. At this point we also start generalizing from @{term a}'s
-and @{term b}'s to an arbitrary property @{term P}. Otherwise we would have
+numbers of \<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s, does indeed only change by 1 in every
+move to the right. At this point we also start generalizing from \<^term>\<open>a\<close>'s
+and \<^term>\<open>b\<close>'s to an arbitrary property \<^term>\<open>P\<close>. Otherwise we would have
to prove the desired lemma twice, once as stated above and once with the
-roles of @{term a}'s and @{term b}'s interchanged.
+roles of \<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s interchanged.
\<close>
lemma step1: "\<forall>i < size w.
@@ -117,13 +113,13 @@
txt\<open>\noindent
The lemma is a bit hard to read because of the coercion function
-\<open>int :: nat \<Rightarrow> int\<close>. It is required because @{term size} returns
-a natural number, but subtraction on type~@{typ nat} will do the wrong thing.
-Function @{term take} is predefined and @{term"take i xs"} is the prefix of
-length @{term i} of @{term xs}; below we also need @{term"drop i xs"}, which
-is what remains after that prefix has been dropped from @{term xs}.
+\<open>int :: nat \<Rightarrow> int\<close>. It is required because \<^term>\<open>size\<close> returns
+a natural number, but subtraction on type~\<^typ>\<open>nat\<close> will do the wrong thing.
+Function \<^term>\<open>take\<close> is predefined and \<^term>\<open>take i xs\<close> is the prefix of
+length \<^term>\<open>i\<close> of \<^term>\<open>xs\<close>; below we also need \<^term>\<open>drop i xs\<close>, which
+is what remains after that prefix has been dropped from \<^term>\<open>xs\<close>.
-The proof is by induction on @{term w}, with a trivial base case, and a not
+The proof is by induction on \<^term>\<open>w\<close>, with a trivial base case, and a not
so trivial induction step. Since it is essentially just arithmetic, we do not
discuss it.
\<close>
@@ -151,8 +147,8 @@
text\<open>\noindent
-Lemma @{thm[source]part1} tells us only about the prefix @{term"take i w"}.
-An easy lemma deals with the suffix @{term"drop i w"}:
+Lemma @{thm[source]part1} tells us only about the prefix \<^term>\<open>take i w\<close>.
+An easy lemma deals with the suffix \<^term>\<open>drop i w\<close>:
\<close>
@@ -182,8 +178,8 @@
This could have been done earlier but was not necessary so far.
The completeness theorem tells us that if a word has the same number of
-@{term a}'s and @{term b}'s, then it is in @{term S}, and similarly
-for @{term A} and @{term B}:
+\<^term>\<open>a\<close>'s and \<^term>\<open>b\<close>'s, then it is in \<^term>\<open>S\<close>, and similarly
+for \<^term>\<open>A\<close> and \<^term>\<open>B\<close>:
\<close>
theorem completeness:
@@ -192,10 +188,10 @@
(size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1 \<longrightarrow> w \<in> B)"
txt\<open>\noindent
-The proof is by induction on @{term w}. Structural induction would fail here
+The proof is by induction on \<^term>\<open>w\<close>. Structural induction would fail here
because, as we can see from the grammar, we need to make bigger steps than
merely appending a single letter at the front. Hence we induct on the length
-of @{term w}, using the induction rule @{thm[source]length_induct}:
+of \<^term>\<open>w\<close>, using the induction rule @{thm[source]length_induct}:
\<close>
apply(induct_tac w rule: length_induct)
@@ -205,11 +201,11 @@
The \<open>rule\<close> parameter tells \<open>induct_tac\<close> explicitly which induction
rule to use. For details see \S\ref{sec:complete-ind} below.
In this case the result is that we may assume the lemma already
-holds for all words shorter than @{term w}. Because the induction step renames
+holds for all words shorter than \<^term>\<open>w\<close>. Because the induction step renames
the induction variable we rename it back to \<open>w\<close>.
-The proof continues with a case distinction on @{term w},
-on whether @{term w} is empty or not.
+The proof continues with a case distinction on \<^term>\<open>w\<close>,
+on whether \<^term>\<open>w\<close> is empty or not.
\<close>
apply(case_tac w)
@@ -219,13 +215,12 @@
txt\<open>\noindent
Simplification disposes of the base case and leaves only a conjunction
of two step cases to be proved:
-if @{prop"w = a#v"} and @{prop[display]"size[x\<in>v. x=a] = size[x\<in>v. x=b]+2"} then
-@{prop"b#v \<in> A"}, and similarly for @{prop"w = b#v"}.
+if \<^prop>\<open>w = a#v\<close> and @{prop[display]"size[x\<in>v. x=a] = size[x\<in>v. x=b]+2"} then
+\<^prop>\<open>b#v \<in> A\<close>, and similarly for \<^prop>\<open>w = b#v\<close>.
We only consider the first case in detail.
After breaking the conjunction up into two cases, we can apply
-@{thm[source]part1} to the assumption that @{term w} contains two more @{term
-a}'s than @{term b}'s.
+@{thm[source]part1} to the assumption that \<^term>\<open>w\<close> contains two more \<^term>\<open>a\<close>'s than \<^term>\<open>b\<close>'s.
\<close>
apply(rule conjI)
@@ -233,7 +228,7 @@
apply(frule part1[of "\<lambda>x. x=a", simplified])
apply(clarify)
txt\<open>\noindent
-This yields an index @{prop"i \<le> length v"} such that
+This yields an index \<^prop>\<open>i \<le> length v\<close> such that
@{prop[display]"length [x\<leftarrow>take i v . x = a] = length [x\<leftarrow>take i v . x = b] + 1"}
With the help of @{thm[source]part2} it follows that
@{prop[display]"length [x\<leftarrow>drop i v . x = a] = length [x\<leftarrow>drop i v . x = b] + 1"}
@@ -243,31 +238,31 @@
apply(assumption)
txt\<open>\noindent
-Now it is time to decompose @{term v} in the conclusion @{prop"b#v \<in> A"}
-into @{term"take i v @ drop i v"},
+Now it is time to decompose \<^term>\<open>v\<close> in the conclusion \<^prop>\<open>b#v \<in> A\<close>
+into \<^term>\<open>take i v @ drop i v\<close>,
\<close>
apply(rule_tac n1=i and t=v in subst[OF append_take_drop_id])
txt\<open>\noindent
-(the variables @{term n1} and @{term t} are the result of composing the
+(the variables \<^term>\<open>n1\<close> and \<^term>\<open>t\<close> are the result of composing the
theorems @{thm[source]subst} and @{thm[source]append_take_drop_id})
after which the appropriate rule of the grammar reduces the goal
-to the two subgoals @{prop"take i v \<in> A"} and @{prop"drop i v \<in> A"}:
+to the two subgoals \<^prop>\<open>take i v \<in> A\<close> and \<^prop>\<open>drop i v \<in> A\<close>:
\<close>
apply(rule S_A_B.intros)
txt\<open>
-Both subgoals follow from the induction hypothesis because both @{term"take i
-v"} and @{term"drop i v"} are shorter than @{term w}:
+Both subgoals follow from the induction hypothesis because both \<^term>\<open>take i
+v\<close> and \<^term>\<open>drop i v\<close> are shorter than \<^term>\<open>w\<close>:
\<close>
apply(force simp add: min_less_iff_disj)
apply(force split: nat_diff_split)
txt\<open>
-The case @{prop"w = b#v"} is proved analogously:
+The case \<^prop>\<open>w = b#v\<close> is proved analogously:
\<close>
apply(clarify)
--- a/src/Doc/Tutorial/Inductive/Advanced.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Inductive/Advanced.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,7 +36,7 @@
datatype integer_op = Number int | UnaryMinus | Plus
text \<open>
-Now the type @{typ "integer_op gterm"} denotes the ground
+Now the type \<^typ>\<open>integer_op gterm\<close> denotes the ground
terms built over those symbols.
The type constructor \<open>gterm\<close> can be generalized to a function
@@ -54,7 +54,7 @@
A universal quantifier in the premise of the introduction rule
expresses that every element of \<open>args\<close> belongs
to our inductively defined set: is a ground term
-over~\<open>F\<close>. The function @{term set} denotes the set of elements in a given
+over~\<open>F\<close>. The function \<^term>\<open>set\<close> denotes the set of elements in a given
list.
\<close>
@@ -67,7 +67,7 @@
text \<open>
To demonstrate a proof from this definition, let us
-show that the function @{term gterms}
+show that the function \<^term>\<open>gterms\<close>
is \textbf{monotone}. We shall need this concept shortly.
\<close>
@@ -86,7 +86,7 @@
enlarging the set of function symbols enlarges the set of ground
terms. The proof is a trivial rule induction.
First we use the \<open>clarify\<close> method to assume the existence of an element of
-@{term "gterms F"}. (We could have used \<open>intro subsetI\<close>.) We then
+\<^term>\<open>gterms F\<close>. (We could have used \<open>intro subsetI\<close>.) We then
apply rule induction. Here is the resulting subgoal:
@{subgoals[display,indent=0]}
The assumptions state that \<open>f\<close> belongs
@@ -138,11 +138,11 @@
inductively defined set through an arbitrary monotone function. To
demonstrate this powerful feature, let us
change the inductive definition above, replacing the
-quantifier by a use of the function @{term lists}. This
+quantifier by a use of the function \<^term>\<open>lists\<close>. This
function, from the Isabelle theory of lists, is analogous to the
-function @{term gterms} declared above: if \<open>A\<close> is a set then
-@{term "lists A"} is the set of lists whose elements belong to
-@{term A}.
+function \<^term>\<open>gterms\<close> declared above: if \<open>A\<close> is a set then
+\<^term>\<open>lists A\<close> is the set of lists whose elements belong to
+\<^term>\<open>A\<close>.
In the inductive definition of well-formed terms, examine the one
introduction rule. The first premise states that \<open>args\<close> belongs to
@@ -161,7 +161,7 @@
text \<open>
We cite the theorem \<open>lists_mono\<close> to justify
-using the function @{term lists}.%
+using the function \<^term>\<open>lists\<close>.%
\footnote{This particular theorem is installed by default already, but we
include the \isakeyword{monos} declaration in order to illustrate its syntax.}
@{named_thms [display,indent=0] lists_mono [no_vars] (lists_mono)}
@@ -177,8 +177,8 @@
construction process to converge.
The following pair of rules do not constitute an inductive definition:
\begin{trivlist}
-\item @{term "0 \<in> even"}
-\item @{term "n \<notin> even \<Longrightarrow> (Suc n) \<in> even"}
+\item \<^term>\<open>0 \<in> even\<close>
+\item \<^term>\<open>n \<notin> even \<Longrightarrow> (Suc n) \<in> even\<close>
\end{trivlist}
Showing that 4 is even using these rules requires showing that 3 is not
even. It is far from trivial to show that this set of rules
@@ -187,9 +187,9 @@
Even with its use of the function \isa{lists}, the premise of our
introduction rule is positive:
@{thm [display,indent=0] (prem 1) step [no_vars]}
-To apply the rule we construct a list @{term args} of previously
+To apply the rule we construct a list \<^term>\<open>args\<close> of previously
constructed well-formed terms. We obtain a
-new term, @{term "Apply f args"}. Because @{term lists} is monotone,
+new term, \<^term>\<open>Apply f args\<close>. Because \<^term>\<open>lists\<close> is monotone,
applications of the rule remain valid as new terms are constructed.
Further lists of well-formed
terms become available and none are taken away.%
@@ -216,7 +216,7 @@
(*>*)
txt \<open>
The \<open>clarify\<close> method gives
-us an element of @{term "well_formed_gterm arity"} on which to perform
+us an element of \<^term>\<open>well_formed_gterm arity\<close> on which to perform
induction. The resulting subgoal can be proved automatically:
@{subgoals[display,indent=0]}
This proof resembles the one given in
@@ -238,16 +238,16 @@
The proof script is virtually identical,
but the subgoal after applying induction may be surprising:
@{subgoals[display,indent=0,margin=65]}
-The induction hypothesis contains an application of @{term lists}. Using a
+The induction hypothesis contains an application of \<^term>\<open>lists\<close>. Using a
monotone function in the inductive definition always has this effect. The
subgoal may look uninviting, but fortunately
-@{term lists} distributes over intersection:
+\<^term>\<open>lists\<close> distributes over intersection:
@{named_thms [display,indent=0] lists_Int_eq [no_vars] (lists_Int_eq)}
Thanks to this default simplification rule, the induction hypothesis
is quickly replaced by its two parts:
\begin{trivlist}
-\item @{term "args \<in> lists (well_formed_gterm' arity)"}
-\item @{term "args \<in> lists (well_formed_gterm arity)"}
+\item \<^term>\<open>args \<in> lists (well_formed_gterm' arity)\<close>
+\item \<^term>\<open>args \<in> lists (well_formed_gterm arity)\<close>
\end{trivlist}
Invoking the rule \<open>well_formed_gterm.step\<close> completes the proof. The
call to \<open>auto\<close> does all this work.
@@ -265,12 +265,12 @@
text \<open>
\index{rule inversion|(}%
-Does @{term gterms} distribute over intersection? We have proved that this
+Does \<^term>\<open>gterms\<close> distribute over intersection? We have proved that this
function is monotone, so \<open>mono_Int\<close> gives one of the inclusions. The
-opposite inclusion asserts that if @{term t} is a ground term over both of the
+opposite inclusion asserts that if \<^term>\<open>t\<close> is a ground term over both of the
sets
-@{term F} and~@{term G} then it is also a ground term over their intersection,
-@{term "F \<inter> G"}.
+\<^term>\<open>F\<close> and~\<^term>\<open>G\<close> then it is also a ground term over their intersection,
+\<^term>\<open>F \<inter> G\<close>.
\<close>
lemma gterms_IntI:
@@ -278,7 +278,7 @@
(*<*)oops(*>*)
text \<open>
Attempting this proof, we get the assumption
-@{term "Apply f args \<in> gterms G"}, which cannot be broken down.
+\<^term>\<open>Apply f args \<in> gterms G\<close>, which cannot be broken down.
It looks like a job for rule inversion:\cmmdx{inductive\protect\_cases}
\<close>
@@ -287,10 +287,10 @@
text \<open>
Here is the result.
@{named_thms [display,indent=0,margin=50] gterm_Apply_elim [no_vars] (gterm_Apply_elim)}
-This rule replaces an assumption about @{term "Apply f args"} by
-assumptions about @{term f} and~@{term args}.
+This rule replaces an assumption about \<^term>\<open>Apply f args\<close> by
+assumptions about \<^term>\<open>f\<close> and~\<^term>\<open>args\<close>.
No cases are discarded (there was only one to begin
-with) but the rule applies specifically to the pattern @{term "Apply f args"}.
+with) but the rule applies specifically to the pattern \<^term>\<open>Apply f args\<close>.
It can be applied repeatedly as an elimination rule without looping, so we
have given the \<open>elim!\<close> attribute.
@@ -308,14 +308,14 @@
(*>*)
txt \<open>
The proof begins with rule induction over the definition of
-@{term gterms}, which leaves a single subgoal:
+\<^term>\<open>gterms\<close>, which leaves a single subgoal:
@{subgoals[display,indent=0,margin=65]}
-To prove this, we assume @{term "Apply f args \<in> gterms G"}. Rule inversion,
+To prove this, we assume \<^term>\<open>Apply f args \<in> gterms G\<close>. Rule inversion,
in the form of \<open>gterm_Apply_elim\<close>, infers
-that every element of @{term args} belongs to
-@{term "gterms G"}; hence (by the induction hypothesis) it belongs
-to @{term "gterms (F \<inter> G)"}. Rule inversion also yields
-@{term "f \<in> G"} and hence @{term "f \<in> F \<inter> G"}.
+that every element of \<^term>\<open>args\<close> belongs to
+\<^term>\<open>gterms G\<close>; hence (by the induction hypothesis) it belongs
+to \<^term>\<open>gterms (F \<inter> G)\<close>. Rule inversion also yields
+\<^term>\<open>f \<in> G\<close> and hence \<^term>\<open>f \<in> F \<inter> G\<close>.
All of this reasoning is done by \<open>blast\<close>.
\smallskip
--- a/src/Doc/Tutorial/Inductive/Even.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Inductive/Even.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,7 +29,7 @@
An inductive definition consists of introduction rules. The first one
above states that 0 is even; the second states that if $n$ is even, then so
is~$n+2$. Given this declaration, Isabelle generates a fixed point
-definition for @{term even} and proves theorems about it,
+definition for \<^term>\<open>even\<close> and proves theorems about it,
thus following the definitional approach (see {\S}\ref{sec:definitional}).
These theorems
include the introduction rules specified in the declaration, an elimination
@@ -85,23 +85,23 @@
text \<open>
\index{rule induction|(}%
From the definition of the set
-@{term even}, Isabelle has
+\<^term>\<open>even\<close>, Isabelle has
generated an induction rule:
@{named_thms [display,indent=0,margin=40] even.induct [no_vars] (even.induct)}
-A property @{term P} holds for every even number provided it
+A property \<^term>\<open>P\<close> holds for every even number provided it
holds for~\<open>0\<close> and is closed under the operation
-\isa{Suc(Suc \(\cdot\))}. Then @{term P} is closed under the introduction
-rules for @{term even}, which is the least set closed under those rules.
+\isa{Suc(Suc \(\cdot\))}. Then \<^term>\<open>P\<close> is closed under the introduction
+rules for \<^term>\<open>even\<close>, which is the least set closed under those rules.
This type of inductive argument is called \textbf{rule induction}.
-Apart from the double application of @{term Suc}, the induction rule above
+Apart from the double application of \<^term>\<open>Suc\<close>, the induction rule above
resembles the familiar mathematical induction, which indeed is an instance
of rule induction; the natural numbers can be defined inductively to be
-the least set containing \<open>0\<close> and closed under~@{term Suc}.
+the least set containing \<open>0\<close> and closed under~\<^term>\<open>Suc\<close>.
Induction is the usual way of proving a property of the elements of an
inductively defined set. Let us prove that all members of the set
-@{term even} are multiples of two.
+\<^term>\<open>even\<close> are multiples of two.
\<close>
lemma even_imp_dvd: "n \<in> even \<Longrightarrow> 2 dvd n"
@@ -126,14 +126,14 @@
txt \<open>
@{subgoals[display,indent=0]}
To conclude, we tell Isabelle that the desired value is
-@{term "Suc k"}. With this hint, the subgoal falls to \<open>simp\<close>.
+\<^term>\<open>Suc k\<close>. With this hint, the subgoal falls to \<open>simp\<close>.
\<close>
apply (rule_tac x = "Suc k" in exI, simp)
(*<*)done(*>*)
text \<open>
Combining the previous two results yields our objective, the
-equivalence relating @{term even} and \<open>dvd\<close>.
+equivalence relating \<^term>\<open>even\<close> and \<open>dvd\<close>.
%
%we don't want [iff]: discuss?
\<close>
@@ -161,7 +161,7 @@
apply (erule even.induct)
(*>*)
txt \<open>
-Rule induction finds no occurrences of @{term "Suc(Suc n)"} in the
+Rule induction finds no occurrences of \<^term>\<open>Suc(Suc n)\<close> in the
conclusion, which it therefore leaves unchanged. (Look at
\<open>even.induct\<close> to see why this happens.) We have these subgoals:
@{subgoals[display,indent=0]}
@@ -185,8 +185,8 @@
This lemma is trivially inductive. Here are the subgoals:
@{subgoals[display,indent=0]}
The first is trivial because \<open>0 - 2\<close> simplifies to \<open>0\<close>, which is
-even. The second is trivial too: @{term "Suc (Suc n) - 2"} simplifies to
-@{term n}, matching the assumption.%
+even. The second is trivial too: \<^term>\<open>Suc (Suc n) - 2\<close> simplifies to
+\<^term>\<open>n\<close>, matching the assumption.%
\index{rule induction|)} %the sequel isn't really about induction
\medskip
@@ -216,21 +216,21 @@
automatically. Let us look at how rule inversion is done in
Isabelle/HOL\@.
-Recall that @{term even} is the minimal set closed under these two rules:
+Recall that \<^term>\<open>even\<close> is the minimal set closed under these two rules:
@{thm [display,indent=0] even.intros [no_vars]}
-Minimality means that @{term even} contains only the elements that these
-rules force it to contain. If we are told that @{term a}
+Minimality means that \<^term>\<open>even\<close> contains only the elements that these
+rules force it to contain. If we are told that \<^term>\<open>a\<close>
belongs to
-@{term even} then there are only two possibilities. Either @{term a} is \<open>0\<close>
-or else @{term a} has the form @{term "Suc(Suc n)"}, for some suitable @{term n}
+\<^term>\<open>even\<close> then there are only two possibilities. Either \<^term>\<open>a\<close> is \<open>0\<close>
+or else \<^term>\<open>a\<close> has the form \<^term>\<open>Suc(Suc n)\<close>, for some suitable \<^term>\<open>n\<close>
that belongs to
-@{term even}. That is the gist of the @{term cases} rule, which Isabelle proves
+\<^term>\<open>even\<close>. That is the gist of the \<^term>\<open>cases\<close> rule, which Isabelle proves
for us when it accepts an inductive definition:
@{named_thms [display,indent=0,margin=40] even.cases [no_vars] (even.cases)}
This general rule is less useful than instances of it for
-specific patterns. For example, if @{term a} has the form
-@{term "Suc(Suc n)"} then the first case becomes irrelevant, while the second
-case tells us that @{term n} belongs to @{term even}. Isabelle will generate
+specific patterns. For example, if \<^term>\<open>a\<close> has the form
+\<^term>\<open>Suc(Suc n)\<close> then the first case becomes irrelevant, while the second
+case tells us that \<^term>\<open>n\<close> belongs to \<^term>\<open>even\<close>. Isabelle will generate
this instance for us:
\<close>
@@ -242,7 +242,7 @@
@{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)}
Applying this as an elimination rule yields one case where \<open>even.cases\<close>
would yield two. Rule inversion works well when the conclusions of the
-introduction rules involve datatype constructors like @{term Suc} and \<open>#\<close>
+introduction rules involve datatype constructors like \<^term>\<open>Suc\<close> and \<open>#\<close>
(list ``cons''); freeness reasoning discards all but one or two cases.
In the \isacommand{inductive\_cases} command we supplied an
@@ -250,8 +250,8 @@
\index{elim"!@\isa {elim"!} (attribute)}%
indicating that this elimination rule can be
applied aggressively. The original
-@{term cases} rule would loop if used in that manner because the
-pattern~@{term a} matches everything.
+\<^term>\<open>cases\<close> rule would loop if used in that manner because the
+pattern~\<^term>\<open>a\<close> matches everything.
The rule \<open>Suc_Suc_cases\<close> is equivalent to the following implication:
@{term [display,indent=0] "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"}
--- a/src/Doc/Tutorial/Inductive/Mutual.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Inductive/Mutual.thy Sat Jan 05 17:24:33 2019 +0100
@@ -58,7 +58,7 @@
subsection\<open>Inductively Defined Predicates\label{sec:ind-predicates}\<close>
text\<open>\index{inductive predicates|(}
-Instead of a set of even numbers one can also define a predicate on @{typ nat}:
+Instead of a set of even numbers one can also define a predicate on \<^typ>\<open>nat\<close>:
\<close>
inductive evn :: "nat \<Rightarrow> bool" where
@@ -67,13 +67,13 @@
text\<open>\noindent Everything works as before, except that
you write \commdx{inductive} instead of \isacommand{inductive\_set} and
-@{prop"evn n"} instead of @{prop"n \<in> Even"}.
+\<^prop>\<open>evn n\<close> instead of \<^prop>\<open>n \<in> Even\<close>.
When defining an n-ary relation as a predicate, it is recommended to curry
the predicate: its type should be \mbox{\<open>\<tau>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<tau>\<^sub>n \<Rightarrow> bool\<close>}
rather than
\<open>\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<Rightarrow> bool\<close>. The curried version facilitates inductions.
-When should you choose sets and when predicates? If you intend to combine your notion with set theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the \<open>\<in>\<close> notation. But note that predicates of more than one argument cannot be combined with the usual set theoretic operators: @{term"P \<union> Q"} is not well-typed if \<open>P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool\<close>, you have to write @{term"%x y. P x y & Q x y"} instead.
+When should you choose sets and when predicates? If you intend to combine your notion with set theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the \<open>\<in>\<close> notation. But note that predicates of more than one argument cannot be combined with the usual set theoretic operators: \<^term>\<open>P \<union> Q\<close> is not well-typed if \<open>P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool\<close>, you have to write \<^term>\<open>%x y. P x y & Q x y\<close> instead.
\index{inductive predicates|)}
\<close>
--- a/src/Doc/Tutorial/Inductive/Star.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Inductive/Star.thy Sat Jan 05 17:24:33 2019 +0100
@@ -22,12 +22,12 @@
| rtc_step: "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
text\<open>\noindent
-The function @{term rtc} is annotated with concrete syntax: instead of
-\<open>rtc r\<close> we can write @{term"r*"}. The actual definition
+The function \<^term>\<open>rtc\<close> is annotated with concrete syntax: instead of
+\<open>rtc r\<close> we can write \<^term>\<open>r*\<close>. The actual definition
consists of two rules. Reflexivity is obvious and is immediately given the
\<open>iff\<close> attribute to increase automation. The
second rule, @{thm[source]rtc_step}, says that we can always add one more
-@{term r}-step to the left. Although we could make @{thm[source]rtc_step} an
+\<^term>\<open>r\<close>-step to the left. Although we could make @{thm[source]rtc_step} an
introduction rule, this is dangerous: the recursion in the second premise
slows down and may even kill the automatic tactics.
@@ -44,7 +44,7 @@
text\<open>\noindent
Although the lemma itself is an unremarkable consequence of the basic rules,
it has the advantage that it can be declared an introduction rule without the
-danger of killing the automatic tactics because @{term"r*"} occurs only in
+danger of killing the automatic tactics because \<^term>\<open>r*\<close> occurs only in
the conclusion and not in the premise. Thus some proofs that would otherwise
need @{thm[source]rtc_step} can now be found automatically. The proof also
shows that \<open>blast\<close> is able to handle @{thm[source]rtc_step}. But
@@ -72,19 +72,19 @@
To understand what is going on, let us look again at @{thm[source]rtc.induct}.
In the above application of \<open>erule\<close>, the first premise of
@{thm[source]rtc.induct} is unified with the first suitable assumption, which
-is @{term"(x,y) \<in> r*"} rather than @{term"(y,z) \<in> r*"}. Although that
+is \<^term>\<open>(x,y) \<in> r*\<close> rather than \<^term>\<open>(y,z) \<in> r*\<close>. Although that
is what we want, it is merely due to the order in which the assumptions occur
in the subgoal, which it is not good practice to rely on. As a result,
-\<open>?xb\<close> becomes @{term x}, \<open>?xa\<close> becomes
-@{term y} and \<open>?P\<close> becomes @{term"\<lambda>u v. (u,z) \<in> r*"}, thus
+\<open>?xb\<close> becomes \<^term>\<open>x\<close>, \<open>?xa\<close> becomes
+\<^term>\<open>y\<close> and \<open>?P\<close> becomes \<^term>\<open>\<lambda>u v. (u,z) \<in> r*\<close>, thus
yielding the above subgoal. So what went wrong?
When looking at the instantiation of \<open>?P\<close> we see that it does not
depend on its second parameter at all. The reason is that in our original
-goal, of the pair @{term"(x,y)"} only @{term x} appears also in the
-conclusion, but not @{term y}. Thus our induction statement is too
+goal, of the pair \<^term>\<open>(x,y)\<close> only \<^term>\<open>x\<close> appears also in the
+conclusion, but not \<^term>\<open>y\<close>. Thus our induction statement is too
general. Fortunately, it can easily be specialized:
-transfer the additional premise @{prop"(y,z)\<in>r*"} into the conclusion:\<close>
+transfer the additional premise \<^prop>\<open>(y,z)\<in>r*\<close> into the conclusion:\<close>
(*<*)oops(*>*)
lemma rtc_trans[rule_format]:
"(x,y) \<in> r* \<Longrightarrow> (y,z) \<in> r* \<longrightarrow> (x,z) \<in> r*"
@@ -114,9 +114,9 @@
done
text\<open>
-Let us now prove that @{term"r*"} is really the reflexive transitive closure
-of @{term r}, i.e.\ the least reflexive and transitive
-relation containing @{term r}. The latter is easily formalized
+Let us now prove that \<^term>\<open>r*\<close> is really the reflexive transitive closure
+of \<^term>\<open>r\<close>, i.e.\ the least reflexive and transitive
+relation containing \<^term>\<open>r\<close>. The latter is easily formalized
\<close>
inductive_set
@@ -151,7 +151,7 @@
transitivity. As a consequence, @{thm[source]rtc.induct} is simpler than
@{thm[source]rtc2.induct}. Since inductive proofs are hard enough
anyway, we should always pick the simplest induction schema available.
-Hence @{term rtc} is the definition of choice.
+Hence \<^term>\<open>rtc\<close> is the definition of choice.
\index{reflexive transitive closure!defining inductively|)}
\begin{exercise}\label{ex:converse-rtc-step}
@@ -160,7 +160,7 @@
\end{exercise}
\begin{exercise}
Repeat the development of this section, but starting with a definition of
-@{term rtc} where @{thm[source]rtc_step} is replaced by its converse as shown
+\<^term>\<open>rtc\<close> where @{thm[source]rtc_step} is replaced by its converse as shown
in exercise~\ref{ex:converse-rtc-step}.
\end{exercise}
\<close>
--- a/src/Doc/Tutorial/Misc/AdvancedInd.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/AdvancedInd.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
Often we have assumed that the theorem to be proved is already in a form
that is amenable to induction, but sometimes it isn't.
Here is an example.
-Since @{term"hd"} and @{term"last"} return the first and last element of a
+Since \<^term>\<open>hd\<close> and \<^term>\<open>last\<close> return the first and last element of a
non-empty list, this lemma looks easy to prove:
\<close>
@@ -35,11 +35,11 @@
\begin{isabelle}
\ 1.\ xs\ {\isasymnoteq}\ []\ {\isasymLongrightarrow}\ hd\ []\ =\ last\ []
\end{isabelle}
-We cannot prove this equality because we do not know what @{term hd} and
-@{term last} return when applied to @{term"[]"}.
+We cannot prove this equality because we do not know what \<^term>\<open>hd\<close> and
+\<^term>\<open>last\<close> return when applied to \<^term>\<open>[]\<close>.
We should not have ignored the warning. Because the induction
-formula is only the conclusion, induction does not affect the occurrence of @{term xs} in the premises.
+formula is only the conclusion, induction does not affect the occurrence of \<^term>\<open>xs\<close> in the premises.
Thus the case that should have been trivial
becomes unprovable. Fortunately, the solution is easy:\footnote{A similar
heuristic applies to rule inductions; see \S\ref{sec:rtc}.}
@@ -122,7 +122,7 @@
induction schema. In such cases a general-purpose induction schema can
be helpful. We show how to apply such induction schemas by an example.
-Structural induction on @{typ"nat"} is
+Structural induction on \<^typ>\<open>nat\<close> is
usually known as mathematical induction. There is also \textbf{complete}
\index{induction!complete}%
induction, where you prove $P(n)$ under the assumption that $P(m)$
@@ -145,15 +145,15 @@
point about methodology. If your example turns into a substantial proof
development, you should replace axioms by theorems.
\end{warn}\noindent
-The axiom for @{term"f"} implies @{prop"n <= f n"}, which can
-be proved by induction on \mbox{@{term"f n"}}. Following the recipe outlined
+The axiom for \<^term>\<open>f\<close> implies \<^prop>\<open>n <= f n\<close>, which can
+be proved by induction on \mbox{\<^term>\<open>f n\<close>}. Following the recipe outlined
above, we have to phrase the proposition as follows to allow induction:
\<close>
lemma f_incr_lem: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
txt\<open>\noindent
-To perform induction on @{term k} using @{thm[source]nat_less_induct}, we use
+To perform induction on \<^term>\<open>k\<close> using @{thm[source]nat_less_induct}, we use
the same general induction method as for recursion induction (see
\S\ref{sec:fun-induction}):
\<close>
@@ -164,7 +164,7 @@
We get the following proof state:
@{subgoals[display,indent=0,margin=65]}
After stripping the \<open>\<forall>i\<close>, the proof continues with a case
-distinction on @{term"i"}. The case @{prop"i = (0::nat)"} is trivial and we focus on
+distinction on \<^term>\<open>i\<close>. The case \<^prop>\<open>i = (0::nat)\<close> is trivial and we focus on
the other case:
\<close>
@@ -185,16 +185,16 @@
\rulename{le_less_trans}
\end{isabelle}
%
-The proof goes like this (writing @{term"j"} instead of @{typ"nat"}).
-Since @{prop"i = Suc j"} it suffices to show
-\hbox{@{prop"j < f(Suc j)"}},
+The proof goes like this (writing \<^term>\<open>j\<close> instead of \<^typ>\<open>nat\<close>).
+Since \<^prop>\<open>i = Suc j\<close> it suffices to show
+\hbox{\<^prop>\<open>j < f(Suc j)\<close>},
by @{thm[source]Suc_leI}\@. This is
-proved as follows. From @{thm[source]f_ax} we have @{prop"f (f j) < f (Suc j)"}
-(1) which implies @{prop"f j <= f (f j)"} by the induction hypothesis.
-Using (1) once more we obtain @{prop"f j < f(Suc j)"} (2) by the transitivity
+proved as follows. From @{thm[source]f_ax} we have \<^prop>\<open>f (f j) < f (Suc j)\<close>
+(1) which implies \<^prop>\<open>f j <= f (f j)\<close> by the induction hypothesis.
+Using (1) once more we obtain \<^prop>\<open>f j < f(Suc j)\<close> (2) by the transitivity
rule @{thm[source]le_less_trans}.
-Using the induction hypothesis once more we obtain @{prop"j <= f j"}
-which, together with (2) yields @{prop"j < f (Suc j)"} (again by
+Using the induction hypothesis once more we obtain \<^prop>\<open>j <= f j\<close>
+which, together with (2) yields \<^prop>\<open>j < f (Suc j)\<close> (again by
@{thm[source]le_less_trans}).
This last step shows both the power and the danger of automatic proofs. They
@@ -202,7 +202,7 @@
translate the internal proof into a human-readable format. Automatic
proofs are easy to write but hard to read and understand.
-The desired result, @{prop"i <= f i"}, follows from @{thm[source]f_incr_lem}:
+The desired result, \<^prop>\<open>i <= f i\<close>, follows from @{thm[source]f_incr_lem}:
\<close>
lemmas f_incr = f_incr_lem[rule_format, OF refl]
@@ -217,7 +217,7 @@
text\<open>
\begin{exercise}
-From the axiom and lemma for @{term"f"}, show that @{term"f"} is the
+From the axiom and lemma for \<^term>\<open>f\<close>, show that \<^term>\<open>f\<close> is the
identity function.
\end{exercise}
@@ -234,7 +234,7 @@
@{thm[display]length_induct[no_vars]}
which is a special case of @{thm[source]measure_induct}
@{thm[display]measure_induct[no_vars]}
-where @{term f} may be any function into type @{typ nat}.
+where \<^term>\<open>f\<close> may be any function into type \<^typ>\<open>nat\<close>.
\<close>
subsection\<open>Derivation of New Induction Schemas\<close>
@@ -244,7 +244,7 @@
Induction schemas are ordinary theorems and you can derive new ones
whenever you wish. This section shows you how, using the example
of @{thm[source]nat_less_induct}. Assume we only have structural induction
-available for @{typ"nat"} and want to derive complete induction. We
+available for \<^typ>\<open>nat\<close> and want to derive complete induction. We
must generalize the statement as shown:
\<close>
@@ -252,9 +252,9 @@
apply(induct_tac n)
txt\<open>\noindent
-The base case is vacuously true. For the induction step (@{prop"m <
-Suc n"}) we distinguish two cases: case @{prop"m < n"} is true by induction
-hypothesis and case @{prop"m = n"} follows from the assumption, again using
+The base case is vacuously true. For the induction step (\<^prop>\<open>m <
+Suc n\<close>) we distinguish two cases: case \<^prop>\<open>m < n\<close> is true by induction
+hypothesis and case \<^prop>\<open>m = n\<close> follows from the assumption, again using
the induction hypothesis:
\<close>
apply(blast)
@@ -266,8 +266,8 @@
Now it is straightforward to derive the original version of
@{thm[source]nat_less_induct} by manipulating the conclusion of the above
-lemma: instantiate @{term"n"} by @{term"Suc n"} and @{term"m"} by @{term"n"}
-and remove the trivial condition @{prop"n < Suc n"}. Fortunately, this
+lemma: instantiate \<^term>\<open>n\<close> by \<^term>\<open>Suc n\<close> and \<^term>\<open>m\<close> by \<^term>\<open>n\<close>
+and remove the trivial condition \<^prop>\<open>n < Suc n\<close>. Fortunately, this
happens automatically when we add the lemma as a new premise to the
desired goal:
\<close>
@@ -279,7 +279,7 @@
HOL already provides the mother of
all inductions, well-founded induction (see \S\ref{sec:Well-founded}). For
example theorem @{thm[source]nat_less_induct} is
-a special case of @{thm[source]wf_induct} where @{term r} is \<open><\<close> on
-@{typ nat}. The details can be found in theory \isa{Wellfounded_Recursion}.
+a special case of @{thm[source]wf_induct} where \<^term>\<open>r\<close> is \<open><\<close> on
+\<^typ>\<open>nat\<close>. The details can be found in theory \isa{Wellfounded_Recursion}.
\<close>
(*<*)end(*>*)
--- a/src/Doc/Tutorial/Misc/Itrev.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/Itrev.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,12 +25,12 @@
\begin{itemize}
\item \<open>@\<close> is recursive in
the first argument
-\item @{term xs} occurs only as the first argument of
+\item \<^term>\<open>xs\<close> occurs only as the first argument of
\<open>@\<close>
-\item both @{term ys} and @{term zs} occur at least once as
+\item both \<^term>\<open>ys\<close> and \<^term>\<open>zs\<close> occur at least once as
the second argument of \<open>@\<close>
\end{itemize}
-Hence it is natural to perform induction on~@{term xs}.
+Hence it is natural to perform induction on~\<^term>\<open>xs\<close>.
The key heuristic, and the main point of this section, is to
\emph{generalize the goal before induction}.
@@ -41,7 +41,7 @@
Function \cdx{rev} has quadratic worst-case running time
because it calls function \<open>@\<close> for each element of the list and
\<open>@\<close> is linear in its first argument. A linear time version of
-@{term"rev"} reqires an extra argument where the result is accumulated
+\<^term>\<open>rev\<close> reqires an extra argument where the result is accumulated
gradually, using only~\<open>#\<close>:
\<close>
@@ -53,10 +53,10 @@
The behaviour of \cdx{itrev} is simple: it reverses
its first argument by stacking its elements onto the second argument,
and returning that second argument when the first one becomes
-empty. Note that @{term"itrev"} is tail-recursive: it can be
+empty. Note that \<^term>\<open>itrev\<close> is tail-recursive: it can be
compiled into a loop.
-Naturally, we would like to show that @{term"itrev"} does indeed reverse
+Naturally, we would like to show that \<^term>\<open>itrev\<close> does indeed reverse
its first argument provided the second one is empty:
\<close>
@@ -73,33 +73,33 @@
the induction step:
@{subgoals[display,indent=0,margin=70]}
The induction hypothesis is too weak. The fixed
-argument,~@{term"[]"}, prevents it from rewriting the conclusion.
+argument,~\<^term>\<open>[]\<close>, prevents it from rewriting the conclusion.
This example suggests a heuristic:
\begin{quote}\index{generalizing induction formulae}%
\emph{Generalize goals for induction by replacing constants by variables.}
\end{quote}
-Of course one cannot do this na\"{\i}vely: @{term"itrev xs ys = rev xs"} is
+Of course one cannot do this na\"{\i}vely: \<^term>\<open>itrev xs ys = rev xs\<close> is
just not true. The correct generalization is
\<close>
(*<*)oops(*>*)
lemma "itrev xs ys = rev xs @ ys"
(*<*)apply(induct_tac xs, simp_all)(*>*)
txt\<open>\noindent
-If @{term"ys"} is replaced by @{term"[]"}, the right-hand side simplifies to
-@{term"rev xs"}, as required.
+If \<^term>\<open>ys\<close> is replaced by \<^term>\<open>[]\<close>, the right-hand side simplifies to
+\<^term>\<open>rev xs\<close>, as required.
In this instance it was easy to guess the right generalization.
Other situations can require a good deal of creativity.
-Although we now have two variables, only @{term"xs"} is suitable for
+Although we now have two variables, only \<^term>\<open>xs\<close> is suitable for
induction, and we repeat our proof attempt. Unfortunately, we are still
not there:
@{subgoals[display,indent=0,goals_limit=1]}
The induction hypothesis is still too weak, but this time it takes no
-intuition to generalize: the problem is that @{term"ys"} is fixed throughout
+intuition to generalize: the problem is that \<^term>\<open>ys\<close> is fixed throughout
the subgoal, but the induction hypothesis needs to be applied with
-@{term"a # ys"} instead of @{term"ys"}. Hence we prove the theorem
-for all @{term"ys"} instead of a fixed one:
+\<^term>\<open>a # ys\<close> instead of \<^term>\<open>ys\<close>. Hence we prove the theorem
+for all \<^term>\<open>ys\<close> instead of a fixed one:
\<close>
(*<*)oops(*>*)
lemma "\<forall>ys. itrev xs ys = rev xs @ ys"
@@ -108,7 +108,7 @@
(*>*)
text\<open>\noindent
-This time induction on @{term"xs"} followed by simplification succeeds. This
+This time induction on \<^term>\<open>xs\<close> followed by simplification succeeds. This
leads to another heuristic for generalization:
\begin{quote}
\emph{Generalize goals for induction by universally quantifying all free
@@ -121,16 +121,16 @@
those that change in recursive calls.
A final point worth mentioning is the orientation of the equation we just
-proved: the more complex notion (@{const itrev}) is on the left-hand
-side, the simpler one (@{term rev}) on the right-hand side. This constitutes
+proved: the more complex notion (\<^const>\<open>itrev\<close>) is on the left-hand
+side, the simpler one (\<^term>\<open>rev\<close>) on the right-hand side. This constitutes
another, albeit weak heuristic that is not restricted to induction:
\begin{quote}
\emph{The right-hand side of an equation should (in some sense) be simpler
than the left-hand side.}
\end{quote}
This heuristic is tricky to apply because it is not obvious that
-@{term"rev xs @ ys"} is simpler than @{term"itrev xs ys"}. But see what
-happens if you try to prove @{prop"rev xs @ ys = itrev xs ys"}!
+\<^term>\<open>rev xs @ ys\<close> is simpler than \<^term>\<open>itrev xs ys\<close>. But see what
+happens if you try to prove \<^prop>\<open>rev xs @ ys = itrev xs ys\<close>!
If you have tried these heuristics and still find your
induction does not go through, and no obvious lemma suggests itself, you may
--- a/src/Doc/Tutorial/Misc/Option2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/Option2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,11 +14,11 @@
text\<open>\noindent
Frequently one needs to add a distinguished element to some existing type.
For example, type \<open>t option\<close> can model the result of a computation that
-may either terminate with an error (represented by @{const None}) or return
-some value @{term v} (represented by @{term"Some v"}).
-Similarly, @{typ nat} extended with $\infty$ can be modeled by type
-@{typ"nat option"}. In both cases one could define a new datatype with
-customized constructors like @{term Error} and @{term Infinity},
+may either terminate with an error (represented by \<^const>\<open>None\<close>) or return
+some value \<^term>\<open>v\<close> (represented by \<^term>\<open>Some v\<close>).
+Similarly, \<^typ>\<open>nat\<close> extended with $\infty$ can be modeled by type
+\<^typ>\<open>nat option\<close>. In both cases one could define a new datatype with
+customized constructors like \<^term>\<open>Error\<close> and \<^term>\<open>Infinity\<close>,
but it is often simpler to use \<open>option\<close>. For an application see
\S\ref{sec:Trie}.
\<close>
--- a/src/Doc/Tutorial/Misc/Tree.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/Tree.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
"mirror (Node l x r) = Node (mirror r) x (mirror l)"(*>*)
text\<open>\noindent
-Define a function @{term"mirror"} that mirrors a binary tree
+Define a function \<^term>\<open>mirror\<close> that mirrors a binary tree
by swapping subtrees recursively. Prove
\<close>
@@ -28,7 +28,7 @@
(*>*)
text\<open>\noindent
-Define a function @{term"flatten"} that flattens a tree into a list
+Define a function \<^term>\<open>flatten\<close> that flattens a tree into a list
by traversing it in infix order. Prove
\<close>
--- a/src/Doc/Tutorial/Misc/Tree2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/Tree2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -3,9 +3,9 @@
(*>*)
text\<open>\noindent In Exercise~\ref{ex:Tree} we defined a function
-@{term"flatten"} from trees to lists. The straightforward version of
-@{term"flatten"} is based on \<open>@\<close> and is thus, like @{term"rev"},
-quadratic. A linear time version of @{term"flatten"} again reqires an extra
+\<^term>\<open>flatten\<close> from trees to lists. The straightforward version of
+\<^term>\<open>flatten\<close> is based on \<open>@\<close> and is thus, like \<^term>\<open>rev\<close>,
+quadratic. A linear time version of \<^term>\<open>flatten\<close> again reqires an extra
argument, the accumulator. Define\<close>
(*<*)primrec(*>*)flatten2 :: "'a tree \<Rightarrow> 'a list \<Rightarrow> 'a list"(*<*)where
"flatten2 Tip xs = xs" |
--- a/src/Doc/Tutorial/Misc/case_exprs.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/case_exprs.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,10 +8,10 @@
HOL also features \isa{case}-expressions for analyzing
elements of a datatype. For example,
@{term[display]"case xs of [] => [] | y#ys => y"}
-evaluates to @{term"[]"} if @{term"xs"} is @{term"[]"} and to @{term"y"} if
-@{term"xs"} is @{term"y#ys"}. (Since the result in both branches must be of
-the same type, it follows that @{term y} is of type @{typ"'a list"} and hence
-that @{term xs} is of type @{typ"'a list list"}.)
+evaluates to \<^term>\<open>[]\<close> if \<^term>\<open>xs\<close> is \<^term>\<open>[]\<close> and to \<^term>\<open>y\<close> if
+\<^term>\<open>xs\<close> is \<^term>\<open>y#ys\<close>. (Since the result in both branches must be of
+the same type, it follows that \<^term>\<open>y\<close> is of type \<^typ>\<open>'a list\<close> and hence
+that \<^term>\<open>xs\<close> is of type \<^typ>\<open>'a list list\<close>.)
In general, case expressions are of the form
\[
@@ -21,7 +21,7 @@
\end{array}
\]
Like in functional programming, patterns are expressions consisting of
-datatype constructors (e.g. @{term"[]"} and \<open>#\<close>)
+datatype constructors (e.g. \<^term>\<open>[]\<close> and \<open>#\<close>)
and variables, including the wildcard ``\verb$_$''.
Not all cases need to be covered and the order of cases matters.
However, one is well-advised not to wallow in complex patterns because
@@ -76,10 +76,10 @@
(\<open>case_tac\<close>) works for arbitrary terms, which need to be
quoted if they are non-atomic. However, apart from \<open>\<And>\<close>-bound
variables, the terms must not contain variables that are bound outside.
- For example, given the goal @{prop"\<forall>xs. xs = [] \<or> (\<exists>y ys. xs = y#ys)"},
+ For example, given the goal \<^prop>\<open>\<forall>xs. xs = [] \<or> (\<exists>y ys. xs = y#ys)\<close>,
\<open>case_tac xs\<close> will not work as expected because Isabelle interprets
- the @{term xs} as a new free variable distinct from the bound
- @{term xs} in the goal.
+ the \<^term>\<open>xs\<close> as a new free variable distinct from the bound
+ \<^term>\<open>xs\<close> in the goal.
\end{warn}
\<close>
--- a/src/Doc/Tutorial/Misc/natsum.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/natsum.thy Sat Jan 05 17:24:33 2019 +0100
@@ -28,9 +28,9 @@
\sdx{div}, \sdx{mod}, \cdx{min} and
\cdx{max} are predefined, as are the relations
\isadxboldpos{\isasymle}{$HOL2arithrel} and
-\isadxboldpos{<}{$HOL2arithrel}. As usual, @{prop"m-n = (0::nat)"} if
-@{prop"m<n"}. There is even a least number operation
-\sdx{LEAST}\@. For example, @{prop"(LEAST n. 0 < n) = Suc 0"}.
+\isadxboldpos{<}{$HOL2arithrel}. As usual, \<^prop>\<open>m-n = (0::nat)\<close> if
+\<^prop>\<open>m<n\<close>. There is even a least number operation
+\sdx{LEAST}\@. For example, \<^prop>\<open>(LEAST n. 0 < n) = Suc 0\<close>.
\begin{warn}\index{overloading}
The constants \cdx{0} and \cdx{1} and the operations
\isadxboldpos{+}{$HOL2arithfun}, \isadxboldpos{-}{$HOL2arithfun},
@@ -40,15 +40,15 @@
not just for natural numbers but for other types as well.
For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
that you are talking about natural numbers. Hence Isabelle can only infer
- that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close> are
+ that \<^term>\<open>x\<close> is of some arbitrary type where \<open>0\<close> and \<open>+\<close> are
declared. As a consequence, you will be unable to prove the
goal. To alert you to such pitfalls, Isabelle flags numerals without a
- fixed type in its output: @{prop"x+0 = x"}. (In the absence of a numeral,
+ fixed type in its output: \<^prop>\<open>x+0 = x\<close>. (In the absence of a numeral,
it may take you some time to realize what has happened if \pgmenu{Show
Types} is not set). In this particular example, you need to include
an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
- is enough contextual information this may not be necessary: @{prop"Suc x =
- x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
+ is enough contextual information this may not be necessary: \<^prop>\<open>Suc x =
+ x\<close> automatically implies \<open>x::nat\<close> because \<^term>\<open>Suc\<close> is not
overloaded.
For details on overloading see \S\ref{sec:overloading}.
@@ -58,16 +58,16 @@
\begin{warn}
The symbols \isadxboldpos{>}{$HOL2arithrel} and
\isadxboldpos{\isasymge}{$HOL2arithrel} are merely syntax: \<open>x > y\<close>
- stands for @{prop"y < x"} and similary for \<open>\<ge>\<close> and
+ stands for \<^prop>\<open>y < x\<close> and similary for \<open>\<ge>\<close> and
\<open>\<le>\<close>.
\end{warn}
\begin{warn}
- Constant \<open>1::nat\<close> is defined to equal @{term"Suc 0"}. This definition
+ Constant \<open>1::nat\<close> is defined to equal \<^term>\<open>Suc 0\<close>. This definition
(see \S\ref{sec:ConstDefinitions}) is unfolded automatically by some
tactics (like \<open>auto\<close>, \<open>simp\<close> and \<open>arith\<close>) but not by
others (especially the single step tactics in Chapter~\ref{chap:rules}).
If you need the full set of numerals, see~\S\ref{sec:numerals}.
- \emph{Novices are advised to stick to @{term"0::nat"} and @{term Suc}.}
+ \emph{Novices are advised to stick to \<^term>\<open>0::nat\<close> and \<^term>\<open>Suc\<close>.}
\end{warn}
Both \<open>auto\<close> and \<open>simp\<close>
@@ -93,14 +93,14 @@
\<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, \<open>=\<close>,
\<open>\<forall>\<close>, \<open>\<exists>\<close>), the relations \<open>=\<close>,
\<open>\<le>\<close> and \<open><\<close>, and the operations \<open>+\<close>, \<open>-\<close>,
-@{term min} and @{term max}. For example,\<close>
+\<^term>\<open>min\<close> and \<^term>\<open>max\<close>. For example,\<close>
lemma "min i (max j (k*k)) = max (min (k*k) i) (min i (j::nat))"
apply(arith)
(*<*)done(*>*)
text\<open>\noindent
-succeeds because @{term"k*k"} can be treated as atomic. In contrast,
+succeeds because \<^term>\<open>k*k\<close> can be treated as atomic. In contrast,
\<close>
lemma "n*n = n+1 \<Longrightarrow> n=0"
--- a/src/Doc/Tutorial/Misc/pairs2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/pairs2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,7 +19,7 @@
element denoted by~\cdx{()}. This type can be viewed
as a degenerate product with 0 components.
\item
-Products, like type @{typ nat}, are datatypes, which means
+Products, like type \<^typ>\<open>nat\<close>, are datatypes, which means
in particular that \<open>induct_tac\<close> and \<open>case_tac\<close> are applicable to
terms of product type.
Both split the term into a number of variables corresponding to the tuple structure
--- a/src/Doc/Tutorial/Misc/prime_def.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/prime_def.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,7 +9,7 @@
(where \<open>dvd\<close> means ``divides''):
@{term[display,quotes]"prime(p) \<equiv> 1 < p \<and> (m dvd p \<longrightarrow> (m=1 \<or> m=p))"}
\par\noindent\hangindent=0pt
-Isabelle rejects this ``definition'' because of the extra @{term"m"} on the
+Isabelle rejects this ``definition'' because of the extra \<^term>\<open>m\<close> on the
right-hand side, which would introduce an inconsistency (why?).
The correct version is
@{term[display,quotes]"prime(p) \<equiv> 1 < p \<and> (\<forall>m. m dvd p \<longrightarrow> (m=1 \<or> m=p))"}
--- a/src/Doc/Tutorial/Misc/simp.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/simp.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
Nearly any theorem can become a simplification
rule. The simplifier will try to transform it into an equation.
For example, the theorem
-@{prop"~P"} is turned into @{prop"P = False"}. The details
+\<^prop>\<open>~P\<close> is turned into \<^prop>\<open>P = False\<close>. The details
are explained in \S\ref{sec:SimpHow}.
The simplification attribute of theorems can be turned on and off:%
@@ -102,9 +102,9 @@
done
text\<open>\noindent
-The second assumption simplifies to @{term"xs = []"}, which in turn
-simplifies the first assumption to @{term"zs = ys"}, thus reducing the
-conclusion to @{term"ys = ys"} and hence to @{term"True"}.
+The second assumption simplifies to \<^term>\<open>xs = []\<close>, which in turn
+simplifies the first assumption to \<^term>\<open>zs = ys\<close>, thus reducing the
+conclusion to \<^term>\<open>ys = ys\<close> and hence to \<^term>\<open>True\<close>.
In some cases, using the assumptions can lead to nontermination:
\<close>
@@ -113,7 +113,7 @@
txt\<open>\noindent
An unmodified application of \<open>simp\<close> loops. The culprit is the
-simplification rule @{term"f x = g (f (g x))"}, which is extracted from
+simplification rule \<^term>\<open>f x = g (f (g x))\<close>, which is extracted from
the assumption. (Isabelle notices certain simple forms of
nontermination but not this one.) The problem can be circumvented by
telling the simplifier to ignore the assumptions:
@@ -207,7 +207,7 @@
Proving a goal containing \isa{let}-expressions almost invariably requires the
\<open>let\<close>-con\-structs to be expanded at some point. Since
\<open>let\<close>\ldots\isa{=}\ldots\<open>in\<close>{\ldots} is just syntactic sugar for
-the predefined constant @{term"Let"}, expanding \<open>let\<close>-constructs
+the predefined constant \<^term>\<open>Let\<close>, expanding \<open>let\<close>-constructs
means rewriting with \tdx{Let_def}:\<close>
lemma "(let xs = [] in xs@ys@xs) = ys"
@@ -236,7 +236,7 @@
text\<open>\noindent
Note the use of ``\ttindexboldpos{,}{$Isar}'' to string together a
sequence of methods. Assuming that the simplification rule
-@{term"(rev xs = []) = (xs = [])"}
+\<^term>\<open>(rev xs = []) = (xs = [])\<close>
is present as well,
the lemma below is proved by plain simplification:
\<close>
@@ -247,9 +247,9 @@
(*>*)
text\<open>\noindent
The conditional equation @{thm[source]hd_Cons_tl} above
-can simplify @{term"hd(rev xs) # tl(rev xs)"} to @{term"rev xs"}
-because the corresponding precondition @{term"rev xs ~= []"}
-simplifies to @{term"xs ~= []"}, which is exactly the local
+can simplify \<^term>\<open>hd(rev xs) # tl(rev xs)\<close> to \<^term>\<open>rev xs\<close>
+because the corresponding precondition \<^term>\<open>rev xs ~= []\<close>
+simplifies to \<^term>\<open>xs ~= []\<close>, which is exactly the local
assumption of the subgoal.
\<close>
--- a/src/Doc/Tutorial/Misc/types.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Misc/types.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,7 +15,7 @@
text\<open>\label{sec:ConstDefinitions}\indexbold{definitions}%
Nonrecursive definitions can be made with the \commdx{definition}
command, for example \<open>nand\<close> and \<open>xor\<close> gates
-(based on type @{typ gate} above):
+(based on type \<^typ>\<open>gate\<close> above):
\<close>
definition nand :: gate where "nand A B \<equiv> \<not>(A \<and> B)"
--- a/src/Doc/Tutorial/Protocol/Event.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Protocol/Event.thy Sat Jan 05 17:24:33 2019 +0100
@@ -73,9 +73,9 @@
Says A B X \<Rightarrow> parts {X} \<union> used evs
| Gets A X \<Rightarrow> used evs
| Notes A X \<Rightarrow> parts {X} \<union> used evs)"
- \<comment> \<open>The case for @{term Gets} seems anomalous, but @{term Gets} always
- follows @{term Says} in real protocols. Seems difficult to change.
- See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}.\<close>
+ \<comment> \<open>The case for \<^term>\<open>Gets\<close> seems anomalous, but \<^term>\<open>Gets\<close> always
+ follows \<^term>\<open>Says\<close> in real protocols. Seems difficult to change.
+ See \<^text>\<open>Gets_correct\<close> in theory \<^text>\<open>Guard/Extensions.thy\<close>.\<close>
lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs \<longrightarrow> X \<in> used evs"
apply (induct_tac evs)
@@ -88,7 +88,7 @@
done
-subsection\<open>Function @{term knows}\<close>
+subsection\<open>Function \<^term>\<open>knows\<close>\<close>
(*Simplifying
parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
@@ -100,7 +100,7 @@
by simp
text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
- on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
+ on whether \<^term>\<open>A=Spy\<close> and whether \<^term>\<open>A\<in>bad\<close>\<close>
lemma knows_Spy_Notes [simp]:
"knows Spy (Notes A X # evs) =
(if A\<in>bad then insert X (knows Spy evs) else knows Spy evs)"
@@ -246,10 +246,10 @@
used_Nil [simp del] used_Cons [simp del]
-text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) \<longrightarrow> P"}
+text\<open>For proving theorems of the form \<^term>\<open>X \<notin> analz (knows Spy evs) \<longrightarrow> P\<close>
New events added by induction to "evs" are discarded. Provided
this information isn't needed, the proof will be much shorter, since
- it will omit complicated reasoning about @{term analz}.\<close>
+ it will omit complicated reasoning about \<^term>\<open>analz\<close>.\<close>
lemmas analz_mono_contra =
knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
@@ -358,14 +358,14 @@
Sometimes the protocol requires an agent to generate a new nonce. The
probability that a 20-byte random number has appeared before is effectively
-zero. To formalize this important property, the set @{term "used evs"}
+zero. To formalize this important property, the set \<^term>\<open>used evs\<close>
denotes the set of all items mentioned in the trace~\<open>evs\<close>.
The function \<open>used\<close> has a straightforward
recursive definition. Here is the case for \<open>Says\<close> event:
@{thm [display,indent=5] used_Says [no_vars]}
The function \<open>knows\<close> formalizes an agent's knowledge. Mostly we only
-care about the spy's knowledge, and @{term "knows Spy evs"} is the set of items
+care about the spy's knowledge, and \<^term>\<open>knows Spy evs\<close> is the set of items
available to the spy in the trace~\<open>evs\<close>. Already in the empty trace,
the spy starts with some secrets at his disposal, such as the private keys
of compromised users. After each \<open>Says\<close> event, the spy learns the
@@ -374,9 +374,9 @@
Combinations of functions express other important
sets of messages derived from~\<open>evs\<close>:
\begin{itemize}
-\item @{term "analz (knows Spy evs)"} is everything that the spy could
+\item \<^term>\<open>analz (knows Spy evs)\<close> is everything that the spy could
learn by decryption
-\item @{term "synth (analz (knows Spy evs))"} is everything that the spy
+\item \<^term>\<open>synth (analz (knows Spy evs))\<close> is everything that the spy
could generate
\end{itemize}
\<close>
--- a/src/Doc/Tutorial/Protocol/Message.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Protocol/Message.thy Sat Jan 05 17:24:33 2019 +0100
@@ -242,7 +242,7 @@
text\<open>This allows \<open>blast\<close> to simplify occurrences of
- @{term "parts(G\<union>H)"} in the assumption.\<close>
+ \<^term>\<open>parts(G\<union>H)\<close> in the assumption.\<close>
lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE]
declare in_parts_UnE [elim!]
@@ -484,8 +484,8 @@
text\<open>Case analysis: either the message is secure, or it is not! Effective,
but can cause subgoals to blow up! Use with \<open>if_split\<close>; apparently
-\<open>split_tac\<close> does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"}\<close>
+\<open>split_tac\<close> does not cope with patterns such as \<^term>\<open>analz (insert
+(Crypt K X) H)\<close>\<close>
lemma analz_Crypt_if [simp]:
"analz (insert (Crypt K X) H) =
(if (Key (invKey K) \<in> analz H)
@@ -621,7 +621,7 @@
text \<open>
The set includes all agent names. Nonces and keys are assumed to be
unguessable, so none are included beyond those already in~$H$. Two
-elements of @{term "synth H"} can be combined, and an element can be encrypted
+elements of \<^term>\<open>synth H\<close> can be combined, and an element can be encrypted
using a key present in~$H$.
Like \<open>analz\<close>, this set operator is monotone and idempotent. It also
@@ -636,12 +636,12 @@
text \<open>
\noindent
The resulting elimination rule replaces every assumption of the form
-@{term "Nonce n \<in> synth H"} by @{term "Nonce n \<in> H"},
+\<^term>\<open>Nonce n \<in> synth H\<close> by \<^term>\<open>Nonce n \<in> H\<close>,
expressing that a nonce cannot be guessed.
A third operator, \<open>parts\<close>, is useful for stating correctness
properties. The set
-@{term "parts H"} consists of the components of elements of~$H$. This set
+\<^term>\<open>parts H\<close> consists of the components of elements of~$H$. This set
includes~\<open>H\<close> and is closed under the projections from a compound
message to its immediate parts.
Its definition resembles that of \<open>analz\<close> except in the rule
@@ -728,7 +728,7 @@
by (rule subset_trans [OF parts_mono parts_Un_subset2], blast)
text\<open>More specifically for Fake. Very occasionally we could do with a version
- of the form @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"}\<close>
+ of the form \<^term>\<open>parts{X} \<subseteq> synth (analz H) \<union> parts H\<close>\<close>
lemma Fake_parts_insert:
"X \<in> synth (analz H) \<Longrightarrow>
parts (insert X H) \<subseteq> synth (analz H) \<union> parts H"
@@ -742,8 +742,8 @@
==> Z \<in> synth (analz H) \<union> parts H"
by (blast dest: Fake_parts_insert [THEN subsetD, dest])
-text\<open>@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put
- @{term "G=H"}.\<close>
+text\<open>\<^term>\<open>H\<close> is sometimes \<^term>\<open>Key ` KK \<union> spies evs\<close>, so can't put
+ \<^term>\<open>G=H\<close>.\<close>
lemma Fake_analz_insert:
"X \<in> synth (analz G) \<Longrightarrow>
analz (insert X H) \<subseteq> synth (analz G) \<union> analz (G \<union> H)"
@@ -857,7 +857,7 @@
\<close>
text\<open>By default only \<open>o_apply\<close> is built-in. But in the presence of
-eta-expansion this means that some terms displayed as @{term "f o g"} will be
+eta-expansion this means that some terms displayed as \<^term>\<open>f o g\<close> will be
rewritten, and others will not!\<close>
declare o_def [simp]
--- a/src/Doc/Tutorial/Protocol/NS_Public.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Protocol/NS_Public.thy Sat Jan 05 17:24:33 2019 +0100
@@ -74,7 +74,7 @@
@{term [display,indent=5] "Says A' B (Crypt (pubK B) \<lbrace>Nonce NA, Agent A\<rbrace>)"}
may be extended by an event of the form
@{term [display,indent=5] "Says B A (Crypt (pubK A) \<lbrace>Nonce NA, Nonce NB, Agent B\<rbrace>)"}
-where \<open>NB\<close> is a fresh nonce: @{term "Nonce NB \<notin> used evs2"}.
+where \<open>NB\<close> is a fresh nonce: \<^term>\<open>Nonce NB \<notin> used evs2\<close>.
Writing the sender as \<open>A'\<close> indicates that \<open>B\<close> does not
know who sent the message. Calling the trace variable \<open>evs2\<close> rather
than simply \<open>evs\<close> helps us know where we are in a proof after many
@@ -112,7 +112,7 @@
text \<open>
Secrecy properties can be hard to prove. The conclusion of a typical
secrecy theorem is
-@{term "X \<notin> analz (knows Spy evs)"}. The difficulty arises from
+\<^term>\<open>X \<notin> analz (knows Spy evs)\<close>. The difficulty arises from
having to reason about \<open>analz\<close>, or less formally, showing that the spy
can never learn~\<open>X\<close>. Much easier is to prove that \<open>X\<close> can never
occur at all. Such \emph{regularity} properties are typically expressed
@@ -151,7 +151,7 @@
text \<open>
The \<open>Fake\<close> case is proved automatically. If
-@{term "priK A"} is in the extended trace then either (1) it was already in the
+\<^term>\<open>priK A\<close> is in the extended trace then either (1) it was already in the
original trace or (2) it was
generated by the spy, who must have known this key already.
Either way, the induction hypothesis applies.
@@ -307,7 +307,7 @@
is compromised.
@{named_thms [display,indent=0,margin=50] analz_Crypt_if [no_vars] (analz_Crypt_if)}
The simplifier has also used \<open>Spy_see_priK\<close>, proved in
-{\S}\ref{sec:regularity} above, to yield @{term "Ba \<in> bad"}.
+{\S}\ref{sec:regularity} above, to yield \<^term>\<open>Ba \<in> bad\<close>.
Recall that this subgoal concerns the case
where the last message to be sent was
@@ -315,9 +315,9 @@
This message can compromise $Nb$ only if $Nb=Na'$ and $B'$ is compromised,
allowing the spy to decrypt the message. The Isabelle subgoal says
precisely this, if we allow for its choice of variable names.
-Proving @{term "NB \<noteq> NAa"} is easy: \<open>NB\<close> was
+Proving \<^term>\<open>NB \<noteq> NAa\<close> is easy: \<open>NB\<close> was
sent earlier, while \<open>NAa\<close> is fresh; formally, we have
-the assumption @{term "Nonce NAa \<notin> used evs1"}.
+the assumption \<^term>\<open>Nonce NAa \<notin> used evs1\<close>.
Note that our reasoning concerned \<open>B\<close>'s participation in another
run. Agents may engage in several runs concurrently, and some attacks work
--- a/src/Doc/Tutorial/Rules/Basic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Rules/Basic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -371,7 +371,7 @@
@{subgoals[display,indent=0,margin=65]}
applying @text{someI} automatically instantiates
-@{term f} to @{term "\<lambda>x. SOME y. P x y"}
+\<^term>\<open>f\<close> to \<^term>\<open>\<lambda>x. SOME y. P x y\<close>
\<close>
by (rule someI)
--- a/src/Doc/Tutorial/Sets/Recur.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Sets/Recur.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,14 +34,14 @@
The HOL library formalizes
some of the theory of wellfounded relations. For example
-@{prop"wf r"}\index{*wf|bold} means that relation @{term[show_types]"r::('a*'a)set"} is
+\<^prop>\<open>wf r\<close>\index{*wf|bold} means that relation @{term[show_types]"r::('a*'a)set"} is
wellfounded.
Finally we should mention that HOL already provides the mother of all
inductions, \textbf{wellfounded
induction}\indexbold{induction!wellfounded}\index{wellfounded
induction|see{induction, wellfounded}} (@{thm[source]wf_induct}):
@{thm[display]wf_induct[no_vars]}
-where @{term"wf r"} means that the relation @{term r} is wellfounded
+where \<^term>\<open>wf r\<close> means that the relation \<^term>\<open>r\<close> is wellfounded
\<close>
--- a/src/Doc/Tutorial/ToyList/ToyList.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList.thy Sat Jan 05 17:24:33 2019 +0100
@@ -6,7 +6,7 @@
HOL already has a predefined theory of lists called \<open>List\<close> ---
\<open>ToyList\<close> is merely a small fragment of it chosen as an example.
To avoid some ambiguities caused by defining lists twice, we manipulate
-the concrete syntax and name space of theory @{theory Main} as follows.
+the concrete syntax and name space of theory \<^theory>\<open>Main\<close> as follows.
\<close>
no_notation Nil ("[]") and Cons (infixr "#" 65) and append (infixr "@" 65)
@@ -22,17 +22,17 @@
constructors \cdx{Nil} and \cdx{Cons}, the
empty~list and the operator that adds an element to the front of a list. For
example, the term \isa{Cons True (Cons False Nil)} is a value of
-type @{typ"bool list"}, namely the list with the elements @{term"True"} and
-@{term"False"}. Because this notation quickly becomes unwieldy, the
+type \<^typ>\<open>bool list\<close>, namely the list with the elements \<^term>\<open>True\<close> and
+\<^term>\<open>False\<close>. Because this notation quickly becomes unwieldy, the
datatype declaration is annotated with an alternative syntax: instead of
@{term[source]Nil} and \isa{Cons x xs} we can write
-@{term"[]"}\index{$HOL2list@\isa{[]}|bold} and
-@{term"x # xs"}\index{$HOL2list@\isa{\#}|bold}. In fact, this
+\<^term>\<open>[]\<close>\index{$HOL2list@\isa{[]}|bold} and
+\<^term>\<open>x # xs\<close>\index{$HOL2list@\isa{\#}|bold}. In fact, this
alternative syntax is the familiar one. Thus the list \isa{Cons True
-(Cons False Nil)} becomes @{term"True # False # []"}. The annotation
+(Cons False Nil)} becomes \<^term>\<open>True # False # []\<close>. The annotation
\isacommand{infixr}\index{infixr@\isacommand{infixr} (annotation)}
means that \<open>#\<close> associates to
-the right: the term @{term"x # y # z"} is read as \<open>x # (y # z)\<close>
+the right: the term \<^term>\<open>x # y # z\<close> is read as \<open>x # (y # z)\<close>
and not as \<open>(x # y) # z\<close>.
The \<open>65\<close> is the priority of the infix \<open>#\<close>.
@@ -64,12 +64,12 @@
%
Function \<open>app\<close> is annotated with concrete syntax. Instead of the
prefix syntax \<open>app xs ys\<close> the infix
-@{term"xs @ ys"}\index{$HOL2list@\isa{\at}|bold} becomes the preferred
+\<^term>\<open>xs @ ys\<close>\index{$HOL2list@\isa{\at}|bold} becomes the preferred
form.
\index{*rev (constant)|(}\index{append function|(}
-The equations for \<open>app\<close> and @{term"rev"} hardly need comments:
-\<open>app\<close> appends two lists and @{term"rev"} reverses a list. The
+The equations for \<open>app\<close> and \<^term>\<open>rev\<close> hardly need comments:
+\<open>app\<close> appends two lists and \<^term>\<open>rev\<close> reverses a list. The
keyword \commdx{primrec} indicates that the recursion is
of a particularly primitive kind where each recursive call peels off a datatype
constructor from one of the arguments. Thus the
@@ -115,18 +115,18 @@
Assuming you have processed the declarations and definitions of
\texttt{ToyList} presented so far, you may want to test your
functions by running them. For example, what is the value of
-@{term"rev(True#False#[])"}? Command
+\<^term>\<open>rev(True#False#[])\<close>? Command
\<close>
value "rev (True # False # [])"
-text\<open>\noindent yields the correct result @{term"False # True # []"}.
+text\<open>\noindent yields the correct result \<^term>\<open>False # True # []\<close>.
But we can go beyond mere functional programming and evaluate terms with
variables in them, executing functions symbolically:\<close>
value "rev (a # b # c # [])"
-text\<open>\noindent yields @{term"c # b # a # []"}.
+text\<open>\noindent yields \<^term>\<open>c # b # a # []\<close>.
\section{An Introductory Proof}
\label{sec:intro-proof}
@@ -149,13 +149,13 @@
This \isacommand{theorem} command does several things:
\begin{itemize}
\item
-It establishes a new theorem to be proved, namely @{prop"rev(rev xs) = xs"}.
+It establishes a new theorem to be proved, namely \<^prop>\<open>rev(rev xs) = xs\<close>.
\item
It gives that theorem the name \<open>rev_rev\<close>, for later reference.
\item
It tells Isabelle (via the bracketed attribute \attrdx{simp}) to take the eventual theorem as a simplification rule: future proofs involving
-simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}.
+simplification will replace occurrences of \<^term>\<open>rev(rev xs)\<close> by
+\<^term>\<open>xs\<close>.
\end{itemize}
The name and the simplification attribute are optional.
Isabelle's response is to print the initial proof state consisting
@@ -176,16 +176,16 @@
set the flag \isa{Proof.show_main_goal}\index{*show_main_goal (flag)}
--- this flag used to be set by default.)
-Let us now get back to @{prop"rev(rev xs) = xs"}. Properties of recursively
+Let us now get back to \<^prop>\<open>rev(rev xs) = xs\<close>. Properties of recursively
defined functions are best established by induction. In this case there is
-nothing obvious except induction on @{term"xs"}:
+nothing obvious except induction on \<^term>\<open>xs\<close>:
\<close>
apply(induct_tac xs)
txt\<open>\noindent\index{*induct_tac (method)}%
-This tells Isabelle to perform induction on variable @{term"xs"}. The suffix
-@{term"tac"} stands for \textbf{tactic},\index{tactics}
+This tells Isabelle to perform induction on variable \<^term>\<open>xs\<close>. The suffix
+\<^term>\<open>tac\<close> stands for \textbf{tactic},\index{tactics}
a synonym for ``theorem proving function''.
By default, induction acts on the first subgoal. The new proof state contains
two subgoals, namely the base case (@{term[source]Nil}) and the induction step
@@ -204,8 +204,8 @@
conclusion}\index{conclusion!of subgoal} is the actual proposition to be proved.
Typical proof steps
that add new assumptions are induction and case distinction. In our example
-the only assumption is the induction hypothesis @{term"rev (rev list) =
- list"}, where @{term"list"} is a variable name chosen by Isabelle. If there
+the only assumption is the induction hypothesis \<^term>\<open>rev (rev list) =
+ list\<close>, where \<^term>\<open>list\<close> is a variable name chosen by Isabelle. If there
are multiple assumptions, they are enclosed in the bracket pair
\indexboldpos{\isasymlbrakk}{$Isabrl} and
\indexboldpos{\isasymrbrakk}{$Isabrr} and separated by semicolons.
@@ -219,7 +219,7 @@
This command tells Isabelle to apply a proof strategy called
\<open>auto\<close> to all subgoals. Essentially, \<open>auto\<close> tries to
simplify the subgoals. In our case, subgoal~1 is solved completely (thanks
-to the equation @{prop"rev [] = []"}) and disappears; the simplified version
+to the equation \<^prop>\<open>rev [] = []\<close>) and disappears; the simplified version
of subgoal~2 becomes the new subgoal~1:
@{subgoals[display,indent=0,margin=70]}
In order to simplify this subgoal further, a lemma suggests itself.
@@ -243,9 +243,9 @@
the importance we attach to a proposition. Therefore we use the words
\emph{theorem} and \emph{lemma} pretty much interchangeably, too.
-There are two variables that we could induct on: @{term"xs"} and
-@{term"ys"}. Because \<open>@\<close> is defined by recursion on
-the first argument, @{term"xs"} is the correct one:
+There are two variables that we could induct on: \<^term>\<open>xs\<close> and
+\<^term>\<open>ys\<close>. Because \<open>@\<close> is defined by recursion on
+the first argument, \<^term>\<open>xs\<close> is the correct one:
\<close>
apply(induct_tac xs)
@@ -293,7 +293,7 @@
% Instead of \isacommand{apply} followed by a dot, you can simply write
% \isacommand{by}\indexbold{by}, which we do most of the time.
Notice that in lemma @{thm[source]app_Nil2},
-as printed out after the final \isacommand{done}, the free variable @{term"xs"} has been
+as printed out after the final \isacommand{done}, the free variable \<^term>\<open>xs\<close> has been
replaced by the unknown \<open>?xs\<close>, just as explained in
\S\ref{sec:variables}.
--- a/src/Doc/Tutorial/ToyList/ToyList_Test.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/ToyList/ToyList_Test.thy Sat Jan 05 17:24:33 2019 +0100
@@ -6,7 +6,7 @@
let val text =
map (File.read o Path.append \<^master_dir>) [\<^path>\<open>ToyList1.txt\<close>, \<^path>\<open>ToyList2.txt\<close>]
|> implode
- in Thy_Info.script_thy Position.start text @{theory} end
+ in Thy_Info.script_thy Position.start text \<^theory> end
\<close>
end
--- a/src/Doc/Tutorial/Trie/Trie.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Trie/Trie.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,8 +5,8 @@
To minimize running time, each node of a trie should contain an array that maps
letters to subtries. We have chosen a
representation where the subtries are held in an association list, i.e.\ a
-list of (letter,trie) pairs. Abstracting over the alphabet @{typ"'a"} and the
-values @{typ"'v"} we define a trie as follows:
+list of (letter,trie) pairs. Abstracting over the alphabet \<^typ>\<open>'a\<close> and the
+values \<^typ>\<open>'v\<close> we define a trie as follows:
\<close>
datatype ('a,'v)trie = Trie "'v option" "('a * ('a,'v)trie)list"
@@ -49,7 +49,7 @@
text\<open>
As a first simple property we prove that looking up a string in the empty
-trie @{term"Trie None []"} always returns @{const None}. The proof merely
+trie \<^term>\<open>Trie None []\<close> always returns \<^const>\<open>None\<close>. The proof merely
distinguishes the two cases whether the search string is empty or not:
\<close>
@@ -72,13 +72,13 @@
text\<open>\noindent
The base case is obvious. In the recursive case the subtrie
-@{term tt} associated with the first letter @{term a} is extracted,
+\<^term>\<open>tt\<close> associated with the first letter \<^term>\<open>a\<close> is extracted,
recursively updated, and then placed in front of the association list.
-The old subtrie associated with @{term a} is still in the association list
-but no longer accessible via @{const assoc}. Clearly, there is room here for
+The old subtrie associated with \<^term>\<open>a\<close> is still in the association list
+but no longer accessible via \<^const>\<open>assoc\<close>. Clearly, there is room here for
optimizations!
-Before we start on any proofs about @{const update} we tell the simplifier to
+Before we start on any proofs about \<^const>\<open>update\<close> we tell the simplifier to
expand all \<open>let\<close>s and to split all \<open>case\<close>-constructs over
options:
\<close>
@@ -87,23 +87,23 @@
text\<open>\noindent
The reason becomes clear when looking (probably after a failed proof
-attempt) at the body of @{const update}: it contains both
+attempt) at the body of \<^const>\<open>update\<close>: it contains both
\<open>let\<close> and a case distinction over type \<open>option\<close>.
-Our main goal is to prove the correct interaction of @{const update} and
-@{const lookup}:
+Our main goal is to prove the correct interaction of \<^const>\<open>update\<close> and
+\<^const>\<open>lookup\<close>:
\<close>
theorem "\<forall>t v bs. lookup (update t as v) bs =
(if as=bs then Some v else lookup t bs)"
txt\<open>\noindent
-Our plan is to induct on @{term as}; hence the remaining variables are
+Our plan is to induct on \<^term>\<open>as\<close>; hence the remaining variables are
quantified. From the definitions it is clear that induction on either
-@{term as} or @{term bs} is required. The choice of @{term as} is
-guided by the intuition that simplification of @{const lookup} might be easier
-if @{const update} has already been simplified, which can only happen if
-@{term as} is instantiated.
+\<^term>\<open>as\<close> or \<^term>\<open>bs\<close> is required. The choice of \<^term>\<open>as\<close> is
+guided by the intuition that simplification of \<^const>\<open>lookup\<close> might be easier
+if \<^const>\<open>update\<close> has already been simplified, which can only happen if
+\<^term>\<open>as\<close> is instantiated.
The start of the proof is conventional:
\<close>
apply(induct_tac as, auto)
@@ -115,7 +115,7 @@
~2.~\dots~{\isasymLongrightarrow}~lookup~\dots~bs~=~lookup~t~bs\isanewline
~3.~\dots~{\isasymLongrightarrow}~lookup~\dots~bs~=~lookup~t~bs
\end{isabelle}
-Clearly, if we want to make headway we have to instantiate @{term bs} as
+Clearly, if we want to make headway we have to instantiate \<^term>\<open>bs\<close> as
well now. It turns out that instead of induction, case distinction
suffices:
\<close>
@@ -133,11 +133,11 @@
comes at a cost: the proof script is unreadable because the intermediate
proof states are invisible, and we rely on the (possibly brittle) magic of
\<open>auto\<close> (\<open>simp_all\<close> will not do --- try it) to split the subgoals
-of the induction up in such a way that case distinction on @{term bs} makes
+of the induction up in such a way that case distinction on \<^term>\<open>bs\<close> makes
sense and solves the proof.
\begin{exercise}
- Modify @{const update} (and its type) such that it allows both insertion and
+ Modify \<^const>\<open>update\<close> (and its type) such that it allows both insertion and
deletion of entries with a single function. Prove the corresponding version
of the main theorem above.
Optimize your function such that it shrinks tries after
@@ -145,17 +145,17 @@
\end{exercise}
\begin{exercise}
- Write an improved version of @{const update} that does not suffer from the
+ Write an improved version of \<^const>\<open>update\<close> that does not suffer from the
space leak (pointed out above) caused by not deleting overwritten entries
from the association list. Prove the main theorem for your improved
- @{const update}.
+ \<^const>\<open>update\<close>.
\end{exercise}
\begin{exercise}
Conceptually, each node contains a mapping from letters to optional
subtries. Above we have implemented this by means of an association
- list. Replay the development replacing @{typ "('a * ('a,'v)trie)list"}
- with @{typ"'a \<Rightarrow> ('a,'v)trie option"}.
+ list. Replay the development replacing \<^typ>\<open>('a * ('a,'v)trie)list\<close>
+ with \<^typ>\<open>'a \<Rightarrow> ('a,'v)trie option\<close>.
\end{exercise}
\<close>
--- a/src/Doc/Tutorial/Types/Axioms.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Axioms.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,28 +13,27 @@
subsubsection \<open>Semigroups\<close>
-text\<open>We specify \emph{semigroups} as subclass of @{class plus}:\<close>
+text\<open>We specify \emph{semigroups} as subclass of \<^class>\<open>plus\<close>:\<close>
class semigroup = plus +
assumes assoc: "(x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"
text \<open>\noindent This @{command class} specification requires that
-all instances of @{class semigroup} obey @{fact "assoc:"}~@{prop
+all instances of \<^class>\<open>semigroup\<close> obey @{fact "assoc:"}~@{prop
[source] "\<And>x y z :: 'a::semigroup. (x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"}.
We can use this class axiom to derive further abstract theorems
-relative to class @{class semigroup}:\<close>
+relative to class \<^class>\<open>semigroup\<close>:\<close>
lemma assoc_left:
fixes x y z :: "'a::semigroup"
shows "x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
using assoc by (rule sym)
-text \<open>\noindent The @{class semigroup} constraint on type @{typ
-"'a"} restricts instantiations of @{typ "'a"} to types of class
-@{class semigroup} and during the proof enables us to use the fact
+text \<open>\noindent The \<^class>\<open>semigroup\<close> constraint on type \<^typ>\<open>'a\<close> restricts instantiations of \<^typ>\<open>'a\<close> to types of class
+\<^class>\<open>semigroup\<close> and during the proof enables us to use the fact
@{fact assoc} whose type parameter is itself constrained to class
-@{class semigroup}. The main advantage of classes is that theorems
+\<^class>\<open>semigroup\<close>. The main advantage of classes is that theorems
can be proved in the abstract and freely reused for each instance.
On instantiation, we have to give a proof that the given operations
@@ -69,7 +68,7 @@
txt \<open>\noindent Associativity of product semigroups is established
using the hypothetical associativity @{fact assoc} of the type
-components, which holds due to the @{class semigroup} constraints
+components, which holds due to the \<^class>\<open>semigroup\<close> constraints
imposed on the type components by the @{command instance} proposition.
Indeed, this pattern often occurs with parametric types and type
classes.\<close>
@@ -81,7 +80,7 @@
subsubsection \<open>Monoids\<close>
text \<open>We define a subclass \<open>monoidl\<close> (a semigroup with a
-left-hand neutral) by extending @{class semigroup} with one additional
+left-hand neutral) by extending \<^class>\<open>semigroup\<close> with one additional
parameter \<open>neutral\<close> together with its property:\<close>
class monoidl = semigroup +
@@ -133,7 +132,7 @@
class monoid = monoidl +
assumes neutr: "x \<oplus> \<zero> = x"
-text \<open>\noindent Corresponding instances for @{typ nat} and products
+text \<open>\noindent Corresponding instances for \<^typ>\<open>nat\<close> and products
are left as an exercise to the reader.\<close>
subsubsection \<open>Groups\<close>
@@ -223,7 +222,7 @@
constraints are always carried around and Isabelle takes care that
they are never lost, unless the type variable is instantiated with a
type that has been shown to belong to that class. Thus you may be able
-to prove @{prop False} from your axioms, but Isabelle will remind you
+to prove \<^prop>\<open>False\<close> from your axioms, but Isabelle will remind you
that this theorem has the hidden hypothesis that the class is
non-empty.
@@ -235,9 +234,8 @@
subsubsection\<open>Syntactic Classes and Predefined Overloading\<close>
text \<open>In our algebra example, we have started with a \emph{syntactic
-class} @{class plus} which only specifies operations but no axioms; it
-would have been also possible to start immediately with class @{class
-semigroup}, specifying the \<open>\<oplus>\<close> operation and associativity at
+class} \<^class>\<open>plus\<close> which only specifies operations but no axioms; it
+would have been also possible to start immediately with class \<^class>\<open>semigroup\<close>, specifying the \<open>\<oplus>\<close> operation and associativity at
the same time.
Which approach is more appropriate depends. Usually it is more
@@ -253,7 +251,7 @@
\emph{with} axioms.
Further note that classes may contain axioms but \emph{no} operations.
-An example is class @{class finite} from theory @{theory "HOL.Finite_Set"}
+An example is class \<^class>\<open>finite\<close> from theory \<^theory>\<open>HOL.Finite_Set\<close>
which specifies a type to be finite: @{lemma [source] "finite (UNIV :: 'a::finite
set)" by (fact finite_UNIV)}.\<close>
--- a/src/Doc/Tutorial/Types/Numbers.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Numbers.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,8 +17,8 @@
"h i = (if i = 3 then 2 else i)"
text\<open>
-@{term"h 3 = 2"}
-@{term"h i = i"}
+\<^term>\<open>h 3 = 2\<close>
+\<^term>\<open>h i = i\<close>
\<close>
text\<open>
--- a/src/Doc/Tutorial/Types/Overloading.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Overloading.thy Sat Jan 05 17:24:33 2019 +0100
@@ -27,7 +27,7 @@
text \<open>\noindent Command \isacommand{instantiation} opens a local
theory context. Here we can now instantiate @{const [source] plus} on
-@{typ nat}:\<close>
+\<^typ>\<open>nat\<close>:\<close>
primrec plus_nat :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"(0::nat) \<oplus> n = n"
@@ -51,13 +51,13 @@
end
-text \<open>\noindent From now on, terms like @{term "Suc (m \<oplus> 2)"} are
+text \<open>\noindent From now on, terms like \<^term>\<open>Suc (m \<oplus> 2)\<close> are
legal.\<close>
instantiation prod :: (plus, plus) plus
begin
-text \<open>\noindent Here we instantiate the product type @{type prod} to
+text \<open>\noindent Here we instantiate the product type \<^type>\<open>prod\<close> to
class @{class [source] plus}, given that its type arguments are of
class @{class [source] plus}:\<close>
--- a/src/Doc/Tutorial/Types/Pairs.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Pairs.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4,8 +4,8 @@
text\<open>\label{sec:products}
Ordered pairs were already introduced in \S\ref{sec:pairs}, but only with a minimal
-repertoire of operations: pairing and the two projections @{term fst} and
-@{term snd}. In any non-trivial application of pairs you will find that this
+repertoire of operations: pairing and the two projections \<^term>\<open>fst\<close> and
+\<^term>\<open>snd\<close>. In any non-trivial application of pairs you will find that this
quickly leads to unreadable nests of projections. This
section introduces syntactic sugar to overcome this
problem: pattern matching with tuples.
@@ -20,11 +20,11 @@
and they can be nested. Here are
some typical examples:
\begin{quote}
-@{term"let (x,y) = f z in (y,x)"}\\
-@{term"case xs of [] => (0::nat) | (x,y)#zs => x+y"}\\
+\<^term>\<open>let (x,y) = f z in (y,x)\<close>\\
+\<^term>\<open>case xs of [] => (0::nat) | (x,y)#zs => x+y\<close>\\
\<open>\<forall>(x,y)\<in>A. x=y\<close>\\
\<open>{(x,y,z). x=z}\<close>\\
-@{term"\<Union>(x,y)\<in>A. {x+y}"}
+\<^term>\<open>\<Union>(x,y)\<in>A. {x+y}\<close>
\end{quote}
The intuitive meanings of these expressions should be obvious.
Unfortunately, we need to know in more detail what the notation really stands
@@ -32,11 +32,11 @@
over pairs and tuples is merely a convenient shorthand for a more complex
internal representation. Thus the internal and external form of a term may
differ, which can affect proofs. If you want to avoid this complication,
-stick to @{term fst} and @{term snd} and write @{term"%p. fst p + snd p"}
+stick to \<^term>\<open>fst\<close> and \<^term>\<open>snd\<close> and write \<^term>\<open>%p. fst p + snd p\<close>
instead of \<open>\<lambda>(x,y). x+y\<close>. These terms are distinct even though they
denote the same function.
-Internally, @{term"%(x,y). t"} becomes \<open>case_prod (\<lambda>x y. t)\<close>, where
+Internally, \<^term>\<open>%(x,y). t\<close> becomes \<open>case_prod (\<lambda>x y. t)\<close>, where
\cdx{split} is the uncurrying function of type \<open>('a \<Rightarrow> 'b
\<Rightarrow> 'c) \<Rightarrow> 'a \<times> 'b \<Rightarrow> 'c\<close> defined as
\begin{center}
@@ -51,7 +51,7 @@
subsection\<open>Theorem Proving\<close>
text\<open>
-The most obvious approach is the brute force expansion of @{term split}:
+The most obvious approach is the brute force expansion of \<^term>\<open>split\<close>:
\<close>
lemma "(\<lambda>(x,y).x) p = fst p"
@@ -60,19 +60,18 @@
text\<open>\noindent
This works well if rewriting with @{thm[source]split_def} finishes the
proof, as it does above. But if it does not, you end up with exactly what
-we are trying to avoid: nests of @{term fst} and @{term snd}. Thus this
+we are trying to avoid: nests of \<^term>\<open>fst\<close> and \<^term>\<open>snd\<close>. Thus this
approach is neither elegant nor very practical in large examples, although it
can be effective in small ones.
If we consider why this lemma presents a problem,
-we realize that we need to replace variable~@{term
-p} by some pair @{term"(a,b)"}. Then both sides of the
-equation would simplify to @{term a} by the simplification rules
+we realize that we need to replace variable~\<^term>\<open>p\<close> by some pair \<^term>\<open>(a,b)\<close>. Then both sides of the
+equation would simplify to \<^term>\<open>a\<close> by the simplification rules
@{thm split_conv[no_vars]} and @{thm fst_conv[no_vars]}.
To reason about tuple patterns requires some way of
converting a variable of product type into a pair.
-In case of a subterm of the form @{term"case_prod f p"} this is easy: the split
-rule @{thm[source]prod.split} replaces @{term p} by a pair:%
+In case of a subterm of the form \<^term>\<open>case_prod f p\<close> this is easy: the split
+rule @{thm[source]prod.split} replaces \<^term>\<open>p\<close> by a pair:%
\index{*split (method)}
\<close>
@@ -110,7 +109,7 @@
@{subgoals[display,indent=0]}
Again, simplification produces a term suitable for @{thm[source]prod.split}
as above. If you are worried about the strange form of the premise:
-\<open>case_prod (=)\<close> is short for @{term"\<lambda>(x,y). x=y"}.
+\<open>case_prod (=)\<close> is short for \<^term>\<open>\<lambda>(x,y). x=y\<close>.
The same proof procedure works for
\<close>
@@ -119,9 +118,9 @@
txt\<open>\noindent
except that we now have to use @{thm[source]prod.split_asm}, because
-@{term split} occurs in the assumptions.
+\<^term>\<open>split\<close> occurs in the assumptions.
-However, splitting @{term split} is not always a solution, as no @{term split}
+However, splitting \<^term>\<open>split\<close> is not always a solution, as no \<^term>\<open>split\<close>
may be present in the goal. Consider the following function:
\<close>
@@ -137,8 +136,8 @@
txt\<open>\noindent
simplification will do nothing, because the defining equation for
-@{const[source] swap} expects a pair. Again, we need to turn @{term p}
-into a pair first, but this time there is no @{term split} in sight.
+@{const[source] swap} expects a pair. Again, we need to turn \<^term>\<open>p\<close>
+into a pair first, but this time there is no \<^term>\<open>split\<close> in sight.
The only thing we can do is to split the term by hand:
\<close>
apply(case_tac p)
--- a/src/Doc/Tutorial/Types/Records.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Records.thy Sat Jan 05 17:24:33 2019 +0100
@@ -45,9 +45,9 @@
Ycoord :: int
text \<open>\noindent
- Records of type @{typ point} have two fields named @{const Xcoord}
- and @{const Ycoord}, both of type~@{typ int}. We now define a
- constant of type @{typ point}:
+ Records of type \<^typ>\<open>point\<close> have two fields named \<^const>\<open>Xcoord\<close>
+ and \<^const>\<open>Ycoord\<close>, both of type~\<^typ>\<open>int\<close>. We now define a
+ constant of type \<^typ>\<open>point\<close>:
\<close>
definition pt1 :: point where
@@ -65,8 +65,7 @@
text \<open>
For each field, there is a \emph{selector}\index{selector!record}
- function of the same name. For example, if \<open>p\<close> has type @{typ
- point} then \<open>Xcoord p\<close> denotes the value of the \<open>Xcoord\<close> field of~\<open>p\<close>. Expressions involving field selection
+ function of the same name. For example, if \<open>p\<close> has type \<^typ>\<open>point\<close> then \<open>Xcoord p\<close> denotes the value of the \<open>Xcoord\<close> field of~\<open>p\<close>. Expressions involving field selection
of explicit records are simplified automatically:
\<close>
@@ -75,8 +74,8 @@
text \<open>
The \emph{update}\index{update!record} operation is functional. For
- example, @{term "p\<lparr>Xcoord := 0\<rparr>"} is a record whose @{const Xcoord}
- value is zero and whose @{const Ycoord} value is copied from~\<open>p\<close>. Updates of explicit records are also simplified automatically:
+ example, \<^term>\<open>p\<lparr>Xcoord := 0\<rparr>\<close> is a record whose \<^const>\<open>Xcoord\<close>
+ value is zero and whose \<^const>\<open>Ycoord\<close> value is copied from~\<open>p\<close>. Updates of explicit records are also simplified automatically:
\<close>
lemma "\<lparr>Xcoord = a, Ycoord = b\<rparr>\<lparr>Xcoord := 0\<rparr> =
@@ -87,7 +86,7 @@
\begin{warn}
Field names are declared as constants and can no longer be used as
variables. It would be unwise, for example, to call the fields of
- type @{typ point} simply \<open>x\<close> and~\<open>y\<close>.
+ type \<^typ>\<open>point\<close> simply \<open>x\<close> and~\<open>y\<close>.
\end{warn}
\<close>
@@ -107,7 +106,7 @@
col :: colour
text \<open>\noindent
- The fields of this new type are @{const Xcoord}, \<open>Ycoord\<close> and
+ The fields of this new type are \<^const>\<open>Xcoord\<close>, \<open>Ycoord\<close> and
\<open>col\<close>, in that order.
\<close>
@@ -116,14 +115,13 @@
text \<open>
We can define generic operations that work on arbitrary
- instances of a record scheme, e.g.\ covering @{typ point}, @{typ
- cpoint}, and any further extensions. Every record structure has an
+ instances of a record scheme, e.g.\ covering \<^typ>\<open>point\<close>, \<^typ>\<open>cpoint\<close>, and any further extensions. Every record structure has an
implicit pseudo-field, \cdx{more}, that keeps the extension as an
explicit value. Its type is declared as completely
- polymorphic:~@{typ 'a}. When a fixed record value is expressed
+ polymorphic:~\<^typ>\<open>'a\<close>. When a fixed record value is expressed
using just its standard fields, the value of \<open>more\<close> is
implicitly set to \<open>()\<close>, the empty tuple, which has type
- @{typ unit}. Within the record brackets, you can refer to the
+ \<^typ>\<open>unit\<close>. Within the record brackets, you can refer to the
\<open>more\<close> field by writing ``\<open>\<dots>\<close>'' (three dots):
\<close>
@@ -131,7 +129,7 @@
by simp
text \<open>
- This lemma applies to any record whose first two fields are \<open>Xcoord\<close> and~@{const Ycoord}. Note that \<open>\<lparr>Xcoord = a, Ycoord
+ This lemma applies to any record whose first two fields are \<open>Xcoord\<close> and~\<^const>\<open>Ycoord\<close>. Note that \<open>\<lparr>Xcoord = a, Ycoord
= b, \<dots> = ()\<rparr>\<close> is exactly the same as \<open>\<lparr>Xcoord = a, Ycoord
= b\<rparr>\<close>. Selectors and updates are always polymorphic wrt.\ the
\<open>more\<close> part of a record scheme, its value is just ignored (for
@@ -145,30 +143,27 @@
by (simp add: cpt1_def)
text \<open>\noindent
- We see that the colour part attached to this @{typ point} is a
+ We see that the colour part attached to this \<^typ>\<open>point\<close> is a
rudimentary record in its own right, namely \<open>\<lparr>col =
Green\<rparr>\<close>. In order to select or update \<open>col\<close>, this fragment
needs to be put back into the context of the parent type scheme, say
- as \<open>more\<close> part of another @{typ point}.
+ as \<open>more\<close> part of another \<^typ>\<open>point\<close>.
To define generic operations, we need to know a bit more about
- records. Our definition of @{typ point} above has generated two
+ records. Our definition of \<^typ>\<open>point\<close> above has generated two
type abbreviations:
\medskip
\begin{tabular}{l}
- @{typ point}~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int\<rparr>\<close> \\
- @{typ "'a point_scheme"}~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int, \<dots> :: 'a\<rparr>\<close> \\
+ \<^typ>\<open>point\<close>~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int\<rparr>\<close> \\
+ \<^typ>\<open>'a point_scheme\<close>~\<open>=\<close>~\<open>\<lparr>Xcoord :: int, Ycoord :: int, \<dots> :: 'a\<rparr>\<close> \\
\end{tabular}
\medskip
\noindent
- Type @{typ point} is for fixed records having exactly the two fields
- @{const Xcoord} and~\<open>Ycoord\<close>, while the polymorphic type @{typ
- "'a point_scheme"} comprises all possible extensions to those two
- fields. Note that @{typ "unit point_scheme"} coincides with @{typ
- point}, and @{typ "\<lparr>col :: colour\<rparr> point_scheme"} with @{typ
- cpoint}.
+ Type \<^typ>\<open>point\<close> is for fixed records having exactly the two fields
+ \<^const>\<open>Xcoord\<close> and~\<open>Ycoord\<close>, while the polymorphic type \<^typ>\<open>'a point_scheme\<close> comprises all possible extensions to those two
+ fields. Note that \<^typ>\<open>unit point_scheme\<close> coincides with \<^typ>\<open>point\<close>, and \<^typ>\<open>\<lparr>col :: colour\<rparr> point_scheme\<close> with \<^typ>\<open>cpoint\<close>.
In the following example we define two operations --- methods, if we
regard records as objects --- to get and set any point's \<open>Xcoord\<close> field.
@@ -181,9 +176,9 @@
text \<open>
Here is a generic method that modifies a point, incrementing its
- @{const Xcoord} field. The \<open>Ycoord\<close> and \<open>more\<close> fields
+ \<^const>\<open>Xcoord\<close> field. The \<open>Ycoord\<close> and \<open>more\<close> fields
are copied across. It works for any record type scheme derived from
- @{typ point} (including @{typ cpoint} etc.):
+ \<^typ>\<open>point\<close> (including \<^typ>\<open>cpoint\<close> etc.):
\<close>
definition incX :: "'a point_scheme \<Rightarrow> 'a point_scheme" where
@@ -192,7 +187,7 @@
text \<open>
Generic theorems can be proved about generic methods. This trivial
- lemma relates @{const incX} to \<open>getX\<close> and \<open>setX\<close>:
+ lemma relates \<^const>\<open>incX\<close> to \<open>getX\<close> and \<open>setX\<close>:
\<close>
lemma "incX r = setX r (getX r + 1)"
@@ -224,7 +219,7 @@
text \<open>
The following equality is similar, but generic, in that \<open>r\<close>
- can be any instance of @{typ "'a point_scheme"}:
+ can be any instance of \<^typ>\<open>'a point_scheme\<close>:
\<close>
lemma "r\<lparr>Xcoord := a, Ycoord := b\<rparr> = r\<lparr>Ycoord := b, Xcoord := a\<rparr>"
@@ -264,7 +259,7 @@
text \<open>\noindent
Here the simplifier can do nothing, since general record equality is
not eliminated automatically. One way to proceed is by an explicit
- forward step that applies the selector @{const Xcoord} to both sides
+ forward step that applies the selector \<^const>\<open>Xcoord\<close> to both sides
of the assumed record equality:
\<close>
@@ -313,9 +308,8 @@
text \<open>
Each record declaration introduces a number of derived operations to
refer collectively to a record's fields and to convert between fixed
- record types. They can, for instance, convert between types @{typ
- point} and @{typ cpoint}. We can add a colour to a point or convert
- a @{typ cpoint} to a @{typ point} by forgetting its colour.
+ record types. They can, for instance, convert between types \<^typ>\<open>point\<close> and \<^typ>\<open>cpoint\<close>. We can add a colour to a point or convert
+ a \<^typ>\<open>cpoint\<close> to a \<^typ>\<open>point\<close> by forgetting its colour.
\begin{itemize}
@@ -340,13 +334,12 @@
definitions, which are \emph{not} unfolded by default, are made
available by the collective name of \<open>defs\<close> (\<open>point.defs\<close>, \<open>cpoint.defs\<close>, etc.).
For example, here are the versions of those functions generated for
- record @{typ point}. We omit \<open>point.fields\<close>, which happens to
+ record \<^typ>\<open>point\<close>. We omit \<open>point.fields\<close>, which happens to
be the same as \<open>point.make\<close>.
@{thm [display, indent = 0, margin = 65] point.make_def [no_vars]
point.extend_def [no_vars] point.truncate_def [no_vars]}
- Contrast those with the corresponding functions for record @{typ
- cpoint}. Observe \<open>cpoint.fields\<close> in particular.
+ Contrast those with the corresponding functions for record \<^typ>\<open>cpoint\<close>. Observe \<open>cpoint.fields\<close> in particular.
@{thm [display, indent = 0, margin = 65] cpoint.make_def [no_vars]
cpoint.fields_def [no_vars] cpoint.extend_def [no_vars]
cpoint.truncate_def [no_vars]}
@@ -361,10 +354,10 @@
"cpt2 \<equiv> point.extend pt1 (cpoint.fields Green)"
text \<open>
- The coloured points @{const cpt1} and \<open>cpt2\<close> are equal. The
+ The coloured points \<^const>\<open>cpt1\<close> and \<open>cpt2\<close> are equal. The
proof is trivial, by unfolding all the definitions. We deliberately
omit the definition of~\<open>pt1\<close> in order to reveal the underlying
- comparison on type @{typ point}.
+ comparison on type \<^typ>\<open>point\<close>.
\<close>
lemma "cpt1 = cpt2"
@@ -383,7 +376,7 @@
text \<open>
\begin{exercise}
- Extend record @{typ cpoint} to have a further field, \<open>intensity\<close>, of type~@{typ nat}. Experiment with generic operations
+ Extend record \<^typ>\<open>cpoint\<close> to have a further field, \<open>intensity\<close>, of type~\<^typ>\<open>nat\<close>. Experiment with generic operations
(using polymorphic selectors and updates) and explicit coercions
(using \<open>extend\<close>, \<open>truncate\<close> etc.) among the three record
types.
--- a/src/Doc/Tutorial/Types/Typedefs.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Tutorial/Types/Typedefs.thy Sat Jan 05 17:24:33 2019 +0100
@@ -3,7 +3,7 @@
section\<open>Introducing New Types\<close>
text\<open>\label{sec:adv-typedef}
-For most applications, a combination of predefined types like @{typ bool} and
+For most applications, a combination of predefined types like \<^typ>\<open>bool\<close> and
\<open>\<Rightarrow>\<close> with recursive datatypes and records is quite sufficient. Very
occasionally you may feel the need for a more advanced type. If you
are certain that your type is not definable by any of the
@@ -25,7 +25,7 @@
typedecl my_new_type
text\<open>\noindent
-This does not define @{typ my_new_type} at all but merely introduces its
+This does not define \<^typ>\<open>my_new_type\<close> at all but merely introduces its
name. Thus we know nothing about this type, except that it is
non-empty. Such declarations without definitions are
useful if that type can be viewed as a parameter of the theory.
@@ -49,7 +49,7 @@
of your development. It is extremely easy to write down contradictory sets of
axioms, in which case you will be able to prove everything but it will mean
nothing. In the example above, the axiomatic approach is
-unnecessary: a one-element type called @{typ unit} is already defined in HOL.
+unnecessary: a one-element type called \<^typ>\<open>unit\<close> is already defined in HOL.
\index{types!declaring|)}
\<close>
@@ -83,15 +83,15 @@
by simp
text\<open>
-This type definition introduces the new type @{typ three} and asserts
-that it is a copy of the set @{term"{0::nat,1,2}"}. This assertion
-is expressed via a bijection between the \emph{type} @{typ three} and the
-\emph{set} @{term"{0::nat,1,2}"}. To this end, the command declares the following
+This type definition introduces the new type \<^typ>\<open>three\<close> and asserts
+that it is a copy of the set \<^term>\<open>{0::nat,1,2}\<close>. This assertion
+is expressed via a bijection between the \emph{type} \<^typ>\<open>three\<close> and the
+\emph{set} \<^term>\<open>{0::nat,1,2}\<close>. To this end, the command declares the following
constants behind the scenes:
\begin{center}
\begin{tabular}{rcl}
-@{term Rep_three} &::& @{typ"three \<Rightarrow> nat"}\\
-@{term Abs_three} &::& @{typ"nat \<Rightarrow> three"}
+\<^term>\<open>Rep_three\<close> &::& \<^typ>\<open>three \<Rightarrow> nat\<close>\\
+\<^term>\<open>Abs_three\<close> &::& \<^typ>\<open>nat \<Rightarrow> three\<close>
\end{tabular}
\end{center}
The situation is best summarized with the help of the following diagram,
@@ -99,9 +99,8 @@
\begin{center}
\includegraphics[scale=.8]{typedef}
\end{center}
-Finally, \isacommand{typedef} asserts that @{term Rep_three} is
-surjective on the subset and @{term Abs_three} and @{term
-Rep_three} are inverses of each other:
+Finally, \isacommand{typedef} asserts that \<^term>\<open>Rep_three\<close> is
+surjective on the subset and \<^term>\<open>Abs_three\<close> and \<^term>\<open>Rep_three\<close> are inverses of each other:
\begin{center}
\begin{tabular}{@ {}r@ {\qquad\qquad}l@ {}}
@{thm Rep_three[no_vars]} & (@{thm[source]Rep_three}) \\
@@ -112,7 +111,7 @@
%
From this example it should be clear what \isacommand{typedef} does
in general given a name (here \<open>three\<close>) and a set
-(here @{term"{0::nat,1,2}"}).
+(here \<^term>\<open>{0::nat,1,2}\<close>).
Our next step is to define the basic functions expected on the new type.
Although this depends on the type at hand, the following strategy works well:
@@ -120,10 +119,10 @@
\item define a small kernel of basic functions that can express all other
functions you anticipate.
\item define the kernel in terms of corresponding functions on the
-representing type using @{term Abs} and @{term Rep} to convert between the
+representing type using \<^term>\<open>Abs\<close> and \<^term>\<open>Rep\<close> to convert between the
two levels.
\end{itemize}
-In our example it suffices to give the three elements of type @{typ three}
+In our example it suffices to give the three elements of type \<^typ>\<open>three\<close>
names:
\<close>
@@ -132,18 +131,16 @@
definition C :: three where "C \<equiv> Abs_three 2"
text\<open>
-So far, everything was easy. But it is clear that reasoning about @{typ
-three} will be hell if we have to go back to @{typ nat} every time. Thus our
+So far, everything was easy. But it is clear that reasoning about \<^typ>\<open>three\<close> will be hell if we have to go back to \<^typ>\<open>nat\<close> every time. Thus our
aim must be to raise our level of abstraction by deriving enough theorems
-about type @{typ three} to characterize it completely. And those theorems
-should be phrased in terms of @{term A}, @{term B} and @{term C}, not @{term
-Abs_three} and @{term Rep_three}. Because of the simplicity of the example,
-we merely need to prove that @{term A}, @{term B} and @{term C} are distinct
+about type \<^typ>\<open>three\<close> to characterize it completely. And those theorems
+should be phrased in terms of \<^term>\<open>A\<close>, \<^term>\<open>B\<close> and \<^term>\<open>C\<close>, not \<^term>\<open>Abs_three\<close> and \<^term>\<open>Rep_three\<close>. Because of the simplicity of the example,
+we merely need to prove that \<^term>\<open>A\<close>, \<^term>\<open>B\<close> and \<^term>\<open>C\<close> are distinct
and that they exhaust the type.
In processing our \isacommand{typedef} declaration,
Isabelle proves several helpful lemmas. The first two
-express injectivity of @{term Rep_three} and @{term Abs_three}:
+express injectivity of \<^term>\<open>Rep_three\<close> and \<^term>\<open>Abs_three\<close>:
\begin{center}
\begin{tabular}{@ {}r@ {\qquad}l@ {}}
@{thm Rep_three_inject[no_vars]} & (@{thm[source]Rep_three_inject}) \\
@@ -154,7 +151,7 @@
\end{tabular}
\end{center}
The following ones allow to replace some \<open>x::three\<close> by
-\<open>Abs_three(y::nat)\<close>, and conversely @{term y} by @{term"Rep_three x"}:
+\<open>Abs_three(y::nat)\<close>, and conversely \<^term>\<open>y\<close> by \<^term>\<open>Rep_three x\<close>:
\begin{center}
\begin{tabular}{@ {}r@ {\qquad}l@ {}}
@{thm Rep_three_cases[no_vars]} & (@{thm[source]Rep_three_cases}) \\
@@ -166,21 +163,21 @@
These theorems are proved for any type definition, with \<open>three\<close>
replaced by the name of the type in question.
-Distinctness of @{term A}, @{term B} and @{term C} follows immediately
+Distinctness of \<^term>\<open>A\<close>, \<^term>\<open>B\<close> and \<^term>\<open>C\<close> follows immediately
if we expand their definitions and rewrite with the injectivity
-of @{term Abs_three}:
+of \<^term>\<open>Abs_three\<close>:
\<close>
lemma "A \<noteq> B \<and> B \<noteq> A \<and> A \<noteq> C \<and> C \<noteq> A \<and> B \<noteq> C \<and> C \<noteq> B"
by(simp add: Abs_three_inject A_def B_def C_def)
text\<open>\noindent
-Of course we rely on the simplifier to solve goals like @{prop"(0::nat) \<noteq> 1"}.
+Of course we rely on the simplifier to solve goals like \<^prop>\<open>(0::nat) \<noteq> 1\<close>.
-The fact that @{term A}, @{term B} and @{term C} exhaust type @{typ three} is
-best phrased as a case distinction theorem: if you want to prove @{prop"P x"}
-(where @{term x} is of type @{typ three}) it suffices to prove @{prop"P A"},
-@{prop"P B"} and @{prop"P C"}:\<close>
+The fact that \<^term>\<open>A\<close>, \<^term>\<open>B\<close> and \<^term>\<open>C\<close> exhaust type \<^typ>\<open>three\<close> is
+best phrased as a case distinction theorem: if you want to prove \<^prop>\<open>P x\<close>
+(where \<^term>\<open>x\<close> is of type \<^typ>\<open>three\<close>) it suffices to prove \<^prop>\<open>P A\<close>,
+\<^prop>\<open>P B\<close> and \<^prop>\<open>P C\<close>:\<close>
lemma three_cases: "\<lbrakk> P A; P B; P C \<rbrakk> \<Longrightarrow> P x"
@@ -190,8 +187,8 @@
txt\<open>
@{subgoals[display,indent=0]}
-Simplification leads to the disjunction @{prop"y
-= 0 \<or> y = 1 \<or> y = (2::nat)"} which \isa{auto} separates into three
+Simplification leads to the disjunction \<^prop>\<open>y
+= 0 \<or> y = 1 \<or> y = (2::nat)\<close> which \isa{auto} separates into three
subgoals, each of which is easily solved by simplification:\<close>
apply(auto simp add: A_def B_def C_def)
@@ -199,7 +196,7 @@
text\<open>\noindent
This concludes the derivation of the characteristic theorems for
-type @{typ three}.
+type \<^typ>\<open>three\<close>.
The attentive reader has realized long ago that the
above lengthy definition can be collapsed into one line:
@@ -212,7 +209,7 @@
the same derivations as we did, which gives you some idea what life would be
like without \isacommand{datatype}.
-Although @{typ three} could be defined in one line, we have chosen this
+Although \<^typ>\<open>three\<close> could be defined in one line, we have chosen this
example to demonstrate \isacommand{typedef} because its simplicity makes the
key concepts particularly easy to grasp. If you would like to see a
non-trivial example that cannot be defined more directly, we recommend the
--- a/src/Doc/Typeclass_Hierarchy/Setup.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Typeclass_Hierarchy/Setup.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,7 +29,7 @@
val (vTs', _) = fold_map frugal vTs [];
in Logic.list_all (vTs', map_types (K dummyT) body) end;
in
- Term_Style.setup @{binding frugal_sorts}
+ Term_Style.setup \<^binding>\<open>frugal_sorts\<close>
(Scan.succeed (K frugal_sorts))
end
\<close>
--- a/src/Doc/Typeclass_Hierarchy/Typeclass_Hierarchy.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Typeclass_Hierarchy/Typeclass_Hierarchy.thy Sat Jan 05 17:24:33 2019 +0100
@@ -39,8 +39,8 @@
always have been numerical types, which form an inclusion chain:
\begin{center}
- @{typ nat} \<open>\<sqsubset>\<close> @{typ int} \<open>\<sqsubset>\<close> @{typ rat}
- \<open>\<sqsubset>\<close> @{typ real} \<open>\<sqsubset>\<close> @{typ complex}
+ \<^typ>\<open>nat\<close> \<open>\<sqsubset>\<close> \<^typ>\<open>int\<close> \<open>\<sqsubset>\<close> \<^typ>\<open>rat\<close>
+ \<open>\<sqsubset>\<close> \<^typ>\<open>real\<close> \<open>\<sqsubset>\<close> \<^typ>\<open>complex\<close>
\end{center}
\noindent The inclusion \<open>\<sqsubset>\<close> means that any value of the numerical
@@ -57,15 +57,15 @@
of numerical types into them:
\begin{center}\begin{tabular}{lccc}
- @{term of_nat} \<open>::\<close> & @{typ nat} & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::semiring_1"} \\
+ \<^term>\<open>of_nat\<close> \<open>::\<close> & \<^typ>\<open>nat\<close> & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::semiring_1"} \\
& \<open>\<sqinter>\<close> & & \<open>\<up>\<close> \\
- @{term of_int} \<open>::\<close> & @{typ int} & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::ring_1"} \\
+ \<^term>\<open>of_int\<close> \<open>::\<close> & \<^typ>\<open>int\<close> & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::ring_1"} \\
& \<open>\<sqinter>\<close> & & \<open>\<up>\<close> \\
- @{term of_rat} \<open>::\<close> & @{typ rat} & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::field_char_0"} \\
+ \<^term>\<open>of_rat\<close> \<open>::\<close> & \<^typ>\<open>rat\<close> & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::field_char_0"} \\
& \<open>\<sqinter>\<close> & & \<open>\<up>\<close> \\
- @{term of_real} \<open>::\<close> & @{typ real} & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::real_algebra_1"} \\
+ \<^term>\<open>of_real\<close> \<open>::\<close> & \<^typ>\<open>real\<close> & \<open>\<Rightarrow>\<close> & @{typ [source] "'a::real_algebra_1"} \\
& \<open>\<sqinter>\<close> \\
- & @{typ complex}
+ & \<^typ>\<open>complex\<close>
\end{tabular}\end{center}
\noindent \<open>d \<leftarrow> c\<close> means that \<open>c\<close> is subclass of \<open>d\<close>.
@@ -101,10 +101,10 @@
\<^item> Parameters are fixed over the whole type class
hierarchy and cannot be refined in specific situations:
- think of integral domains with a predicate @{term is_unit};
+ think of integral domains with a predicate \<^term>\<open>is_unit\<close>;
for natural numbers, this degenerates to the much simpler
@{term [source] "HOL.equal (1::nat)"} but facts
- refer to @{term is_unit} nonetheless.
+ refer to \<^term>\<open>is_unit\<close> nonetheless.
\<^item> Type classes are not apt for meta-theory. There
is no practically usable way to express that the units
@@ -135,13 +135,13 @@
classes, ie. classes with operations but with no axioms,
most notably:
- \<^item> @{command class} @{class plus} with @{term [source] "(a::'a::plus) + b"}
+ \<^item> @{command class} \<^class>\<open>plus\<close> with @{term [source] "(a::'a::plus) + b"}
- \<^item> @{command class} @{class zero} with @{term [source] "0::'a::zero"}
+ \<^item> @{command class} \<^class>\<open>zero\<close> with @{term [source] "0::'a::zero"}
- \<^item> @{command class} @{class times} with @{term [source] "(a::'a::times) * b"}
+ \<^item> @{command class} \<^class>\<open>times\<close> with @{term [source] "(a::'a::times) * b"}
- \<^item> @{command class} @{class one} with @{term [source] "1::'a::one"}
+ \<^item> @{command class} \<^class>\<open>one\<close> with @{term [source] "1::'a::one"}
\noindent Before the introduction of the @{command class} statement in
Isabelle @{cite "Haftmann-Wenzel:2006:classes"} it was impossible
@@ -159,7 +159,7 @@
exotic examples.
\<^item> Type classes might share operations but not necessarily
- axioms on them, e.g. @{term gcd} (see \secref{sec:gcd}).
+ axioms on them, e.g. \<^term>\<open>gcd\<close> (see \secref{sec:gcd}).
Hence their common base is a syntactic type class.
\noindent However syntactic type classes should only be used with striking
@@ -169,7 +169,7 @@
(see \secref{sec:numerals}): @{lemma "2 + 2 = 4" by simp} is
provable without further ado, and this also meets the typical
expectation towards a numeral notation; in more ancient releases
- numerals were purely syntactic and @{prop "2 + 2 = 4"} was
+ numerals were purely syntactic and \<^prop>\<open>2 + 2 = 4\<close> was
not provable without particular type constraints.
\<close>
@@ -183,13 +183,13 @@
In {Isabelle/HOL}, this is accomplished using the following
abstract setup:
- \<^item> A @{locale semigroup} introduces an abstract binary
+ \<^item> A \<^locale>\<open>semigroup\<close> introduces an abstract binary
associative operation.
- \<^item> A @{locale monoid} is an extension of @{locale semigroup}
+ \<^item> A \<^locale>\<open>monoid\<close> is an extension of \<^locale>\<open>semigroup\<close>
with a neutral element.
- \<^item> Both @{locale semigroup} and @{locale monoid} provide
+ \<^item> Both \<^locale>\<open>semigroup\<close> and \<^locale>\<open>monoid\<close> provide
dedicated syntax for their operations \<open>(\<^bold>*, \<^bold>1)\<close>.
This syntax is not visible on the global theory level
but only for abstract reasoning inside the respective
@@ -199,19 +199,19 @@
syntactic type classes \secref{sec:syntactic-type-class}
using the following classes:
- \<^item> @{command class} @{class semigroup_mult} = @{class times}
+ \<^item> @{command class} \<^class>\<open>semigroup_mult\<close> = \<^class>\<open>times\<close>
- \<^item> @{command class} @{class monoid_mult} = @{class one} + @{class semigroup_mult}
+ \<^item> @{command class} \<^class>\<open>monoid_mult\<close> = \<^class>\<open>one\<close> + \<^class>\<open>semigroup_mult\<close>
- \<^item> @{command class} @{class semigroup_add} = @{class plus}
+ \<^item> @{command class} \<^class>\<open>semigroup_add\<close> = \<^class>\<open>plus\<close>
- \<^item> @{command class} @{class monoid_add} = @{class zero} + @{class semigroup_add}
+ \<^item> @{command class} \<^class>\<open>monoid_add\<close> = \<^class>\<open>zero\<close> + \<^class>\<open>semigroup_add\<close>
- Locales @{locale semigroup} and @{locale monoid} are
+ Locales \<^locale>\<open>semigroup\<close> and \<^locale>\<open>monoid\<close> are
interpreted (using @{command sublocale}) into their
corresponding type classes, with prefixes \<open>add\<close>
- and \<open>mult\<close>; hence facts derived in @{locale semigroup}
- and @{locale monoid} are propagated simultaneously to
+ and \<open>mult\<close>; hence facts derived in \<^locale>\<open>semigroup\<close>
+ and \<^locale>\<open>monoid\<close> are propagated simultaneously to
\<^emph>\<open>both\<close> using a consistent naming policy, ie.
\<^item> @{fact semigroup.assoc}: @{thm (frugal_sorts) semigroup.assoc [all, no_vars]}
@@ -226,7 +226,7 @@
\<^item> @{fact add.right_neutral}: @{thm (frugal_sorts) add.right_neutral [all, no_vars]}
- \<^item> Note that the syntax in @{locale semigroup} and @{locale monoid}
+ \<^item> Note that the syntax in \<^locale>\<open>semigroup\<close> and \<^locale>\<open>monoid\<close>
is bold; this avoids clashes when writing properties
inside one of these locales in presence of that global
concrete type class syntax.
@@ -236,18 +236,18 @@
designation \<^emph>\<open>abelian\<close> is quite standard concerning
(semi)groups, but not for monoids}:
- \<^item> Locales @{locale abel_semigroup} and @{locale comm_monoid}
+ \<^item> Locales \<^locale>\<open>abel_semigroup\<close> and \<^locale>\<open>comm_monoid\<close>
add commutativity as property.
\<^item> Concrete syntax emerges through
- \<^item> @{command class} @{class ab_semigroup_add} = @{class semigroup_add}
+ \<^item> @{command class} \<^class>\<open>ab_semigroup_add\<close> = \<^class>\<open>semigroup_add\<close>
- \<^item> @{command class} @{class ab_semigroup_mult} = @{class semigroup_mult}
+ \<^item> @{command class} \<^class>\<open>ab_semigroup_mult\<close> = \<^class>\<open>semigroup_mult\<close>
- \<^item> @{command class} @{class comm_monoid_add} = @{class zero} + @{class ab_semigroup_add}
+ \<^item> @{command class} \<^class>\<open>comm_monoid_add\<close> = \<^class>\<open>zero\<close> + \<^class>\<open>ab_semigroup_add\<close>
- \<^item> @{command class} @{class comm_monoid_mult} = @{class one} + @{class ab_semigroup_mult}
+ \<^item> @{command class} \<^class>\<open>comm_monoid_mult\<close> = \<^class>\<open>one\<close> + \<^class>\<open>ab_semigroup_mult\<close>
and corresponding interpretation of the locales above, yielding
@@ -271,7 +271,7 @@
are declared as members. Due to interpretation, also
@{fact mult.assoc}, @{fact mult.commute} and @{fact mult.left_commute}
are also members of @{fact ac_simps}, as any corresponding facts
- stemming from interpretation of @{locale abel_semigroup}.
+ stemming from interpretation of \<^locale>\<open>abel_semigroup\<close>.
Hence adding @{fact ac_simps} to the simplification rules for
a single method call uses all associativity and commutativity
rules known by means of interpretation.
@@ -284,32 +284,32 @@
The hierarchy for inverse group operations takes into account
that there are weaker algebraic structures with only a partially
inverse operation. E. g. the natural numbers have bounded
- subtraction @{term "m - (n::nat)"} which is only an inverse
- operation if @{term "m \<ge> (n::nat)"}; unary minus \<open>-\<close>
+ subtraction \<^term>\<open>m - (n::nat)\<close> which is only an inverse
+ operation if \<^term>\<open>m \<ge> (n::nat)\<close>; unary minus \<open>-\<close>
is pointless on the natural numbers.
Hence for both additive and multiplicative notation there
are syntactic classes for inverse operations, both unary
and binary:
- \<^item> @{command class} @{class minus} with @{term [source] "(a::'a::minus) - b"}
+ \<^item> @{command class} \<^class>\<open>minus\<close> with @{term [source] "(a::'a::minus) - b"}
- \<^item> @{command class} @{class uminus} with @{term [source] "- a::'a::uminus"}
+ \<^item> @{command class} \<^class>\<open>uminus\<close> with @{term [source] "- a::'a::uminus"}
- \<^item> @{command class} @{class divide} with @{term [source] "(a::'a::divide) div b"}
+ \<^item> @{command class} \<^class>\<open>divide\<close> with @{term [source] "(a::'a::divide) div b"}
- \<^item> @{command class} @{class inverse} = @{class divide} with @{term [source] "inverse a::'a::inverse"}
+ \<^item> @{command class} \<^class>\<open>inverse\<close> = \<^class>\<open>divide\<close> with @{term [source] "inverse a::'a::inverse"}
\\ and @{term [source] "(a::'a::inverse) / b"}
- \noindent Here @{class inverse} specializes the ``partial'' syntax
+ \noindent Here \<^class>\<open>inverse\<close> specializes the ``partial'' syntax
@{term [source] "a div b"} to the more specific
@{term [source] "a / b"}.
Semantic properties are added by
- \<^item> @{command class} @{class cancel_ab_semigroup_add} = @{class ab_semigroup_add} + @{class minus}
+ \<^item> @{command class} \<^class>\<open>cancel_ab_semigroup_add\<close> = \<^class>\<open>ab_semigroup_add\<close> + \<^class>\<open>minus\<close>
- \<^item> @{command class} @{class cancel_comm_monoid_add} = @{class cancel_ab_semigroup_add} + @{class comm_monoid_add}
+ \<^item> @{command class} \<^class>\<open>cancel_comm_monoid_add\<close> = \<^class>\<open>cancel_ab_semigroup_add\<close> + \<^class>\<open>comm_monoid_add\<close>
\noindent which specify a minimal binary partially inverse operation as
@@ -323,16 +323,16 @@
\noindent The total inverse operation is established as follows:
- \<^item> Locale @{locale group} extends the abstract hierarchy with
+ \<^item> Locale \<^locale>\<open>group\<close> extends the abstract hierarchy with
the inverse operation.
\<^item> The concrete additive inverse operation emerges through
- \<^item> @{command class} @{class group_add} = @{class minus} + @{class uminus} + @{class monoid_add} (in @{theory HOL.Groups}) \\
+ \<^item> @{command class} \<^class>\<open>group_add\<close> = \<^class>\<open>minus\<close> + \<^class>\<open>uminus\<close> + \<^class>\<open>monoid_add\<close> (in \<^theory>\<open>HOL.Groups\<close>) \\
- \<^item> @{command class} @{class ab_group_add} = @{class minus} + @{class uminus} + @{class comm_monoid_add} (in @{theory HOL.Groups})
+ \<^item> @{command class} \<^class>\<open>ab_group_add\<close> = \<^class>\<open>minus\<close> + \<^class>\<open>uminus\<close> + \<^class>\<open>comm_monoid_add\<close> (in \<^theory>\<open>HOL.Groups\<close>)
- and corresponding interpretation of locale @{locale group}, yielding e.g.
+ and corresponding interpretation of locale \<^locale>\<open>group\<close>, yielding e.g.
\<^item> @{fact group.left_inverse}: @{thm (frugal_sorts) group.left_inverse [all, no_vars]}
--- a/src/Doc/more_antiquote.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/more_antiquote.ML Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
(* class specifications *)
val _ =
- Theory.setup (Thy_Output.antiquotation_pretty @{binding class_spec} (Scan.lift Args.name)
+ Theory.setup (Thy_Output.antiquotation_pretty \<^binding>\<open>class_spec\<close> (Scan.lift Args.name)
(fn ctxt => fn s =>
let
val thy = Proof_Context.theory_of ctxt;
@@ -27,7 +27,7 @@
in thm end;
val _ =
- Theory.setup (Thy_Output.antiquotation_pretty @{binding code_thms} Args.term
+ Theory.setup (Thy_Output.antiquotation_pretty \<^binding>\<open>code_thms\<close> Args.term
(fn ctxt => fn raw_const =>
let
val thy = Proof_Context.theory_of ctxt;
--- a/src/FOL/FOL.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/FOL/FOL.thy Sat Jan 05 17:24:33 2019 +0100
@@ -203,7 +203,7 @@
structure Blast = Blast
(
structure Classical = Cla
- val Trueprop_const = dest_Const @{const Trueprop}
+ val Trueprop_const = dest_Const \<^const>\<open>Trueprop\<close>
val equality_name = \<^const_name>\<open>eq\<close>
val not_name = \<^const_name>\<open>Not\<close>
val notE = @{thm notE}
--- a/src/HOL/Algebra/AbelCoset.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/AbelCoset.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
subsubsection \<open>Definitions\<close>
-text \<open>Hiding \<open><+>\<close> from @{theory HOL.Sum_Type} until I come
+text \<open>Hiding \<open><+>\<close> from \<^theory>\<open>HOL.Sum_Type\<close> until I come
up with better syntax here\<close>
no_notation Sum_Type.Plus (infixr "<+>" 65)
@@ -503,8 +503,8 @@
by (rule normal.inv_FactGroup [OF a_normal,
folded A_FactGroup_def A_SET_INV_def, simplified monoid_record_simps])
-text\<open>The coset map is a homomorphism from @{term G} to the quotient group
- @{term "G Mod H"}\<close>
+text\<open>The coset map is a homomorphism from \<^term>\<open>G\<close> to the quotient group
+ \<^term>\<open>G Mod H\<close>\<close>
lemma (in abelian_subgroup) a_r_coset_hom_A_Mod:
"(\<lambda>a. H +> a) \<in> hom (add_monoid G) (G A_Mod H)"
by (rule normal.r_coset_hom_Mod [OF a_normal,
@@ -611,7 +611,7 @@
by (rule group_hom.FactGroup_inj_on[OF a_group_hom,
folded a_kernel_def A_FactGroup_def, simplified ring_record_simps])
-text\<open>If the homomorphism @{term h} is onto @{term H}, then so is the
+text\<open>If the homomorphism \<^term>\<open>h\<close> is onto \<^term>\<open>H\<close>, then so is the
homomorphism from the quotient group\<close>
lemma (in abelian_group_hom) A_FactGroup_onto:
assumes h: "h ` carrier G = carrier H"
@@ -619,8 +619,8 @@
by (rule group_hom.FactGroup_onto[OF a_group_hom,
folded a_kernel_def A_FactGroup_def, simplified ring_record_simps]) (rule h)
-text\<open>If @{term h} is a homomorphism from @{term G} onto @{term H}, then the
- quotient group @{term "G Mod (kernel G H h)"} is isomorphic to @{term H}.\<close>
+text\<open>If \<^term>\<open>h\<close> is a homomorphism from \<^term>\<open>G\<close> onto \<^term>\<open>H\<close>, then the
+ quotient group \<^term>\<open>G Mod (kernel G H h)\<close> is isomorphic to \<^term>\<open>H\<close>.\<close>
theorem (in abelian_group_hom) A_FactGroup_iso_set:
"h ` carrier G = carrier H
\<Longrightarrow> (\<lambda>X. the_elem (h`X)) \<in> iso (G A_Mod (a_kernel G H h)) (add_monoid H)"
--- a/src/HOL/Algebra/Complete_Lattice.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Complete_Lattice.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1153,8 +1153,8 @@
fix B
assume "B \<subseteq> carrier ?L"
then have "greatest ?L (\<Inter> B \<inter> A) (Lower ?L B)"
- txt \<open>@{term "\<Inter> B"} is not the infimum of @{term B}:
- @{term "\<Inter> {} = UNIV"} which is in general bigger than @{term "A"}! \<close>
+ txt \<open>\<^term>\<open>\<Inter> B\<close> is not the infimum of \<^term>\<open>B\<close>:
+ \<^term>\<open>\<Inter> {} = UNIV\<close> which is in general bigger than \<^term>\<open>A\<close>! \<close>
by (fastforce intro!: greatest_LowerI simp: Lower_def)
then show "\<exists>i. greatest ?L i (Lower ?L B)" ..
qed
--- a/src/HOL/Algebra/Coset.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Coset.thy Sat Jan 05 17:24:33 2019 +0100
@@ -206,7 +206,7 @@
lemma (in group) coset_join2:
assumes "x \<in> carrier G" "subgroup H G" "x \<in> H"
shows "H #> x = H" using assms
- \<comment> \<open>Alternative proof is to put @{term "x=\<one>"} in \<open>repr_independence\<close>.\<close>
+ \<comment> \<open>Alternative proof is to put \<^term>\<open>x=\<one>\<close> in \<open>repr_independence\<close>.\<close>
by (force simp add: subgroup.m_closed r_coset_def solve_equation)
lemma (in group) coset_join3:
@@ -934,8 +934,8 @@
by (simp add: FactGroup_def group.inv_equality)
qed
-text\<open>The coset map is a homomorphism from @{term G} to the quotient group
- @{term "G Mod H"}\<close>
+text\<open>The coset map is a homomorphism from \<^term>\<open>G\<close> to the quotient group
+ \<^term>\<open>G Mod H\<close>\<close>
lemma (in normal) r_coset_hom_Mod:
"(\<lambda>a. H #> a) \<in> hom G (G Mod H)"
by (auto simp add: FactGroup_def RCOSETS_def Pi_def hom_def rcos_sum)
@@ -1041,7 +1041,7 @@
show "X=X'" by (rule equalityI) (simp_all add: FactGroup_subset h gX)
qed
-text\<open>If the homomorphism @{term h} is onto @{term H}, then so is the
+text\<open>If the homomorphism \<^term>\<open>h\<close> is onto \<^term>\<open>H\<close>, then so is the
homomorphism from the quotient group\<close>
lemma (in group_hom) FactGroup_onto:
assumes h: "h ` carrier G = carrier H"
@@ -1066,8 +1066,8 @@
qed
-text\<open>If @{term h} is a homomorphism from @{term G} onto @{term H}, then the
- quotient group @{term "G Mod (kernel G H h)"} is isomorphic to @{term H}.\<close>
+text\<open>If \<^term>\<open>h\<close> is a homomorphism from \<^term>\<open>G\<close> onto \<^term>\<open>H\<close>, then the
+ quotient group \<^term>\<open>G Mod (kernel G H h)\<close> is isomorphic to \<^term>\<open>H\<close>.\<close>
theorem (in group_hom) FactGroup_iso_set:
"h ` carrier G = carrier H
\<Longrightarrow> (\<lambda>X. the_elem (h`X)) \<in> iso (G Mod (kernel G H h)) H"
--- a/src/HOL/Algebra/Divisibility.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Divisibility.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1909,7 +1909,7 @@
qed
-\<comment> \<open>A version using @{const factors}, more complicated\<close>
+\<comment> \<open>A version using \<^const>\<open>factors\<close>, more complicated\<close>
lemma (in factorial_monoid) factors_irreducible_prime:
assumes pirr: "irreducible G p" and pcarr: "p \<in> carrier G"
shows "prime G p"
--- a/src/HOL/Algebra/Embedded_Algebras.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Embedded_Algebras.thy Sat Jan 05 17:24:33 2019 +0100
@@ -361,7 +361,7 @@
using Span_eq_combine_set_length_version[OF assms] by blast
-subsection \<open>Span as the minimal subgroup that contains @{term"K <#> (set Us)"}\<close>
+subsection \<open>Span as the minimal subgroup that contains \<^term>\<open>K <#> (set Us)\<close>\<close>
text \<open>Now we show the link between Span and Group.generate\<close>
--- a/src/HOL/Algebra/Group.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Group.thy Sat Jan 05 17:24:33 2019 +0100
@@ -625,7 +625,7 @@
qed
text \<open>
- Since @{term H} is nonempty, it contains some element @{term x}. Since
+ Since \<^term>\<open>H\<close> is nonempty, it contains some element \<^term>\<open>x\<close>. Since
it is closed under inverse, it contains \<open>inv x\<close>. Since
it is closed under product, it contains \<open>x \<otimes> inv x = \<one>\<close>.
\<close>
@@ -1021,8 +1021,8 @@
using DirProd_iso_set_trans assms unfolding is_iso_def by blast
-text\<open>Basis for homomorphism proofs: we assume two groups @{term G} and
- @{term H}, with a homomorphism @{term h} between them\<close>
+text\<open>Basis for homomorphism proofs: we assume two groups \<^term>\<open>G\<close> and
+ \<^term>\<open>H\<close>, with a homomorphism \<^term>\<open>h\<close> between them\<close>
locale group_hom = G?: group G + H?: group H for G (structure) and H (structure) +
fixes h
assumes homh: "h \<in> hom G H"
--- a/src/HOL/Algebra/Ideal.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Ideal.thy Sat Jan 05 17:24:33 2019 +0100
@@ -43,7 +43,7 @@
qed
-subsubsection (in ring) \<open>Ideals Generated by a Subset of @{term "carrier R"}\<close>
+subsubsection (in ring) \<open>Ideals Generated by a Subset of \<^term>\<open>carrier R\<close>\<close>
definition genideal :: "_ \<Rightarrow> 'a set \<Rightarrow> 'a set" ("Idl\<index> _" [80] 79)
where "genideal R S = \<Inter>{I. ideal I R \<and> S \<subseteq> I}"
@@ -214,7 +214,7 @@
subsection \<open>Intersection of Ideals\<close>
paragraph \<open>Intersection of two ideals\<close>
-text \<open>The intersection of any two ideals is again an ideal in @{term R}\<close>
+text \<open>The intersection of any two ideals is again an ideal in \<^term>\<open>R\<close>\<close>
lemma (in ring) i_intersect:
assumes "ideal I R"
@@ -231,7 +231,7 @@
done
qed
-text \<open>The intersection of any Number of Ideals is again an Ideal in @{term R}\<close>
+text \<open>The intersection of any Number of Ideals is again an Ideal in \<^term>\<open>R\<close>\<close>
lemma (in ring) i_Intersect:
assumes Sideals: "\<And>I. I \<in> S \<Longrightarrow> ideal I R" and notempty: "S \<noteq> {}"
@@ -299,9 +299,9 @@
qed
qed
-subsection (in ring) \<open>Ideals generated by a subset of @{term "carrier R"}\<close>
+subsection (in ring) \<open>Ideals generated by a subset of \<^term>\<open>carrier R\<close>\<close>
-text \<open>@{term genideal} generates an ideal\<close>
+text \<open>\<^term>\<open>genideal\<close> generates an ideal\<close>
lemma (in ring) genideal_ideal:
assumes Scarr: "S \<subseteq> carrier R"
shows "ideal (Idl S) R"
@@ -321,7 +321,7 @@
shows "i \<in> Idl {i}"
by (simp add: genideal_def)
-text \<open>@{term genideal} generates the minimal ideal\<close>
+text \<open>\<^term>\<open>genideal\<close> generates the minimal ideal\<close>
lemma (in ring) genideal_minimal:
assumes "ideal I R" "S \<subseteq> I"
shows "Idl S \<subseteq> I"
@@ -425,7 +425,7 @@
by fast
qed
-text \<open>@{const "cgenideal"} is minimal\<close>
+text \<open>\<^const>\<open>cgenideal\<close> is minimal\<close>
lemma (in ring) cgenideal_minimal:
assumes "ideal J R"
--- a/src/HOL/Algebra/IntRing.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/IntRing.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,7 +9,7 @@
section \<open>The Ring of Integers\<close>
-subsection \<open>Some properties of @{typ int}\<close>
+subsection \<open>Some properties of \<^typ>\<open>int\<close>\<close>
lemma dvds_eq_abseq:
fixes k :: int
@@ -146,7 +146,7 @@
qed (simp add: int_carrier_eq int_zero_eq int_add_eq int_finsum_eq int_a_inv_eq int_a_minus_eq)+
-text \<open>Removal of occurrences of @{term UNIV} in interpretation result
+text \<open>Removal of occurrences of \<^term>\<open>UNIV\<close> in interpretation result
--- experimental.\<close>
lemma UNIV:
--- a/src/HOL/Algebra/Multiplicative_Group.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Multiplicative_Group.thy Sat Jan 05 17:24:33 2019 +0100
@@ -187,25 +187,25 @@
We analyze the reduced form $a/d = m/n$ for any of those fractions.
We want to know how many fractions $m/n$ have the reduced form denominator $d$.
The condition $1 \leq m \leq n$ is equivalent to the condition $1 \leq a \leq d$.
- Therefore we want to know how many $a$ with $1 \leq a \leq d$ exist, s.t. @{term "gcd a d = 1"}.
- This number is exactly @{term "phi' d"}.
+ Therefore we want to know how many $a$ with $1 \leq a \leq d$ exist, s.t. \<^term>\<open>gcd a d = 1\<close>.
+ This number is exactly \<^term>\<open>phi' d\<close>.
Finally, by counting the fractions $m/n$ according to their reduced form denominator,
we get: @{term [display] "(\<Sum>d | d dvd n . phi' d) = n"}.
To formalize this proof in Isabelle, we analyze for an arbitrary divisor $d$ of $n$
\begin{itemize}
- \item the set of reduced form numerators @{term "{a. (1::nat) \<le> a \<and> a \<le> d \<and> coprime a d}"}
+ \item the set of reduced form numerators \<^term>\<open>{a. (1::nat) \<le> a \<and> a \<le> d \<and> coprime a d}\<close>
\item the set of numerators $m$, for which $m/n$ has the reduced form denominator $d$,
- i.e. the set @{term "{m \<in> {1::nat .. n}. n div gcd m n = d}"}
+ i.e. the set \<^term>\<open>{m \<in> {1::nat .. n}. n div gcd m n = d}\<close>
\end{itemize}
- We show that @{term "\<lambda>a. a*n div d"} with the inverse @{term "\<lambda>a. a div gcd a n"} is
+ We show that \<^term>\<open>\<lambda>a. a*n div d\<close> with the inverse \<^term>\<open>\<lambda>a. a div gcd a n\<close> is
a bijection between theses sets, thus yielding the equality
@{term [display] "phi' d = card {m \<in> {1 .. n}. n div gcd m n = d}"}
This gives us
@{term [display] "(\<Sum>d | d dvd n . phi' d)
= card (\<Union>d \<in> {d. d dvd n}. {m \<in> {1 .. n}. n div gcd m n = d})"}
and by showing
- @{term "(\<Union>d \<in> {d. d dvd n}. {m \<in> {1::nat .. n}. n div gcd m n = d}) \<supseteq> {1 .. n}"}
+ \<^term>\<open>(\<Union>d \<in> {d. d dvd n}. {m \<in> {1::nat .. n}. n div gcd m n = d}) \<supseteq> {1 .. n}\<close>
(this is our counting argument) the thesis follows.
\<close>
lemma sum_phi'_factors:
--- a/src/HOL/Algebra/Order.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Order.thy Sat Jan 05 17:24:33 2019 +0100
@@ -276,7 +276,7 @@
greatest :: "[_, 'a, 'a set] => bool"
where "greatest L g A \<longleftrightarrow> A \<subseteq> carrier L \<and> g \<in> A \<and> (\<forall>x\<in>A. x \<sqsubseteq>\<^bsub>L\<^esub> g)"
-text (in weak_partial_order) \<open>Could weaken these to @{term "l \<in> carrier L \<and> l .\<in> A"} and @{term "g \<in> carrier L \<and> g .\<in> A"}.\<close>
+text (in weak_partial_order) \<open>Could weaken these to \<^term>\<open>l \<in> carrier L \<and> l .\<in> A\<close> and \<^term>\<open>g \<in> carrier L \<and> g .\<in> A\<close>.\<close>
lemma least_dual [simp]:
"least (inv_gorder L) x A = greatest L x A"
@@ -311,8 +311,8 @@
abbreviation is_lub :: "[_, 'a, 'a set] => bool"
where "is_lub L x A \<equiv> least L x (Upper L A)"
-text (in weak_partial_order) \<open>@{const least} is not congruent in the second parameter for
- @{term "A {.=} A'"}\<close>
+text (in weak_partial_order) \<open>\<^const>\<open>least\<close> is not congruent in the second parameter for
+ \<^term>\<open>A {.=} A'\<close>\<close>
lemma (in weak_partial_order) least_Upper_cong_l:
assumes "x .= x'"
@@ -370,8 +370,8 @@
abbreviation is_glb :: "[_, 'a, 'a set] => bool"
where "is_glb L x A \<equiv> greatest L x (Lower L A)"
-text (in weak_partial_order) \<open>@{const greatest} is not congruent in the second parameter for
- @{term "A {.=} A'"} \<close>
+text (in weak_partial_order) \<open>\<^const>\<open>greatest\<close> is not congruent in the second parameter for
+ \<^term>\<open>A {.=} A'\<close> \<close>
lemma (in weak_partial_order) greatest_Lower_cong_l:
assumes "x .= x'"
--- a/src/HOL/Algebra/QuotRing.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/QuotRing.thy Sat Jan 05 17:24:33 2019 +0100
@@ -16,7 +16,7 @@
where "rcoset_mult R I A B = (\<Union>a\<in>A. \<Union>b\<in>B. I +>\<^bsub>R\<^esub> (a \<otimes>\<^bsub>R\<^esub> b))"
-text \<open>@{const "rcoset_mult"} fulfils the properties required by congruences\<close>
+text \<open>\<^const>\<open>rcoset_mult\<close> fulfils the properties required by congruences\<close>
lemma (in ideal) rcoset_mult_add:
assumes "x \<in> carrier R" "y \<in> carrier R"
shows "[mod I:] (I +> x) \<Otimes> (I +> y) = I +> (x \<otimes> y)"
@@ -179,7 +179,7 @@
then have Jcarr: "J = carrier R"
using I_maximal IinJ additive_subgroup.a_subset idealJ ideal_def by blast
- \<comment> \<open>Calculating an inverse for @{term "a"}\<close>
+ \<comment> \<open>Calculating an inverse for \<^term>\<open>a\<close>\<close>
from one_closed[folded Jcarr]
obtain r i where rcarr: "r \<in> carrier R"
and iI: "i \<in> I" and one: "\<one> = r \<otimes> a \<oplus> i"
--- a/src/HOL/Algebra/Ring_Divisibility.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Ring_Divisibility.thy Sat Jan 05 17:24:33 2019 +0100
@@ -58,7 +58,7 @@
using assms by unfold_locales auto
-subsection \<open>Passing from @{term R} to @{term "mult_of R"} and vice-versa. \<close>
+subsection \<open>Passing from \<^term>\<open>R\<close> to \<^term>\<open>mult_of R\<close> and vice-versa. \<close>
lemma divides_mult_imp_divides [simp]: "a divides\<^bsub>(mult_of R)\<^esub> b \<Longrightarrow> a divides\<^bsub>R\<^esub> b"
unfolding factor_def by auto
@@ -194,8 +194,8 @@
subsection \<open>Irreducible\<close>
text \<open>The following lemmas justify the need for a definition of irreducible specific to rings:
- for @{term "irreducible R"}, we need to suppose we are not in a field (which is plausible,
- but @{term "\<not> field R"} is an assumption we want to avoid; for @{term "irreducible (mult_of R)"}, zero
+ for \<^term>\<open>irreducible R\<close>, we need to suppose we are not in a field (which is plausible,
+ but \<^term>\<open>\<not> field R\<close> is an assumption we want to avoid; for \<^term>\<open>irreducible (mult_of R)\<close>, zero
is allowed. \<close>
lemma (in domain) zero_is_irreducible_mult:
--- a/src/HOL/Algebra/Sylow.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Sylow.thy Sat Jan 05 17:24:33 2019 +0100
@@ -204,7 +204,7 @@
subsection \<open>Equal Cardinalities of \<open>M\<close> and the Set of Cosets\<close>
-text \<open>Injections between @{term M} and @{term "rcosets\<^bsub>G\<^esub> H"} show that
+text \<open>Injections between \<^term>\<open>M\<close> and \<^term>\<open>rcosets\<^bsub>G\<^esub> H\<close> show that
their cardinalities are equal.\<close>
lemma ElemClassEquiv: "\<lbrakk>equiv A r; C \<in> A // r\<rbrakk> \<Longrightarrow> \<forall>x \<in> C. \<forall>y \<in> C. (x, y) \<in> r"
@@ -339,8 +339,8 @@
qed
text \<open>Needed because the locale's automatic definition refers to
- @{term "semigroup G"} and @{term "group_axioms G"} rather than
- simply to @{term "group G"}.\<close>
+ \<^term>\<open>semigroup G\<close> and \<^term>\<open>group_axioms G\<close> rather than
+ simply to \<^term>\<open>group G\<close>.\<close>
lemma sylow_eq: "sylow G p a m \<longleftrightarrow> group G \<and> sylow_axioms G p a m"
by (simp add: sylow_def group_def)
--- a/src/HOL/Algebra/Sym_Groups.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/Sym_Groups.thy Sat Jan 05 17:24:33 2019 +0100
@@ -317,7 +317,7 @@
subsection \<open>Unsolvability of Symmetric Groups\<close>
-text \<open>We show that symmetric groups (@{term\<open>sym_group n\<close>}) are unsolvable for @{term\<open>n \<ge> 5\<close>}.\<close>
+text \<open>We show that symmetric groups (\<^term>\<open>sym_group n\<close>) are unsolvable for \<^term>\<open>n \<ge> 5\<close>.\<close>
abbreviation three_cycles :: "nat \<Rightarrow> (nat \<Rightarrow> nat) set"
where "three_cycles n \<equiv>
--- a/src/HOL/Algebra/UnivPoly.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Algebra/UnivPoly.thy Sat Jan 05 17:24:33 2019 +0100
@@ -68,7 +68,7 @@
coeff = (\<lambda>p\<in>up R. \<lambda>n. p n)\<rparr>"
text \<open>
- Properties of the set of polynomials @{term up}.
+ Properties of the set of polynomials \<^term>\<open>up\<close>.
\<close>
lemma mem_upI [intro]:
@@ -246,7 +246,7 @@
context UP_ring
begin
-text \<open>Operations are closed over @{term P}.\<close>
+text \<open>Operations are closed over \<^term>\<open>P\<close>.\<close>
lemma UP_mult_closed [simp]:
"[| p \<in> carrier P; q \<in> carrier P |] ==> p \<otimes>\<^bsub>P\<^esub> q \<in> carrier P" by (simp add: UP_def up_mult_closed)
@@ -498,7 +498,7 @@
end
text \<open>
- Interpretation of lemmas from @{term algebra}.
+ Interpretation of lemmas from \<^term>\<open>algebra\<close>.
\<close>
lemma (in cring) cring:
@@ -639,7 +639,7 @@
qed
text\<open>The following corollary follows from lemmas @{thm "monom_one_Suc"}
- and @{thm "monom_one_Suc2"}, and is trivial in @{term UP_cring}\<close>
+ and @{thm "monom_one_Suc2"}, and is trivial in \<^term>\<open>UP_cring\<close>\<close>
corollary monom_one_comm: shows "monom P \<one> k \<otimes>\<^bsub>P\<^esub> monom P \<one> 1 = monom P \<one> 1 \<otimes>\<^bsub>P\<^esub> monom P \<one> k"
unfolding monom_one_Suc [symmetric] monom_one_Suc2 [symmetric] ..
@@ -950,7 +950,7 @@
end
-text\<open>The following lemmas also can be lifted to @{term UP_ring}.\<close>
+text\<open>The following lemmas also can be lifted to \<^term>\<open>UP_ring\<close>.\<close>
context UP_ring
begin
@@ -1073,7 +1073,7 @@
end
text \<open>
- Interpretation of theorems from @{term domain}.
+ Interpretation of theorems from \<^term>\<open>domain\<close>.
\<close>
sublocale UP_domain < "domain" P
@@ -1202,7 +1202,7 @@
assumes indet_img_carrier [simp, intro]: "s \<in> carrier S"
defines Eval_def: "Eval == eval R S h s"
-text\<open>JE: I have moved the following lemma from Ring.thy and lifted then to the locale @{term ring_hom_ring} from @{term ring_hom_cring}.\<close>
+text\<open>JE: I have moved the following lemma from Ring.thy and lifted then to the locale \<^term>\<open>ring_hom_ring\<close> from \<^term>\<open>ring_hom_cring\<close>.\<close>
text\<open>JE: I was considering using it in \<open>eval_ring_hom\<close>, but that property does not hold for non commutative rings, so
maybe it is not that necessary.\<close>
@@ -1306,7 +1306,7 @@
text \<open>Further properties of the evaluation homomorphism.\<close>
text \<open>The following proof is complicated by the fact that in arbitrary
- rings one might have @{term "one R = zero R"}.\<close>
+ rings one might have \<^term>\<open>one R = zero R\<close>.\<close>
(* TODO: simplify by cases "one R = zero R" *)
@@ -1818,8 +1818,7 @@
text \<open>
Interpretation now enables to import all theorems and lemmas
- valid in the context of homomorphisms between @{term INTEG} and @{term
- "UP INTEG"} globally.
+ valid in the context of homomorphisms between \<^term>\<open>INTEG\<close> and \<^term>\<open>UP INTEG\<close> globally.
\<close>
interpretation INTEG: UP_pre_univ_prop INTEG INTEG id "UP INTEG"
--- a/src/HOL/Analysis/Bochner_Integration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Bochner_Integration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -2410,7 +2410,7 @@
has_bochner_integral M (\<lambda>x. f (g x)) x \<Longrightarrow> has_bochner_integral (distr M N g) f x"
by%unimportant (simp add: has_bochner_integral_iff integrable_distr_eq integral_distr)
-subsection%important \<open>Lebesgue integration on @{const count_space}\<close>
+subsection%important \<open>Lebesgue integration on \<^const>\<open>count_space\<close>\<close>
lemma%unimportant integrable_count_space:
fixes f :: "'a \<Rightarrow> 'b::{banach,second_countable_topology}"
@@ -2515,7 +2515,7 @@
apply (auto simp: AE_count_space integrable_count_space)
done
-subsection%important \<open>Lebesgue integration on @{const null_measure}\<close>
+subsection%important \<open>Lebesgue integration on \<^const>\<open>null_measure\<close>\<close>
lemma%unimportant has_bochner_integral_null_measure_iff[iff]:
"has_bochner_integral (null_measure M) f 0 \<longleftrightarrow> f \<in> borel_measurable M"
--- a/src/HOL/Analysis/Borel_Space.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Borel_Space.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1688,7 +1688,7 @@
subsection%important "Borel space on the extended non-negative reals"
-text \<open> @{type ennreal} is a topological monoid, so no rules for plus are required, also all order
+text \<open> \<^type>\<open>ennreal\<close> is a topological monoid, so no rules for plus are required, also all order
statements are usually done on type classes. \<close>
lemma%unimportant measurable_enn2ereal[measurable]: "enn2ereal \<in> borel \<rightarrow>\<^sub>M borel"
--- a/src/HOL/Analysis/Bounded_Linear_Function.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Bounded_Linear_Function.thy Sat Jan 05 17:24:33 2019 +0100
@@ -37,7 +37,7 @@
lemmas onorm_componentwise_le = order_trans[OF onorm_componentwise]
-subsection%unimportant \<open>Intro rules for @{term bounded_linear}\<close>
+subsection%unimportant \<open>Intro rules for \<^term>\<open>bounded_linear\<close>\<close>
named_theorems bounded_linear_intros
@@ -85,30 +85,30 @@
\<open>Scan.succeed (Thm.declaration_attribute (fn thm =>
fold (fn (r, s) => Named_Theorems.add_thm s (thm RS r))
[
- (@{thm bounded_linear.has_derivative}, @{named_theorems derivative_intros}),
- (@{thm bounded_linear.tendsto}, @{named_theorems tendsto_intros}),
- (@{thm bounded_linear.continuous}, @{named_theorems continuous_intros}),
- (@{thm bounded_linear.continuous_on}, @{named_theorems continuous_intros}),
- (@{thm bounded_linear.uniformly_continuous_on}, @{named_theorems continuous_intros}),
- (@{thm bounded_linear_compose}, @{named_theorems bounded_linear_intros})
+ (@{thm bounded_linear.has_derivative}, \<^named_theorems>\<open>derivative_intros\<close>),
+ (@{thm bounded_linear.tendsto}, \<^named_theorems>\<open>tendsto_intros\<close>),
+ (@{thm bounded_linear.continuous}, \<^named_theorems>\<open>continuous_intros\<close>),
+ (@{thm bounded_linear.continuous_on}, \<^named_theorems>\<open>continuous_intros\<close>),
+ (@{thm bounded_linear.uniformly_continuous_on}, \<^named_theorems>\<open>continuous_intros\<close>),
+ (@{thm bounded_linear_compose}, \<^named_theorems>\<open>bounded_linear_intros\<close>)
]))\<close>
attribute_setup bounded_bilinear =
\<open>Scan.succeed (Thm.declaration_attribute (fn thm =>
fold (fn (r, s) => Named_Theorems.add_thm s (thm RS r))
[
- (@{thm bounded_bilinear.FDERIV}, @{named_theorems derivative_intros}),
- (@{thm bounded_bilinear.tendsto}, @{named_theorems tendsto_intros}),
- (@{thm bounded_bilinear.continuous}, @{named_theorems continuous_intros}),
- (@{thm bounded_bilinear.continuous_on}, @{named_theorems continuous_intros}),
+ (@{thm bounded_bilinear.FDERIV}, \<^named_theorems>\<open>derivative_intros\<close>),
+ (@{thm bounded_bilinear.tendsto}, \<^named_theorems>\<open>tendsto_intros\<close>),
+ (@{thm bounded_bilinear.continuous}, \<^named_theorems>\<open>continuous_intros\<close>),
+ (@{thm bounded_bilinear.continuous_on}, \<^named_theorems>\<open>continuous_intros\<close>),
(@{thm bounded_linear_compose[OF bounded_bilinear.bounded_linear_left]},
- @{named_theorems bounded_linear_intros}),
+ \<^named_theorems>\<open>bounded_linear_intros\<close>),
(@{thm bounded_linear_compose[OF bounded_bilinear.bounded_linear_right]},
- @{named_theorems bounded_linear_intros}),
+ \<^named_theorems>\<open>bounded_linear_intros\<close>),
(@{thm bounded_linear.uniformly_continuous_on[OF bounded_bilinear.bounded_linear_left]},
- @{named_theorems continuous_intros}),
+ \<^named_theorems>\<open>continuous_intros\<close>),
(@{thm bounded_linear.uniformly_continuous_on[OF bounded_bilinear.bounded_linear_right]},
- @{named_theorems continuous_intros})
+ \<^named_theorems>\<open>continuous_intros\<close>)
]))\<close>
--- a/src/HOL/Analysis/Cartesian_Euclidean_Space.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Cartesian_Euclidean_Space.thy Sat Jan 05 17:24:33 2019 +0100
@@ -813,7 +813,7 @@
vector_cart[of "\<lambda>j. frechet_derivative f (at x) j $ k"]
by (simp add: Basis_vec_def axis_eq_axis inner_axis jacobian_def matrix_def)
-subsection%unimportant \<open>Lemmas for working on @{typ "real^1"}\<close>
+subsection%unimportant \<open>Lemmas for working on \<^typ>\<open>real^1\<close>\<close>
lemma forall_1[simp]: "(\<forall>i::1. P i) \<longleftrightarrow> P 1"
by (metis (full_types) num1_eq_iff)
@@ -1083,7 +1083,7 @@
shows "rank(A ** B) \<le> rank A"
by (metis matrix_transpose_mul rank_mul_le_right rank_transpose)
-subsection%unimportant\<open>Routine results connecting the types @{typ "real^1"} and @{typ real}\<close>
+subsection%unimportant\<open>Routine results connecting the types \<^typ>\<open>real^1\<close> and \<^typ>\<open>real\<close>\<close>
lemma vector_one_nth [simp]:
fixes x :: "'a^1" shows "vec (x $ 1) = x"
--- a/src/HOL/Analysis/Cauchy_Integral_Theorem.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Cauchy_Integral_Theorem.thy Sat Jan 05 17:24:33 2019 +0100
@@ -3113,8 +3113,8 @@
(if atends then pathstart h = pathstart g \<and> pathfinish h = pathfinish g
else pathfinish g = pathstart g \<and> pathfinish h = pathstart h)"
-text\<open>This formulation covers two cases: @{term g} and @{term h} share their
- start and end points; @{term g} and @{term h} both loop upon themselves.\<close>
+text\<open>This formulation covers two cases: \<^term>\<open>g\<close> and \<^term>\<open>h\<close> share their
+ start and end points; \<^term>\<open>g\<close> and \<^term>\<open>h\<close> both loop upon themselves.\<close>
lemma contour_integral_nearby:
assumes os: "open S" and p: "path p" "path_image p \<subseteq> S"
shows "\<exists>d. 0 < d \<and>
@@ -6360,7 +6360,7 @@
and w: "w \<in> ball z r"
shows "((\<lambda>n. (deriv ^^ n) f z / (fact n) * (w - z)^n) sums f w)"
proof -
- \<comment> \<open>Replacing @{term r} and the original (weak) premises with stronger ones\<close>
+ \<comment> \<open>Replacing \<^term>\<open>r\<close> and the original (weak) premises with stronger ones\<close>
obtain r where "r > 0" and holfc: "f holomorphic_on cball z r" and w: "w \<in> ball z r"
proof
have "cball z ((r + dist w z) / 2) \<subseteq> ball z r"
@@ -7274,7 +7274,7 @@
qed
qed
-text\<open>This version has @{term"polynomial_function \<gamma>"} as an additional assumption.\<close>
+text\<open>This version has \<^term>\<open>polynomial_function \<gamma>\<close> as an additional assumption.\<close>
lemma Cauchy_integral_formula_global_weak:
assumes "open U" and holf: "f holomorphic_on U"
and z: "z \<in> U" and \<gamma>: "polynomial_function \<gamma>"
--- a/src/HOL/Analysis/Complex_Transcendental.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Complex_Transcendental.thy Sat Jan 05 17:24:33 2019 +0100
@@ -89,7 +89,7 @@
subsection\<open>Euler and de Moivre formulas\<close>
-text\<open>The sine series times @{term i}\<close>
+text\<open>The sine series times \<^term>\<open>i\<close>\<close>
lemma sin_i_eq: "(\<lambda>n. (\<i> * sin_coeff n) * z^n) sums (\<i> * sin z)"
proof -
have "(\<lambda>n. \<i> * sin_coeff n *\<^sub>R z^n) sums (\<i> * sin z)"
@@ -883,7 +883,7 @@
qed
text\<open>This function returns the angle of a complex number from its representation in polar coordinates.
-Due to periodicity, its range is arbitrary. @{term Arg2pi} follows HOL Light in adopting the interval \<open>[0,2\<pi>)\<close>.
+Due to periodicity, its range is arbitrary. \<^term>\<open>Arg2pi\<close> follows HOL Light in adopting the interval \<open>[0,2\<pi>)\<close>.
But we have the same periodicity issue with logarithms, and it is usual to adopt the same interval
for the complex logarithm and argument functions. Further on down, we shall define both functions for the interval \<open>(-\<pi>,\<pi>]\<close>.
The present version is provided for compatibility.\<close>
--- a/src/HOL/Analysis/Conformal_Mappings.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Conformal_Mappings.thy Sat Jan 05 17:24:33 2019 +0100
@@ -463,7 +463,7 @@
by force
qed
-text\<open>No need for @{term S} to be connected. But the nonconstant condition is stronger.\<close>
+text\<open>No need for \<^term>\<open>S\<close> to be connected. But the nonconstant condition is stronger.\<close>
corollary%unimportant open_mapping_thm2:
assumes holf: "f holomorphic_on S"
and S: "open S"
@@ -505,8 +505,8 @@
subsection\<open>Maximum modulus principle\<close>
-text\<open>If @{term f} is holomorphic, then its norm (modulus) cannot exhibit a true local maximum that is
- properly within the domain of @{term f}.\<close>
+text\<open>If \<^term>\<open>f\<close> is holomorphic, then its norm (modulus) cannot exhibit a true local maximum that is
+ properly within the domain of \<^term>\<open>f\<close>.\<close>
proposition maximum_modulus_principle:
assumes holf: "f holomorphic_on S"
@@ -3056,8 +3056,8 @@
using is_pole_basic[of f A 0] assms by simp
text \<open>The proposition
- @{term "\<exists>x. ((f::complex\<Rightarrow>complex) \<longlongrightarrow> x) (at z) \<or> is_pole f z"}
-can be interpreted as the complex function @{term f} has a non-essential singularity at @{term z}
+ \<^term>\<open>\<exists>x. ((f::complex\<Rightarrow>complex) \<longlongrightarrow> x) (at z) \<or> is_pole f z\<close>
+can be interpreted as the complex function \<^term>\<open>f\<close> has a non-essential singularity at \<^term>\<open>z\<close>
(i.e. the singularity is either removable or a pole).\<close>
definition not_essential::"[complex \<Rightarrow> complex, complex] \<Rightarrow> bool" where
"not_essential f z = (\<exists>x. f\<midarrow>z\<rightarrow>x \<or> is_pole f z)"
@@ -3140,8 +3140,8 @@
lemma holomorphic_factor_puncture:
assumes f_iso:"isolated_singularity_at f z"
- and "not_essential f z" \<comment> \<open>@{term f} has either a removable singularity or a pole at @{term z}\<close>
- and non_zero:"\<exists>\<^sub>Fw in (at z). f w\<noteq>0" \<comment> \<open>@{term f} will not be constantly zero in a neighbour of @{term z}\<close>
+ and "not_essential f z" \<comment> \<open>\<^term>\<open>f\<close> has either a removable singularity or a pole at \<^term>\<open>z\<close>\<close>
+ and non_zero:"\<exists>\<^sub>Fw in (at z). f w\<noteq>0" \<comment> \<open>\<^term>\<open>f\<close> will not be constantly zero in a neighbour of \<^term>\<open>z\<close>\<close>
shows "\<exists>!n::int. \<exists>g r. 0 < r \<and> g holomorphic_on cball z r \<and> g z\<noteq>0
\<and> (\<forall>w\<in>cball z r-{z}. f w = g w * (w-z) powr n \<and> g w\<noteq>0)"
proof -
@@ -4652,7 +4652,7 @@
theorem argument_principle:
fixes f::"complex \<Rightarrow> complex" and poles s:: "complex set"
- defines "pz \<equiv> {w. f w = 0 \<or> w \<in> poles}" \<comment> \<open>@{term "pz"} is the set of poles and zeros\<close>
+ defines "pz \<equiv> {w. f w = 0 \<or> w \<in> poles}" \<comment> \<open>\<^term>\<open>pz\<close> is the set of poles and zeros\<close>
assumes "open s" and
"connected s" and
f_holo:"f holomorphic_on s-poles" and
--- a/src/HOL/Analysis/Connected.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Connected.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5153,7 +5153,7 @@
lemma Euclidean_dist_upper: "i \<in> Basis \<Longrightarrow> dist (x \<bullet> i) (y \<bullet> i) \<le> dist x y"
by (metis (no_types) member_le_L2_set euclidean_dist_l2 finite_Basis)
-text\<open>But is the premise @{term \<open>i \<in> Basis\<close>} really necessary?\<close>
+text\<open>But is the premise \<^term>\<open>i \<in> Basis\<close> really necessary?\<close>
lemma open_preimage_inner:
assumes "open S" "i \<in> Basis"
shows "open {x. x \<bullet> i \<in> S}"
--- a/src/HOL/Analysis/Continuum_Not_Denumerable.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Continuum_Not_Denumerable.thy Sat Jan 05 17:24:33 2019 +0100
@@ -38,7 +38,7 @@
assume "\<exists>f::nat \<Rightarrow> real. surj f"
then obtain f :: "nat \<Rightarrow> real" where "surj f" ..
- txt \<open>First we construct a sequence of nested intervals, ignoring @{term "range f"}.\<close>
+ txt \<open>First we construct a sequence of nested intervals, ignoring \<^term>\<open>range f\<close>.\<close>
have "a < b \<Longrightarrow> \<exists>ka kb. ka < kb \<and> {ka..kb} \<subseteq> {a..b} \<and> c \<notin> {ka..kb}" for a b c :: real
by (auto simp add: not_le cong: conj_cong)
--- a/src/HOL/Analysis/Derivative.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Derivative.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1931,7 +1931,7 @@
subsection \<open>Derivative as a vector\<close>
-text \<open>Considering derivative @{typ "real \<Rightarrow> 'b::real_normed_vector"} as a vector.\<close>
+text \<open>Considering derivative \<^typ>\<open>real \<Rightarrow> 'b::real_normed_vector\<close> as a vector.\<close>
definition "vector_derivative f net = (SOME f'. (f has_vector_derivative f') net)"
--- a/src/HOL/Analysis/Equivalence_Lebesgue_Henstock_Integration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Equivalence_Lebesgue_Henstock_Integration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -335,7 +335,7 @@
by (simp add: ac_simps)
qed
-subsection \<open>Equivalence Lebesgue integral on @{const lborel} and HK-integral\<close>
+subsection \<open>Equivalence Lebesgue integral on \<^const>\<open>lborel\<close> and HK-integral\<close>
lemma has_integral_measure_lborel:
fixes A :: "'a::euclidean_space set"
--- a/src/HOL/Analysis/Euclidean_Space.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Euclidean_Space.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,9 +36,9 @@
syntax "_type_dimension" :: "type \<Rightarrow> nat" ("(1DIM/(1'(_')))")
translations "DIM('a)" \<rightharpoonup> "CONST card (CONST Basis :: 'a set)"
typed_print_translation \<open>
- [(@{const_syntax card},
- fn ctxt => fn _ => fn [Const (@{const_syntax Basis}, Type (@{type_name set}, [T]))] =>
- Syntax.const @{syntax_const "_type_dimension"} $ Syntax_Phases.term_of_typ ctxt T)]
+ [(\<^const_syntax>\<open>card\<close>,
+ fn ctxt => fn _ => fn [Const (\<^const_syntax>\<open>Basis\<close>, Type (\<^type_name>\<open>set\<close>, [T]))] =>
+ Syntax.const \<^syntax_const>\<open>_type_dimension\<close> $ Syntax_Phases.term_of_typ ctxt T)]
\<close>
lemma (in euclidean_space) norm_Basis[simp]: "u \<in> Basis \<Longrightarrow> norm u = 1"
@@ -234,7 +234,7 @@
subsection \<open>Class instances\<close>
-subsubsection%unimportant \<open>Type @{typ real}\<close>
+subsubsection%unimportant \<open>Type \<^typ>\<open>real\<close>\<close>
instantiation real :: euclidean_space
begin
@@ -250,7 +250,7 @@
lemma DIM_real[simp]: "DIM(real) = 1"
by simp
-subsubsection%unimportant \<open>Type @{typ complex}\<close>
+subsubsection%unimportant \<open>Type \<^typ>\<open>complex\<close>\<close>
instantiation complex :: euclidean_space
begin
@@ -271,7 +271,7 @@
lemma complex_Basis_i [iff]: "\<i> \<in> Basis"
by (simp add: Basis_complex_def)
-subsubsection%unimportant \<open>Type @{typ "'a \<times> 'b"}\<close>
+subsubsection%unimportant \<open>Type \<^typ>\<open>'a \<times> 'b\<close>\<close>
instantiation prod :: (real_inner, real_inner) real_inner
begin
--- a/src/HOL/Analysis/FPS_Convergence.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/FPS_Convergence.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
subsection%unimportant \<open>Balls with extended real radius\<close>
text \<open>
- The following is a variant of @{const ball} that also allows an infinite radius.
+ The following is a variant of \<^const>\<open>ball\<close> that also allows an infinite radius.
\<close>
definition eball :: "'a :: metric_space \<Rightarrow> ereal \<Rightarrow> 'a set" where
"eball z r = {z'. ereal (dist z z') < r}"
--- a/src/HOL/Analysis/Finite_Cartesian_Product.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Finite_Cartesian_Product.thy Sat Jan 05 17:24:33 2019 +0100
@@ -42,17 +42,17 @@
syntax "_vec_type" :: "type \<Rightarrow> type \<Rightarrow> type" (infixl "^" 15)
parse_translation \<open>
let
- fun vec t u = Syntax.const @{type_syntax vec} $ t $ u;
+ fun vec t u = Syntax.const \<^type_syntax>\<open>vec\<close> $ t $ u;
fun finite_vec_tr [t, u] =
(case Term_Position.strip_positions u of
v as Free (x, _) =>
if Lexicon.is_tid x then
- vec t (Syntax.const @{syntax_const "_ofsort"} $ v $
- Syntax.const @{class_syntax finite})
+ vec t (Syntax.const \<^syntax_const>\<open>_ofsort\<close> $ v $
+ Syntax.const \<^class_syntax>\<open>finite\<close>)
else vec t u
| _ => vec t u)
in
- [(@{syntax_const "_vec_type"}, K finite_vec_tr)]
+ [(\<^syntax_const>\<open>_vec_type\<close>, K finite_vec_tr)]
end
\<close>
@@ -800,12 +800,12 @@
method_setup vector = \<open>
let
val ss1 =
- simpset_of (put_simpset HOL_basic_ss @{context}
+ simpset_of (put_simpset HOL_basic_ss \<^context>
addsimps [@{thm sum.distrib} RS sym,
@{thm sum_subtractf} RS sym, @{thm sum_distrib_left},
@{thm sum_distrib_right}, @{thm sum_negf} RS sym])
val ss2 =
- simpset_of (@{context} addsimps
+ simpset_of (\<^context> addsimps
[@{thm plus_vec_def}, @{thm times_vec_def},
@{thm minus_vec_def}, @{thm uminus_vec_def},
@{thm one_vec_def}, @{thm zero_vec_def}, @{thm vec_def},
@@ -1006,7 +1006,7 @@
subsection%important \<open>Matrix operations\<close>
-text\<open>Matrix notation. NB: an MxN matrix is of type @{typ "'a^'n^'m"}, not @{typ "'a^'m^'n"}\<close>
+text\<open>Matrix notation. NB: an MxN matrix is of type \<^typ>\<open>'a^'n^'m\<close>, not \<^typ>\<open>'a^'m^'n\<close>\<close>
definition%important map_matrix::"('a \<Rightarrow> 'b) \<Rightarrow> (('a, 'i::finite)vec, 'j::finite) vec \<Rightarrow> (('b, 'i)vec, 'j) vec" where
"map_matrix f x = (\<chi> i j. f (x $ i $ j))"
--- a/src/HOL/Analysis/Finite_Product_Measure.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Finite_Product_Measure.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,7 +15,7 @@
lemma%unimportant case_prod_const: "(\<lambda>(i, j). c) = (\<lambda>_. c)"
by auto
-subsubsection%unimportant \<open>More about Function restricted by @{const extensional}\<close>
+subsubsection%unimportant \<open>More about Function restricted by \<^const>\<open>extensional\<close>\<close>
definition
"merge I J = (\<lambda>(x, y) i. if i \<in> I then x i else if i \<in> J then y i else undefined)"
--- a/src/HOL/Analysis/Function_Topology.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Function_Topology.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
to each factor is continuous.
To form a product of objects in Isabelle/HOL, all these objects should be subsets of a common type
-'a. The product is then @{term "Pi\<^sub>E I X"}, the set of elements from \<open>'i\<close> to \<open>'a\<close> such that the \<open>i\<close>-th
+'a. The product is then \<^term>\<open>Pi\<^sub>E I X\<close>, the set of elements from \<open>'i\<close> to \<open>'a\<close> such that the \<open>i\<close>-th
coordinate belongs to \<open>X i\<close> for all \<open>i \<in> I\<close>.
Hence, to form a product of topological spaces, all these spaces should be subsets of a common type.
--- a/src/HOL/Analysis/Gamma_Function.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Gamma_Function.thy Sat Jan 05 17:24:33 2019 +0100
@@ -20,7 +20,7 @@
Based on the Gamma function, we also prove the Weierstraß product form of the
sin function and, based on this, the solution of the Basel problem (the
- sum over all @{term "1 / (n::nat)^2"}.
+ sum over all \<^term>\<open>1 / (n::nat)^2\<close>.
\<close>
lemma pochhammer_eq_0_imp_nonpos_Int:
@@ -2073,7 +2073,7 @@
let ?h = "\<lambda>z::complex. (of_real pi * cot (of_real pi*z) + Digamma z - Digamma (1 - z))"
define h where [abs_def]: "h z = (if z \<in> \<int> then 0 else ?h z)" for z :: complex
- \<comment> \<open>@{term g} is periodic with period 1.\<close>
+ \<comment> \<open>\<^term>\<open>g\<close> is periodic with period 1.\<close>
interpret g: periodic_fun_simple' g
proof
fix z :: complex
@@ -2093,7 +2093,7 @@
qed (simp add: g_def)
qed
- \<comment> \<open>@{term g} is entire.\<close>
+ \<comment> \<open>\<^term>\<open>g\<close> is entire.\<close>
have g_g' [derivative_intros]: "(g has_field_derivative (h z * g z)) (at z)" for z :: complex
proof (cases "z \<in> \<int>")
let ?h' = "\<lambda>z. Beta z (1 - z) * ((Digamma z - Digamma (1 - z)) * sin (z * of_real pi) +
--- a/src/HOL/Analysis/Harmonic_Numbers.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Harmonic_Numbers.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
text \<open>
The definition of the Harmonic Numbers and the Euler-Mascheroni constant.
- Also provides a reasonably accurate approximation of @{term "ln 2 :: real"}
+ Also provides a reasonably accurate approximation of \<^term>\<open>ln 2 :: real\<close>
and the Euler-Mascheroni constant.
\<close>
@@ -515,7 +515,7 @@
text \<open>
- Approximation of @{term "ln 2"}. The lower bound is accurate to about 0.03; the upper
+ Approximation of \<^term>\<open>ln 2\<close>. The lower bound is accurate to about 0.03; the upper
bound is accurate to about 0.0015.
\<close>
lemma ln2_ge_two_thirds: "2/3 \<le> ln (2::real)"
--- a/src/HOL/Analysis/Henstock_Kurzweil_Integration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Henstock_Kurzweil_Integration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -490,7 +490,7 @@
shows "(f has_integral y) S \<Longrightarrow> ((\<lambda>x. f x * c) has_integral (y * c)) S"
using has_integral_linear[OF _ bounded_linear_mult_left] by (simp add: comp_def)
-text\<open>The case analysis eliminates the condition @{term "f integrable_on S"} at the cost
+text\<open>The case analysis eliminates the condition \<^term>\<open>f integrable_on S\<close> at the cost
of the type class constraint \<open>division_ring\<close>\<close>
corollary integral_mult_left [simp]:
fixes c:: "'a::{real_normed_algebra,division_ring}"
--- a/src/HOL/Analysis/Infinite_Set_Sum.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Infinite_Set_Sum.thy Sat Jan 05 17:24:33 2019 +0100
@@ -128,7 +128,7 @@
print_translation \<open>
let
- fun sum_tr' [Abs (x, Tx, t), Const (@{const_syntax Collect}, _) $ Abs (y, Ty, P)] =
+ fun sum_tr' [Abs (x, Tx, t), Const (\<^const_syntax>\<open>Collect\<close>, _) $ Abs (y, Ty, P)] =
if x <> y then raise Match
else
let
@@ -136,10 +136,10 @@
val t' = subst_bound (x', t);
val P' = subst_bound (x', P);
in
- Syntax.const @{syntax_const "_qinfsetsum"} $ Syntax_Trans.mark_bound_abs (x, Tx) $ P' $ t'
+ Syntax.const \<^syntax_const>\<open>_qinfsetsum\<close> $ Syntax_Trans.mark_bound_abs (x, Tx) $ P' $ t'
end
| sum_tr' _ = raise Match;
-in [(@{const_syntax infsetsum}, K sum_tr')] end
+in [(\<^const_syntax>\<open>infsetsum\<close>, K sum_tr')] end
\<close>
--- a/src/HOL/Analysis/Inner_Product.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Inner_Product.thy Sat Jan 05 17:24:33 2019 +0100
@@ -11,21 +11,21 @@
subsection \<open>Real inner product spaces\<close>
text \<open>
- Temporarily relax type constraints for @{term "open"}, @{term "uniformity"},
- @{term dist}, and @{term norm}.
+ Temporarily relax type constraints for \<^term>\<open>open\<close>, \<^term>\<open>uniformity\<close>,
+ \<^term>\<open>dist\<close>, and \<^term>\<open>norm\<close>.
\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name "open"}, SOME @{typ "'a::open set \<Rightarrow> bool"})\<close>
+ (\<^const_name>\<open>open\<close>, SOME \<^typ>\<open>'a::open set \<Rightarrow> bool\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name dist}, SOME @{typ "'a::dist \<Rightarrow> 'a \<Rightarrow> real"})\<close>
+ (\<^const_name>\<open>dist\<close>, SOME \<^typ>\<open>'a::dist \<Rightarrow> 'a \<Rightarrow> real\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name uniformity}, SOME @{typ "('a::uniformity \<times> 'a) filter"})\<close>
+ (\<^const_name>\<open>uniformity\<close>, SOME \<^typ>\<open>('a::uniformity \<times> 'a) filter\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name norm}, SOME @{typ "'a::norm \<Rightarrow> real"})\<close>
+ (\<^const_name>\<open>norm\<close>, SOME \<^typ>\<open>'a::norm \<Rightarrow> real\<close>)\<close>
class real_inner = real_vector + sgn_div_norm + dist_norm + uniformity_dist + open_uniformity +
fixes inner :: "'a \<Rightarrow> 'a \<Rightarrow> real"
@@ -202,21 +202,21 @@
by (metis inner_commute inner_divide_left)
text \<open>
- Re-enable constraints for @{term "open"}, @{term "uniformity"},
- @{term dist}, and @{term norm}.
+ Re-enable constraints for \<^term>\<open>open\<close>, \<^term>\<open>uniformity\<close>,
+ \<^term>\<open>dist\<close>, and \<^term>\<open>norm\<close>.
\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name "open"}, SOME @{typ "'a::topological_space set \<Rightarrow> bool"})\<close>
+ (\<^const_name>\<open>open\<close>, SOME \<^typ>\<open>'a::topological_space set \<Rightarrow> bool\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name uniformity}, SOME @{typ "('a::uniform_space \<times> 'a) filter"})\<close>
+ (\<^const_name>\<open>uniformity\<close>, SOME \<^typ>\<open>('a::uniform_space \<times> 'a) filter\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name dist}, SOME @{typ "'a::metric_space \<Rightarrow> 'a \<Rightarrow> real"})\<close>
+ (\<^const_name>\<open>dist\<close>, SOME \<^typ>\<open>'a::metric_space \<Rightarrow> 'a \<Rightarrow> real\<close>)\<close>
setup \<open>Sign.add_const_constraint
- (@{const_name norm}, SOME @{typ "'a::real_normed_vector \<Rightarrow> real"})\<close>
+ (\<^const_name>\<open>norm\<close>, SOME \<^typ>\<open>'a::real_normed_vector \<Rightarrow> real\<close>)\<close>
lemma bounded_bilinear_inner:
"bounded_bilinear (inner::'a::real_inner \<Rightarrow> 'a \<Rightarrow> real)"
--- a/src/HOL/Analysis/Linear_Algebra.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Linear_Algebra.thy Sat Jan 05 17:24:33 2019 +0100
@@ -35,7 +35,7 @@
notation inner (infix "\<bullet>" 70)
-text\<open>Equality of vectors in terms of @{term "(\<bullet>)"} products.\<close>
+text\<open>Equality of vectors in terms of \<^term>\<open>(\<bullet>)\<close> products.\<close>
lemma linear_componentwise:
fixes f:: "'a::euclidean_space \<Rightarrow> 'b::real_inner"
--- a/src/HOL/Analysis/Measurable.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Measurable.thy Sat Jan 05 17:24:33 2019 +0100
@@ -70,7 +70,7 @@
simproc_setup%important measurable ("A \<in> sets M" | "f \<in> measurable M N") = \<open>K Measurable.simproc\<close>
setup \<open>
- Global_Theory.add_thms_dynamic (@{binding measurable}, Measurable.get_all)
+ Global_Theory.add_thms_dynamic (\<^binding>\<open>measurable\<close>, Measurable.get_all)
\<close>
declare
--- a/src/HOL/Analysis/Measure_Space.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Measure_Space.thy Sat Jan 05 17:24:33 2019 +0100
@@ -55,7 +55,7 @@
qed
text \<open>
- The type for emeasure spaces is already defined in @{theory "HOL-Analysis.Sigma_Algebra"}, as it
+ The type for emeasure spaces is already defined in \<^theory>\<open>HOL-Analysis.Sigma_Algebra\<close>, as it
is also used to represent sigma algebras (with an arbitrary emeasure).
\<close>
@@ -91,11 +91,11 @@
shows "f {} = 0 \<Longrightarrow> (\<Sum>n. f (binaryset A B n)) = f A + f B"
by (metis binaryset_sums sums_unique)
-subsection%unimportant \<open>Properties of a premeasure @{term \<mu>}\<close>
+subsection%unimportant \<open>Properties of a premeasure \<^term>\<open>\<mu>\<close>\<close>
text \<open>
- The definitions for @{const positive} and @{const countably_additive} should be here, by they are
- necessary to define @{typ "'a measure"} in @{theory "HOL-Analysis.Sigma_Algebra"}.
+ The definitions for \<^const>\<open>positive\<close> and \<^const>\<open>countably_additive\<close> should be here, by they are
+ necessary to define \<^typ>\<open>'a measure\<close> in \<^theory>\<open>HOL-Analysis.Sigma_Algebra\<close>.
\<close>
definition subadditive where
@@ -442,7 +442,7 @@
using empty_continuous_imp_continuous_from_below[OF f fin] cont
by blast
-subsection%unimportant \<open>Properties of @{const emeasure}\<close>
+subsection%unimportant \<open>Properties of \<^const>\<open>emeasure\<close>\<close>
lemma emeasure_positive: "positive (sets M) (emeasure M)"
by (cases M) (auto simp: sets_def emeasure_def Abs_measure_inverse measure_space_def)
@@ -1385,7 +1385,7 @@
then show ?thesis using that by blast
qed
-subsection \<open>Measure space induced by distribution of @{const measurable}-functions\<close>
+subsection \<open>Measure space induced by distribution of \<^const>\<open>measurable\<close>-functions\<close>
definition%important distr :: "'a measure \<Rightarrow> 'b measure \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'b measure" where
"distr M N f =
@@ -2071,7 +2071,7 @@
ultimately show ?thesis by auto
qed
-subsection \<open>Measure spaces with @{term "emeasure M (space M) < \<infinity>"}\<close>
+subsection \<open>Measure spaces with \<^term>\<open>emeasure M (space M) < \<infinity>\<close>\<close>
locale%important finite_measure = sigma_finite_measure M for M +
assumes finite_emeasure_space: "emeasure M (space M) \<noteq> top"
@@ -2830,7 +2830,7 @@
qed
text%important \<open>
- Define a lexicographical order on @{type measure}, in the order space, sets and measure. The parts
+ Define a lexicographical order on \<^type>\<open>measure\<close>, in the order space, sets and measure. The parts
of the lexicographical order are point-wise ordered.
\<close>
--- a/src/HOL/Analysis/Polytope.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Polytope.thy Sat Jan 05 17:24:33 2019 +0100
@@ -3072,7 +3072,7 @@
subsection\<open>Simplexes\<close>
-text\<open>The notion of n-simplex for integer @{term"n \<ge> -1"}\<close>
+text\<open>The notion of n-simplex for integer \<^term>\<open>n \<ge> -1\<close>\<close>
definition simplex :: "int \<Rightarrow> 'a::euclidean_space set \<Rightarrow> bool" (infix "simplex" 50)
where "n simplex S \<equiv> \<exists>C. \<not> affine_dependent C \<and> int(card C) = n + 1 \<and> S = convex hull C"
--- a/src/HOL/Analysis/Sigma_Algebra.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Sigma_Algebra.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1637,7 +1637,7 @@
by (metis Pow_empty Sup_bot_conv(1) cSup_singleton empty_iff
sets.sigma_sets_eq sets.space_closed sigma_sets_top subset_singletonD)
-subsubsection \<open>Constructing simple @{typ "'a measure"}\<close>
+subsubsection \<open>Constructing simple \<^typ>\<open>'a measure\<close>\<close>
proposition emeasure_measure_of:
assumes M: "M = measure_of \<Omega> A \<mu>"
--- a/src/HOL/Analysis/Summation_Tests.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Summation_Tests.thy Sat Jan 05 17:24:33 2019 +0100
@@ -469,7 +469,7 @@
text \<open>
The radius of convergence of a power series. This value always exists, ranges from
- @{term "0::ereal"} to @{term "\<infinity>::ereal"}, and the power series is guaranteed to converge for
+ \<^term>\<open>0::ereal\<close> to \<^term>\<open>\<infinity>::ereal\<close>, and the power series is guaranteed to converge for
all inputs with a norm that is smaller than that radius and to diverge for all inputs with a
norm that is greater.
\<close>
--- a/src/HOL/Analysis/Tagged_Division.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Tagged_Division.thy Sat Jan 05 17:24:33 2019 +0100
@@ -41,7 +41,7 @@
apply (simp add: trans_wf_iff wf_iff_acyclic_if_finite converse_def assms)
using acyclic_def assms irrefl_def trans_Restr by fastforce
-text\<open>For creating values between @{term u} and @{term v}.\<close>
+text\<open>For creating values between \<^term>\<open>u\<close> and \<^term>\<open>v\<close>.\<close>
lemma scaling_mono:
fixes u::"'a::linordered_field"
assumes "u \<le> v" "0 \<le> r" "r \<le> s"
@@ -1289,8 +1289,8 @@
subsection%important \<open>Functions closed on boxes: morphisms from boxes to monoids\<close>
text \<open>This auxiliary structure is used to sum up over the elements of a division. Main theorem is
- \<open>operative_division\<close>. Instances for the monoid are @{typ "'a option"}, @{typ real}, and
- @{typ bool}.\<close>
+ \<open>operative_division\<close>. Instances for the monoid are \<^typ>\<open>'a option\<close>, \<^typ>\<open>real\<close>, and
+ \<^typ>\<open>bool\<close>.\<close>
paragraph%important \<open>Using additivity of lifted function to encode definedness.\<close>
text%important \<open>%whitespace\<close>
@@ -2351,7 +2351,7 @@
for m n \<comment> \<open>The symmetry argument requires a single HOL formula\<close>
proof (rule linorder_wlog [where a=m and b=n], intro allI impI)
fix v w m and n::nat
- assume "m \<le> n" \<comment> \<open>WLOG we can assume @{term"m \<le> n"}, when the first disjunct becomes impossible\<close>
+ assume "m \<le> n" \<comment> \<open>WLOG we can assume \<^term>\<open>m \<le> n\<close>, when the first disjunct becomes impossible\<close>
have "?K0(n,w) \<subseteq> ?K0(m,v) \<or> interior(?K0(m,v)) \<inter> interior(?K0(n,w)) = {}"
apply (simp add: subset_box disjoint_interval)
apply (rule ccontr)
--- a/src/HOL/Analysis/Uniform_Limit.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Uniform_Limit.thy Sat Jan 05 17:24:33 2019 +0100
@@ -406,9 +406,9 @@
named_theorems uniform_limit_intros "introduction rules for uniform_limit"
setup \<open>
- Global_Theory.add_thms_dynamic (@{binding uniform_limit_eq_intros},
+ Global_Theory.add_thms_dynamic (\<^binding>\<open>uniform_limit_eq_intros\<close>,
fn context =>
- Named_Theorems.get (Context.proof_of context) @{named_theorems uniform_limit_intros}
+ Named_Theorems.get (Context.proof_of context) \<^named_theorems>\<open>uniform_limit_intros\<close>
|> map_filter (try (fn thm => @{thm uniform_limit_eq_rhs} OF [thm])))
\<close>
--- a/src/HOL/Analysis/Weierstrass_Theorems.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/Weierstrass_Theorems.thy Sat Jan 05 17:24:33 2019 +0100
@@ -436,7 +436,7 @@
by blast
qed
-text\<open>Non-trivial case, with @{term A} and @{term B} both non-empty\<close>
+text\<open>Non-trivial case, with \<^term>\<open>A\<close> and \<^term>\<open>B\<close> both non-empty\<close>
lemma%unimportant (in function_ring_on) two_special:
assumes A: "closed A" "A \<subseteq> S" "a \<in> A"
and B: "closed B" "B \<subseteq> S" "b \<in> B"
@@ -565,7 +565,7 @@
done
qed
-text\<open>The special case where @{term f} is non-negative and @{term"e<1/3"}\<close>
+text\<open>The special case where \<^term>\<open>f\<close> is non-negative and \<^term>\<open>e<1/3\<close>\<close>
lemma%important (in function_ring_on) Stone_Weierstrass_special:
assumes f: "continuous_on S f" and fpos: "\<And>x. x \<in> S \<Longrightarrow> f x \<ge> 0"
and e: "0 < e" "e < 1/3"
--- a/src/HOL/Analysis/ex/Approximations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/ex/Approximations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -447,7 +447,7 @@
subsubsection \<open>Machin-like formulae for pi\<close>
text \<open>
- We first define a small proof method that can prove Machin-like formulae for @{term "pi"}
+ We first define a small proof method that can prove Machin-like formulae for \<^term>\<open>pi\<close>
automatically. Unfortunately, this takes far too much time for larger formulae because
the numbers involved become too large.
\<close>
@@ -476,14 +476,14 @@
val ctxt' = ctxt addsimps @{thms arctan_double' arctan_add_small}
in
case Thm.term_of ct of
- Const (@{const_name MACHIN_TAG}, _) $ _ $
- (Const (@{const_name "Transcendental.arctan"}, _) $ _) =>
+ Const (\<^const_name>\<open>MACHIN_TAG\<close>, _) $ _ $
+ (Const (\<^const_name>\<open>Transcendental.arctan\<close>, _) $ _) =>
Simplifier.rewrite ctxt' ct
|
- Const (@{const_name MACHIN_TAG}, _) $ _ $
- (Const (@{const_name "Groups.plus"}, _) $
- (Const (@{const_name "Transcendental.arctan"}, _) $ _) $
- (Const (@{const_name "Transcendental.arctan"}, _) $ _)) =>
+ Const (\<^const_name>\<open>MACHIN_TAG\<close>, _) $ _ $
+ (Const (\<^const_name>\<open>Groups.plus\<close>, _) $
+ (Const (\<^const_name>\<open>Transcendental.arctan\<close>, _) $ _) $
+ (Const (\<^const_name>\<open>Transcendental.arctan\<close>, _) $ _)) =>
Simplifier.rewrite ctxt' ct
| _ => raise CTERM ("machin_conv", [ct])
end
@@ -520,8 +520,8 @@
text \<open>
We can use the simple Machin formula and the Taylor series expansion of the arctangent
- to approximate pi. For a given even natural number $n$, we expand @{term "arctan (1/5)"}
- to $3n$ summands and @{term "arctan (1/239)"} to $n$ summands. This gives us at least
+ to approximate pi. For a given even natural number $n$, we expand \<^term>\<open>arctan (1/5)\<close>
+ to $3n$ summands and \<^term>\<open>arctan (1/239)\<close> to $n$ summands. This gives us at least
$13n-2$ bits of precision.
\<close>
--- a/src/HOL/Analysis/measurable.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/measurable.ML Sat Jan 05 17:24:33 2019 +0100
@@ -67,10 +67,10 @@
);
val debug =
- Attrib.setup_config_bool @{binding measurable_debug} (K false)
+ Attrib.setup_config_bool \<^binding>\<open>measurable_debug\<close> (K false)
val split =
- Attrib.setup_config_bool @{binding measurable_split} (K true)
+ Attrib.setup_config_bool \<^binding>\<open>measurable_split\<close> (K true)
fun map_data f1 f2 f3 f4
{measurable_thms = t1, dest_thms = t2, cong_thms = t3, preprocessors = t4 } =
@@ -130,15 +130,15 @@
fun dest_measurable_fun t =
(case t of
- (Const (@{const_name "Set.member"}, _) $ f $ (Const (@{const_name "measurable"}, _) $ _ $ _)) => f
+ (Const (\<^const_name>\<open>Set.member\<close>, _) $ f $ (Const (\<^const_name>\<open>measurable\<close>, _) $ _ $ _)) => f
| _ => raise (TERM ("not a measurability predicate", [t])))
fun not_measurable_prop n thm =
if length (Thm.prems_of thm) < n then false
else
(case nth_hol_goal thm n of
- (Const (@{const_name "Set.member"}, _) $ _ $ (Const (@{const_name "sets"}, _) $ _)) => false
- | (Const (@{const_name "Set.member"}, _) $ _ $ (Const (@{const_name "measurable"}, _) $ _ $ _)) => false
+ (Const (\<^const_name>\<open>Set.member\<close>, _) $ _ $ (Const (\<^const_name>\<open>sets\<close>, _) $ _)) => false
+ | (Const (\<^const_name>\<open>Set.member\<close>, _) $ _ $ (Const (\<^const_name>\<open>measurable\<close>, _) $ _ $ _)) => false
| _ => true)
handle TERM _ => true;
@@ -149,7 +149,7 @@
fun cnt_prefixes ctxt (Abs (n, T, t)) =
let
- fun is_countable ty = Sign.of_sort (Proof_Context.theory_of ctxt) (ty, @{sort countable})
+ fun is_countable ty = Sign.of_sort (Proof_Context.theory_of ctxt) (ty, \<^sort>\<open>countable\<close>)
fun cnt_walk (Abs (ns, T, t)) Ts =
map (fn (t', t'') => (Abs (ns, T, t'), t'')) (cnt_walk t (T::Ts))
| cnt_walk (f $ g) Ts = let
@@ -219,12 +219,12 @@
val (thms, ctxt) = prepare_facts ctxt facts
- fun is_sets_eq (Const (@{const_name "HOL.eq"}, _) $
- (Const (@{const_name "sets"}, _) $ _) $
- (Const (@{const_name "sets"}, _) $ _)) = true
- | is_sets_eq (Const (@{const_name "HOL.eq"}, _) $
- (Const (@{const_name "measurable"}, _) $ _ $ _) $
- (Const (@{const_name "measurable"}, _) $ _ $ _)) = true
+ fun is_sets_eq (Const (\<^const_name>\<open>HOL.eq\<close>, _) $
+ (Const (\<^const_name>\<open>sets\<close>, _) $ _) $
+ (Const (\<^const_name>\<open>sets\<close>, _) $ _)) = true
+ | is_sets_eq (Const (\<^const_name>\<open>HOL.eq\<close>, _) $
+ (Const (\<^const_name>\<open>measurable\<close>, _) $ _ $ _) $
+ (Const (\<^const_name>\<open>measurable\<close>, _) $ _ $ _)) = true
| is_sets_eq _ = false
val cong_thms = get_cong (Context.Proof ctxt) @
--- a/src/HOL/Analysis/normarith.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Analysis/normarith.ML Sat Jan 05 17:24:33 2019 +0100
@@ -16,17 +16,17 @@
open Conv;
val bool_eq = op = : bool *bool -> bool
fun dest_ratconst t = case Thm.term_of t of
- Const(@{const_name divide}, _)$a$b => Rat.make(HOLogic.dest_number a |> snd, HOLogic.dest_number b |> snd)
- | Const(@{const_name inverse}, _)$a => Rat.make(1, HOLogic.dest_number a |> snd)
+ Const(\<^const_name>\<open>divide\<close>, _)$a$b => Rat.make(HOLogic.dest_number a |> snd, HOLogic.dest_number b |> snd)
+ | Const(\<^const_name>\<open>inverse\<close>, _)$a => Rat.make(1, HOLogic.dest_number a |> snd)
| _ => Rat.of_int (HOLogic.dest_number (Thm.term_of t) |> snd)
fun is_ratconst t = can dest_ratconst t
fun augment_norm b t acc = case Thm.term_of t of
- Const(@{const_name norm}, _) $ _ => insert (eq_pair bool_eq (op aconvc)) (b,Thm.dest_arg t) acc
+ Const(\<^const_name>\<open>norm\<close>, _) $ _ => insert (eq_pair bool_eq (op aconvc)) (b,Thm.dest_arg t) acc
| _ => acc
fun find_normedterms t acc = case Thm.term_of t of
- @{term "(+) :: real => _"}$_$_ =>
+ \<^term>\<open>(+) :: real => _\<close>$_$_ =>
find_normedterms (Thm.dest_arg1 t) (find_normedterms (Thm.dest_arg t) acc)
- | @{term "(*) :: real => _"}$_$_ =>
+ | \<^term>\<open>(*) :: real => _\<close>$_$_ =>
if not (is_ratconst (Thm.dest_arg1 t)) then acc else
augment_norm (dest_ratconst (Thm.dest_arg1 t) >= @0)
(Thm.dest_arg t) acc
@@ -51,13 +51,13 @@
*)
fun vector_lincomb t = case Thm.term_of t of
- Const(@{const_name plus}, _) $ _ $ _ =>
+ Const(\<^const_name>\<open>plus\<close>, _) $ _ $ _ =>
cterm_lincomb_add (vector_lincomb (Thm.dest_arg1 t)) (vector_lincomb (Thm.dest_arg t))
- | Const(@{const_name minus}, _) $ _ $ _ =>
+ | Const(\<^const_name>\<open>minus\<close>, _) $ _ $ _ =>
cterm_lincomb_sub (vector_lincomb (Thm.dest_arg1 t)) (vector_lincomb (Thm.dest_arg t))
- | Const(@{const_name scaleR}, _)$_$_ =>
+ | Const(\<^const_name>\<open>scaleR\<close>, _)$_$_ =>
cterm_lincomb_cmul (dest_ratconst (Thm.dest_arg1 t)) (vector_lincomb (Thm.dest_arg t))
- | Const(@{const_name uminus}, _)$_ =>
+ | Const(\<^const_name>\<open>uminus\<close>, _)$_ =>
cterm_lincomb_neg (vector_lincomb (Thm.dest_arg t))
(* FIXME: how should we handle numerals?
| Const(@ {const_name vec},_)$_ =>
@@ -82,8 +82,8 @@
| SOME _ => fns) ts []
fun replacenegnorms cv t = case Thm.term_of t of
- @{term "(+) :: real => _"}$_$_ => binop_conv (replacenegnorms cv) t
-| @{term "(*) :: real => _"}$_$_ =>
+ \<^term>\<open>(+) :: real => _\<close>$_$_ => binop_conv (replacenegnorms cv) t
+| \<^term>\<open>(*) :: real => _\<close>$_$_ =>
if dest_ratconst (Thm.dest_arg1 t) < @0 then arg_conv cv t else Thm.reflexive t
| _ => Thm.reflexive t
(*
@@ -148,10 +148,10 @@
fun cterm_of_rat x =
let val (a, b) = Rat.dest x
in
- if b = 1 then Numeral.mk_cnumber @{ctyp "real"} a
- else Thm.apply (Thm.apply @{cterm "(/) :: real => _"}
- (Numeral.mk_cnumber @{ctyp "real"} a))
- (Numeral.mk_cnumber @{ctyp "real"} b)
+ if b = 1 then Numeral.mk_cnumber \<^ctyp>\<open>real\<close> a
+ else Thm.apply (Thm.apply \<^cterm>\<open>(/) :: real => _\<close>
+ (Numeral.mk_cnumber \<^ctyp>\<open>real\<close> a))
+ (Numeral.mk_cnumber \<^ctyp>\<open>real\<close> b)
end;
fun norm_cmul_rule c th = Thm.instantiate' [] [SOME (cterm_of_rat c)] (th RS @{thm norm_cmul_rule_thm});
@@ -164,7 +164,7 @@
(* FIXME : Should be computed statically!! *)
val real_poly_conv =
Semiring_Normalizer.semiring_normalize_wrapper ctxt
- (the (Semiring_Normalizer.match ctxt @{cterm "(0::real) + 1"}))
+ (the (Semiring_Normalizer.match ctxt \<^cterm>\<open>(0::real) + 1\<close>))
in
fconv_rule (arg_conv ((rewr_conv @{thm ge_iff_diff_ge_0}) then_conv
arg_conv (Numeral_Simprocs.field_comp_conv ctxt then_conv real_poly_conv)))
@@ -190,9 +190,9 @@
val apply_pthd = try_conv (rewr_conv @{thm pth_d});
fun headvector t = case t of
- Const(@{const_name plus}, _)$
- (Const(@{const_name scaleR}, _)$_$v)$_ => v
- | Const(@{const_name scaleR}, _)$_$v => v
+ Const(\<^const_name>\<open>plus\<close>, _)$
+ (Const(\<^const_name>\<open>scaleR\<close>, _)$_$v)$_ => v
+ | Const(\<^const_name>\<open>scaleR\<close>, _)$_$v => v
| _ => error "headvector: non-canonical term"
fun vector_cmul_conv ctxt ct =
@@ -204,7 +204,7 @@
(apply_pth8 ctxt ct
handle CTERM _ =>
(case Thm.term_of ct of
- Const(@{const_name plus},_)$lt$rt =>
+ Const(\<^const_name>\<open>plus\<close>,_)$lt$rt =>
let
val l = headvector lt
val r = headvector rt
@@ -220,7 +220,7 @@
| _ => Thm.reflexive ct))
fun vector_canon_conv ctxt ct = case Thm.term_of ct of
- Const(@{const_name plus},_)$_$_ =>
+ Const(\<^const_name>\<open>plus\<close>,_)$_$_ =>
let
val ((p,l),r) = Thm.dest_comb ct |>> Thm.dest_comb
val lth = vector_canon_conv ctxt l
@@ -228,16 +228,16 @@
val th = Drule.binop_cong_rule p lth rth
in fconv_rule (arg_conv (vector_add_conv ctxt)) th end
-| Const(@{const_name scaleR}, _)$_$_ =>
+| Const(\<^const_name>\<open>scaleR\<close>, _)$_$_ =>
let
val (p,r) = Thm.dest_comb ct
val rth = Drule.arg_cong_rule p (vector_canon_conv ctxt r)
in fconv_rule (arg_conv (apply_pth4 else_conv (vector_cmul_conv ctxt))) rth
end
-| Const(@{const_name minus},_)$_$_ => (apply_pth2 then_conv (vector_canon_conv ctxt)) ct
+| Const(\<^const_name>\<open>minus\<close>,_)$_$_ => (apply_pth2 then_conv (vector_canon_conv ctxt)) ct
-| Const(@{const_name uminus},_)$_ => (apply_pth3 then_conv (vector_canon_conv ctxt)) ct
+| Const(\<^const_name>\<open>uminus\<close>,_)$_ => (apply_pth3 then_conv (vector_canon_conv ctxt)) ct
(* FIXME
| Const(@{const_name vec},_)$n =>
@@ -249,7 +249,7 @@
| _ => apply_pth1 ct
fun norm_canon_conv ctxt ct = case Thm.term_of ct of
- Const(@{const_name norm},_)$_ => arg_conv (vector_canon_conv ctxt) ct
+ Const(\<^const_name>\<open>norm\<close>,_)$_ => arg_conv (vector_canon_conv ctxt) ct
| _ => raise CTERM ("norm_canon_conv", [ct])
fun int_flip v eq =
@@ -267,7 +267,7 @@
(* FIXME: Should be computed statically!!*)
val real_poly_conv =
Semiring_Normalizer.semiring_normalize_wrapper ctxt
- (the (Semiring_Normalizer.match ctxt @{cterm "(0::real) + 1"}))
+ (the (Semiring_Normalizer.match ctxt \<^cterm>\<open>(0::real) + 1\<close>))
val sources = map (Thm.dest_arg o Thm.dest_arg1 o concl) nubs
val rawdests = fold_rev (find_normedterms o Thm.dest_arg o concl) (ges @ gts) []
val _ = if not (forall fst rawdests) then error "real_vector_combo_prover: Sanity check"
@@ -347,13 +347,13 @@
fun instantiate_cterm' ty tms = Drule.cterm_rule (Thm.instantiate' ty tms)
fun mk_norm t =
let val T = Thm.typ_of_cterm t
- in Thm.apply (Thm.cterm_of ctxt' (Const (@{const_name norm}, T --> @{typ real}))) t end
+ in Thm.apply (Thm.cterm_of ctxt' (Const (\<^const_name>\<open>norm\<close>, T --> \<^typ>\<open>real\<close>))) t end
fun mk_equals l r =
let
val T = Thm.typ_of_cterm l
- val eq = Thm.cterm_of ctxt (Const (@{const_name Pure.eq}, T --> T --> propT))
+ val eq = Thm.cterm_of ctxt (Const (\<^const_name>\<open>Pure.eq\<close>, T --> T --> propT))
in Thm.apply (Thm.apply eq l) r end
- val asl = map2 (fn (t,_) => fn n => Thm.assume (mk_equals (mk_norm t) (Thm.cterm_of ctxt' (Free(n,@{typ real}))))) lctab fxns
+ val asl = map2 (fn (t,_) => fn n => Thm.assume (mk_equals (mk_norm t) (Thm.cterm_of ctxt' (Free(n,\<^typ>\<open>real\<close>))))) lctab fxns
val replace_conv = try_conv (rewrs_conv asl)
val replace_rule = fconv_rule (funpow 2 arg_conv (replacenegnorms replace_conv))
val ges' =
@@ -380,7 +380,7 @@
let
val real_poly_neg_conv = #neg
(Semiring_Normalizer.semiring_normalizers_ord_wrapper ctxt
- (the (Semiring_Normalizer.match ctxt @{cterm "(0::real) + 1"})) Thm.term_ord)
+ (the (Semiring_Normalizer.match ctxt \<^cterm>\<open>(0::real) + 1\<close>)) Thm.term_ord)
val (th1,th2) = conj_pair(rawrule th)
in th1::fconv_rule (arg_conv (arg_conv (real_poly_neg_conv ctxt))) th2::acc
end
@@ -401,7 +401,7 @@
let
val ctxt' = Variable.declare_term (Thm.term_of ct) ctxt
val th = init_conv ctxt' ct
- in Thm.equal_elim (Drule.arg_cong_rule @{cterm Trueprop} (Thm.symmetric th))
+ in Thm.equal_elim (Drule.arg_cong_rule \<^cterm>\<open>Trueprop\<close> (Thm.symmetric th))
(pure ctxt' (Thm.rhs_of th))
end
--- a/src/HOL/Auth/CertifiedEmail.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/CertifiedEmail.thy Sat Jan 05 17:24:33 2019 +0100
@@ -55,8 +55,8 @@
Number cleartext, Nonce q, S2TTP\<rbrace> # evs1
\<in> certified_mail"
-| CM2: \<comment> \<open>The recipient records @{term S2TTP} while transmitting it and her
- password to @{term TTP} over an SSL channel.\<close>
+| CM2: \<comment> \<open>The recipient records \<^term>\<open>S2TTP\<close> while transmitting it and her
+ password to \<^term>\<open>TTP\<close> over an SSL channel.\<close>
"[|evs2 \<in> certified_mail;
Gets R \<lbrace>Agent S, Agent TTP, em, Number BothAuth, Number cleartext,
Nonce q, S2TTP\<rbrace> \<in> set evs2;
@@ -66,10 +66,10 @@
Notes TTP \<lbrace>Agent R, Agent TTP, S2TTP, Key(RPwd R), hr\<rbrace> # evs2
\<in> certified_mail"
-| CM3: \<comment> \<open>@{term TTP} simultaneously reveals the key to the recipient and gives
+| CM3: \<comment> \<open>\<^term>\<open>TTP\<close> simultaneously reveals the key to the recipient and gives
a receipt to the sender. The SSL channel does not authenticate
- the client (@{term R}), but @{term TTP} accepts the message only
- if the given password is that of the claimed sender, @{term R}.
+ the client (\<^term>\<open>R\<close>), but \<^term>\<open>TTP\<close> accepts the message only
+ if the given password is that of the claimed sender, \<^term>\<open>R\<close>.
He replies over the established SSL channel.\<close>
"[|evs3 \<in> certified_mail;
Notes TTP \<lbrace>Agent R, Agent TTP, S2TTP, Key(RPwd R), hr\<rbrace> \<in> set evs3;
@@ -137,9 +137,9 @@
apply (synth_analz_mono_contra, simp_all, blast+)
done
-text\<open>Cannot strengthen the first disjunct to @{term "R\<noteq>Spy"} because
+text\<open>Cannot strengthen the first disjunct to \<^term>\<open>R\<noteq>Spy\<close> because
the fakessl rule allows Spy to spoof the sender's name. Maybe can
-strengthen the second disjunct with @{term "R\<noteq>Spy"}.\<close>
+strengthen the second disjunct with \<^term>\<open>R\<noteq>Spy\<close>.\<close>
lemma hr_form:
"[|Notes TTP \<lbrace>Agent R, Agent TTP, S2TTP, pwd, hr\<rbrace> \<in> set evs;
evs \<in> certified_mail|]
@@ -177,8 +177,8 @@
"evs \<in> certified_mail ==> Key (privateKey b TTP) \<notin> analz(spies evs)"
by auto
-text\<open>Thus, prove any goal that assumes that @{term Spy} knows a private key
-belonging to @{term TTP}\<close>
+text\<open>Thus, prove any goal that assumes that \<^term>\<open>Spy\<close> knows a private key
+belonging to \<^term>\<open>TTP\<close>\<close>
declare Spy_dont_know_TTPKey_parts [THEN [2] rev_notE, elim!]
@@ -269,8 +269,8 @@
(K = KAB | Key K \<in> analz (spies evs))"
by (simp only: analz_image_freshK analz_image_freshK_simps)
-text\<open>@{term S2TTP} must have originated from a valid sender
- provided @{term K} is secure. Proof is surprisingly hard.\<close>
+text\<open>\<^term>\<open>S2TTP\<close> must have originated from a valid sender
+ provided \<^term>\<open>K\<close> is secure. Proof is surprisingly hard.\<close>
lemma Notes_SSL_imp_used:
"[|Notes B \<lbrace>Agent A, Agent B, X\<rbrace> \<in> set evs|] ==> X \<in> used evs"
@@ -340,9 +340,9 @@
done
-text\<open>Less easy to prove @{term "m'=m"}. Maybe needs a separate unicity
-theorem for ciphertexts of the form @{term "Crypt K (Number m)"},
-where @{term K} is secure.\<close>
+text\<open>Less easy to prove \<^term>\<open>m'=m\<close>. Maybe needs a separate unicity
+theorem for ciphertexts of the form \<^term>\<open>Crypt K (Number m)\<close>,
+where \<^term>\<open>K\<close> is secure.\<close>
lemma Key_unique_lemma [rule_format]:
"evs \<in> certified_mail ==>
Key K \<notin> analz (spies evs) \<longrightarrow>
@@ -387,7 +387,7 @@
subsection\<open>The Guarantees for Sender and Recipient\<close>
text\<open>A Sender's guarantee:
- If Spy gets the key then @{term R} is bad and @{term S} moreover
+ If Spy gets the key then \<^term>\<open>R\<close> is bad and \<^term>\<open>S\<close> moreover
gets his return receipt (and therefore has no grounds for complaint).\<close>
theorem S_fairness_bad_R:
"[|Says S R \<lbrace>Agent S, Agent TTP, Crypt K (Number m), Number AO,
@@ -427,8 +427,8 @@
by (blast dest: S_fairness_bad_R)
-text\<open>Agent @{term R}, who may be the Spy, doesn't receive the key
- until @{term S} has access to the return receipt.\<close>
+text\<open>Agent \<^term>\<open>R\<close>, who may be the Spy, doesn't receive the key
+ until \<^term>\<open>S\<close> has access to the return receipt.\<close>
theorem S_guarantee:
"[|Says S R \<lbrace>Agent S, Agent TTP, Crypt K (Number m), Number AO,
Number cleartext, Nonce q, S2TTP\<rbrace> \<in> set evs;
@@ -447,9 +447,9 @@
done
-text\<open>If @{term R} sends message 2, and a delivery certificate exists,
- then @{term R} receives the necessary key. This result is also important
- to @{term S}, as it confirms the validity of the return receipt.\<close>
+text\<open>If \<^term>\<open>R\<close> sends message 2, and a delivery certificate exists,
+ then \<^term>\<open>R\<close> receives the necessary key. This result is also important
+ to \<^term>\<open>S\<close>, as it confirms the validity of the return receipt.\<close>
theorem RR_validity:
"[|Crypt (priSK TTP) S2TTP \<in> used evs;
S2TTP = Crypt (pubEK TTP)
--- a/src/HOL/Auth/Event.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Event.thy Sat Jan 05 17:24:33 2019 +0100
@@ -72,8 +72,8 @@
Says A B X \<Rightarrow> parts {X} \<union> used evs
| Gets A X \<Rightarrow> used evs
| Notes A X \<Rightarrow> parts {X} \<union> used evs)"
- \<comment> \<open>The case for @{term Gets} seems anomalous, but @{term Gets} always
- follows @{term Says} in real protocols. Seems difficult to change.
+ \<comment> \<open>The case for \<^term>\<open>Gets\<close> seems anomalous, but \<^term>\<open>Gets\<close> always
+ follows \<^term>\<open>Says\<close> in real protocols. Seems difficult to change.
See \<open>Gets_correct\<close> in theory \<open>Guard/Extensions.thy\<close>.\<close>
lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs \<longrightarrow> X \<in> used evs"
@@ -87,7 +87,7 @@
done
-subsection\<open>Function @{term knows}\<close>
+subsection\<open>Function \<^term>\<open>knows\<close>\<close>
(*Simplifying
parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
@@ -99,7 +99,7 @@
by simp
text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
- on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
+ on whether \<^term>\<open>A=Spy\<close> and whether \<^term>\<open>A\<in>bad\<close>\<close>
lemma knows_Spy_Notes [simp]:
"knows Spy (Notes A X # evs) =
(if A\<in>bad then insert X (knows Spy evs) else knows Spy evs)"
@@ -236,10 +236,10 @@
used_Nil [simp del] used_Cons [simp del]
-text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) \<longrightarrow> P"}
+text\<open>For proving theorems of the form \<^term>\<open>X \<notin> analz (knows Spy evs) \<longrightarrow> P\<close>
New events added by induction to "evs" are discarded. Provided
this information isn't needed, the proof will be much shorter, since
- it will omit complicated reasoning about @{term analz}.\<close>
+ it will omit complicated reasoning about \<^term>\<open>analz\<close>.\<close>
lemmas analz_mono_contra =
knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
--- a/src/HOL/Auth/Guard/Analz.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/Analz.thy Sat Jan 05 17:24:33 2019 +0100
@@ -7,8 +7,8 @@
theory Analz imports Extensions begin
-text\<open>decomposition of @{term analz} into two parts:
- @{term pparts} (for pairs) and analz of @{term kparts}\<close>
+text\<open>decomposition of \<^term>\<open>analz\<close> into two parts:
+ \<^term>\<open>pparts\<close> (for pairs) and analz of \<^term>\<open>kparts\<close>\<close>
subsection\<open>messages that do not contribute to analz\<close>
@@ -20,7 +20,7 @@
| Fst [dest]: "[| \<lbrace>X,Y\<rbrace> \<in> pparts H; is_MPair X |] ==> X \<in> pparts H"
| Snd [dest]: "[| \<lbrace>X,Y\<rbrace> \<in> pparts H; is_MPair Y |] ==> Y \<in> pparts H"
-subsection\<open>basic facts about @{term pparts}\<close>
+subsection\<open>basic facts about \<^term>\<open>pparts\<close>\<close>
lemma pparts_is_MPair [dest]: "X \<in> pparts H \<Longrightarrow> is_MPair X"
by (erule pparts.induct, auto)
@@ -98,13 +98,13 @@
lemma in_pparts: "Y \<in> pparts H \<Longrightarrow> \<exists>X. X \<in> H \<and> Y \<in> pparts {X}"
by (erule pparts.induct, auto)
-subsection\<open>facts about @{term pparts} and @{term parts}\<close>
+subsection\<open>facts about \<^term>\<open>pparts\<close> and \<^term>\<open>parts\<close>\<close>
lemma pparts_no_Nonce [dest]: "[| X \<in> pparts {Y}; Nonce n \<notin> parts {Y} |]
==> Nonce n \<notin> parts {X}"
by (erule pparts.induct, simp_all)
-subsection\<open>facts about @{term pparts} and @{term analz}\<close>
+subsection\<open>facts about \<^term>\<open>pparts\<close> and \<^term>\<open>analz\<close>\<close>
lemma pparts_analz: "X \<in> pparts H \<Longrightarrow> X \<in> analz H"
by (erule pparts.induct, auto)
@@ -122,7 +122,7 @@
| Fst [intro]: "[| \<lbrace>X,Y\<rbrace> \<in> pparts H; not_MPair X |] ==> X \<in> kparts H"
| Snd [intro]: "[| \<lbrace>X,Y\<rbrace> \<in> pparts H; not_MPair Y |] ==> Y \<in> kparts H"
-subsection\<open>basic facts about @{term kparts}\<close>
+subsection\<open>basic facts about \<^term>\<open>kparts\<close>\<close>
lemma kparts_not_MPair [dest]: "X \<in> kparts H \<Longrightarrow> not_MPair X"
by (erule kparts.induct, auto)
@@ -195,7 +195,7 @@
lemma kparts_has_no_pair [iff]: "has_no_pair (kparts H)"
by auto
-subsection\<open>facts about @{term kparts} and @{term parts}\<close>
+subsection\<open>facts about \<^term>\<open>kparts\<close> and \<^term>\<open>parts\<close>\<close>
lemma kparts_no_Nonce [dest]: "[| X \<in> kparts {Y}; Nonce n \<notin> parts {Y} |]
==> Nonce n \<notin> parts {X}"
@@ -212,7 +212,7 @@
Nonce n \<in> parts {Y} |] ==> Nonce n \<in> parts {Z}"
by auto
-subsection\<open>facts about @{term kparts} and @{term analz}\<close>
+subsection\<open>facts about \<^term>\<open>kparts\<close> and \<^term>\<open>analz\<close>\<close>
lemma kparts_analz: "X \<in> kparts H \<Longrightarrow> X \<in> analz H"
by (erule kparts.induct, auto dest: pparts_analz)
--- a/src/HOL/Auth/Guard/Extensions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/Extensions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -442,7 +442,7 @@
spies_max :: "event list => msg set" where
"spies_max evs == knows_max Spy evs"
-subsubsection\<open>basic facts about @{term knows_max}\<close>
+subsubsection\<open>basic facts about \<^term>\<open>knows_max\<close>\<close>
lemma spies_max_spies [iff]: "spies_max evs = spies evs"
by (induct evs, auto simp: knows_max_def split: event.splits)
--- a/src/HOL/Auth/Guard/Guard.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/Guard.thy Sat Jan 05 17:24:33 2019 +0100
@@ -21,7 +21,7 @@
| Crypt [intro]: "X \<in> guard n Ks \<Longrightarrow> Crypt K X \<in> guard n Ks"
| Pair [intro]: "[| X \<in> guard n Ks; Y \<in> guard n Ks |] ==> \<lbrace>X,Y\<rbrace> \<in> guard n Ks"
-subsection\<open>basic facts about @{term guard}\<close>
+subsection\<open>basic facts about \<^term>\<open>guard\<close>\<close>
lemma Key_is_guard [iff]: "Key K \<in> guard n Ks"
by auto
@@ -73,7 +73,7 @@
definition Guard :: "nat \<Rightarrow> key set \<Rightarrow> msg set \<Rightarrow> bool" where
"Guard n Ks H \<equiv> \<forall>X. X \<in> H \<longrightarrow> X \<in> guard n Ks"
-subsection\<open>basic facts about @{term Guard}\<close>
+subsection\<open>basic facts about \<^term>\<open>Guard\<close>\<close>
lemma Guard_empty [iff]: "Guard n Ks {}"
by (simp add: Guard_def)
@@ -178,7 +178,7 @@
| "crypt_nb \<lbrace>X,Y\<rbrace> = crypt_nb X + crypt_nb Y"
| "crypt_nb X = 0" (* otherwise *)
-subsection\<open>basic facts about @{term crypt_nb}\<close>
+subsection\<open>basic facts about \<^term>\<open>crypt_nb\<close>\<close>
lemma non_empty_crypt_msg: "Crypt K Y \<in> parts {X} \<Longrightarrow> crypt_nb X \<noteq> 0"
by (induct X, simp_all, safe, simp_all)
@@ -190,7 +190,7 @@
"cnb [] = 0"
| "cnb (X#l) = crypt_nb X + cnb l"
-subsection\<open>basic facts about @{term cnb}\<close>
+subsection\<open>basic facts about \<^term>\<open>cnb\<close>\<close>
lemma cnb_app [simp]: "cnb (l @ l') = cnb l + cnb l'"
by (induct l, auto)
@@ -241,7 +241,7 @@
declare decrypt'_def [simp]
-subsection\<open>basic facts about @{term decrypt'}\<close>
+subsection\<open>basic facts about \<^term>\<open>decrypt'\<close>\<close>
lemma decrypt_minus: "decrypt (set l) K Y <= set (decrypt' l K Y)"
by (induct l, auto)
--- a/src/HOL/Auth/Guard/GuardK.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/GuardK.thy Sat Jan 05 17:24:33 2019 +0100
@@ -28,7 +28,7 @@
| Crypt [intro]: "X \<in> guardK n Ks \<Longrightarrow> Crypt K X \<in> guardK n Ks"
| Pair [intro]: "[| X \<in> guardK n Ks; Y \<in> guardK n Ks |] ==> \<lbrace>X,Y\<rbrace> \<in> guardK n Ks"
-subsection\<open>basic facts about @{term guardK}\<close>
+subsection\<open>basic facts about \<^term>\<open>guardK\<close>\<close>
lemma Nonce_is_guardK [iff]: "Nonce p \<in> guardK n Ks"
by auto
@@ -82,7 +82,7 @@
definition GuardK :: "nat \<Rightarrow> key set \<Rightarrow> msg set \<Rightarrow> bool" where
"GuardK n Ks H \<equiv> \<forall>X. X \<in> H \<longrightarrow> X \<in> guardK n Ks"
-subsection\<open>basic facts about @{term GuardK}\<close>
+subsection\<open>basic facts about \<^term>\<open>GuardK\<close>\<close>
lemma GuardK_empty [iff]: "GuardK n Ks {}"
by (simp add: GuardK_def)
@@ -175,7 +175,7 @@
"crypt_nb \<lbrace>X,Y\<rbrace> = crypt_nb X + crypt_nb Y" |
"crypt_nb X = 0" (* otherwise *)
-subsection\<open>basic facts about @{term crypt_nb}\<close>
+subsection\<open>basic facts about \<^term>\<open>crypt_nb\<close>\<close>
lemma non_empty_crypt_msg: "Crypt K Y \<in> parts {X} \<Longrightarrow> crypt_nb X \<noteq> 0"
by (induct X, simp_all, safe, simp_all)
@@ -186,7 +186,7 @@
"cnb [] = 0" |
"cnb (X#l) = crypt_nb X + cnb l"
-subsection\<open>basic facts about @{term cnb}\<close>
+subsection\<open>basic facts about \<^term>\<open>cnb\<close>\<close>
lemma cnb_app [simp]: "cnb (l @ l') = cnb l + cnb l'"
by (induct l, auto)
@@ -235,7 +235,7 @@
declare decrypt'_def [simp]
-subsection\<open>basic facts about @{term decrypt'}\<close>
+subsection\<open>basic facts about \<^term>\<open>decrypt'\<close>\<close>
lemma decrypt_minus: "decrypt (set l) K Y <= set (decrypt' l K Y)"
by (induct l, auto)
--- a/src/HOL/Auth/Guard/Guard_Public.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/Guard_Public.thy Sat Jan 05 17:24:33 2019 +0100
@@ -30,7 +30,7 @@
lemma agt_pubK [simp]: "agt (pubK A) = A"
by (simp add: agt_def)
-subsubsection\<open>basic facts about @{term initState}\<close>
+subsubsection\<open>basic facts about \<^term>\<open>initState\<close>\<close>
lemma no_Crypt_in_parts_init [simp]: "Crypt K X \<notin> parts (initState A)"
by (cases A, auto simp: initState.simps)
--- a/src/HOL/Auth/Guard/Guard_Shared.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Guard/Guard_Shared.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,7 +25,7 @@
lemma agt_shrK [simp]: "agt (shrK A) = A"
by (simp add: agt_def)
-subsubsection\<open>basic facts about @{term initState}\<close>
+subsubsection\<open>basic facts about \<^term>\<open>initState\<close>\<close>
lemma no_Crypt_in_parts_init [simp]: "Crypt K X \<notin> parts (initState A)"
by (cases A, auto simp: initState.simps)
--- a/src/HOL/Auth/KerberosIV.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/KerberosIV.thy Sat Jan 05 17:24:33 2019 +0100
@@ -292,7 +292,7 @@
lemmas parts_spies_takeWhile_mono = spies_takeWhile [THEN parts_mono]
-subsection\<open>Lemmas about @{term authKeys}\<close>
+subsection\<open>Lemmas about \<^term>\<open>authKeys\<close>\<close>
lemma authKeys_empty: "authKeys [] = {}"
apply (unfold authKeys_def)
@@ -1006,7 +1006,7 @@
done
-subsection\<open>Lemmas About the Predicate @{term AKcryptSK}\<close>
+subsection\<open>Lemmas About the Predicate \<^term>\<open>AKcryptSK\<close>\<close>
lemma not_AKcryptSK_Nil [iff]: "\<not> AKcryptSK authK servK []"
by (simp add: AKcryptSK_def)
@@ -1344,10 +1344,10 @@
apply (erule kerbIV.induct)
apply (rule_tac [9] impI)+
\<comment> \<open>The Oops1 case is unusual: must simplify
- @{term "Authkey \<notin> analz (spies (ev#evs))"}, not letting
+ \<^term>\<open>Authkey \<notin> analz (spies (ev#evs))\<close>, not letting
\<open>analz_mono_contra\<close> weaken it to
- @{term "Authkey \<notin> analz (spies evs)"},
- for we then conclude @{term "authK \<noteq> authKa"}.\<close>
+ \<^term>\<open>Authkey \<notin> analz (spies evs)\<close>,
+ for we then conclude \<^term>\<open>authK \<noteq> authKa\<close>.\<close>
apply analz_mono_contra
apply (frule_tac [10] Oops_range_spies2)
apply (frule_tac [9] Oops_range_spies1)
--- a/src/HOL/Auth/KerberosIV_Gets.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/KerberosIV_Gets.thy Sat Jan 05 17:24:33 2019 +0100
@@ -260,7 +260,7 @@
"\<lbrakk> Gets B X \<in> set evs; evs \<in> kerbIV_gets \<rbrakk> \<Longrightarrow> X \<in> knows B evs"
by (metis Gets_imp_knows_Spy Gets_imp_knows_agents)
-subsection\<open>Lemmas about @{term authKeys}\<close>
+subsection\<open>Lemmas about \<^term>\<open>authKeys\<close>\<close>
lemma authKeys_empty: "authKeys [] = {}"
by (simp add: authKeys_def)
@@ -717,7 +717,7 @@
txt\<open>Besides, since authKa originated with Kas anyway...\<close>
apply (clarify, drule K3_imp_K2, assumption, assumption)
apply (clarify, drule Says_Kas_message_form, assumption)
-txt\<open>...it cannot be a shared key*. Therefore @{term servK_authentic} applies.
+txt\<open>...it cannot be a shared key*. Therefore \<^term>\<open>servK_authentic\<close> applies.
Contradition: Tgs used authK as a servkey,
while Kas used it as an authkey\<close>
apply (blast dest: servK_authentic Says_Tgs_message_form)
@@ -868,7 +868,7 @@
done
-subsection\<open>Lemmas About the Predicate @{term AKcryptSK}\<close>
+subsection\<open>Lemmas About the Predicate \<^term>\<open>AKcryptSK\<close>\<close>
lemma not_AKcryptSK_Nil [iff]: "\<not> AKcryptSK authK servK []"
by (simp add: AKcryptSK_def)
@@ -1212,10 +1212,10 @@
apply (erule kerbIV_gets.induct)
apply (rule_tac [10] impI)+
\<comment> \<open>The Oops1 case is unusual: must simplify
- @{term "Authkey \<notin> analz (spies (ev#evs))"}, not letting
+ \<^term>\<open>Authkey \<notin> analz (spies (ev#evs))\<close>, not letting
\<open>analz_mono_contra\<close> weaken it to
- @{term "Authkey \<notin> analz (spies evs)"},
- for we then conclude @{term "authK \<noteq> authKa"}.\<close>
+ \<^term>\<open>Authkey \<notin> analz (spies evs)\<close>,
+ for we then conclude \<^term>\<open>authK \<noteq> authKa\<close>.\<close>
apply analz_mono_contra
apply (frule_tac [11] Oops_range_spies2)
apply (frule_tac [10] Oops_range_spies1)
--- a/src/HOL/Auth/KerberosV.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/KerberosV.thy Sat Jan 05 17:24:33 2019 +0100
@@ -243,7 +243,7 @@
lemmas parts_spies_takeWhile_mono = spies_takeWhile [THEN parts_mono]
-subsection\<open>Lemmas about @{term authKeys}\<close>
+subsection\<open>Lemmas about \<^term>\<open>authKeys\<close>\<close>
lemma authKeys_empty: "authKeys [] = {}"
by (simp add: authKeys_def)
@@ -749,7 +749,7 @@
apply blast+
done
-subsection\<open>Lemmas About the Predicate @{term AKcryptSK}\<close>
+subsection\<open>Lemmas About the Predicate \<^term>\<open>AKcryptSK\<close>\<close>
lemma not_AKcryptSK_Nil [iff]: "\<not> AKcryptSK authK servK []"
apply (simp add: AKcryptSK_def)
@@ -1063,10 +1063,10 @@
apply (erule kerbV.induct)
apply (rule_tac [9] impI)+
\<comment> \<open>The Oops1 case is unusual: must simplify
- @{term "Authkey \<notin> analz (spies (ev#evs))"}, not letting
+ \<^term>\<open>Authkey \<notin> analz (spies (ev#evs))\<close>, not letting
\<open>analz_mono_contra\<close> weaken it to
- @{term "Authkey \<notin> analz (spies evs)"},
- for we then conclude @{term "authK \<noteq> authKa"}.\<close>
+ \<^term>\<open>Authkey \<notin> analz (spies evs)\<close>,
+ for we then conclude \<^term>\<open>authK \<noteq> authKa\<close>.\<close>
apply analz_mono_contra
apply (frule_tac [10] Oops_range_spies2)
apply (frule_tac [9] Oops_range_spies1)
@@ -1415,7 +1415,7 @@
apply (frule servK_authentic_ter, blast, assumption+)
apply (drule parts_spies_takeWhile_mono [THEN subsetD])
apply (drule parts_spies_evs_revD2 [THEN subsetD])
-txt\<open>@{term Says_K5} closes the proof in version IV because it is clear which
+txt\<open>\<^term>\<open>Says_K5\<close> closes the proof in version IV because it is clear which
servTicket an authenticator appears with in msg 5. In version V an authenticator can appear with any item that the spy could replace the servTicket with\<close>
apply (frule Says_K5, blast)
txt\<open>We need to state that an honest agent wouldn't send the wrong timestamp
--- a/src/HOL/Auth/Message.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Message.thy Sat Jan 05 17:24:33 2019 +0100
@@ -223,7 +223,7 @@
text\<open>This allows \<open>blast\<close> to simplify occurrences of
- @{term "parts(G\<union>H)"} in the assumption.\<close>
+ \<^term>\<open>parts(G\<union>H)\<close> in the assumption.\<close>
lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE]
declare in_parts_UnE [elim!]
@@ -469,8 +469,8 @@
text\<open>Case analysis: either the message is secure, or it is not! Effective,
but can cause subgoals to blow up! Use with \<open>if_split\<close>; apparently
-\<open>split_tac\<close> does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"}\<close>
+\<open>split_tac\<close> does not cope with patterns such as \<^term>\<open>analz (insert
+(Crypt K X) H)\<close>\<close>
lemma analz_Crypt_if [simp]:
"analz (insert (Crypt K X) H) =
(if (Key (invKey K) \<in> analz H)
@@ -580,7 +580,7 @@
by (auto, erule synth.induct, auto)
text\<open>NO \<open>Agent_synth\<close>, as any Agent name can be synthesized.
- The same holds for @{term Number}\<close>
+ The same holds for \<^term>\<open>Number\<close>\<close>
inductive_simps synth_simps [iff]:
"Nonce n \<in> synth H"
@@ -674,8 +674,8 @@
\<Longrightarrow> Z \<in> synth (analz H) \<union> parts H"
by (metis Fake_parts_insert set_mp)
-text\<open>@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put
- @{term "G=H"}.\<close>
+text\<open>\<^term>\<open>H\<close> is sometimes \<^term>\<open>Key ` KK \<union> spies evs\<close>, so can't put
+ \<^term>\<open>G=H\<close>.\<close>
lemma Fake_analz_insert:
"X\<in> synth (analz G) ==>
analz (insert X H) \<subseteq> synth (analz G) \<union> analz (G \<union> H)"
@@ -876,7 +876,7 @@
\<close>
text\<open>By default only \<open>o_apply\<close> is built-in. But in the presence of
-eta-expansion this means that some terms displayed as @{term "f o g"} will be
+eta-expansion this means that some terms displayed as \<^term>\<open>f o g\<close> will be
rewritten, and others will not!\<close>
declare o_def [simp]
--- a/src/HOL/Auth/NS_Shared.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/NS_Shared.thy Sat Jan 05 17:24:33 2019 +0100
@@ -105,7 +105,7 @@
*)
-subsection\<open>Inductive proofs about @{term ns_shared}\<close>
+subsection\<open>Inductive proofs about \<^term>\<open>ns_shared\<close>\<close>
subsubsection\<open>Forwarding lemmas, to aid simplification\<close>
@@ -120,8 +120,8 @@
\<Longrightarrow> K \<in> parts (spies evs)"
by blast
-text\<open>Theorems of the form @{term "X \<notin> parts (spies evs)"} imply that NOBODY
- sends messages containing @{term X}\<close>
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (spies evs)\<close> imply that NOBODY
+ sends messages containing \<^term>\<open>X\<close>\<close>
text\<open>Spy never sees another agent's shared key! (unless it's bad at start)\<close>
lemma Spy_see_shrK [simp]:
@@ -311,8 +311,8 @@
Says B A (Crypt K (Nonce NB)) \<in> set evs"
apply (erule ns_shared.induct, force, drule_tac [4] NS3_msg_in_parts_spies)
apply (analz_mono_contra, simp_all, blast)
-txt\<open>NS2: contradiction from the assumptions @{term "Key K \<notin> used evs2"} and
- @{term "Crypt K (Nonce NB) \<in> parts (spies evs2)"}\<close>
+txt\<open>NS2: contradiction from the assumptions \<^term>\<open>Key K \<notin> used evs2\<close> and
+ \<^term>\<open>Crypt K (Nonce NB) \<in> parts (spies evs2)\<close>\<close>
apply (force dest!: Crypt_imp_keysFor)
txt\<open>NS4\<close>
apply (metis B_trusts_NS3 Crypt_Spy_analz_bad Says_imp_analz_Spy Says_imp_parts_knows_Spy analz.Fst unique_session_keys)
@@ -502,7 +502,7 @@
apply (simp_all add: takeWhile_tail)
txt\<open>NS3 remains by pure coincidence!\<close>
apply (force dest!: A_trusts_NS2 Says_Server_message_form)
-txt\<open>NS5 is the non-trivial case and cannot be solved as in @{term B_Issues_A}! because NB is not fresh. We need @{term A_trusts_NS5}, proved for this very purpose\<close>
+txt\<open>NS5 is the non-trivial case and cannot be solved as in \<^term>\<open>B_Issues_A\<close>! because NB is not fresh. We need \<^term>\<open>A_trusts_NS5\<close>, proved for this very purpose\<close>
apply (blast dest: A_trusts_NS5 parts_spies_takeWhile_mono [THEN subsetD]
parts_spies_evs_revD2 [THEN subsetD])
done
--- a/src/HOL/Auth/OtwayRees.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/OtwayRees.thy Sat Jan 05 17:24:33 2019 +0100
@@ -127,7 +127,7 @@
some reason proofs work without them!*)
-text\<open>Theorems of the form @{term "X \<notin> parts (spies evs)"} imply that
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (spies evs)\<close> imply that
NOBODY sends messages containing X!\<close>
text\<open>Spy never sees a good agent's shared key!\<close>
@@ -146,7 +146,7 @@
by (blast dest: Spy_see_shrK)
-subsection\<open>Towards Secrecy: Proofs Involving @{term analz}\<close>
+subsection\<open>Towards Secrecy: Proofs Involving \<^term>\<open>analz\<close>\<close>
(*Describes the form of K and NA when the Server sends this message. Also
for Oops case.*)
@@ -287,7 +287,7 @@
text\<open>Crucial secrecy property: Spy does not see the keys sent in msg OR3
Does not in itself guarantee security: an attack could violate
- the premises, e.g. by having @{term "A=Spy"}\<close>
+ the premises, e.g. by having \<^term>\<open>A=Spy\<close>\<close>
lemma secrecy_lemma:
"\<lbrakk>A \<notin> bad; B \<notin> bad; evs \<in> otway\<rbrakk>
\<Longrightarrow> Says Server B
@@ -319,11 +319,11 @@
text\<open>This form is an immediate consequence of the previous result. It is
similar to the assertions established by other methods. It is equivalent
-to the previous result in that the Spy already has @{term analz} and
-@{term synth} at his disposal. However, the conclusion
-@{term "Key K \<notin> knows Spy evs"} appears not to be inductive: all the cases
+to the previous result in that the Spy already has \<^term>\<open>analz\<close> and
+\<^term>\<open>synth\<close> at his disposal. However, the conclusion
+\<^term>\<open>Key K \<notin> knows Spy evs\<close> appears not to be inductive: all the cases
other than Fake are trivial, while Fake requires
-@{term "Key K \<notin> analz (knows Spy evs)"}.\<close>
+\<^term>\<open>Key K \<notin> analz (knows Spy evs)\<close>.\<close>
lemma Spy_not_know_encrypted_key:
"\<lbrakk>Says Server B
\<lbrace>NA, Crypt (shrK A) \<lbrace>NA, Key K\<rbrace>,
--- a/src/HOL/Auth/OtwayReesBella.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/OtwayReesBella.thy Sat Jan 05 17:24:33 2019 +0100
@@ -239,7 +239,7 @@
val analz_image_freshK_ss =
simpset_of
- (@{context} delsimps [image_insert, image_Un]
+ (\<^context> delsimps [image_insert, image_Un]
delsimps [@{thm imp_disjL}] (*reduces blow-up*)
addsimps @{thms analz_image_freshK_simps})
--- a/src/HOL/Auth/OtwayRees_AN.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/OtwayRees_AN.thy Sat Jan 05 17:24:33 2019 +0100
@@ -59,7 +59,7 @@
| OR4: \<comment> \<open>Bob receives the Server's (?) message and compares the Nonces with
those in the message he previously sent the Server.
- Need @{term "B \<noteq> Server"} because we allow messages to self.\<close>
+ Need \<^term>\<open>B \<noteq> Server\<close> because we allow messages to self.\<close>
"[| evs4 \<in> otway; B \<noteq> Server;
Says B Server \<lbrace>Agent A, Agent B, Nonce NA, Nonce NB\<rbrace> \<in>set evs4;
Gets B \<lbrace>X, Crypt(shrK B)\<lbrace>Nonce NB,Agent A,Agent B,Key K\<rbrace>\<rbrace>
@@ -109,7 +109,7 @@
by blast
-text\<open>Theorems of the form @{term "X \<notin> parts (spies evs)"} imply that
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (spies evs)\<close> imply that
NOBODY sends messages containing X!\<close>
text\<open>Spy never sees a good agent's shared key!\<close>
@@ -219,7 +219,7 @@
text\<open>Crucial secrecy property: Spy does not see the keys sent in msg OR3
Does not in itself guarantee security: an attack could violate
- the premises, e.g. by having @{term "A=Spy"}\<close>
+ the premises, e.g. by having \<^term>\<open>A=Spy\<close>\<close>
lemma secrecy_lemma:
"[| A \<notin> bad; B \<notin> bad; evs \<in> otway |]
==> Says Server B
--- a/src/HOL/Auth/OtwayRees_Bad.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/OtwayRees_Bad.thy Sat Jan 05 17:24:33 2019 +0100
@@ -67,7 +67,7 @@
| OR4: \<comment> \<open>Bob receives the Server's (?) message and compares the Nonces with
those in the message he previously sent the Server.
- Need @{term "B \<noteq> Server"} because we allow messages to self.\<close>
+ Need \<^term>\<open>B \<noteq> Server\<close> because we allow messages to self.\<close>
"[| evs4 \<in> otway; B \<noteq> Server;
Says B Server \<lbrace>Nonce NA, Agent A, Agent B, X', Nonce NB,
Crypt (shrK B) \<lbrace>Nonce NA, Agent A, Agent B\<rbrace>\<rbrace>
@@ -131,7 +131,7 @@
OR2_analz_knows_Spy [THEN analz_into_parts]
-text\<open>Theorems of the form @{term "X \<notin> parts (spies evs)"} imply that
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (spies evs)\<close> imply that
NOBODY sends messages containing X!\<close>
text\<open>Spy never sees a good agent's shared key!\<close>
@@ -208,7 +208,7 @@
text\<open>Crucial secrecy property: Spy does not see the keys sent in msg OR3
Does not in itself guarantee security: an attack could violate
- the premises, e.g. by having @{term "A=Spy"}\<close>
+ the premises, e.g. by having \<^term>\<open>A=Spy\<close>\<close>
lemma secrecy_lemma:
"[| A \<notin> bad; B \<notin> bad; evs \<in> otway |]
==> Says Server B
@@ -239,7 +239,7 @@
subsection\<open>Attempting to prove stronger properties\<close>
text\<open>Only OR1 can have caused such a part of a message to appear. The premise
- @{term "A \<noteq> B"} prevents OR2's similar-looking cryptogram from being picked
+ \<^term>\<open>A \<noteq> B\<close> prevents OR2's similar-looking cryptogram from being picked
up. Original Otway-Rees doesn't need it.\<close>
lemma Crypt_imp_OR1 [rule_format]:
"[| A \<notin> bad; A \<noteq> B; evs \<in> otway |]
@@ -252,7 +252,7 @@
text\<open>Crucial property: If the encrypted message appears, and A has used NA
to start a run, then it originated with the Server!
- The premise @{term "A \<noteq> B"} allows use of \<open>Crypt_imp_OR1\<close>\<close>
+ The premise \<^term>\<open>A \<noteq> B\<close> allows use of \<open>Crypt_imp_OR1\<close>\<close>
text\<open>Only it is FALSE. Somebody could make a fake message to Server
substituting some other nonce NA' for NB.\<close>
lemma "[| A \<notin> bad; A \<noteq> B; evs \<in> otway |]
--- a/src/HOL/Auth/Public.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Public.thy Sat Jan 05 17:24:33 2019 +0100
@@ -56,7 +56,7 @@
text\<open>By freeness of agents, no two agents have the same key. Since
- @{term "True\<noteq>False"}, no agent has identical signing and encryption keys\<close>
+ \<^term>\<open>True\<noteq>False\<close>, no agent has identical signing and encryption keys\<close>
specification (publicKey)
injective_publicKey:
"publicKey b A = publicKey c A' ==> b=c \<and> A=A'"
@@ -77,7 +77,7 @@
declare publicKey_neq_privateKey [iff]
-subsection\<open>Basic properties of @{term pubK} and @{term priK}\<close>
+subsection\<open>Basic properties of \<^term>\<open>pubK\<close> and \<^term>\<open>priK\<close>\<close>
lemma publicKey_inject [iff]: "(publicKey b A = publicKey c A') = (b=c \<and> A=A')"
by (blast dest!: injective_publicKey)
@@ -224,9 +224,9 @@
end
-text\<open>These lemmas allow reasoning about @{term "used evs"} rather than
- @{term "knows Spy evs"}, which is useful when there are private Notes.
- Because they depend upon the definition of @{term initState}, they cannot
+text\<open>These lemmas allow reasoning about \<^term>\<open>used evs\<close> rather than
+ \<^term>\<open>knows Spy evs\<close>, which is useful when there are private Notes.
+ Because they depend upon the definition of \<^term>\<open>initState\<close>, they cannot
be moved up.\<close>
lemma used_parts_subset_parts [rule_format]:
@@ -251,7 +251,7 @@
by (blast dest: MPair_used_D)
-text\<open>Rewrites should not refer to @{term "initState(Friend i)"} because
+text\<open>Rewrites should not refer to \<^term>\<open>initState(Friend i)\<close> because
that expression is not in normal form.\<close>
lemma keysFor_parts_initState [simp]: "keysFor (parts (initState C)) = {}"
@@ -293,7 +293,7 @@
declare neq_shrK [simp]
-subsection\<open>Function @{term spies}\<close>
+subsection\<open>Function \<^term>\<open>spies\<close>\<close>
lemma not_SignatureE [elim!]: "b \<noteq> Signature \<Longrightarrow> b = Encryption"
by (cases b, auto)
@@ -374,7 +374,7 @@
apply (rule someI, fast)
done
-subsection\<open>Specialized Rewriting for Theorems About @{term analz} and Image\<close>
+subsection\<open>Specialized Rewriting for Theorems About \<^term>\<open>analz\<close> and Image\<close>
lemma insert_Key_singleton: "insert (Key K) H = Key ` {K} \<union> H"
by blast
@@ -407,7 +407,7 @@
val analz_image_freshK_ss =
simpset_of
- (@{context} delsimps [image_insert, image_Un]
+ (\<^context> delsimps [image_insert, image_Un]
delsimps [@{thm imp_disjL}] (*reduces blow-up*)
addsimps @{thms analz_image_freshK_simps})
--- a/src/HOL/Auth/Recur.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Recur.thy Sat Jan 05 17:24:33 2019 +0100
@@ -163,7 +163,7 @@
[THEN respond.Cons, THEN respond.Cons]],
THEN recur.RA4, THEN recur.RA4])
apply basic_possibility
-apply (tactic "DEPTH_SOLVE (swap_res_tac @{context} [refl, conjI, disjCI] 1)")
+apply (tactic "DEPTH_SOLVE (swap_res_tac \<^context> [refl, conjI, disjCI] 1)")
done
@@ -396,8 +396,8 @@
apply blast
txt\<open>Inductive step of respond\<close>
apply (intro allI conjI impI, simp_all)
-txt\<open>by unicity, either @{term "B=Aa"} or @{term "B=A'"}, a contradiction
- if @{term "B \<in> bad"}\<close>
+txt\<open>by unicity, either \<^term>\<open>B=Aa\<close> or \<^term>\<open>B=A'\<close>, a contradiction
+ if \<^term>\<open>B \<in> bad\<close>\<close>
apply (blast dest: unique_session_keys respond_certificate)
apply (blast dest!: respond_certificate)
apply (blast dest!: resp_analz_insert)
--- a/src/HOL/Auth/Shared.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Shared.thy Sat Jan 05 17:24:33 2019 +0100
@@ -55,7 +55,7 @@
\<open>analz.Decrypt\<close> in its declaration.\<close>
declare analz.Decrypt [rule del]
-text\<open>Rewrites should not refer to @{term "initState(Friend i)"} because
+text\<open>Rewrites should not refer to \<^term>\<open>initState(Friend i)\<close> because
that expression is not in normal form.\<close>
lemma keysFor_parts_initState [simp]: "keysFor (parts (initState C)) = {}"
@@ -153,13 +153,13 @@
done
text\<open>Unlike the corresponding property of nonces, we cannot prove
- @{term "finite KK ==> \<exists>K. K \<notin> KK \<and> Key K \<notin> used evs"}.
+ \<^term>\<open>finite KK ==> \<exists>K. K \<notin> KK \<and> Key K \<notin> used evs\<close>.
We have infinitely many agents and there is nothing to stop their
long-term keys from exhausting all the natural numbers. Instead,
possibility theorems must assume the existence of a few keys.\<close>
-subsection\<open>Specialized Rewriting for Theorems About @{term analz} and Image\<close>
+subsection\<open>Specialized Rewriting for Theorems About \<^term>\<open>analz\<close> and Image\<close>
lemma subset_Compl_range: "A \<subseteq> - (range shrK) \<Longrightarrow> shrK x \<notin> A"
by blast
@@ -218,7 +218,7 @@
val analz_image_freshK_ss =
simpset_of
- (@{context} delsimps [image_insert, image_Un]
+ (\<^context> delsimps [image_insert, image_Un]
delsimps [@{thm imp_disjL}] (*reduces blow-up*)
addsimps @{thms analz_image_freshK_simps})
--- a/src/HOL/Auth/Smartcard/EventSC.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Smartcard/EventSC.thy Sat Jan 05 17:24:33 2019 +0100
@@ -95,9 +95,9 @@
| C_Gets C X => used evs
| Outpts C A X => parts{X} \<union> (used evs)
| A_Gets A X => used evs)"
- \<comment> \<open>@{term Gets} always follows @{term Says} in real protocols.
- Likewise, @{term C_Gets} will always have to follow @{term Inputs}
- and @{term A_Gets} will always have to follow @{term Outpts}\<close>
+ \<comment> \<open>\<^term>\<open>Gets\<close> always follows \<^term>\<open>Says\<close> in real protocols.
+ Likewise, \<^term>\<open>C_Gets\<close> will always have to follow \<^term>\<open>Inputs\<close>
+ and \<^term>\<open>A_Gets\<close> will always have to follow \<^term>\<open>Outpts\<close>\<close>
lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs \<longrightarrow> X \<in> used evs"
apply (induct_tac evs)
@@ -116,7 +116,7 @@
done
-subsection\<open>Function @{term knows}\<close>
+subsection\<open>Function \<^term>\<open>knows\<close>\<close>
(*Simplifying
parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
@@ -128,7 +128,7 @@
by simp
text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
- on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
+ on whether \<^term>\<open>A=Spy\<close> and whether \<^term>\<open>A\<in>bad\<close>\<close>
lemma knows_Spy_Notes [simp]:
"knows Spy (Notes A X # evs) =
(if A\<in>bad then insert X (knows Spy evs) else knows Spy evs)"
--- a/src/HOL/Auth/Smartcard/ShoupRubin.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Smartcard/ShoupRubin.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1320,7 +1320,7 @@
apply auto
done
-text\<open>@{term step2_integrity} also is a reliability theorem\<close>
+text\<open>\<^term>\<open>step2_integrity\<close> also is a reliability theorem\<close>
lemma Says_Server_message_form:
"\<lbrakk> Says Server A \<lbrace>Pk, Certificate\<rbrace> \<in> set evs;
evs \<in> sr \<rbrakk>
@@ -1334,9 +1334,9 @@
(*cannot be made useful to A in form of a Gets event*)
text\<open>
- step4integrity is @{term Outpts_A_Card_form_4}
+ step4integrity is \<^term>\<open>Outpts_A_Card_form_4\<close>
- step7integrity is @{term Outpts_B_Card_form_7}
+ step7integrity is \<^term>\<open>Outpts_B_Card_form_7\<close>
\<close>
lemma step8_integrity:
@@ -1351,9 +1351,9 @@
done
-text\<open>step9integrity is @{term Inputs_A_Card_form_9}
+text\<open>step9integrity is \<^term>\<open>Inputs_A_Card_form_9\<close>
- step10integrity is @{term Outpts_A_Card_form_10}.
+ step10integrity is \<^term>\<open>Outpts_A_Card_form_10\<close>.
\<close>
lemma step11_integrity:
--- a/src/HOL/Auth/Smartcard/ShoupRubinBella.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Smartcard/ShoupRubinBella.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1319,7 +1319,7 @@
apply auto
done
-text\<open>@{term step2_integrity} also is a reliability theorem\<close>
+text\<open>\<^term>\<open>step2_integrity\<close> also is a reliability theorem\<close>
lemma Says_Server_message_form:
"\<lbrakk> Says Server A \<lbrace>Pk, Certificate\<rbrace> \<in> set evs;
evs \<in> srb \<rbrakk>
@@ -1333,9 +1333,9 @@
(*cannot be made useful to A in form of a Gets event*)
text\<open>
- step4integrity is @{term Outpts_A_Card_form_4}
+ step4integrity is \<^term>\<open>Outpts_A_Card_form_4\<close>
- step7integrity is @{term Outpts_B_Card_form_7}
+ step7integrity is \<^term>\<open>Outpts_B_Card_form_7\<close>
\<close>
lemma step8_integrity:
@@ -1350,8 +1350,8 @@
done
-text\<open>step9integrity is @{term Inputs_A_Card_form_9}
- step10integrity is @{term Outpts_A_Card_form_10}.
+text\<open>step9integrity is \<^term>\<open>Inputs_A_Card_form_9\<close>
+ step10integrity is \<^term>\<open>Outpts_A_Card_form_10\<close>.
\<close>
--- a/src/HOL/Auth/Smartcard/Smartcard.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Smartcard/Smartcard.thy Sat Jan 05 17:24:33 2019 +0100
@@ -110,7 +110,7 @@
\<open>analz.Decrypt\<close> in its declaration.\<close>
declare analz.Decrypt [rule del]
-text\<open>Rewrites should not refer to @{term "initState(Friend i)"} because
+text\<open>Rewrites should not refer to \<^term>\<open>initState(Friend i)\<close> because
that expression is not in normal form.\<close>
text\<open>Added to extend initstate with set of nonces\<close>
@@ -307,13 +307,13 @@
text\<open>Unlike the corresponding property of nonces, we cannot prove
- @{term "finite KK \<Longrightarrow> \<exists>K. K \<notin> KK & Key K \<notin> used evs"}.
+ \<^term>\<open>finite KK \<Longrightarrow> \<exists>K. K \<notin> KK & Key K \<notin> used evs\<close>.
We have infinitely many agents and there is nothing to stop their
long-term keys from exhausting all the natural numbers. Instead,
possibility theorems must assume the existence of a few keys.\<close>
-subsection\<open>Specialized Rewriting for Theorems About @{term analz} and Image\<close>
+subsection\<open>Specialized Rewriting for Theorems About \<^term>\<open>analz\<close> and Image\<close>
lemma subset_Compl_range_shrK: "A \<subseteq> - (range shrK) \<Longrightarrow> shrK x \<notin> A"
by blast
@@ -383,7 +383,7 @@
val analz_image_freshK_ss =
simpset_of
- (@{context} delsimps [image_insert, image_Un]
+ (\<^context> delsimps [image_insert, image_Un]
delsimps [@{thm imp_disjL}] (*reduces blow-up*)
addsimps @{thms analz_image_freshK_simps})
end
--- a/src/HOL/Auth/TLS.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/TLS.thy Sat Jan 05 17:24:33 2019 +0100
@@ -105,7 +105,7 @@
"[| evsf \<in> tls; X \<in> synth (analz (spies evsf)) |]
==> Says Spy B X # evsf \<in> tls"
- | SpyKeys: \<comment> \<open>The spy may apply @{term PRF} and @{term sessionK}
+ | SpyKeys: \<comment> \<open>The spy may apply \<^term>\<open>PRF\<close> and \<^term>\<open>sessionK\<close>
to available nonces\<close>
"[| evsSK \<in> tls;
{Nonce NA, Nonce NB, Nonce M} \<subseteq> analz (spies evsSK) |]
@@ -118,7 +118,7 @@
It is uninterpreted but will be confirmed in the FINISHED messages.
NA is CLIENT RANDOM, while SID is \<open>SESSION_ID\<close>.
UNIX TIME is omitted because the protocol doesn't use it.
- May assume @{term "NA \<notin> range PRF"} because CLIENT RANDOM is
+ May assume \<^term>\<open>NA \<notin> range PRF\<close> because CLIENT RANDOM is
28 bytes while MASTER SECRET is 48 bytes\<close>
"[| evsCH \<in> tls; Nonce NA \<notin> used evsCH; NA \<notin> range PRF |]
==> Says A B \<lbrace>Agent A, Nonce NA, Number SID, Number PA\<rbrace>
@@ -142,7 +142,7 @@
\<comment> \<open>CLIENT KEY EXCHANGE (7.4.7).
The client, A, chooses PMS, the PREMASTER SECRET.
She encrypts PMS using the supplied KB, which ought to be pubK B.
- We assume @{term "PMS \<notin> range PRF"} because a clash betweem the PMS
+ We assume \<^term>\<open>PMS \<notin> range PRF\<close> because a clash between the PMS
and another MASTER SECRET is highly unlikely (even though
both items have the same length, 48 bytes).
The Note event records in the trace that she knows PMS
@@ -174,7 +174,7 @@
rule's applying when the Spy has satisfied the \<open>Says A B\<close> by
repaying messages sent by the true client; in that case, the
Spy does not know PMS and could not send ClientFinished. One
- could simply put @{term "A\<noteq>Spy"} into the rule, but one should not
+ could simply put \<^term>\<open>A\<noteq>Spy\<close> into the rule, but one should not
expect the spy to be well-behaved.\<close>
"[| evsCF \<in> tls;
Says A B \<lbrace>Agent A, Nonce NA, Number SID, Number PA\<rbrace>
@@ -265,7 +265,7 @@
| Oops:
\<comment> \<open>The most plausible compromise is of an old session key. Losing
the MASTER SECRET or PREMASTER SECRET is more serious but
- rather unlikely. The assumption @{term "A\<noteq>Spy"} is essential:
+ rather unlikely. The assumption \<^term>\<open>A\<noteq>Spy\<close> is essential:
otherwise the Spy could learn session keys merely by
replaying messages!\<close>
"[| evso \<in> tls; A \<noteq> Spy;
@@ -558,7 +558,7 @@
subsection\<open>Secrecy Theorems\<close>
-text\<open>Key compromise lemma needed to prove @{term analz_image_keys}.
+text\<open>Key compromise lemma needed to prove \<^term>\<open>analz_image_keys\<close>.
No collection of keys can help the spy get new private keys.\<close>
lemma analz_image_priK [rule_format]:
"evs \<in> tls
@@ -688,7 +688,7 @@
apply (blast dest: Notes_Crypt_parts_spies)
apply (blast dest: Notes_Crypt_parts_spies)
apply (blast dest: Notes_Crypt_parts_spies)
-txt\<open>ClientAccepts and ServerAccepts: because @{term "PMS \<notin> range PRF"}\<close>
+txt\<open>ClientAccepts and ServerAccepts: because \<^term>\<open>PMS \<notin> range PRF\<close>\<close>
apply force+
done
--- a/src/HOL/Auth/Yahalom.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Yahalom.thy Sat Jan 05 17:24:33 2019 +0100
@@ -55,7 +55,7 @@
| YM4:
\<comment> \<open>Alice receives the Server's (?) message, checks her Nonce, and
uses the new session key to send Bob his Nonce. The premise
- @{term "A \<noteq> Server"} is needed for \<open>Says_Server_not_range\<close>.
+ \<^term>\<open>A \<noteq> Server\<close> is needed for \<open>Says_Server_not_range\<close>.
Alice can check that K is symmetric by its length.\<close>
"\<lbrakk>evs4 \<in> yahalom; A \<noteq> Server; K \<in> symKeys;
Gets A \<lbrace>Crypt(shrK A) \<lbrace>Agent B, Key K, Nonce NA, Nonce NB\<rbrace>, X\<rbrace>
@@ -129,7 +129,7 @@
\<Longrightarrow> K \<in> parts (knows Spy evs)"
by (metis parts.Body parts.Fst parts.Snd Says_imp_knows_Spy parts.Inj)
-text\<open>Theorems of the form @{term "X \<notin> parts (knows Spy evs)"} imply
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (knows Spy evs)\<close> imply
that NOBODY sends messages containing X!\<close>
text\<open>Spy never sees a good agent's shared key!\<close>
@@ -299,8 +299,8 @@
text\<open>B knows, by the second part of A's message, that the Server
distributed the key quoting nonce NB. This part says nothing about
- agent names. Secrecy of NB is crucial. Note that @{term "Nonce NB
- \<notin> analz(knows Spy evs)"} must be the FIRST antecedent of the
+ agent names. Secrecy of NB is crucial. Note that \<^term>\<open>Nonce NB
+ \<notin> analz(knows Spy evs)\<close> must be the FIRST antecedent of the
induction formula.\<close>
lemma B_trusts_YM4_newK [rule_format]:
@@ -393,14 +393,14 @@
analz_image_freshK fresh_not_KeyWithNonce
imp_disj_not1 (*Moves NBa\<noteq>NB to the front*)
Says_Server_KeyWithNonce)
-txt\<open>For Oops, simplification proves @{prop "NBa\<noteq>NB"}. By
- @{term Says_Server_KeyWithNonce}, we get @{prop "\<not> KeyWithNonce K NB
- evs"}; then simplification can apply the induction hypothesis with
- @{term "KK = {K}"}.\<close>
+txt\<open>For Oops, simplification proves \<^prop>\<open>NBa\<noteq>NB\<close>. By
+ \<^term>\<open>Says_Server_KeyWithNonce\<close>, we get \<^prop>\<open>\<not> KeyWithNonce K NB
+ evs\<close>; then simplification can apply the induction hypothesis with
+ \<^term>\<open>KK = {K}\<close>.\<close>
subgoal \<comment> \<open>Fake\<close> by spy_analz
subgoal \<comment> \<open>YM2\<close> by blast
subgoal \<comment> \<open>YM3\<close> by blast
- subgoal \<comment> \<open>YM4: If @{prop "A \<in> bad"} then @{term NBa} is known, therefore @{prop "NBa \<noteq> NB"}.\<close>
+ subgoal \<comment> \<open>YM4: If \<^prop>\<open>A \<in> bad\<close> then \<^term>\<open>NBa\<close> is known, therefore \<^prop>\<open>NBa \<noteq> NB\<close>.\<close>
by (metis A_trusts_YM3 Gets_imp_analz_Spy Gets_imp_knows_Spy KeyWithNonce_def
Spy_analz_shrK analz.Fst analz.Snd analz_shrK_Decrypt parts.Fst parts.Inj)
done
@@ -491,7 +491,7 @@
by (blast dest!: no_nonce_YM1_YM2 dest: Gets_imp_Says Says_unique_NB)
subgoal \<comment> \<open>YM4: key K is visible to Spy, contradicting session key secrecy theorem\<close>
\<comment> \<open>Case analysis on whether Aa is bad;
- use \<open>Says_unique_NB\<close> to identify message components: @{term "Aa=A"}, @{term "Ba=B"}\<close>
+ use \<open>Says_unique_NB\<close> to identify message components: \<^term>\<open>Aa=A\<close>, \<^term>\<open>Ba=B\<close>\<close>
apply clarify
apply (blast dest!: Says_unique_NB analz_shrK_Decrypt
parts.Inj [THEN parts.Fst, THEN A_trusts_YM3]
@@ -581,7 +581,7 @@
subsection\<open>Authenticating A to B using the certificate
- @{term "Crypt K (Nonce NB)"}\<close>
+ \<^term>\<open>Crypt K (Nonce NB)\<close>\<close>
text\<open>Assuming the session key is secure, if both certificates are present then
A has said NB. We can't be sure about the rest of A's message, but only
@@ -597,9 +597,9 @@
frule_tac [6] YM4_parts_knows_Spy)
apply (analz_mono_contra, simp_all)
subgoal \<comment> \<open>Fake\<close> by blast
- subgoal \<comment> \<open>YM3 because the message @{term "Crypt K (Nonce NB)"} could not exist\<close>
+ subgoal \<comment> \<open>YM3 because the message \<^term>\<open>Crypt K (Nonce NB)\<close> could not exist\<close>
by (force dest!: Crypt_imp_keysFor)
- subgoal \<comment> \<open>YM4: was @{term "Crypt K (Nonce NB)"} the very last message? If not, use the induction hypothesis,
+ subgoal \<comment> \<open>YM4: was \<^term>\<open>Crypt K (Nonce NB)\<close> the very last message? If not, use the induction hypothesis,
otherwise by unicity of session keys\<close>
by (blast dest!: Gets_imp_Says A_trusts_YM3 B_trusts_YM4_shrK Crypt_Spy_analz_bad
dest: Says_imp_knows_Spy [THEN parts.Inj] unique_session_keys)
--- a/src/HOL/Auth/Yahalom2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Yahalom2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -202,7 +202,7 @@
done
-subsection\<open>Crucial Secrecy Property: Spy Does Not See Key @{term KAB}\<close>
+subsection\<open>Crucial Secrecy Property: Spy Does Not See Key \<^term>\<open>KAB\<close>\<close>
lemma secrecy_lemma:
"\<lbrakk>A \<notin> bad; B \<notin> bad; evs \<in> yahalom\<rbrakk>
@@ -234,11 +234,11 @@
text\<open>This form is an immediate consequence of the previous result. It is
similar to the assertions established by other methods. It is equivalent
-to the previous result in that the Spy already has @{term analz} and
-@{term synth} at his disposal. However, the conclusion
-@{term "Key K \<notin> knows Spy evs"} appears not to be inductive: all the cases
+to the previous result in that the Spy already has \<^term>\<open>analz\<close> and
+\<^term>\<open>synth\<close> at his disposal. However, the conclusion
+\<^term>\<open>Key K \<notin> knows Spy evs\<close> appears not to be inductive: all the cases
other than Fake are trivial, while Fake requires
-@{term "Key K \<notin> analz (knows Spy evs)"}.\<close>
+\<^term>\<open>Key K \<notin> analz (knows Spy evs)\<close>.\<close>
lemma Spy_not_know_encrypted_key:
"\<lbrakk>Says Server A
\<lbrace>nb, Crypt (shrK A) \<lbrace>Agent B, Key K, na\<rbrace>,
@@ -371,11 +371,11 @@
subsection\<open>Authenticating A to B\<close>
-text\<open>using the certificate @{term "Crypt K (Nonce NB)"}\<close>
+text\<open>using the certificate \<^term>\<open>Crypt K (Nonce NB)\<close>\<close>
text\<open>Assuming the session key is secure, if both certificates are present then
A has said NB. We can't be sure about the rest of A's message, but only
- NB matters for freshness. Note that @{term "Key K \<notin> analz (knows Spy evs)"}
+ NB matters for freshness. Note that \<^term>\<open>Key K \<notin> analz (knows Spy evs)\<close>
must be the FIRST antecedent of the induction formula.\<close>
text\<open>This lemma allows a use of \<open>unique_session_keys\<close> in the next proof,
@@ -401,9 +401,9 @@
frule_tac [6] YM4_parts_knows_Spy)
apply (analz_mono_contra, simp_all)
subgoal \<comment> \<open>Fake\<close> by blast
- subgoal \<comment> \<open>YM3 because the message @{term "Crypt K (Nonce NB)"} could not exist\<close>
+ subgoal \<comment> \<open>YM3 because the message \<^term>\<open>Crypt K (Nonce NB)\<close> could not exist\<close>
by (force dest!: Crypt_imp_keysFor)
- subgoal \<comment> \<open>YM4: was @{term "Crypt K (Nonce NB)"} the very last message? If not, use the induction hypothesis,
+ subgoal \<comment> \<open>YM4: was \<^term>\<open>Crypt K (Nonce NB)\<close> the very last message? If not, use the induction hypothesis,
otherwise by unicity of session keys\<close>
by (blast dest!: B_trusts_YM4_shrK dest: secure_unique_session_keys)
done
--- a/src/HOL/Auth/Yahalom_Bad.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/Yahalom_Bad.thy Sat Jan 05 17:24:33 2019 +0100
@@ -106,7 +106,7 @@
YM4_analz_knows_Spy [THEN analz_into_parts]
-text\<open>Theorems of the form @{term "X \<notin> parts (knows Spy evs)"} imply
+text\<open>Theorems of the form \<^term>\<open>X \<notin> parts (knows Spy evs)\<close> imply
that NOBODY sends messages containing X!\<close>
text\<open>Spy never sees a good agent's shared key!\<close>
@@ -334,9 +334,9 @@
txt\<open>Fake\<close>
apply blast
txt\<open>YM3: by \<open>new_keys_not_used\<close>, the message
- @{term "Crypt K (Nonce NB)"} could not exist\<close>
+ \<^term>\<open>Crypt K (Nonce NB)\<close> could not exist\<close>
apply (force dest!: Crypt_imp_keysFor)
-txt\<open>YM4: was @{term "Crypt K (Nonce NB)"} the very last message?
+txt\<open>YM4: was \<^term>\<open>Crypt K (Nonce NB)\<close> the very last message?
If not, use the induction hypothesis\<close>
apply (simp add: ex_disj_distrib)
txt\<open>yes: apply unicity of session keys\<close>
--- a/src/HOL/Auth/ZhouGollmann.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Auth/ZhouGollmann.thy Sat Jan 05 17:24:33 2019 +0100
@@ -117,8 +117,8 @@
by (blast dest!: Gets_imp_Says Says_imp_knows_Spy)
-text\<open>Lets us replace proofs about @{term "used evs"} by simpler proofs
-about @{term "parts (spies evs)"}.\<close>
+text\<open>Lets us replace proofs about \<^term>\<open>used evs\<close> by simpler proofs
+about \<^term>\<open>parts (spies evs)\<close>.\<close>
lemma Crypt_used_imp_spies:
"[| Crypt K X \<in> used evs; evs \<in> zg |]
==> Crypt K X \<in> parts (spies evs)"
@@ -158,10 +158,10 @@
by auto
-subsection\<open>About NRO: Validity for @{term B}\<close>
+subsection\<open>About NRO: Validity for \<^term>\<open>B\<close>\<close>
-text\<open>Below we prove that if @{term NRO} exists then @{term A} definitely
-sent it, provided @{term A} is not broken.\<close>
+text\<open>Below we prove that if \<^term>\<open>NRO\<close> exists then \<^term>\<open>A\<close> definitely
+sent it, provided \<^term>\<open>A\<close> is not broken.\<close>
text\<open>Strong conclusion for a good agent\<close>
lemma NRO_validity_good:
@@ -182,7 +182,7 @@
apply (erule zg.induct, simp_all)
done
-text\<open>Holds also for @{term "A = Spy"}!\<close>
+text\<open>Holds also for \<^term>\<open>A = Spy\<close>!\<close>
theorem NRO_validity:
"[|Gets B \<lbrace>Number f_nro, Agent B, Nonce L, C, NRO\<rbrace> \<in> set evs;
NRO = Crypt (priK A) \<lbrace>Number f_nro, Agent B, Nonce L, C\<rbrace>;
@@ -191,17 +191,17 @@
apply (drule Gets_imp_Says, assumption)
apply clarify
apply (frule NRO_sender, auto)
-txt\<open>We are left with the case where the sender is @{term Spy} and not
- equal to @{term A}, because @{term "A \<notin> bad"}.
+txt\<open>We are left with the case where the sender is \<^term>\<open>Spy\<close> and not
+ equal to \<^term>\<open>A\<close>, because \<^term>\<open>A \<notin> bad\<close>.
Thus theorem \<open>NRO_validity_good\<close> applies.\<close>
apply (blast dest: NRO_validity_good [OF refl])
done
-subsection\<open>About NRR: Validity for @{term A}\<close>
+subsection\<open>About NRR: Validity for \<^term>\<open>A\<close>\<close>
-text\<open>Below we prove that if @{term NRR} exists then @{term B} definitely
-sent it, provided @{term B} is not broken.\<close>
+text\<open>Below we prove that if \<^term>\<open>NRR\<close> exists then \<^term>\<open>B\<close> definitely
+sent it, provided \<^term>\<open>B\<close> is not broken.\<close>
text\<open>Strong conclusion for a good agent\<close>
lemma NRR_validity_good:
@@ -222,7 +222,7 @@
apply (erule zg.induct, simp_all)
done
-text\<open>Holds also for @{term "B = Spy"}!\<close>
+text\<open>Holds also for \<^term>\<open>B = Spy\<close>!\<close>
theorem NRR_validity:
"[|Says B' A \<lbrace>Number f_nrr, Agent A, Nonce L, NRR\<rbrace> \<in> set evs;
NRR = Crypt (priK B) \<lbrace>Number f_nrr, Agent A, Nonce L, C\<rbrace>;
@@ -230,16 +230,16 @@
==> Says B A \<lbrace>Number f_nrr, Agent A, Nonce L, NRR\<rbrace> \<in> set evs"
apply clarify
apply (frule NRR_sender, auto)
-txt\<open>We are left with the case where @{term "B' = Spy"} and @{term "B' \<noteq> B"},
- i.e. @{term "B \<notin> bad"}, when we can apply \<open>NRR_validity_good\<close>.\<close>
+txt\<open>We are left with the case where \<^term>\<open>B' = Spy\<close> and \<^term>\<open>B' \<noteq> B\<close>,
+ i.e. \<^term>\<open>B \<notin> bad\<close>, when we can apply \<open>NRR_validity_good\<close>.\<close>
apply (blast dest: NRR_validity_good [OF refl])
done
-subsection\<open>Proofs About @{term sub_K}\<close>
+subsection\<open>Proofs About \<^term>\<open>sub_K\<close>\<close>
-text\<open>Below we prove that if @{term sub_K} exists then @{term A} definitely
-sent it, provided @{term A} is not broken.\<close>
+text\<open>Below we prove that if \<^term>\<open>sub_K\<close> exists then \<^term>\<open>A\<close> definitely
+sent it, provided \<^term>\<open>A\<close> is not broken.\<close>
text\<open>Strong conclusion for a good agent\<close>
lemma sub_K_validity_good:
@@ -262,7 +262,7 @@
apply (erule zg.induct, simp_all)
done
-text\<open>Holds also for @{term "A = Spy"}!\<close>
+text\<open>Holds also for \<^term>\<open>A = Spy\<close>!\<close>
theorem sub_K_validity:
"[|Gets TTP \<lbrace>Number f_sub, Agent B, Nonce L, Key K, sub_K\<rbrace> \<in> set evs;
sub_K = Crypt (priK A) \<lbrace>Number f_sub, Agent B, Nonce L, Key K\<rbrace>;
@@ -271,19 +271,19 @@
apply (drule Gets_imp_Says, assumption)
apply clarify
apply (frule sub_K_sender, auto)
-txt\<open>We are left with the case where the sender is @{term Spy} and not
- equal to @{term A}, because @{term "A \<notin> bad"}.
+txt\<open>We are left with the case where the sender is \<^term>\<open>Spy\<close> and not
+ equal to \<^term>\<open>A\<close>, because \<^term>\<open>A \<notin> bad\<close>.
Thus theorem \<open>sub_K_validity_good\<close> applies.\<close>
apply (blast dest: sub_K_validity_good [OF refl])
done
-subsection\<open>Proofs About @{term con_K}\<close>
+subsection\<open>Proofs About \<^term>\<open>con_K\<close>\<close>
-text\<open>Below we prove that if @{term con_K} exists, then @{term TTP} has it,
-and therefore @{term A} and @{term B}) can get it too. Moreover, we know
-that @{term A} sent @{term sub_K}\<close>
+text\<open>Below we prove that if \<^term>\<open>con_K\<close> exists, then \<^term>\<open>TTP\<close> has it,
+and therefore \<^term>\<open>A\<close> and \<^term>\<open>B\<close> can get it too. Moreover, we know
+that \<^term>\<open>A\<close> sent \<^term>\<open>sub_K\<close>\<close>
lemma con_K_validity:
"[|con_K \<in> used evs;
@@ -302,9 +302,9 @@
apply (blast dest: parts_cut)
done
-text\<open>If @{term TTP} holds @{term con_K} then @{term A} sent
- @{term sub_K}. We assume that @{term A} is not broken. Importantly, nothing
- needs to be assumed about the form of @{term con_K}!\<close>
+text\<open>If \<^term>\<open>TTP\<close> holds \<^term>\<open>con_K\<close> then \<^term>\<open>A\<close> sent
+ \<^term>\<open>sub_K\<close>. We assume that \<^term>\<open>A\<close> is not broken. Importantly, nothing
+ needs to be assumed about the form of \<^term>\<open>con_K\<close>!\<close>
lemma Notes_TTP_imp_Says_A:
"[|Notes TTP \<lbrace>Number f_con, Agent A, Agent B, Nonce L, Key K, con_K\<rbrace>
\<in> set evs;
@@ -320,8 +320,8 @@
apply (rule sub_K_validity, auto)
done
-text\<open>If @{term con_K} exists, then @{term A} sent @{term sub_K}. We again
- assume that @{term A} is not broken.\<close>
+text\<open>If \<^term>\<open>con_K\<close> exists, then \<^term>\<open>A\<close> sent \<^term>\<open>sub_K\<close>. We again
+ assume that \<^term>\<open>A\<close> is not broken.\<close>
theorem B_sub_K_validity:
"[|con_K \<in> used evs;
con_K = Crypt (priK TTP) \<lbrace>Number f_con, Agent A, Agent B,
@@ -334,11 +334,11 @@
subsection\<open>Proving fairness\<close>
-text\<open>Cannot prove that, if @{term B} has NRO, then @{term A} has her NRR.
-It would appear that @{term B} has a small advantage, though it is
-useless to win disputes: @{term B} needs to present @{term con_K} as well.\<close>
+text\<open>Cannot prove that, if \<^term>\<open>B\<close> has NRO, then \<^term>\<open>A\<close> has her NRR.
+It would appear that \<^term>\<open>B\<close> has a small advantage, though it is
+useless to win disputes: \<^term>\<open>B\<close> needs to present \<^term>\<open>con_K\<close> as well.\<close>
-text\<open>Strange: unicity of the label protects @{term A}?\<close>
+text\<open>Strange: unicity of the label protects \<^term>\<open>A\<close>?\<close>
lemma A_unicity:
"[|NRO = Crypt (priK A) \<lbrace>Number f_nro, Agent B, Nonce L, Crypt K M\<rbrace>;
NRO \<in> parts (spies evs);
@@ -356,7 +356,7 @@
done
-text\<open>Fairness lemma: if @{term sub_K} exists, then @{term A} holds
+text\<open>Fairness lemma: if \<^term>\<open>sub_K\<close> exists, then \<^term>\<open>A\<close> holds
NRR. Relies on unicity of labels.\<close>
lemma sub_K_implies_NRR:
"[| NRO = Crypt (priK A) \<lbrace>Number f_nro, Agent B, Nonce L, Crypt K M\<rbrace>;
@@ -393,9 +393,9 @@
done
-text\<open>Fairness for @{term A}: if @{term con_K} and @{term NRO} exist,
-then @{term A} holds NRR. @{term A} must be uncompromised, but there is no
-assumption about @{term B}.\<close>
+text\<open>Fairness for \<^term>\<open>A\<close>: if \<^term>\<open>con_K\<close> and \<^term>\<open>NRO\<close> exist,
+then \<^term>\<open>A\<close> holds NRR. \<^term>\<open>A\<close> must be uncompromised, but there is no
+assumption about \<^term>\<open>B\<close>.\<close>
theorem A_fairness_NRO:
"[|con_K \<in> used evs;
NRO \<in> parts (spies evs);
@@ -422,9 +422,8 @@
dest: Gets_imp_knows_Spy [THEN parts.Inj])
done
-text\<open>Fairness for @{term B}: NRR exists at all, then @{term B} holds NRO.
-@{term B} must be uncompromised, but there is no assumption about @{term
-A}.\<close>
+text\<open>Fairness for \<^term>\<open>B\<close>: if NRR exists at all, then \<^term>\<open>B\<close> holds NRO.
+\<^term>\<open>B\<close> must be uncompromised, but there is no assumption about \<^term>\<open>A\<close>.\<close>
theorem B_fairness_NRR:
"[|NRR \<in> used evs;
NRR = Crypt (priK B) \<lbrace>Number f_nrr, Agent A, Nonce L, C\<rbrace>;
@@ -442,8 +441,8 @@
done
-text\<open>If @{term con_K} exists at all, then @{term B} can get it, by \<open>con_K_validity\<close>. Cannot conclude that also NRO is available to @{term B},
-because if @{term A} were unfair, @{term A} could build message 3 without
+text\<open>If \<^term>\<open>con_K\<close> exists at all, then \<^term>\<open>B\<close> can get it, by \<open>con_K_validity\<close>. Cannot conclude that also NRO is available to \<^term>\<open>B\<close>,
+because if \<^term>\<open>A\<close> were unfair, \<^term>\<open>A\<close> could build message 3 without
building message 1, which contains NRO.\<close>
end
--- a/src/HOL/Bali/AxCompl.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/AxCompl.thy Sat Jan 05 17:24:33 2019 +0100
@@ -719,7 +719,7 @@
show "Y' = \<diamondsuit> \<and> G\<turnstile>Norm s \<midarrow>l\<bullet> While(e) c\<rightarrow> s'"
proof -
from asm obtain v t where
- \<comment> \<open>@{term "Z'"} gets instantiated with @{term "Norm s"}\<close>
+ \<comment> \<open>\<^term>\<open>Z'\<close> gets instantiated with \<^term>\<open>Norm s\<close>\<close>
unroll: "(Norm s, t) \<in> (unroll G l e c)\<^sup>*" and
eval_e: "G\<turnstile>t \<midarrow>e-\<succ>v\<rightarrow> s'" and
normal_termination: "normal s' \<longrightarrow> \<not> the_Bool v" and
@@ -1369,8 +1369,8 @@
using finU uA
apply -
apply (induct_tac "n")
-apply (tactic "ALLGOALS (clarsimp_tac @{context})")
-apply (tactic \<open>dresolve_tac @{context} [Thm.permute_prems 0 1 @{thm card_seteq}] 1\<close>)
+apply (tactic "ALLGOALS (clarsimp_tac \<^context>)")
+apply (tactic \<open>dresolve_tac \<^context> [Thm.permute_prems 0 1 @{thm card_seteq}] 1\<close>)
apply simp
apply (erule finite_imageI)
apply (simp add: MGF_asm ax_derivs_asm)
--- a/src/HOL/Bali/AxExample.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/AxExample.thy Sat Jan 05 17:24:33 2019 +0100
@@ -58,13 +58,13 @@
.test [Class Base].
{\<lambda>Y s Z. abrupt s = Some (Xcpt (Std IndOutBound))}"
apply (unfold test_def arr_viewed_from_def)
-apply (tactic "ax_tac @{context} 1" (*;;*))
+apply (tactic "ax_tac \<^context> 1" (*;;*))
defer (* We begin with the last assertion, to synthesise the intermediate
assertions, like in the fashion of the weakest
precondition. *)
-apply (tactic "ax_tac @{context} 1" (* Try *))
+apply (tactic "ax_tac \<^context> 1" (* Try *))
defer
-apply (tactic \<open>inst1_tac @{context} "Q"
+apply (tactic \<open>inst1_tac \<^context> "Q"
"\<lambda>Y s Z. arr_inv (snd s) \<and> tprg,s\<turnstile>catch SXcpt NullPointer" []\<close>)
prefer 2
apply simp
@@ -74,26 +74,26 @@
apply (rule_tac Q' = "(\<lambda>Y s Z. Q Y s Z)\<leftarrow>=False\<down>=\<diamondsuit>" and Q = Q for Q in conseq2)
prefer 2
apply simp
-apply (tactic "ax_tac @{context} 1" (* While *))
+apply (tactic "ax_tac \<^context> 1" (* While *))
prefer 2
apply (rule ax_impossible [THEN conseq1], clarsimp)
apply (rule_tac P' = "Normal P" and P = P for P in conseq1)
prefer 2
apply clarsimp
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1" (* AVar *))
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1" (* AVar *))
prefer 2
apply (rule ax_subst_Val_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>a. Normal (PP a\<leftarrow>x)" ["PP", "x"]\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>a. Normal (PP a\<leftarrow>x)" ["PP", "x"]\<close>)
apply (simp del: avar_def2 peek_and_def2)
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
(* just for clarification: *)
apply (rule_tac Q' = "Normal (\<lambda>Var:(v, f) u ua. fst (snd (avar tprg (Intg 2) v u)) = Some (Xcpt (Std IndOutBound)))" in conseq2)
prefer 2
apply (clarsimp simp add: split_beta)
-apply (tactic "ax_tac @{context} 1" (* FVar *))
-apply (tactic "ax_tac @{context} 2" (* StatRef *))
+apply (tactic "ax_tac \<^context> 1" (* FVar *))
+apply (tactic "ax_tac \<^context> 2" (* StatRef *))
apply (rule ax_derivs.Done [THEN conseq1])
apply (clarsimp simp add: arr_inv_def inited_def in_bounds_def)
defer
@@ -101,20 +101,20 @@
apply (rule_tac Q' = "(\<lambda>Y (x, s) Z. x = Some (Xcpt (Std NullPointer)) \<and> arr_inv s) \<and>. heap_free two" in conseq2)
prefer 2
apply (simp add: arr_inv_new_obj)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply (rule_tac C = "Ext" in ax_Call_known_DynT)
apply (unfold DynT_prop_def)
apply (simp (no_asm))
apply (intro strip)
apply (rule_tac P' = "Normal P" and P = P for P in conseq1)
-apply (tactic "ax_tac @{context} 1" (* Methd *))
+apply (tactic "ax_tac \<^context> 1" (* Methd *))
apply (rule ax_thin [OF _ empty_subsetI])
apply (simp (no_asm) add: body_def2)
-apply (tactic "ax_tac @{context} 1" (* Body *))
+apply (tactic "ax_tac \<^context> 1" (* Body *))
(* apply (rule_tac [2] ax_derivs.Abrupt) *)
defer
apply (simp (no_asm))
-apply (tactic "ax_tac @{context} 1") (* Comp *)
+apply (tactic "ax_tac \<^context> 1") (* Comp *)
(* The first statement in the composition
((Ext)z).vee = 1; Return Null
will throw an exception (since z is null). So we can handle
@@ -122,33 +122,33 @@
apply (rule_tac [2] ax_derivs.Abrupt)
apply (rule ax_derivs.Expr) (* Expr *)
-apply (tactic "ax_tac @{context} 1") (* Ass *)
+apply (tactic "ax_tac \<^context> 1") (* Ass *)
prefer 2
apply (rule ax_subst_Var_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>a vs l vf. PP a vs l vf\<leftarrow>x \<and>. p" ["PP", "x", "p"]\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>a vs l vf. PP a vs l vf\<leftarrow>x \<and>. p" ["PP", "x", "p"]\<close>)
apply (rule allI)
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac" delsimps [@{thm peek_and_def2}, @{thm heap_def2}, @{thm subst_res_def2}, @{thm normal_def2}]) 1\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac" delsimps [@{thm peek_and_def2}, @{thm heap_def2}, @{thm subst_res_def2}, @{thm normal_def2}]) 1\<close>)
apply (rule ax_derivs.Abrupt)
apply (simp (no_asm))
-apply (tactic "ax_tac @{context} 1" (* FVar *))
-apply (tactic "ax_tac @{context} 2", tactic "ax_tac @{context} 2", tactic "ax_tac @{context} 2")
-apply (tactic "ax_tac @{context} 1")
-apply (tactic \<open>inst1_tac @{context} "R" "\<lambda>a'. Normal ((\<lambda>Vals:vs (x, s) Z. arr_inv s \<and> inited Ext (globs s) \<and> a' \<noteq> Null \<and> vs = [Null]) \<and>. heap_free two)" []\<close>)
+apply (tactic "ax_tac \<^context> 1" (* FVar *))
+apply (tactic "ax_tac \<^context> 2", tactic "ax_tac \<^context> 2", tactic "ax_tac \<^context> 2")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic \<open>inst1_tac \<^context> "R" "\<lambda>a'. Normal ((\<lambda>Vals:vs (x, s) Z. arr_inv s \<and> inited Ext (globs s) \<and> a' \<noteq> Null \<and> vs = [Null]) \<and>. heap_free two)" []\<close>)
apply fastforce
prefer 4
apply (rule ax_derivs.Done [THEN conseq1],force)
apply (rule ax_subst_Val_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>a. Normal (PP a\<leftarrow>x)" ["PP", "x"]\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>a. Normal (PP a\<leftarrow>x)" ["PP", "x"]\<close>)
apply (simp (no_asm) del: peek_and_def2 heap_free_def2 normal_def2 o_apply)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
prefer 2
apply (rule ax_subst_Val_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>aa v. Normal (QQ aa v\<leftarrow>y)" ["QQ", "y"]\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>aa v. Normal (QQ aa v\<leftarrow>y)" ["QQ", "y"]\<close>)
apply (simp del: peek_and_def2 heap_free_def2 normal_def2)
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
(* end method call *)
apply (simp (no_asm))
(* just for clarification: *)
@@ -158,14 +158,14 @@
in conseq2)
prefer 2
apply clarsimp
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
defer
apply (rule ax_subst_Var_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>vf. Normal (PP vf \<and>. p)" ["PP", "p"]\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>vf. Normal (PP vf \<and>. p)" ["PP", "p"]\<close>)
apply (simp (no_asm) del: split_paired_All peek_and_def2 initd_def2 heap_free_def2 normal_def2)
-apply (tactic "ax_tac @{context} 1" (* NewC *))
-apply (tactic "ax_tac @{context} 1" (* ax_Alloc *))
+apply (tactic "ax_tac \<^context> 1" (* NewC *))
+apply (tactic "ax_tac \<^context> 1" (* ax_Alloc *))
(* just for clarification: *)
apply (rule_tac Q' = "Normal ((\<lambda>Y s Z. arr_inv (store s) \<and> vf=lvar (VName e) (store s)) \<and>. heap_free three \<and>. initd Ext)" in conseq2)
prefer 2
@@ -177,47 +177,47 @@
apply (rule ax_InitS)
apply force
apply (simp (no_asm))
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac") 1\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac") 1\<close>)
apply (rule ax_Init_Skip_lemma)
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac") 1\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac") 1\<close>)
apply (rule ax_InitS [THEN conseq1] (* init Base *))
apply force
apply (simp (no_asm))
apply (unfold arr_viewed_from_def)
apply (rule allI)
apply (rule_tac P' = "Normal P" and P = P for P in conseq1)
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac") 1\<close>)
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac") 1\<close>)
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
apply (rule_tac [2] ax_subst_Var_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>vf l vfa. Normal (P vf l vfa)" ["P"]\<close>)
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac" delsimps [@{thm split_paired_All}, @{thm peek_and_def2}, @{thm heap_free_def2}, @{thm initd_def2}, @{thm normal_def2}, @{thm supd_lupd}]) 2\<close>)
-apply (tactic "ax_tac @{context} 2" (* NewA *))
-apply (tactic "ax_tac @{context} 3" (* ax_Alloc_Arr *))
-apply (tactic "ax_tac @{context} 3")
-apply (tactic \<open>inst1_tac @{context} "P" "\<lambda>vf l vfa. Normal (P vf l vfa\<leftarrow>\<diamondsuit>)" ["P"]\<close>)
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac") 2\<close>)
-apply (tactic "ax_tac @{context} 2")
-apply (tactic "ax_tac @{context} 1" (* FVar *))
-apply (tactic "ax_tac @{context} 2" (* StatRef *))
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>vf l vfa. Normal (P vf l vfa)" ["P"]\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac" delsimps [@{thm split_paired_All}, @{thm peek_and_def2}, @{thm heap_free_def2}, @{thm initd_def2}, @{thm normal_def2}, @{thm supd_lupd}]) 2\<close>)
+apply (tactic "ax_tac \<^context> 2" (* NewA *))
+apply (tactic "ax_tac \<^context> 3" (* ax_Alloc_Arr *))
+apply (tactic "ax_tac \<^context> 3")
+apply (tactic \<open>inst1_tac \<^context> "P" "\<lambda>vf l vfa. Normal (P vf l vfa\<leftarrow>\<diamondsuit>)" ["P"]\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac") 2\<close>)
+apply (tactic "ax_tac \<^context> 2")
+apply (tactic "ax_tac \<^context> 1" (* FVar *))
+apply (tactic "ax_tac \<^context> 2" (* StatRef *))
apply (rule ax_derivs.Done [THEN conseq1])
-apply (tactic \<open>inst1_tac @{context} "Q" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf=lvar (VName e) (snd s)) \<and>. heap_free four \<and>. initd Base \<and>. initd Ext)" []\<close>)
+apply (tactic \<open>inst1_tac \<^context> "Q" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf=lvar (VName e) (snd s)) \<and>. heap_free four \<and>. initd Base \<and>. initd Ext)" []\<close>)
apply (clarsimp split del: if_split)
apply (frule atleast_free_weaken [THEN atleast_free_weaken])
apply (drule initedD)
apply (clarsimp elim!: atleast_free_SucD simp add: arr_inv_def)
apply force
-apply (tactic \<open>simp_tac (@{context} delloop "split_all_tac") 1\<close>)
+apply (tactic \<open>simp_tac (\<^context> delloop "split_all_tac") 1\<close>)
apply (rule ax_triv_Init_Object [THEN peek_and_forget2, THEN conseq1])
apply (rule wf_tprg)
apply clarsimp
-apply (tactic \<open>inst1_tac @{context} "P" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf = lvar (VName e) (snd s)) \<and>. heap_free four \<and>. initd Ext)" []\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf = lvar (VName e) (snd s)) \<and>. heap_free four \<and>. initd Ext)" []\<close>)
apply clarsimp
-apply (tactic \<open>inst1_tac @{context} "PP" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf = lvar (VName e) (snd s)) \<and>. heap_free four \<and>. Not \<circ> initd Base)" []\<close>)
+apply (tactic \<open>inst1_tac \<^context> "PP" "\<lambda>vf. Normal ((\<lambda>Y s Z. vf = lvar (VName e) (snd s)) \<and>. heap_free four \<and>. Not \<circ> initd Base)" []\<close>)
apply clarsimp
(* end init *)
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
done
@@ -234,48 +234,48 @@
(Expr (Ass (LVar i) (Acc (LVar j))))). {Q}"
apply (rule_tac P' = "Q" and Q' = "Q\<leftarrow>=False\<down>=\<diamondsuit>" in conseq12)
apply safe
-apply (tactic "ax_tac @{context} 1" (* Loop *))
+apply (tactic "ax_tac \<^context> 1" (* Loop *))
apply (rule ax_Normal_cases)
prefer 2
apply (rule ax_derivs.Abrupt [THEN conseq1], clarsimp simp add: Let_def)
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
prefer 2
apply clarsimp
-apply (tactic "ax_tac @{context} 1" (* If *))
+apply (tactic "ax_tac \<^context> 1" (* If *))
apply (tactic
- \<open>inst1_tac @{context} "P'" "Normal (\<lambda>s.. (\<lambda>Y s Z. True)\<down>=Val (the (locals s i)))" []\<close>)
-apply (tactic "ax_tac @{context} 1")
+ \<open>inst1_tac \<^context> "P'" "Normal (\<lambda>s.. (\<lambda>Y s Z. True)\<down>=Val (the (locals s i)))" []\<close>)
+apply (tactic "ax_tac \<^context> 1")
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
apply (rule allI)
apply (rule ax_escape)
apply auto
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1" (* Throw *))
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1" (* Throw *))
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
apply (rule_tac Q' = "Normal (\<lambda>Y s Z. True)" in conseq2)
prefer 2
apply clarsimp
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1")
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
+apply (tactic "ax_tac \<^context> 1")
prefer 2
apply (rule ax_subst_Var_allI)
-apply (tactic \<open>inst1_tac @{context} "P'" "\<lambda>b Y ba Z vf. \<lambda>Y (x,s) Z. x=None \<and> snd vf = snd (lvar i s)" []\<close>)
+apply (tactic \<open>inst1_tac \<^context> "P'" "\<lambda>b Y ba Z vf. \<lambda>Y (x,s) Z. x=None \<and> snd vf = snd (lvar i s)" []\<close>)
apply (rule allI)
apply (rule_tac P' = "Normal P" and P = P for P in conseq1)
prefer 2
apply clarsimp
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply (rule conseq1)
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
-apply (tactic "ax_tac @{context} 1")
+apply (tactic "ax_tac \<^context> 1")
apply clarsimp
done
--- a/src/HOL/Bali/AxSem.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/AxSem.thy Sat Jan 05 17:24:33 2019 +0100
@@ -660,18 +660,18 @@
lemma ax_thin [rule_format (no_asm)]:
"G,(A'::'a triple set)|\<turnstile>(ts::'a triple set) \<Longrightarrow> \<forall>A. A' \<subseteq> A \<longrightarrow> G,A|\<turnstile>ts"
apply (erule ax_derivs.induct)
-apply (tactic "ALLGOALS (EVERY'[clarify_tac @{context}, REPEAT o smp_tac @{context} 1])")
+apply (tactic "ALLGOALS (EVERY'[clarify_tac \<^context>, REPEAT o smp_tac \<^context> 1])")
apply (rule ax_derivs.empty)
apply (erule (1) ax_derivs.insert)
apply (fast intro: ax_derivs.asm)
(*apply (fast intro: ax_derivs.cut) *)
apply (fast intro: ax_derivs.weaken)
-apply (rule ax_derivs.conseq, intro strip, tactic "smp_tac @{context} 3 1",clarify,
- tactic "smp_tac @{context} 1 1",rule exI, rule exI, erule (1) conjI)
+apply (rule ax_derivs.conseq, intro strip, tactic "smp_tac \<^context> 3 1",clarify,
+ tactic "smp_tac \<^context> 1 1",rule exI, rule exI, erule (1) conjI)
(* 37 subgoals *)
prefer 18 (* Methd *)
apply (rule ax_derivs.Methd, drule spec, erule mp, fast)
-apply (tactic \<open>TRYALL (resolve_tac @{context} ((funpow 5 tl) @{thms ax_derivs.intros}))\<close>)
+apply (tactic \<open>TRYALL (resolve_tac \<^context> ((funpow 5 tl) @{thms ax_derivs.intros}))\<close>)
apply auto
done
@@ -690,21 +690,21 @@
"G,(A::'a triple set)|\<turnstile>(ts'::'a triple set) \<Longrightarrow> \<forall>ts. ts \<subseteq> ts' \<longrightarrow> G,A|\<turnstile>ts"
apply (erule ax_derivs.induct)
(*42 subgoals*)
-apply (tactic "ALLGOALS (strip_tac @{context})")
-apply (tactic \<open>ALLGOALS(REPEAT o (EVERY'[dresolve_tac @{context} @{thms subset_singletonD},
- eresolve_tac @{context} [disjE],
- fast_tac (@{context} addSIs @{thms ax_derivs.empty})]))\<close>)
-apply (tactic "TRYALL (hyp_subst_tac @{context})")
+apply (tactic "ALLGOALS (strip_tac \<^context>)")
+apply (tactic \<open>ALLGOALS(REPEAT o (EVERY'[dresolve_tac \<^context> @{thms subset_singletonD},
+ eresolve_tac \<^context> [disjE],
+ fast_tac (\<^context> addSIs @{thms ax_derivs.empty})]))\<close>)
+apply (tactic "TRYALL (hyp_subst_tac \<^context>)")
apply (simp, rule ax_derivs.empty)
apply (drule subset_insertD)
apply (blast intro: ax_derivs.insert)
apply (fast intro: ax_derivs.asm)
(*apply (blast intro: ax_derivs.cut) *)
apply (fast intro: ax_derivs.weaken)
-apply (rule ax_derivs.conseq, clarify, tactic "smp_tac @{context} 3 1", blast(* unused *))
+apply (rule ax_derivs.conseq, clarify, tactic "smp_tac \<^context> 3 1", blast(* unused *))
(*37 subgoals*)
-apply (tactic \<open>TRYALL (resolve_tac @{context} ((funpow 5 tl) @{thms ax_derivs.intros})
- THEN_ALL_NEW fast_tac @{context})\<close>)
+apply (tactic \<open>TRYALL (resolve_tac \<^context> ((funpow 5 tl) @{thms ax_derivs.intros})
+ THEN_ALL_NEW fast_tac \<^context>)\<close>)
(*1 subgoal*)
apply (clarsimp simp add: subset_mtriples_iff)
apply (rule ax_derivs.Methd)
@@ -719,11 +719,11 @@
subsubsection "rules derived from conseq"
text \<open>In the following rules we often have to give some type annotations like:
- @{term "G,(A::'a triple set)\<turnstile>{P::'a assn} t\<succ> {Q}"}.
+ \<^term>\<open>G,(A::'a triple set)\<turnstile>{P::'a assn} t\<succ> {Q}\<close>.
Given only the term above without annotations, Isabelle would infer a more
general type were we could have
-different types of auxiliary variables in the assumption set (@{term A}) and
-in the triple itself (@{term P} and @{term Q}). But
+different types of auxiliary variables in the assumption set (\<^term>\<open>A\<close>) and
+in the triple itself (\<^term>\<open>P\<close> and \<^term>\<open>Q\<close>). But
\<open>ax_derivs.Methd\<close> enforces the same type in the inductive definition of
the derivation. So we have to restrict the types to be able to apply the
rules.
@@ -1007,7 +1007,7 @@
apply (auto simp add: type_ok_def)
done
-ML \<open>ML_Thms.bind_thms ("ax_Abrupts", sum3_instantiate @{context} @{thm ax_derivs.Abrupt})\<close>
+ML \<open>ML_Thms.bind_thms ("ax_Abrupts", sum3_instantiate \<^context> @{thm ax_derivs.Abrupt})\<close>
declare ax_Abrupts [intro!]
lemmas ax_Normal_cases = ax_cases [of _ _ _ normal]
--- a/src/HOL/Bali/AxSound.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/AxSound.thy Sat Jan 05 17:24:33 2019 +0100
@@ -50,13 +50,13 @@
apply (rule iffI)
apply fast
apply clarify
-apply (tactic "smp_tac @{context} 3 1")
+apply (tactic "smp_tac \<^context> 3 1")
apply (case_tac "normal s")
apply clarsimp
apply (elim conjE impE)
apply blast
-apply (tactic "smp_tac @{context} 2 1")
+apply (tactic "smp_tac \<^context> 2 1")
apply (drule evaln_eval)
apply (drule (1) eval_type_sound [THEN conjunct1],simp, assumption+)
apply simp
@@ -84,7 +84,7 @@
"\<lbrakk>G\<Turnstile>n\<Colon>{Normal P} body G C sig-\<succ>{Q}\<rbrakk>
\<Longrightarrow> G\<Turnstile>Suc n\<Colon>{Normal P} Methd C sig-\<succ> {Q}"
apply (simp (no_asm_use) add: triple_valid2_def2)
-apply (intro strip, tactic "smp_tac @{context} 3 1", clarify)
+apply (intro strip, tactic "smp_tac \<^context> 3 1", clarify)
apply (erule wt_elim_cases, erule da_elim_cases, erule evaln_elim_cases)
apply (unfold body_def Let_def)
apply (clarsimp simp add: inj_term_simps)
@@ -400,14 +400,14 @@
from valid_A conf wt da eval P con
have "Q v s1 Z"
apply (simp add: ax_valids2_def triple_valid2_def2)
- apply (tactic "smp_tac @{context} 3 1")
+ apply (tactic "smp_tac \<^context> 3 1")
apply clarify
- apply (tactic "smp_tac @{context} 1 1")
+ apply (tactic "smp_tac \<^context> 1 1")
apply (erule allE,erule allE, erule mp)
apply (intro strip)
- apply (tactic "smp_tac @{context} 3 1")
- apply (tactic "smp_tac @{context} 2 1")
- apply (tactic "smp_tac @{context} 1 1")
+ apply (tactic "smp_tac \<^context> 3 1")
+ apply (tactic "smp_tac \<^context> 2 1")
+ apply (tactic "smp_tac \<^context> 1 1")
by blast
moreover have "s1\<Colon>\<preceq>(G, L)"
proof (cases "normal s0")
@@ -1995,9 +1995,9 @@
proof -
\<comment> \<open>From the given hypothesises \<open>valid_e\<close> and \<open>valid_c\<close>
we can only reach the state after unfolding the loop once, i.e.
- @{term "P \<diamondsuit> s2 Z"}, where @{term s2} is the state after executing
- @{term c}. To gain validity of the further execution of while, to
- finally get @{term "(P'\<leftarrow>=False\<down>=\<diamondsuit>) \<diamondsuit> s3 Z"} we have to get
+ \<^term>\<open>P \<diamondsuit> s2 Z\<close>, where \<^term>\<open>s2\<close> is the state after executing
+ \<^term>\<open>c\<close>. To gain validity of the further execution of while, to
+ finally get \<^term>\<open>(P'\<leftarrow>=False\<down>=\<diamondsuit>) \<diamondsuit> s3 Z\<close> we have to get
a hypothesis about the subsequent unfoldings (the whole loop again),
too. We can achieve this, by performing induction on the
evaluation relation, with all
@@ -2064,7 +2064,7 @@
(abupd (absorb (Cont l')) s2') \<diamondsuit> s3'"
apply (simp only: True if_True eqs)
apply (elim conjE)
- apply (tactic "smp_tac @{context} 3 1")
+ apply (tactic "smp_tac \<^context> 3 1")
apply fast
done
from eval_e
--- a/src/HOL/Bali/DeclConcepts.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/DeclConcepts.thy Sat Jan 05 17:24:33 2019 +0100
@@ -800,10 +800,10 @@
\<and> (G\<turnstile>cls \<preceq>\<^sub>C accclass \<or> is_static membr))
| Public \<Rightarrow> True)"
text \<open>
-The subcondition of the @{term "Protected"} case:
-@{term "G\<turnstile>accclass \<prec>\<^sub>C declclass membr"} could also be relaxed to:
-@{term "G\<turnstile>accclass \<preceq>\<^sub>C declclass membr"} since in case both classes are the
-same the other condition @{term "(pid (declclass membr) = pid accclass)"}
+The subcondition of the \<^term>\<open>Protected\<close> case:
+\<^term>\<open>G\<turnstile>accclass \<prec>\<^sub>C declclass membr\<close> could also be relaxed to:
+\<^term>\<open>G\<turnstile>accclass \<preceq>\<^sub>C declclass membr\<close> since in case both classes are the
+same the other condition \<^term>\<open>(pid (declclass membr) = pid accclass)\<close>
holds anyway.
\<close>
@@ -1422,7 +1422,7 @@
subcls_mthds
++
table_of (map (\<lambda>(s,m). (s,C,m)) (methods c)))"
-text \<open>@{term "methd G C"}: methods of a class C (statically visible from C),
+text \<open>\<^term>\<open>methd G C\<close>: methods of a class C (statically visible from C),
with inheritance and hiding cf. 8.4.6;
Overriding is captured by \<open>dynmethd\<close>.
Every new method with the same signature coalesces the
@@ -1432,15 +1432,15 @@
accmethd :: "prog \<Rightarrow> qtname \<Rightarrow> qtname \<Rightarrow> (sig,qtname \<times> methd) table" where
"accmethd G S C =
filter_tab (\<lambda>sig m. G\<turnstile>method sig m of C accessible_from S) (methd G C)"
-text \<open>@{term "accmethd G S C"}: only those methods of @{term "methd G C"},
+text \<open>\<^term>\<open>accmethd G S C\<close>: only those methods of \<^term>\<open>methd G C\<close>,
accessible from S\<close>
text \<open>Note the class component in the accessibility filter. The class where
- method @{term m} is declared (@{term declC}) isn't necessarily accessible
- from the current scope @{term S}. The method can be made accessible
+ method \<^term>\<open>m\<close> is declared (\<^term>\<open>declC\<close>) isn't necessarily accessible
+ from the current scope \<^term>\<open>S\<close>. The method can be made accessible
through inheritance, too.
- So we must test accessibility of method @{term m} of class @{term C}
- (not @{term "declclass m"})\<close>
+ So we must test accessibility of method \<^term>\<open>m\<close> of class \<^term>\<open>C\<close>
+ (not \<^term>\<open>declclass m\<close>)\<close>
definition
dynmethd :: "prog \<Rightarrow> qtname \<Rightarrow> qtname \<Rightarrow> (sig,qtname \<times> methd) table" where
@@ -1461,11 +1461,11 @@
)
else None))"
-text \<open>@{term "dynmethd G statC dynC"}: dynamic method lookup of a reference
- with dynamic class @{term dynC} and static class @{term statC}\<close>
-text \<open>Note some kind of duality between @{term methd} and @{term dynmethd}
- in the @{term class_rec} arguments. Whereas @{term methd} filters the
- subclass methods (to get only the inherited ones), @{term dynmethd}
+text \<open>\<^term>\<open>dynmethd G statC dynC\<close>: dynamic method lookup of a reference
+ with dynamic class \<^term>\<open>dynC\<close> and static class \<^term>\<open>statC\<close>\<close>
+text \<open>Note some kind of duality between \<^term>\<open>methd\<close> and \<^term>\<open>dynmethd\<close>
+ in the \<^term>\<open>class_rec\<close> arguments. Whereas \<^term>\<open>methd\<close> filters the
+ subclass methods (to get only the inherited ones), \<^term>\<open>dynmethd\<close>
filters the new methods (to get only those methods which actually
override the methods of the static class)\<close>
@@ -1475,14 +1475,14 @@
(\<lambda>sig. if imethds G I sig \<noteq> {}
then methd G dynC sig
else dynmethd G Object dynC sig)"
-text \<open>@{term "dynimethd G I dynC"}: dynamic method lookup of a reference with
+text \<open>\<^term>\<open>dynimethd G I dynC\<close>: dynamic method lookup of a reference with
dynamic class dynC and static interface type I\<close>
text \<open>
When calling an interface method, we must distinguish if the method signature
was defined in the interface or if it must be an Object method in the other
case. If it was an interface method we search the class hierarchy
starting at the dynamic class of the object up to Object to find the
- first matching method (@{term methd}). Since all interface methods have
+ first matching method (\<^term>\<open>methd\<close>). Since all interface methods have
public access the method can't be coalesced due to some odd visibility
effects like in case of dynmethd. The method will be inherited or
overridden in all classes from the first class implementing the interface
@@ -1497,7 +1497,7 @@
| IfaceT I \<Rightarrow> dynimethd G I dynC
| ClassT statC \<Rightarrow> dynmethd G statC dynC
| ArrayT ty \<Rightarrow> dynmethd G Object dynC)"
-text \<open>@{term "dynlookup G statT dynC"}: dynamic lookup of a method within the
+text \<open>\<^term>\<open>dynlookup G statT dynC\<close>: dynamic lookup of a method within the
static reference type statT and the dynamic class dynC.
In a wellformd context statT will not be NullT and in case
statT is an array type, dynC=Object\<close>
@@ -1506,7 +1506,7 @@
fields :: "prog \<Rightarrow> qtname \<Rightarrow> ((vname \<times> qtname) \<times> field) list" where
"fields G C =
class_rec G C [] (\<lambda>C c ts. map (\<lambda>(n,t). ((n,C),t)) (cfields c) @ ts)"
-text \<open>@{term "fields G C"}
+text \<open>\<^term>\<open>fields G C\<close>
list of fields of a class, including all the fields of the superclasses
(private, inherited and hidden ones) not only the accessible ones
(an instance of a object allocates all these fields\<close>
@@ -1517,15 +1517,15 @@
(let field_tab = table_of((map (\<lambda>((n,d),f).(n,(d,f)))) (fields G C))
in filter_tab (\<lambda>n (declC,f). G\<turnstile> (declC,fdecl (n,f)) of C accessible_from S)
field_tab)"
-text \<open>@{term "accfield G C S"}: fields of a class @{term C} which are
+text \<open>\<^term>\<open>accfield G C S\<close>: fields of a class \<^term>\<open>C\<close> which are
accessible from scope of class
- @{term S} with inheritance and hiding, cf. 8.3\<close>
+ \<^term>\<open>S\<close> with inheritance and hiding, cf. 8.3\<close>
text \<open>note the class component in the accessibility filter (see also
- @{term methd}).
- The class declaring field @{term f} (@{term declC}) isn't necessarily
- accessible from scope @{term S}. The field can be made visible through
- inheritance, too. So we must test accessibility of field @{term f} of class
- @{term C} (not @{term "declclass f"})\<close>
+ \<^term>\<open>methd\<close>).
+ The class declaring field \<^term>\<open>f\<close> (\<^term>\<open>declC\<close>) isn't necessarily
+ accessible from scope \<^term>\<open>S\<close>. The field can be made visible through
+ inheritance, too. So we must test accessibility of field \<^term>\<open>f\<close> of class
+ \<^term>\<open>C\<close> (not \<^term>\<open>declclass f\<close>)\<close>
definition
is_methd :: "prog \<Rightarrow> qtname \<Rightarrow> sig \<Rightarrow> bool"
--- a/src/HOL/Bali/DefiniteAssignment.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/DefiniteAssignment.thy Sat Jan 05 17:24:33 2019 +0100
@@ -60,7 +60,7 @@
jumpNestingOkS jmps c2)"
| "jumpNestingOkS jmps (l\<bullet> While(e) c) = jumpNestingOkS ({Cont l} \<union> jmps) c"
\<comment> \<open>The label of the while loop only handles continue jumps. Breaks are only
- handled by @{term Lab}\<close>
+ handled by \<^term>\<open>Lab\<close>\<close>
| "jumpNestingOkS jmps (Jmp j) = (j \<in> jmps)"
| "jumpNestingOkS jmps (Throw e) = True"
| "jumpNestingOkS jmps (Try c1 Catch(C vn) c2) = (jumpNestingOkS jmps c1 \<and>
@@ -70,7 +70,7 @@
| "jumpNestingOkS jmps (Init C) = True"
\<comment> \<open>wellformedness of the program must enshure that for all initializers
jumpNestingOkS {} holds\<close>
-\<comment> \<open>Dummy analysis for intermediate smallstep term @{term FinA}\<close>
+\<comment> \<open>Dummy analysis for intermediate smallstep term \<^term>\<open>FinA\<close>\<close>
| "jumpNestingOkS jmps (FinA a c) = False"
@@ -141,7 +141,7 @@
| "assignsE ({accC,statT,mode}objRef\<cdot>mn({pTs}args))
= (assignsE objRef) \<union> (assignsEs args)"
\<comment> \<open>Only dummy analysis for intermediate expressions
- @{term Methd}, @{term Body}, @{term InsInitE} and @{term Callee}\<close>
+ \<^term>\<open>Methd\<close>, \<^term>\<open>Body\<close>, \<^term>\<open>InsInitE\<close> and \<^term>\<open>Callee\<close>\<close>
| "assignsE (Methd C sig) = {}"
| "assignsE (Body C s) = {}"
| "assignsE (InsInitE s e) = {}"
@@ -218,7 +218,7 @@
| Some v \<Rightarrow> constVal e2)))"
\<comment> \<open>Note that \<open>constVal (Cond b e1 e2)\<close> is stricter as it could be.
It requires that all tree expressions are constant even if we can decide
- which branch to choose, provided the constant value of @{term b}\<close>
+ which branch to choose, provided the constant value of \<^term>\<open>b\<close>\<close>
| "constVal (Call accC statT mode objRef mn pTs args) = None"
| "constVal (Methd C sig) = None"
| "constVal (Body C s) = None"
@@ -278,7 +278,7 @@
text \<open>Assigned local variables after evaluating the expression if it evaluates
to a specific boolean value. If the expression cannot evaluate to a
-@{term Boolean} value UNIV is returned. If we expect true/false the opposite
+\<^term>\<open>Boolean\<close> value UNIV is returned. If we expect true/false the opposite
constant false/true will also lead to UNIV.\<close>
primrec assigns_if :: "bool \<Rightarrow> expr \<Rightarrow> lname set"
where
@@ -324,7 +324,7 @@
| "assigns_if b ({accC,statT,mode}objRef\<cdot>mn({pTs}args))
= assignsE ({accC,statT,mode}objRef\<cdot>mn({pTs}args)) "
\<comment> \<open>Only dummy analysis for intermediate expressions
- @{term Methd}, @{term Body}, @{term InsInitE} and @{term Callee}\<close>
+ \<^term>\<open>Methd\<close>, \<^term>\<open>Body\<close>, \<^term>\<open>InsInitE\<close> and \<^term>\<open>Callee\<close>\<close>
| "assigns_if b (Methd C sig) = {}"
| "assigns_if b (Body C s) = {}"
| "assigns_if b (InsInitE s e) = {}"
@@ -526,7 +526,7 @@
In \<open>E\<turnstile> B \<guillemotright>t\<guillemotright> A\<close>,
\<open>B\<close> denotes the ''assigned'' variables before evaluating term \<open>t\<close>,
whereas \<open>A\<close> denotes the ''assigned'' variables after evaluating term \<open>t\<close>.
-The environment @{term E} is only needed for the conditional \<open>_ ? _ : _\<close>.
+The environment \<^term>\<open>E\<close> is only needed for the conditional \<open>_ ? _ : _\<close>.
The definite assignment rules refer to the typing rules here to
distinguish boolean and other expressions.
\<close>
@@ -556,21 +556,21 @@
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>If(e) c1 Else c2\<rangle>\<guillemotright> A"
-\<comment> \<open>Note that @{term E} is not further used, because we take the specialized
+\<comment> \<open>Note that \<^term>\<open>E\<close> is not further used, because we take the specialized
sets that also consider if the expression evaluates to true or false.
- Inside of @{term e} there is no {\tt break} or {\tt finally}, so the break
- map of @{term E} will be the trivial one. So
- @{term "Env\<turnstile>B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E"} is just used to ensure the definite assignment in
- expression @{term e}.
- Notice the implicit analysis of a constant boolean expression @{term e}
- in this rule. For example, if @{term e} is constantly @{term True} then
- @{term "assigns_if False e = UNIV"} and therefor @{term "nrm C2=UNIV"}.
- So finally @{term "nrm A = nrm C1"}. For the break maps this trick
+ Inside of \<^term>\<open>e\<close> there is no {\tt break} or {\tt finally}, so the break
+ map of \<^term>\<open>E\<close> will be the trivial one. So
+ \<^term>\<open>Env\<turnstile>B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E\<close> is just used to ensure the definite assignment in
+ expression \<^term>\<open>e\<close>.
+ Notice the implicit analysis of a constant boolean expression \<^term>\<open>e\<close>
+ in this rule. For example, if \<^term>\<open>e\<close> is constantly \<^term>\<open>True\<close> then
+ \<^term>\<open>assigns_if False e = UNIV\<close> and therefor \<^term>\<open>nrm C2=UNIV\<close>.
+ So finally \<^term>\<open>nrm A = nrm C1\<close>. For the break maps this trick
workd too, because the trival break map will map all labels to
- @{term UNIV}. In the example, if no break occurs in @{term c2} the break
- maps will trivially map to @{term UNIV} and if a break occurs it will map
- to @{term UNIV} too, because @{term "assigns_if False e = UNIV"}. So
- in the intersection of the break maps the path @{term c2} will have no
+ \<^term>\<open>UNIV\<close>. In the example, if no break occurs in \<^term>\<open>c2\<close> the break
+ maps will trivially map to \<^term>\<open>UNIV\<close> and if a break occurs it will map
+ to \<^term>\<open>UNIV\<close> too, because \<^term>\<open>assigns_if False e = UNIV\<close>. So
+ in the intersection of the break maps the path \<^term>\<open>c2\<close> will have no
contribution.\<close>
| Loop: "\<lbrakk>Env\<turnstile> B \<guillemotright>\<langle>e\<rangle>\<guillemotright> E;
@@ -580,13 +580,13 @@
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>l\<bullet> While(e) c\<rangle>\<guillemotright> A"
\<comment> \<open>The \<open>Loop\<close> rule resembles some of the ideas of the \<open>If\<close> rule.
- For the @{term "nrm A"} the set @{term "B \<union> assigns_if False e"}
- will be @{term UNIV} if the condition is constantly true. To normally exit
- the while loop, we must consider the body @{term c} to be completed
- normally (@{term "nrm C"}) or with a break. But in this model,
- the label @{term l} of the loop
+ For the \<^term>\<open>nrm A\<close> the set \<^term>\<open>B \<union> assigns_if False e\<close>
+ will be \<^term>\<open>UNIV\<close> if the condition is constantly true. To normally exit
+ the while loop, we must consider the body \<^term>\<open>c\<close> to be completed
+ normally (\<^term>\<open>nrm C\<close>) or with a break. But in this model,
+ the label \<^term>\<open>l\<close> of the loop
only handles continue labels, not break labels. The break label will be
- handled by an enclosing @{term Lab} statement. So we don't have to
+ handled by an enclosing \<^term>\<open>Lab\<close> statement. So we don't have to
handle the breaks specially.\<close>
| Jmp: "\<lbrakk>jump=Ret \<longrightarrow> Result \<in> B;
@@ -597,9 +597,9 @@
| Ret \<Rightarrow> \<lambda> k. UNIV)\<rbrakk>
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>Jmp jump\<rangle>\<guillemotright> A"
-\<comment> \<open>In case of a break to label @{term l} the corresponding break set is all
+\<comment> \<open>In case of a break to label \<^term>\<open>l\<close> the corresponding break set is all
variables assigned before the break. The assigned variables for normal
- completion of the @{term Jmp} is @{term UNIV}, because the statement will
+ completion of the \<^term>\<open>Jmp\<close> is \<^term>\<open>UNIV\<close>, because the statement will
never complete normally. For continue and return the break map is the
trivial one. In case of a return we enshure that the result value is
assigned.\<close>
@@ -619,25 +619,25 @@
brk A = ((brk C1) \<Rightarrow>\<union>\<^sub>\<forall> (nrm C2)) \<Rightarrow>\<inter> (brk C2)\<rbrakk>
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>c1 Finally c2\<rangle>\<guillemotright> A"
-\<comment> \<open>The set of assigned variables before execution @{term c2} are the same
- as before execution @{term c1}, because @{term c1} could throw an exception
- and so we can't guarantee that any variable will be assigned in @{term c1}.
+\<comment> \<open>The set of assigned variables before execution \<^term>\<open>c2\<close> is the same
+ as before execution \<^term>\<open>c1\<close>, because \<^term>\<open>c1\<close> could throw an exception
+ and so we can't guarantee that any variable will be assigned in \<^term>\<open>c1\<close>.
The \<open>Finally\<close> statement completes
- normally if both @{term c1} and @{term c2} complete normally. If @{term c1}
- completes abruptly with a break, then @{term c2} also will be executed
+ normally if both \<^term>\<open>c1\<close> and \<^term>\<open>c2\<close> complete normally. If \<^term>\<open>c1\<close>
+ completes abruptly with a break, then \<^term>\<open>c2\<close> also will be executed
and may terminate normally or with a break. The overall break map then is
- the intersection of the maps of both paths. If @{term c2} terminates
- normally we have to extend all break sets in @{term "brk C1"} with
- @{term "nrm C2"} (\<open>\<Rightarrow>\<union>\<^sub>\<forall>\<close>). If @{term c2} exits with a break this
+ the intersection of the maps of both paths. If \<^term>\<open>c2\<close> terminates
+ normally we have to extend all break sets in \<^term>\<open>brk C1\<close> with
+ \<^term>\<open>nrm C2\<close> (\<open>\<Rightarrow>\<union>\<^sub>\<forall>\<close>). If \<^term>\<open>c2\<close> exits with a break this
break will appear in the overall result state. We don't know if
- @{term c1} completed normally or abruptly (maybe with an exception not only
- a break) so @{term c1} has no contribution to the break map following this
+ \<^term>\<open>c1\<close> completed normally or abruptly (maybe with an exception not only
+ a break) so \<^term>\<open>c1\<close> has no contribution to the break map following this
path.\<close>
\<comment> \<open>Evaluation of expressions and the break sets of definite assignment:
Thinking of a Java expression we assume that we can never have
a break statement inside of a expression. So for all expressions the
- break sets could be set to the trivial one: @{term "\<lambda> l. UNIV"}.
+ break sets could be set to the trivial one: \<^term>\<open>\<lambda> l. UNIV\<close>.
But we can't
trivially proof, that evaluating an expression will never result in a
break, allthough Java expressions allready syntactically don't allow
@@ -645,8 +645,8 @@
statements which are inserted by the evaluation rules. So to proof the
absence of a break we need to ensure, that the initialization statements
will never end up in a break. In a wellfromed initialization statement,
- of course, were breaks are nested correctly inside of @{term Lab}
- or @{term Loop} statements evaluation of the whole initialization
+ of course, were breaks are nested correctly inside of \<^term>\<open>Lab\<close>
+ or \<^term>\<open>Loop\<close> statements evaluation of the whole initialization
statement will never result in a break, because this break will be
handled inside of the statement. But for simplicity we haven't added
the analysis of the correct nesting of breaks in the typing judgments
@@ -658,7 +658,7 @@
| Init: "Env\<turnstile> B \<guillemotright>\<langle>Init C\<rangle>\<guillemotright> \<lparr>nrm=B,brk=\<lambda> l. UNIV\<rparr>"
\<comment> \<open>Wellformedness of a program will ensure, that every static initialiser
is definetly assigned and the jumps are nested correctly. The case here
- for @{term Init} is just for convenience, to get a proper precondition
+ for \<^term>\<open>Init\<close> is just for convenience, to get a proper precondition
for the induction hypothesis in various proofs, so that we don't have to
expand the initialisation on every point where it is triggerred by the
evaluation rules.\<close>
@@ -710,7 +710,7 @@
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>Acc (LVar vn)\<rangle>\<guillemotright> A"
\<comment> \<open>To properly access a local variable we have to test the definite
- assignment here. The variable must occur in the set @{term B}\<close>
+ assignment here. The variable must occur in the set \<^term>\<open>B\<close>\<close>
| Acc: "\<lbrakk>\<forall> vn. v \<noteq> LVar vn;
Env\<turnstile> B \<guillemotright>\<langle>v\<rangle>\<guillemotright> A\<rbrakk>
@@ -747,21 +747,21 @@
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>{accC,statT,mode}e\<cdot>mn({pTs}args)\<rangle>\<guillemotright> A"
-\<comment> \<open>The interplay of @{term Call}, @{term Methd} and @{term Body}:
- Why rules for @{term Methd} and @{term Body} at all? Note that a
- Java source program will not include bare @{term Methd} or @{term Body}
+\<comment> \<open>The interplay of \<^term>\<open>Call\<close>, \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close>:
+ Why rules for \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close> at all? Note that a
+ Java source program will not include bare \<^term>\<open>Methd\<close> or \<^term>\<open>Body\<close>
terms. These terms are just introduced during evaluation. So definite
- assignment of @{term Call} does not consider @{term Methd} or
- @{term Body} at all. So for definite assignment alone we could omit the
- rules for @{term Methd} and @{term Body}.
+ assignment of \<^term>\<open>Call\<close> does not consider \<^term>\<open>Methd\<close> or
+ \<^term>\<open>Body\<close> at all. So for definite assignment alone we could omit the
+ rules for \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close>.
But since evaluation of the method invocation is
split up into three rules we must ensure that we have enough information
- about the call even in the @{term Body} term to make sure that we can
+ about the call even in the \<^term>\<open>Body\<close> term to make sure that we can
proof type safety. Also we must be able transport this information
- from @{term Call} to @{term Methd} and then further to @{term Body}
- during evaluation to establish the definite assignment of @{term Methd}
- during evaluation of @{term Call}, and of @{term Body} during evaluation
- of @{term Methd}. This is necessary since definite assignment will be
+ from \<^term>\<open>Call\<close> to \<^term>\<open>Methd\<close> and then further to \<^term>\<open>Body\<close>
+ during evaluation to establish the definite assignment of \<^term>\<open>Methd\<close>
+ during evaluation of \<^term>\<open>Call\<close>, and of \<^term>\<open>Body\<close> during evaluation
+ of \<^term>\<open>Methd\<close>. This is necessary since definite assignment will be
a precondition for each induction hypothesis coming out of the evaluation
rules, and therefor we have to establish the definite assignment of the
sub-evaluation during the type-safety proof. Note that well-typedness is
@@ -777,8 +777,8 @@
nrm A = B; brk A = (\<lambda> l. UNIV)\<rbrakk>
\<Longrightarrow>
Env\<turnstile> B \<guillemotright>\<langle>Body D c\<rangle>\<guillemotright> A"
-\<comment> \<open>Note that @{term A} is not correlated to @{term C}. If the body
- statement returns abruptly with return, evaluation of @{term Body}
+\<comment> \<open>Note that \<^term>\<open>A\<close> is not correlated to \<^term>\<open>C\<close>. If the body
+ statement returns abruptly with return, evaluation of \<^term>\<open>Body\<close>
will absorb this return and complete normally. So we cannot trivially
get the assigned variables of the body statement since it has not
completed normally or with a break.
@@ -786,8 +786,8 @@
is set with this rule. But if the body completes abruptly with a return
we can't guarantee that the result variable is set here, since
definite assignment only talks about normal completion and breaks. So
- for a return the @{term Jump} rule ensures that the result variable is
- set and then this information must be carried over to the @{term Body}
+ for a return the \<^term>\<open>Jump\<close> rule ensures that the result variable is
+ set and then this information must be carried over to the \<^term>\<open>Body\<close>
rule by the conformance predicate of the state.\<close>
| LVar: "Env\<turnstile> B \<guillemotright>\<langle>LVar vn\<rangle>\<guillemotright> \<lparr>nrm=B, brk=\<lambda> l. UNIV\<rparr>"
--- a/src/HOL/Bali/DefiniteAssignmentCorrect.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/DefiniteAssignmentCorrect.thy Sat Jan 05 17:24:33 2019 +0100
@@ -260,9 +260,9 @@
To get an induction hypothesis which is strong enough to perform the
proof, we can't just
-assume @{term jumpNestingOk} for the empty set and conlcude, that no jump at
+assume \<^term>\<open>jumpNestingOk\<close> for the empty set and conclude that no jump at
all will be in the resulting state, because the set is altered by
-the statements @{term Lab} and @{term While}.
+the statements \<^term>\<open>Lab\<close> and \<^term>\<open>While\<close>.
The wellformedness of the program is used to enshure that for all
classinitialisations and methods the nesting of jumps is wellformed, too.
@@ -296,9 +296,9 @@
have "\<And> jmps T Env. \<lbrakk>?Jmp jmps s0; jumpNestingOk jmps t; Env\<turnstile>t\<Colon>T;prg Env=G\<rbrakk>
\<Longrightarrow> ?Jmp jmps s1 \<and> ?Upd v s1"
(is "PROP ?Hyp t s0 s1 v")
- \<comment> \<open>We need to abstract over @{term jmps} since @{term jmps} are extended
- during analysis of @{term Lab}. Also we need to abstract over
- @{term T} and @{term Env} since they are altered in various
+ \<comment> \<open>We need to abstract over \<^term>\<open>jmps\<close> since \<^term>\<open>jmps\<close> are extended
+ during analysis of \<^term>\<open>Lab\<close>. Also we need to abstract over
+ \<^term>\<open>T\<close> and \<^term>\<open>Env\<close> since they are altered in various
typing judgements.\<close>
proof (induct)
case Abrupt thus ?case by simp
@@ -1659,7 +1659,7 @@
from eval normal show ?thesis
proof (induct)
case Abrupt thus ?case by simp
- next \<comment> \<open>For statements its trivial, since then @{term "assigns t = {}"}\<close>
+ next \<comment> \<open>For statements its trivial, since then \<^term>\<open>assigns t = {}\<close>\<close>
case Skip show ?case by simp
next
case Expr show ?case by simp
--- a/src/HOL/Bali/Eval.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/Eval.thy Sat Jan 05 17:24:33 2019 +0100
@@ -107,7 +107,7 @@
(e.g. see the rules below for \<open>LVar\<close>, \<open>FVar\<close> and \<open>AVar\<close>).
So evaluation of a variable must capture both possible further uses:
read (rule \<open>Acc\<close>) or write (rule \<open>Ass\<close>) to the variable.
- Therefor a variable evaluates to a special value @{term vvar}, which is
+ Therefor a variable evaluates to a special value \<^term>\<open>vvar\<close>, which is
a pair, consisting of the current value (for later read access) and an update
function (for later write access). Because
during assignment to an array variable an exception may occur if the types
@@ -542,9 +542,9 @@
\<comment> \<open>cf. 14.10, 14.10.1\<close>
- \<comment> \<open>A continue jump from the while body @{term c} is handled by
+ \<comment> \<open>A continue jump from the while body \<^term>\<open>c\<close> is handled by
this rule. If a continue jump with the proper label was invoked inside
- @{term c} this label (Cont l) is deleted out of the abrupt component of
+ \<^term>\<open>c\<close> this label (Cont l) is deleted out of the abrupt component of
the state before the iterative evaluation of the while statement.
A break jump is handled by the Lab Statement \<open>Lab l (while\<dots>)\<close>.\<close>
| Loop: "\<lbrakk>G\<turnstile>Norm s0 \<midarrow>e-\<succ>b\<rightarrow> s1;
@@ -654,19 +654,19 @@
G\<turnstile>Norm s0 \<midarrow>e0 ? e1 : e2-\<succ>v\<rightarrow> s2"
-\<comment> \<open>The interplay of @{term Call}, @{term Methd} and @{term Body}:
+\<comment> \<open>The interplay of \<^term>\<open>Call\<close>, \<^term>\<open>Methd\<close> and \<^term>\<open>Body\<close>:
Method invocation is split up into these three rules:
\begin{itemize}
- \item [@{term Call}] Calculates the target address and evaluates the
+ \item [\<^term>\<open>Call\<close>] Calculates the target address and evaluates the
arguments of the method, and then performs dynamic
or static lookup of the method, corresponding to the
- call mode. Then the @{term Methd} rule is evaluated
+ call mode. Then the \<^term>\<open>Methd\<close> rule is evaluated
on the calculated declaration class of the method
invocation.
- \item [@{term Methd}] A syntactic bridge for the folded method body.
+ \item [\<^term>\<open>Methd\<close>] A syntactic bridge for the folded method body.
It is used by the axiomatic semantics to add the
proper hypothesis for recursive calls of the method.
- \item [@{term Body}] An extra syntactic entity for the unfolded method
+ \item [\<^term>\<open>Body\<close>] An extra syntactic entity for the unfolded method
body was introduced to properly trigger class
initialisation. Without class initialisation we
could just evaluate the body statement.
@@ -680,8 +680,8 @@
G\<turnstile>s3' \<midarrow>Methd D \<lparr>name=mn,parTs=pTs\<rparr>-\<succ>v\<rightarrow> s4\<rbrakk>
\<Longrightarrow>
G\<turnstile>Norm s0 \<midarrow>{accC,statT,mode}e\<cdot>mn({pTs}args)-\<succ>v\<rightarrow> (restore_lvars s2 s4)"
-\<comment> \<open>The accessibility check is after @{term init_lvars}, to keep it simple.
- @{term init_lvars} already tests for the absence of a null-pointer
+\<comment> \<open>The accessibility check is after \<^term>\<open>init_lvars\<close>, to keep it simple.
+ \<^term>\<open>init_lvars\<close> already tests for the absence of a null-pointer
reference in case of an instance method invocation.\<close>
| Methd: "\<lbrakk>G\<turnstile>Norm s0 \<midarrow>body G D sig-\<succ>v\<rightarrow> s1\<rbrakk> \<Longrightarrow>
@@ -695,7 +695,7 @@
G\<turnstile>Norm s0 \<midarrow>Body D c-\<succ>the (locals (store s2) Result)
\<rightarrow>abupd (absorb Ret) s3"
\<comment> \<open>cf. 14.15, 12.4.1\<close>
- \<comment> \<open>We filter out a break/continue in @{term s2}, so that we can proof
+ \<comment> \<open>We filter out a break/continue in \<^term>\<open>s2\<close>, so that we can proof
definite assignment
correct, without the need of conformance of the state. By this the
different parts of the typesafety proof can be disentangled a little.\<close>
@@ -710,8 +710,8 @@
(v,s2') = fvar statDeclC stat fn a s2;
s3 = check_field_access G accC statDeclC fn stat a s2' \<rbrakk> \<Longrightarrow>
G\<turnstile>Norm s0 \<midarrow>{accC,statDeclC,stat}e..fn=\<succ>v\<rightarrow> s3"
- \<comment> \<open>The accessibility check is after @{term fvar}, to keep it simple.
- @{term fvar} already tests for the absence of a null-pointer reference
+ \<comment> \<open>The accessibility check is after \<^term>\<open>fvar\<close>, to keep it simple.
+ \<^term>\<open>fvar\<close> already tests for the absence of a null-pointer reference
in case of an instance field\<close>
\<comment> \<open>cf. 15.12.1, 15.25.1\<close>
@@ -834,8 +834,8 @@
text \<open>The following simplification procedures set up the proper injections of
terms and their corresponding values in the evaluation relation:
E.g. an expression
- (injection @{term In1l} into terms) always evaluates to ordinary values
- (injection @{term In1} into generalised values @{term vals}).
+ (injection \<^term>\<open>In1l\<close> into terms) always evaluates to ordinary values
+ (injection \<^term>\<open>In1\<close> into generalised values \<^term>\<open>vals\<close>).
\<close>
lemma eval_expr_eq: "G\<turnstile>s \<midarrow>In1l t\<succ>\<rightarrow> (w, s') = (\<exists>v. w=In1 v \<and> G\<turnstile>s \<midarrow>t-\<succ>v \<rightarrow> s')"
@@ -875,7 +875,7 @@
| _ => SOME (mk_meta_eq @{thm eval_stmt_eq}))\<close>
ML \<open>
-ML_Thms.bind_thms ("AbruptIs", sum3_instantiate @{context} @{thm eval.Abrupt})
+ML_Thms.bind_thms ("AbruptIs", sum3_instantiate \<^context> @{thm eval.Abrupt})
\<close>
declare halloc.Abrupt [intro!] eval.Abrupt [intro!] AbruptIs [intro!]
@@ -949,7 +949,7 @@
simproc_setup eval_no_abrupt ("G\<turnstile>(x,s) \<midarrow>e\<succ>\<rightarrow> (w,Norm s')") = \<open>
fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- (_ $ _ $ (Const (@{const_name Pair}, _) $ (Const (@{const_name None}, _)) $ _) $ _ $ _ $ _) => NONE
+ (_ $ _ $ (Const (\<^const_name>\<open>Pair\<close>, _) $ (Const (\<^const_name>\<open>None\<close>, _)) $ _) $ _ $ _ $ _) => NONE
| _ => SOME (mk_meta_eq @{thm eval_no_abrupt}))
\<close>
@@ -969,7 +969,7 @@
simproc_setup eval_abrupt ("G\<turnstile>(Some xc,s) \<midarrow>e\<succ>\<rightarrow> (w,s')") = \<open>
fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- (_ $ _ $ _ $ _ $ _ $ (Const (@{const_name Pair}, _) $ (Const (@{const_name Some}, _) $ _)$ _)) => NONE
+ (_ $ _ $ _ $ _ $ _ $ (Const (\<^const_name>\<open>Pair\<close>, _) $ (Const (\<^const_name>\<open>Some\<close>, _) $ _)$ _)) => NONE
| _ => SOME (mk_meta_eq @{thm eval_abrupt}))
\<close>
@@ -1157,7 +1157,7 @@
"G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (w, s') \<Longrightarrow> (\<forall>w' s''. G\<turnstile>s \<midarrow>t\<succ>\<rightarrow> (w', s'') \<longrightarrow> w' = w \<and> s'' = s')"
apply (erule eval_induct)
apply (tactic \<open>ALLGOALS (EVERY'
- [strip_tac @{context}, rotate_tac ~1, eresolve_tac @{context} @{thms eval_elim_cases}])\<close>)
+ [strip_tac \<^context>, rotate_tac ~1, eresolve_tac \<^context> @{thms eval_elim_cases}])\<close>)
(* 31 subgoals *)
prefer 28 (* Try *)
apply (simp (no_asm_use) only: split: if_split_asm)
--- a/src/HOL/Bali/Evaln.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/Evaln.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,21 +9,21 @@
text \<open>
-Variant of @{term eval} relation with counter for bounded recursive depth.
-In principal @{term evaln} could replace @{term eval}.
+Variant of \<^term>\<open>eval\<close> relation with counter for bounded recursive depth.
+In principle \<^term>\<open>evaln\<close> could replace \<^term>\<open>eval\<close>.
-Validity of the axiomatic semantics builds on @{term evaln}.
+Validity of the axiomatic semantics builds on \<^term>\<open>evaln\<close>.
For recursive method calls the axiomatic semantics rule assumes the method ok
to derive a proof for the body. To prove the method rule sound we need to
perform induction on the recursion depth.
For the completeness proof of the axiomatic semantics the notion of the most
general formula is used. The most general formula right now builds on the
-ordinary evaluation relation @{term eval}.
-So sometimes we have to switch between @{term evaln} and @{term eval} and vice
+ordinary evaluation relation \<^term>\<open>eval\<close>.
+So sometimes we have to switch between \<^term>\<open>evaln\<close> and \<^term>\<open>eval\<close> and vice
versa. To make
-this switch easy @{term evaln} also does all the technical accessibility tests
-@{term check_field_access} and @{term check_method_access} like @{term eval}.
-If it would omit them @{term evaln} and @{term eval} would only be equivalent
+this switch easy \<^term>\<open>evaln\<close> also does all the technical accessibility tests
+\<^term>\<open>check_field_access\<close> and \<^term>\<open>check_method_access\<close> like \<^term>\<open>eval\<close>.
+If it would omit them \<^term>\<open>evaln\<close> and \<^term>\<open>eval\<close> would only be equivalent
for welltyped, and definitely assigned terms.
\<close>
@@ -252,8 +252,8 @@
text \<open>The following simplification procedures set up the proper injections of
terms and their corresponding values in the evaluation relation:
E.g. an expression
- (injection @{term In1l} into terms) always evaluates to ordinary values
- (injection @{term In1} into generalised values @{term vals}).
+ (injection \<^term>\<open>In1l\<close> into terms) always evaluates to ordinary values
+ (injection \<^term>\<open>In1\<close> into generalised values \<^term>\<open>vals\<close>).
\<close>
lemma evaln_expr_eq: "G\<turnstile>s \<midarrow>In1l t\<succ>\<midarrow>n\<rightarrow> (w, s') = (\<exists>v. w=In1 v \<and> G\<turnstile>s \<midarrow>t-\<succ>v \<midarrow>n\<rightarrow> s')"
@@ -292,7 +292,7 @@
(_ $ _ $ _ $ _ $ _ $ (Const _ $ _) $ _) => NONE
| _ => SOME (mk_meta_eq @{thm evaln_stmt_eq}))\<close>
-ML \<open>ML_Thms.bind_thms ("evaln_AbruptIs", sum3_instantiate @{context} @{thm evaln.Abrupt})\<close>
+ML \<open>ML_Thms.bind_thms ("evaln_AbruptIs", sum3_instantiate \<^context> @{thm evaln.Abrupt})\<close>
declare evaln_AbruptIs [intro!]
lemma evaln_Callee: "G\<turnstile>Norm s\<midarrow>In1l (Callee l e)\<succ>\<midarrow>n\<rightarrow> (v,s') = False"
@@ -358,7 +358,7 @@
simproc_setup evaln_abrupt ("G\<turnstile>(Some xc,s) \<midarrow>e\<succ>\<midarrow>n\<rightarrow> (w,s')") = \<open>
fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- (_ $ _ $ _ $ _ $ _ $ _ $ (Const (@{const_name Pair}, _) $ (Const (@{const_name Some},_) $ _)$ _))
+ (_ $ _ $ _ $ _ $ _ $ _ $ (Const (\<^const_name>\<open>Pair\<close>, _) $ (Const (\<^const_name>\<open>Some\<close>,_) $ _)$ _))
=> NONE
| _ => SOME (mk_meta_eq @{thm evaln_abrupt}))
\<close>
@@ -448,10 +448,10 @@
lemma evaln_nonstrict [rule_format (no_asm), elim]:
"G\<turnstile>s \<midarrow>t\<succ>\<midarrow>n\<rightarrow> (w, s') \<Longrightarrow> \<forall>m. n\<le>m \<longrightarrow> G\<turnstile>s \<midarrow>t\<succ>\<midarrow>m\<rightarrow> (w, s')"
apply (erule evaln.induct)
-apply (tactic \<open>ALLGOALS (EVERY' [strip_tac @{context},
- TRY o eresolve_tac @{context} @{thms Suc_le_D_lemma},
- REPEAT o smp_tac @{context} 1,
- resolve_tac @{context} @{thms evaln.intros} THEN_ALL_NEW TRY o assume_tac @{context}])\<close>)
+apply (tactic \<open>ALLGOALS (EVERY' [strip_tac \<^context>,
+ TRY o eresolve_tac \<^context> @{thms Suc_le_D_lemma},
+ REPEAT o smp_tac \<^context> 1,
+ resolve_tac \<^context> @{thms evaln.intros} THEN_ALL_NEW TRY o assume_tac \<^context>])\<close>)
(* 3 subgoals *)
apply (auto split del: if_split)
done
--- a/src/HOL/Bali/Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -898,7 +898,7 @@
declare member_is_static_simp [simp]
declare wt.Skip [rule del] wt.Init [rule del]
-ML \<open>ML_Thms.bind_thms ("wt_intros", map (rewrite_rule @{context} @{thms id_def}) @{thms wt.intros})\<close>
+ML \<open>ML_Thms.bind_thms ("wt_intros", map (rewrite_rule \<^context> @{thms id_def}) @{thms wt.intros})\<close>
lemmas wtIs = wt_Call wt_Super wt_FVar wt_StatRef wt_intros
lemmas daIs = assigned.select_convs da_Skip da_NewC da_Lit da_Super da.intros
@@ -1192,8 +1192,8 @@
Base_foo_defs [simp]
ML \<open>ML_Thms.bind_thms ("eval_intros", map
- (simplify (@{context} delsimps @{thms Skip_eq} addsimps @{thms lvar_def}) o
- rewrite_rule @{context} [@{thm assign_def}, @{thm Let_def}]) @{thms eval.intros})\<close>
+ (simplify (\<^context> delsimps @{thms Skip_eq} addsimps @{thms lvar_def}) o
+ rewrite_rule \<^context> [@{thm assign_def}, @{thm Let_def}]) @{thms eval.intros})\<close>
lemmas eval_Is = eval_Init eval_StatRef AbruptIs eval_intros
axiomatization
--- a/src/HOL/Bali/Table.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/Table.thy Sat Jan 05 17:24:33 2019 +0100
@@ -396,7 +396,7 @@
apply (rename_tac "ba")
apply (drule_tac x = "ba" in spec)
apply clarify
-apply (tactic "smp_tac @{context} 2 1")
+apply (tactic "smp_tac \<^context> 2 1")
apply (erule (1) notE impE)
apply (case_tac "aa = b")
apply fast+
--- a/src/HOL/Bali/Term.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/Term.thy Sat Jan 05 17:24:33 2019 +0100
@@ -144,7 +144,7 @@
= LVar lname \<comment> \<open>local variable (incl. parameters)\<close>
| FVar qtname qtname bool expr vname ("{_,_,_}_.._"[10,10,10,85,99]90)
\<comment> \<open>class field\<close>
- \<comment> \<open>@{term "{accC,statDeclC,stat}e..fn"}\<close>
+ \<comment> \<open>\<^term>\<open>{accC,statDeclC,stat}e..fn\<close>\<close>
\<comment> \<open>\<open>accC\<close>: accessing class (static class were\<close>
\<comment> \<open>the code is declared. Annotation only needed for\<close>
\<comment> \<open>evaluation to check accessibility)\<close>
@@ -154,7 +154,7 @@
\<comment> \<open>\<open>fn\<close>: field name\<close>
| AVar expr expr ("_.[_]"[90,10 ]90)
\<comment> \<open>array component\<close>
- \<comment> \<open>@{term "e1.[e2]"}: e1 array reference; e2 index\<close>
+ \<comment> \<open>\<^term>\<open>e1.[e2]\<close>: e1 array reference; e2 index\<close>
| InsInitV stmt var
\<comment> \<open>insertion of initialization before evaluation\<close>
\<comment> \<open>of var (technical term for smallstep semantics.)\<close>
@@ -178,7 +178,7 @@
| Call qtname ref_ty inv_mode expr mname "(ty list)" "(expr list)"
("{_,_,_}_\<cdot>_'( {_}_')"[10,10,10,85,99,10,10]85)
\<comment> \<open>method call\<close>
- \<comment> \<open>@{term "{accC,statT,mode}e\<cdot>mn({pTs}args)"} "\<close>
+ \<comment> \<open>\<^term>\<open>{accC,statT,mode}e\<cdot>mn({pTs}args)\<close> "\<close>
\<comment> \<open>\<open>accC\<close>: accessing class (static class were\<close>
\<comment> \<open>the call code is declared. Annotation only needed for\<close>
\<comment> \<open>evaluation to check accessibility)\<close>
@@ -207,7 +207,7 @@
| Jmp jump \<comment> \<open>break, continue, return\<close>
| Throw expr
| TryC stmt qtname vname stmt ("Try _ Catch'(_ _') _" [79,99,80,79]70)
- \<comment> \<open>@{term "Try c1 Catch(C vn) c2"}\<close>
+ \<comment> \<open>\<^term>\<open>Try c1 Catch(C vn) c2\<close>\<close>
\<comment> \<open>\<open>c1\<close>: block were exception may be thrown\<close>
\<comment> \<open>\<open>C\<close>: execption class to catch\<close>
\<comment> \<open>\<open>vn\<close>: local name for exception used in \<open>c2\<close>\<close>
@@ -264,7 +264,7 @@
is_stmt :: "term \<Rightarrow> bool"
where "is_stmt t = (\<exists>c. t=In1r c)"
-ML \<open>ML_Thms.bind_thms ("is_stmt_rews", sum3_instantiate @{context} @{thm is_stmt_def})\<close>
+ML \<open>ML_Thms.bind_thms ("is_stmt_rews", sum3_instantiate \<^context> @{thm is_stmt_def})\<close>
declare is_stmt_rews [simp]
@@ -469,7 +469,7 @@
need_second_arg :: "binop \<Rightarrow> val \<Rightarrow> bool" where
"need_second_arg binop v1 = (\<not> ((binop=CondAnd \<and> \<not> the_Bool v1) \<or>
(binop=CondOr \<and> the_Bool v1)))"
-text \<open>@{term CondAnd} and @{term CondOr} only evalulate the second argument
+text \<open>\<^term>\<open>CondAnd\<close> and \<^term>\<open>CondOr\<close> only evalulate the second argument
if the value isn't already determined by the first argument\<close>
lemma need_second_arg_CondAnd [simp]: "need_second_arg CondAnd (Bool b) = b"
--- a/src/HOL/Bali/TypeSafe.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/TypeSafe.thy Sat Jan 05 17:24:33 2019 +0100
@@ -115,12 +115,12 @@
| Inr Ts \<Rightarrow> list_all2 (conf G s) (the_In3 v) Ts)"
text \<open>
- With @{term rconf} we describe the conformance of the result value of a term.
+ With \<^term>\<open>rconf\<close> we describe the conformance of the result value of a term.
This definition gets rather complicated because of the relations between the
injections of the different terms, types and values. The main case distinction
is between single values and value lists. In case of value lists, every
value has to conform to its type. For single values we have to do a further
- case distinction, between values of variables @{term "\<exists>var. t=In2 var" } and
+ case distinction, between values of variables \<^term>\<open>\<exists>var. t=In2 var\<close> and
ordinary values. Values of variables are modelled as pairs consisting of the
current value and an update function which will perform an assignment to the
variable. This stems form the decision, that we only have one evaluation rule
@@ -129,7 +129,7 @@
variable-values must ensure that both the current value and an update will
conform to the type. With the introduction of definite assignment of local
variables we have to do another case distinction. For the notion of conformance
- local variables are allowed to be @{term None}, since the definedness is not
+ local variables are allowed to be \<^term>\<open>None\<close>, since the definedness is not
ensured by conformance but by definite assignment. Field and array variables
must contain a value.
\<close>
@@ -1899,14 +1899,14 @@
with wt show ?thesis
by simp
qed
- \<comment> \<open>Note that we don't have to show that @{term b} really is a boolean
- value. With @{term the_Bool} we enforce to get a value of boolean
+ \<comment> \<open>Note that we don't have to show that \<^term>\<open>b\<close> really is a boolean
+ value. With \<^term>\<open>the_Bool\<close> we enforce to get a value of boolean
type. So execution will be type safe, even if b would be
a string, for example. We might not expect such a behaviour to be
called type safe. To remedy the situation we would have to change
the evaulation rule, so that it only has a type safe evaluation if
we actually get a boolean value for the condition. That b is actually
- a boolean value is part of @{term hyp_e}. See also Loop\<close>
+ a boolean value is part of \<^term>\<open>hyp_e\<close>. See also Loop\<close>
next
case (Loop s0 e b s1 c s2 l s3 L accC T A)
note eval_e = \<open>G\<turnstile>Norm s0 \<midarrow>e-\<succ>b\<rightarrow> s1\<close>
--- a/src/HOL/Bali/WellForm.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/WellForm.thy Sat Jan 05 17:24:33 2019 +0100
@@ -812,8 +812,8 @@
qed
text \<open>Compare this lemma about static
-overriding @{term "G \<turnstile>new overrides\<^sub>S old"} with the definition of
-dynamic overriding @{term "G \<turnstile>new overrides old"}.
+overriding \<^term>\<open>G \<turnstile>new overrides\<^sub>S old\<close> with the definition of
+dynamic overriding \<^term>\<open>G \<turnstile>new overrides old\<close>.
Conforming result types and restrictions on the access modifiers of the old
and the new method are not part of the predicate for static overriding. But
they are enshured in a wellfromed program. Dynamic overriding has
@@ -2058,7 +2058,7 @@
The following table gives an overview of the current framework. We assume
to have a reference with static type statT and a dynamic class dynC. Between
both of these types the widening relation holds
-@{term "G\<turnstile>Class dynC\<preceq> statT"}. Unfortunately this ordinary widening relation
+\<^term>\<open>G\<turnstile>Class dynC\<preceq> statT\<close>. Unfortunately this ordinary widening relation
isn't enough to describe the valid lookup classes, since we must cope the
special cases of arrays and interfaces,too. If we statically expect an array or
inteface we may lookup a field or a method in Object which isn't covered in
@@ -2077,7 +2077,7 @@
The limitation to classes in the field column is artificial and comes out
of the typing rule for the field access (see rule \<open>FVar\<close> in the
-welltyping relation @{term "wt"} in theory WellType).
+welltyping relation \<^term>\<open>wt\<close> in theory WellType).
I stems out of the fact, that Object
indeed has no non private fields. So interfaces and arrays can actually
have no fields at all and a field access would be senseless. (In Java
--- a/src/HOL/Bali/WellType.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Bali/WellType.thy Sat Jan 05 17:24:33 2019 +0100
@@ -305,10 +305,10 @@
| Init: "\<lbrakk>is_class (prg E) C\<rbrakk> \<Longrightarrow>
E,dt\<Turnstile>Init C\<Colon>\<surd>"
- \<comment> \<open>@{term Init} is created on the fly during evaluation (see Eval.thy).
- The class isn't necessarily accessible from the points @{term Init}
- is called. Therefor we only demand @{term is_class} and not
- @{term is_acc_class} here.\<close>
+ \<comment> \<open>\<^term>\<open>Init\<close> is created on the fly during evaluation (see Eval.thy).
+ The class isn't necessarily accessible from the points \<^term>\<open>Init\<close>
+ is called. Therefor we only demand \<^term>\<open>is_class\<close> and not
+ \<^term>\<open>is_acc_class\<close> here.\<close>
\<comment> \<open>well-typed expressions\<close>
@@ -376,12 +376,12 @@
methd (prg E) C sig = Some m;
E,dt\<Turnstile>Body (declclass m) (stmt (mbody (mthd m)))\<Colon>-T\<rbrakk> \<Longrightarrow>
E,dt\<Turnstile>Methd C sig\<Colon>-T"
- \<comment> \<open>The class @{term C} is the dynamic class of the method call
+ \<comment> \<open>The class \<^term>\<open>C\<close> is the dynamic class of the method call
(cf. Eval.thy).
It hasn't got to be directly accessible from the current package
- @{term "(pkg E)"}.
+ \<^term>\<open>(pkg E)\<close>.
Only the static class must be accessible (enshured indirectly by
- @{term Call}).
+ \<^term>\<open>Call\<close>).
Note that l is just a dummy value. It is only used in the smallstep
semantics. To proof typesafety directly for the smallstep semantics
we would have to assume conformance of l here!\<close>
@@ -391,12 +391,12 @@
(lcl E) Result = Some T;
is_type (prg E) T\<rbrakk> \<Longrightarrow>
E,dt\<Turnstile>Body D blk\<Colon>-T"
-\<comment> \<open>The class @{term D} implementing the method must not directly be
- accessible from the current package @{term "(pkg E)"}, but can also
- be indirectly accessible due to inheritance (enshured in @{term Call})
+\<comment> \<open>The class \<^term>\<open>D\<close> implementing the method must not directly be
+ accessible from the current package \<^term>\<open>(pkg E)\<close>, but can also
+ be indirectly accessible due to inheritance (enshured in \<^term>\<open>Call\<close>)
The result type hasn't got to be accessible in Java! (If it is not
accessible you can only assign it to Object).
- For dummy value l see rule @{term Methd}.\<close>
+ For dummy value l see rule \<^term>\<open>Methd\<close>.\<close>
\<comment> \<open>well-typed variables\<close>
@@ -587,8 +587,8 @@
\<comment> \<open>In the special syntax to distinguish the typing judgements for expressions,
statements, variables and expression lists the kind of term corresponds
- to the kind of type in the end e.g. An statement (injection @{term In3}
- into terms, always has type void (injection @{term Inl} into the generalised
+ to the kind of type in the end e.g. An statement (injection \<^term>\<open>In3\<close>
+ into terms, always has type void (injection \<^term>\<open>Inl\<close> into the generalised
types. The following simplification procedures establish these kinds of
correlation.\<close>
@@ -656,12 +656,12 @@
(* 17 subgoals *)
apply (tactic \<open>ALLGOALS (fn i =>
if i = 11 then EVERY'
- [Rule_Insts.thin_tac @{context} "E,dt\<Turnstile>e0\<Colon>-PrimT Boolean" [(@{binding E}, NONE, NoSyn)],
- Rule_Insts.thin_tac @{context} "E,dt\<Turnstile>e1\<Colon>-T1" [(@{binding E}, NONE, NoSyn), (@{binding T1}, NONE, NoSyn)],
- Rule_Insts.thin_tac @{context} "E,dt\<Turnstile>e2\<Colon>-T2" [(@{binding E}, NONE, NoSyn), (@{binding T2}, NONE, NoSyn)]] i
- else Rule_Insts.thin_tac @{context} "All P" [(@{binding P}, NONE, NoSyn)] i)\<close>)
+ [Rule_Insts.thin_tac \<^context> "E,dt\<Turnstile>e0\<Colon>-PrimT Boolean" [(\<^binding>\<open>E\<close>, NONE, NoSyn)],
+ Rule_Insts.thin_tac \<^context> "E,dt\<Turnstile>e1\<Colon>-T1" [(\<^binding>\<open>E\<close>, NONE, NoSyn), (\<^binding>\<open>T1\<close>, NONE, NoSyn)],
+ Rule_Insts.thin_tac \<^context> "E,dt\<Turnstile>e2\<Colon>-T2" [(\<^binding>\<open>E\<close>, NONE, NoSyn), (\<^binding>\<open>T2\<close>, NONE, NoSyn)]] i
+ else Rule_Insts.thin_tac \<^context> "All P" [(\<^binding>\<open>P\<close>, NONE, NoSyn)] i)\<close>)
(*apply (safe del: disjE elim!: wt_elim_cases)*)
-apply (tactic \<open>ALLGOALS (eresolve_tac @{context} @{thms wt_elim_cases})\<close>)
+apply (tactic \<open>ALLGOALS (eresolve_tac \<^context> @{thms wt_elim_cases})\<close>)
apply (simp_all (no_asm_use) split del: if_split_asm)
apply (erule_tac [12] V = "All P" for P in thin_rl) (* Call *)
apply (blast del: equalityCE dest: sym [THEN trans])+
--- a/src/HOL/Cardinals/Wellorder_Extension.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Cardinals/Wellorder_Extension.thy Sat Jan 05 17:24:33 2019 +0100
@@ -125,7 +125,7 @@
with assms [unfolded wf_eq_minimal, THEN spec, of ?Q]
obtain x where "x \<in> Field p" and "x \<notin> Field m" and
min: "\<forall>y. (y, x) \<in> p \<longrightarrow> y \<notin> ?Q" by blast
- txt \<open>Add @{term x} as topmost element to @{term m}.\<close>
+ txt \<open>Add \<^term>\<open>x\<close> as topmost element to \<^term>\<open>m\<close>.\<close>
let ?s = "{(y, x) | y. y \<in> Field m}"
let ?m = "insert (x, x) m \<union> ?s"
have Fm: "Field ?m = insert x (Field m)" by (auto simp: Field_def)
--- a/src/HOL/Codegenerator_Test/Candidates.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Codegenerator_Test/Candidates.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,14 +18,14 @@
"HOL-ex.Records"
begin
-text \<open>Drop technical stuff from @{theory HOL.Quickcheck_Narrowing} which is tailored towards Haskell\<close>
+text \<open>Drop technical stuff from \<^theory>\<open>HOL.Quickcheck_Narrowing\<close> which is tailored towards Haskell\<close>
setup \<open>
fn thy =>
let
val tycos = Sign.logical_types thy;
val consts = map_filter (try (curry (Axclass.param_of_inst thy)
- @{const_name "Quickcheck_Narrowing.partial_term_of"})) tycos;
+ \<^const_name>\<open>Quickcheck_Narrowing.partial_term_of\<close>)) tycos;
in fold Code.declare_unimplemented_global consts thy end
\<close>
--- a/src/HOL/Codegenerator_Test/Code_Lazy_Test.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Codegenerator_Test/Code_Lazy_Test.thy Sat Jan 05 17:24:33 2019 +0100
@@ -47,7 +47,7 @@
value [code] "lhd (lfilter odd llist)"
definition lfilter_test :: "nat llist \<Rightarrow> _" where "lfilter_test xs = lhd (lfilter even xs)"
- \<comment> \<open>Filtering @{term llist} for @{term even} fails because only the datatype is lazy, not the
+ \<comment> \<open>Filtering \<^term>\<open>llist\<close> for \<^term>\<open>even\<close> fails because only the datatype is lazy, not the
filter function itself.\<close>
ML_val \<open> (@{code lfilter_test} @{code llist}; raise Fail "Failure expected") handle Match => () \<close>
--- a/src/HOL/Codegenerator_Test/Code_Test_GHC.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Codegenerator_Test/Code_Test_GHC.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
value [GHC] "14 + 7 * -12 :: integer"
-test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of \<^const>\<open>gcd\<close> on \<^typ>\<open>integer\<close>\<close>
"gcd 15 10 = (5 :: integer)"
"gcd 15 (- 10) = (5 :: integer)"
"gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Codegenerator_Test/Code_Test_OCaml.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Codegenerator_Test/Code_Test_OCaml.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
value [OCaml] "14 + 7 * -12 :: integer"
-test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of \<^const>\<open>gcd\<close> on \<^typ>\<open>integer\<close>\<close>
"gcd 15 10 = (5 :: integer)"
"gcd 15 (- 10) = (5 :: integer)"
"gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Codegenerator_Test/Code_Test_Scala.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Codegenerator_Test/Code_Test_Scala.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
value [Scala] "14 + 7 * -12 :: integer"
-test_code \<comment> \<open>Tests for the serialisation of @{const gcd} on @{typ integer}\<close>
+test_code \<comment> \<open>Tests for the serialisation of \<^const>\<open>gcd\<close> on \<^typ>\<open>integer\<close>\<close>
"gcd 15 10 = (5 :: integer)"
"gcd 15 (- 10) = (5 :: integer)"
"gcd (- 10) 15 = (5 :: integer)"
--- a/src/HOL/Computational_Algebra/Formal_Power_Series.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Computational_Algebra/Formal_Power_Series.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4533,7 +4533,7 @@
finally show ?thesis by simp
qed
-text \<open>Connection to @{const "fps_exp"} over the complex numbers --- Euler and de Moivre.\<close>
+text \<open>Connection to \<^const>\<open>fps_exp\<close> over the complex numbers --- Euler and de Moivre.\<close>
lemma fps_exp_ii_sin_cos: "fps_exp (\<i> * c) = fps_cos c + fps_const \<i> * fps_sin c"
(is "?l = ?r")
--- a/src/HOL/Computational_Algebra/Polynomial.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Computational_Algebra/Polynomial.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1577,7 +1577,7 @@
subsubsection \<open>Synthetic division\<close>
-text \<open>Synthetic division is simply division by the linear polynomial @{term "x - c"}.\<close>
+text \<open>Synthetic division is simply division by the linear polynomial \<^term>\<open>x - c\<close>.\<close>
definition synthetic_divmod :: "'a::comm_semiring_0 poly \<Rightarrow> 'a \<Rightarrow> 'a poly \<times> 'a"
where "synthetic_divmod p c = fold_coeffs (\<lambda>a (q, r). (pCons r q, a + c * r)) p (0, 0)"
--- a/src/HOL/Computational_Algebra/Polynomial_FPS.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Computational_Algebra/Polynomial_FPS.thy Sat Jan 05 17:24:33 2019 +0100
@@ -163,11 +163,11 @@
text \<open>
The following simproc can reduce the equality of two polynomial FPSs two equality of the
respective polynomials. A polynomial FPS is one that only has finitely many non-zero
- coefficients and can therefore be written as @{term "fps_of_poly p"} for some
+ coefficients and can therefore be written as \<^term>\<open>fps_of_poly p\<close> for some
polynomial \<open>p\<close>.
This may sound trivial, but it covers a number of annoying side conditions like
- @{term "1 + fps_X \<noteq> 0"} that would otherwise not be solved automatically.
+ \<^term>\<open>1 + fps_X \<noteq> 0\<close> that would otherwise not be solved automatically.
\<close>
ML \<open>
@@ -200,21 +200,21 @@
val bin = Conv.binop_conv reify_conv
in
case Thm.term_of ct of
- (Const (@{const_name "fps_of_poly"}, _) $ _) => ct |> Conv.all_conv
- | (Const (@{const_name "Groups.plus"}, _) $ _ $ _) => ct |> (
+ (Const (\<^const_name>\<open>fps_of_poly\<close>, _) $ _) => ct |> Conv.all_conv
+ | (Const (\<^const_name>\<open>Groups.plus\<close>, _) $ _ $ _) => ct |> (
bin then_conv rewr @{thms fps_of_poly_add [symmetric]})
- | (Const (@{const_name "Groups.uminus"}, _) $ _) => ct |> (
+ | (Const (\<^const_name>\<open>Groups.uminus\<close>, _) $ _) => ct |> (
un then_conv rewr @{thms fps_of_poly_uminus [symmetric]})
- | (Const (@{const_name "Groups.minus"}, _) $ _ $ _) => ct |> (
+ | (Const (\<^const_name>\<open>Groups.minus\<close>, _) $ _ $ _) => ct |> (
bin then_conv rewr @{thms fps_of_poly_diff [symmetric]})
- | (Const (@{const_name "Groups.times"}, _) $ _ $ _) => ct |> (
+ | (Const (\<^const_name>\<open>Groups.times\<close>, _) $ _ $ _) => ct |> (
bin then_conv rewr @{thms fps_of_poly_mult [symmetric]})
- | (Const (@{const_name "Rings.divide"}, _) $ _ $ (Const (@{const_name "Num.numeral"}, _) $ _))
+ | (Const (\<^const_name>\<open>Rings.divide\<close>, _) $ _ $ (Const (\<^const_name>\<open>Num.numeral\<close>, _) $ _))
=> ct |> (Conv.fun_conv (Conv.arg_conv reify_conv)
then_conv rewr @{thms fps_of_poly_divide_numeral [symmetric]})
- | (Const (@{const_name "Power.power"}, _) $ Const (@{const_name "fps_X"},_) $ _) => ct |> (
+ | (Const (\<^const_name>\<open>Power.power\<close>, _) $ Const (\<^const_name>\<open>fps_X\<close>,_) $ _) => ct |> (
rewr @{thms fps_of_poly_monom' [symmetric]})
- | (Const (@{const_name "Power.power"}, _) $ _ $ _) => ct |> (
+ | (Const (\<^const_name>\<open>Power.power\<close>, _) $ _ $ _) => ct |> (
Conv.fun_conv (Conv.arg_conv reify_conv)
then_conv rewr @{thms fps_of_poly_power [symmetric]})
| _ => ct |> (
@@ -224,7 +224,7 @@
fun eq_conv ct =
case Thm.term_of ct of
- (Const (@{const_name "HOL.eq"}, _) $ _ $ _) => ct |> (
+ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ _) => ct |> (
Conv.binop_conv reify_conv
then_conv Conv.rewr_conv @{thm fps_of_poly_eq_iff[THEN eq_reflection]})
| _ => raise CTERM ("poly_fps_eq_conv", [ct])
--- a/src/HOL/Computational_Algebra/Primes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Computational_Algebra/Primes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -42,7 +42,7 @@
imports Euclidean_Algorithm
begin
-subsection \<open>Primes on @{typ nat} and @{typ int}\<close>
+subsection \<open>Primes on \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close>\<close>
lemma Suc_0_not_prime_nat [simp]: "\<not> prime (Suc 0)"
using not_prime_1 [where ?'a = nat] by simp
--- a/src/HOL/Data_Structures/AA_Set.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/AA_Set.thy Sat Jan 05 17:24:33 2019 +0100
@@ -72,7 +72,7 @@
\<Rightarrow> Node (Node l x (lv-1) t2) a (lva+1)
(split (Node t3 b (if sngl t1 then lva else lva+1) t4)))))"
-text\<open>In the paper, the last case of @{const adjust} is expressed with the help of an
+text\<open>In the paper, the last case of \<^const>\<open>adjust\<close> is expressed with the help of an
incorrect auxiliary function \texttt{nlvl}.
Function \<open>split_max\<close> below is called \texttt{dellrg} in the paper.
--- a/src/HOL/Data_Structures/AList_Upd_Del.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/AList_Upd_Del.thy Sat Jan 05 17:24:33 2019 +0100
@@ -30,7 +30,7 @@
"del_list x ((a,b)#ps) = (if x = a then ps else (a,b) # del_list x ps)"
-subsection \<open>Lemmas for @{const map_of}\<close>
+subsection \<open>Lemmas for \<^const>\<open>map_of\<close>\<close>
lemma map_of_ins_list: "map_of (upd_list x y ps) = (map_of ps)(x := Some y)"
by(induction ps) auto
@@ -61,7 +61,7 @@
lemmas map_of_simps = sorted_lems map_of_append map_of_sorteds
-subsection \<open>Lemmas for @{const upd_list}\<close>
+subsection \<open>Lemmas for \<^const>\<open>upd_list\<close>\<close>
lemma sorted_upd_list: "sorted1 ps \<Longrightarrow> sorted1 (upd_list x y ps)"
apply(induction ps)
@@ -89,7 +89,7 @@
lemmas upd_list_simps = sorted_lems upd_list_sorted1 upd_list_sorted2
-text\<open>Splay trees need two additional @{const upd_list} lemmas:\<close>
+text\<open>Splay trees need two additional \<^const>\<open>upd_list\<close> lemmas:\<close>
lemma upd_list_Cons:
"sorted1 ((x,y) # xs) \<Longrightarrow> upd_list x y xs = (x,y) # xs"
@@ -100,7 +100,7 @@
by(induction xs) (auto simp add: sorted_mid_iff2)
-subsection \<open>Lemmas for @{const del_list}\<close>
+subsection \<open>Lemmas for \<^const>\<open>del_list\<close>\<close>
lemma sorted_del_list: "sorted1 ps \<Longrightarrow> sorted1 (del_list x ps)"
apply(induction ps)
@@ -153,7 +153,7 @@
del_list_sorted4
del_list_sorted5
-text\<open>Splay trees need two additional @{const del_list} lemmas:\<close>
+text\<open>Splay trees need two additional \<^const>\<open>del_list\<close> lemmas:\<close>
lemma del_list_notin_Cons: "sorted (x # map fst xs) \<Longrightarrow> del_list x xs = xs"
by(induction xs)(fastforce simp: sorted_wrt_Cons)+
--- a/src/HOL/Data_Structures/AVL_Set.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/AVL_Set.thy Sat Jan 05 17:24:33 2019 +0100
@@ -492,7 +492,7 @@
thus ?case using assms by (cases n) (auto simp: eval_nat_numeral)
qed (insert assms, auto)
-text \<open>An exponential lower bound for @{const fib}:\<close>
+text \<open>An exponential lower bound for \<^const>\<open>fib\<close>:\<close>
lemma fib_lowerbound:
defines "\<phi> \<equiv> (1 + sqrt 5) / 2"
@@ -534,8 +534,8 @@
finally show ?thesis .
qed
-text \<open>The height of an AVL tree is most @{term "(1/log 2 \<phi>)"} \<open>\<approx> 1.44\<close> times worse
-than @{term "log 2 (size1 t)"}:\<close>
+text \<open>The height of an AVL tree is most \<^term>\<open>(1/log 2 \<phi>)\<close> \<open>\<approx> 1.44\<close> times worse
+than \<^term>\<open>log 2 (size1 t)\<close>:\<close>
lemma avl_height_upperbound:
defines "\<phi> \<equiv> (1 + sqrt 5) / 2"
--- a/src/HOL/Data_Structures/Array_Braun.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Array_Braun.thy Sat Jan 05 17:24:33 2019 +0100
@@ -51,7 +51,7 @@
declare upt_Suc[simp del]
-paragraph \<open>@{const lookup1}\<close>
+paragraph \<open>\<^const>\<open>lookup1\<close>\<close>
lemma nth_list_lookup1: "\<lbrakk>braun t; i < size t\<rbrakk> \<Longrightarrow> list t ! i = lookup1 t (i+1)"
proof(induction t arbitrary: i)
@@ -66,7 +66,7 @@
by(auto simp add: list_eq_iff_nth_eq size_list nth_list_lookup1)
-paragraph \<open>@{const update1}\<close>
+paragraph \<open>\<^const>\<open>update1\<close>\<close>
lemma size_update1: "\<lbrakk> braun t; n \<in> {1.. size t} \<rbrakk> \<Longrightarrow> size(update1 n x t) = size t"
proof(induction t arbitrary: n)
@@ -118,7 +118,7 @@
qed
-paragraph \<open>@{const adds}\<close>
+paragraph \<open>\<^const>\<open>adds\<close>\<close>
lemma splice_last: shows
"size ys \<le> size xs \<Longrightarrow> splice (xs @ [x]) ys = splice xs ys @ [x]"
@@ -192,7 +192,7 @@
subsubsection "Functional Correctness"
-paragraph \<open>@{const add_lo}\<close>
+paragraph \<open>\<^const>\<open>add_lo\<close>\<close>
lemma list_add_lo: "braun t \<Longrightarrow> list (add_lo a t) = a # list t"
by(induction t arbitrary: a) auto
@@ -201,7 +201,7 @@
by(induction t arbitrary: x) (auto simp add: list_add_lo simp flip: size_list)
-paragraph \<open>@{const del_lo}\<close>
+paragraph \<open>\<^const>\<open>del_lo\<close>\<close>
lemma list_merge: "braun (Node l x r) \<Longrightarrow> list(merge l r) = splice (list l) (list r)"
by (induction l r rule: merge.induct) auto
@@ -216,7 +216,7 @@
by (cases t) (simp_all add: braun_merge)
-paragraph \<open>@{const del_hi}\<close>
+paragraph \<open>\<^const>\<open>del_hi\<close>\<close>
lemma list_Nil_iff: "list t = [] \<longleftrightarrow> t = Leaf"
by(cases t) simp_all
--- a/src/HOL/Data_Structures/Binomial_Heap.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Binomial_Heap.thy Sat Jan 05 17:24:33 2019 +0100
@@ -633,7 +633,7 @@
shows "t_get_min_rest ts \<le> log 2 (size (mset_heap ts) + 1)"
using assms t_get_min_rest_bound_aux unfolding invar_def by blast
-text\<open>Note that although the definition of function @{const rev} has quadratic complexity,
+text\<open>Note that although the definition of function \<^const>\<open>rev\<close> has quadratic complexity,
it can and is implemented (via suitable code lemmas) as a linear time function.
Thus the following definition is justified:\<close>
--- a/src/HOL/Data_Structures/Less_False.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Less_False.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,7 +14,7 @@
fun prove_less_False ((less as Const(_,T)) $ r $ s) =
let val prems = Simplifier.prems_of ctxt;
- val le = Const (@{const_name less_eq}, T);
+ val le = Const (\<^const_name>\<open>less_eq\<close>, T);
val t = HOLogic.mk_Trueprop(le $ s $ r);
in case find_first (prp t) prems of
NONE =>
--- a/src/HOL/Data_Structures/List_Ins_Del.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/List_Ins_Del.thy Sat Jan 05 17:24:33 2019 +0100
@@ -69,7 +69,7 @@
lemmas ins_list_simps = sorted_lems ins_list_sorted1 ins_list_sorted2
-text\<open>Splay trees need two additional @{const ins_list} lemmas:\<close>
+text\<open>Splay trees need two additional \<^const>\<open>ins_list\<close> lemmas:\<close>
lemma ins_list_Cons: "sorted (x # xs) \<Longrightarrow> ins_list x xs = x # xs"
by (induction xs) auto
@@ -135,7 +135,7 @@
del_list_sorted4
del_list_sorted5
-text\<open>Splay trees need two additional @{const del_list} lemmas:\<close>
+text\<open>Splay trees need two additional \<^const>\<open>del_list\<close> lemmas:\<close>
lemma del_list_notin_Cons: "sorted (x # xs) \<Longrightarrow> del_list x xs = xs"
by(induction xs)(fastforce simp: sorted_Cons_iff)+
--- a/src/HOL/Data_Structures/Set2_Join.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Set2_Join.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,11 +12,11 @@
All operations are reduced to a single operation \<open>join l x r\<close> that joins two BSTs \<open>l\<close> and \<open>r\<close>
and an element \<open>x\<close> such that \<open>l < x < r\<close>.
-The theory is based on theory @{theory "HOL-Data_Structures.Tree2"} where nodes have an additional field.
+The theory is based on theory \<^theory>\<open>HOL-Data_Structures.Tree2\<close> where nodes have an additional field.
This field is ignored here but it means that this theory can be instantiated
-with red-black trees (see theory @{file "Set2_Join_RBT.thy"}) and other balanced trees.
+with red-black trees (see theory \<^file>\<open>Set2_Join_RBT.thy\<close>) and other balanced trees.
This approach is very concrete and fixes the type of trees.
-Alternatively, one could assume some abstract type @{typ 't} of trees with suitable decomposition
+Alternatively, one could assume some abstract type \<^typ>\<open>'t\<close> of trees with suitable decomposition
and recursion operators on it.\<close>
locale Set2_Join =
@@ -304,7 +304,7 @@
split!: tree.split prod.split dest: inv_Node)
qed
-text \<open>Locale @{locale Set2_Join} implements locale @{locale Set2}:\<close>
+text \<open>Locale \<^locale>\<open>Set2_Join\<close> implements locale \<^locale>\<open>Set2\<close>:\<close>
sublocale Set2
where empty = Leaf and insert = insert and delete = delete and isin = isin
--- a/src/HOL/Data_Structures/Set2_Join_RBT.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Set2_Join_RBT.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,7 +12,7 @@
text \<open>
Function \<open>joinL\<close> joins two trees (and an element).
-Precondition: @{prop "bheight l \<le> bheight r"}.
+Precondition: \<^prop>\<open>bheight l \<le> bheight r\<close>.
Method:
Descend along the left spine of \<open>r\<close>
until you find a subtree with the same \<open>bheight\<close> as \<open>l\<close>,
@@ -44,29 +44,29 @@
declare joinR.simps[simp del]
text \<open>
-One would expect @{const joinR} to be be completely dual to @{const joinL}.
-Thus the condition should be @{prop"bheight l = bheight r"}. What we have done
-is totalize the function. On the intended domain (@{prop "bheight l \<ge> bheight r"})
+One would expect \<^const>\<open>joinR\<close> to be be completely dual to \<^const>\<open>joinL\<close>.
+Thus the condition should be \<^prop>\<open>bheight l = bheight r\<close>. What we have done
+is totalize the function. On the intended domain (\<^prop>\<open>bheight l \<ge> bheight r\<close>)
the two versions behave exactly the same, including complexity. Thus from a programmer's
perspective they are equivalent. However, not from a verifier's perspective:
-the total version of @{const joinR} is easier
+the total version of \<^const>\<open>joinR\<close> is easier
to reason about because lemmas about it may not require preconditions. In particular
-@{prop"set_tree (joinR l x r) = set_tree l \<union> {x} \<union> set_tree r"}
+\<^prop>\<open>set_tree (joinR l x r) = set_tree l \<union> {x} \<union> set_tree r\<close>
is provable outright and hence also
-@{prop"set_tree (join l x r) = set_tree l \<union> {x} \<union> set_tree r"}.
-This is necessary because locale @{locale Set2_Join} unconditionally assumes
+\<^prop>\<open>set_tree (join l x r) = set_tree l \<union> {x} \<union> set_tree r\<close>.
+This is necessary because locale \<^locale>\<open>Set2_Join\<close> unconditionally assumes
exactly that. Adding preconditions to this assumptions significantly complicates
-the proofs within @{locale Set2_Join}, which we want to avoid.
+the proofs within \<^locale>\<open>Set2_Join\<close>, which we want to avoid.
-Why not work with the partial version of @{const joinR} and add the precondition
-@{prop "bheight l \<ge> bheight r"} to lemmas about @{const joinR}? After all, that is how
-we worked with @{const joinL}, and @{const join} ensures that @{const joinL} and @{const joinR}
-are only called under the respective precondition. But function @{const bheight}
-makes the difference: it descends along the left spine, just like @{const joinL}.
-Function @{const joinR}, however, descends along the right spine and thus @{const bheight}
-may change all the time. Thus we would need the further precondition @{prop "invh l"}.
+Why not work with the partial version of \<^const>\<open>joinR\<close> and add the precondition
+\<^prop>\<open>bheight l \<ge> bheight r\<close> to lemmas about \<^const>\<open>joinR\<close>? After all, that is how
+we worked with \<^const>\<open>joinL\<close>, and \<^const>\<open>join\<close> ensures that \<^const>\<open>joinL\<close> and \<^const>\<open>joinR\<close>
+are only called under the respective precondition. But function \<^const>\<open>bheight\<close>
+makes the difference: it descends along the left spine, just like \<^const>\<open>joinL\<close>.
+Function \<^const>\<open>joinR\<close>, however, descends along the right spine and thus \<^const>\<open>bheight\<close>
+may change all the time. Thus we would need the further precondition \<^prop>\<open>invh l\<close>.
This is what we really wanted to avoid in order to satisfy the unconditional assumption
-in @{locale Set2_Join}.
+in \<^locale>\<open>Set2_Join\<close>.
\<close>
subsection "Properties"
@@ -142,7 +142,7 @@
subsubsection "Inorder properties"
-text "Currently unused. Instead @{const set_tree} and @{const bst} properties are proved directly."
+text "Currently unused. Instead \<^const>\<open>set_tree\<close> and \<^const>\<open>bst\<close> properties are proved directly."
lemma inorder_joinL: "bheight l \<le> bheight r \<Longrightarrow> inorder(joinL l x r) = inorder l @ x # inorder r"
proof(induction l x r rule: joinL.induct)
@@ -231,7 +231,7 @@
by(auto simp: bst_paint bst_joinL bst_joinR)
-subsubsection "Interpretation of @{locale Set2_Join} with Red-Black Tree"
+subsubsection "Interpretation of \<^locale>\<open>Set2_Join\<close> with Red-Black Tree"
global_interpretation RBT: Set2_Join
where join = join and inv = "\<lambda>t. invc t \<and> invh t"
--- a/src/HOL/Data_Structures/Sorted_Less.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Sorted_Less.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,7 +15,7 @@
lemmas sorted_wrt_Cons = sorted_wrt.simps(2)
-text \<open>The definition of @{const sorted_wrt} relates each element to all the elements after it.
+text \<open>The definition of \<^const>\<open>sorted_wrt\<close> relates each element to all the elements after it.
This causes a blowup of the formulas. Thus we simplify matters by only comparing adjacent elements.\<close>
declare
@@ -49,7 +49,7 @@
lemmas sorted_lems = sorted_mid_iff' sorted_mid_iff2 sorted_cons' sorted_snoc'
-text\<open>Splay trees need two additional @{const sorted} lemmas:\<close>
+text\<open>Splay trees need two additional \<^const>\<open>sorted\<close> lemmas:\<close>
lemma sorted_snoc_le:
"ASSUMPTION(sorted(xs @ [x])) \<Longrightarrow> x \<le> y \<Longrightarrow> sorted (xs @ [y])"
--- a/src/HOL/Data_Structures/Sorting.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Sorting.thy Sat Jan 05 17:24:33 2019 +0100
@@ -374,7 +374,7 @@
subsection "Insertion Sort w.r.t. Keys and Stability"
-text \<open>Note that @{const insort_key} is already defined in theory @{theory HOL.List}.
+text \<open>Note that \<^const>\<open>insort_key\<close> is already defined in theory \<^theory>\<open>HOL.List\<close>.
Thus some of the lemmas are already present as well.\<close>
fun isort_key :: "('a \<Rightarrow> 'k::linorder) \<Rightarrow> 'a list \<Rightarrow> 'a list" where
--- a/src/HOL/Data_Structures/Tree234_Set.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Tree234_Set.thy Sat Jan 05 17:24:33 2019 +0100
@@ -285,7 +285,7 @@
subsubsection "Proofs for insert"
-text\<open>First a standard proof that @{const ins} preserves @{const bal}.\<close>
+text\<open>First a standard proof that \<^const>\<open>ins\<close> preserves \<^const>\<open>bal\<close>.\<close>
instantiation up\<^sub>i :: (type)height
begin
@@ -350,11 +350,9 @@
lemma bal_iff_full: "bal t \<longleftrightarrow> (\<exists>n. full n t)"
by (auto elim!: bal_imp_full full_imp_bal)
-text \<open>The @{const "insert"} function either preserves the height of the
-tree, or increases it by one. The constructor returned by the @{term
-"insert"} function determines which: A return value of the form @{term
-"T\<^sub>i t"} indicates that the height will be the same. A value of the
-form @{term "Up\<^sub>i l p r"} indicates an increase in height.\<close>
+text \<open>The \<^const>\<open>insert\<close> function either preserves the height of the
+tree, or increases it by one. The constructor returned by the \<^term>\<open>insert\<close> function determines which: A return value of the form \<^term>\<open>T\<^sub>i t\<close> indicates that the height will be the same. A value of the
+form \<^term>\<open>Up\<^sub>i l p r\<close> indicates an increase in height.\<close>
primrec full\<^sub>i :: "nat \<Rightarrow> 'a up\<^sub>i \<Rightarrow> bool" where
"full\<^sub>i n (T\<^sub>i t) \<longleftrightarrow> full n t" |
@@ -363,7 +361,7 @@
lemma full\<^sub>i_ins: "full n t \<Longrightarrow> full\<^sub>i n (ins a t)"
by (induct rule: full.induct) (auto, auto split: up\<^sub>i.split)
-text \<open>The @{const insert} operation preserves balance.\<close>
+text \<open>The \<^const>\<open>insert\<close> operation preserves balance.\<close>
lemma bal_insert: "bal t \<Longrightarrow> bal (insert a t)"
unfolding bal_iff_full insert_def
--- a/src/HOL/Data_Structures/Tree23_Set.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Data_Structures/Tree23_Set.thy Sat Jan 05 17:24:33 2019 +0100
@@ -208,7 +208,7 @@
subsubsection "Proofs for insert"
-text\<open>First a standard proof that @{const ins} preserves @{const bal}.\<close>
+text\<open>First a standard proof that \<^const>\<open>ins\<close> preserves \<^const>\<open>bal\<close>.\<close>
instantiation up\<^sub>i :: (type)height
begin
@@ -266,11 +266,9 @@
lemma bal_iff_full: "bal t \<longleftrightarrow> (\<exists>n. full n t)"
by (auto elim!: bal_imp_full full_imp_bal)
-text \<open>The @{const "insert"} function either preserves the height of the
-tree, or increases it by one. The constructor returned by the @{term
-"insert"} function determines which: A return value of the form @{term
-"T\<^sub>i t"} indicates that the height will be the same. A value of the
-form @{term "Up\<^sub>i l p r"} indicates an increase in height.\<close>
+text \<open>The \<^const>\<open>insert\<close> function either preserves the height of the
+tree, or increases it by one. The constructor returned by the \<^term>\<open>insert\<close> function determines which: A return value of the form \<^term>\<open>T\<^sub>i t\<close> indicates that the height will be the same. A value of the
+form \<^term>\<open>Up\<^sub>i l p r\<close> indicates an increase in height.\<close>
fun full\<^sub>i :: "nat \<Rightarrow> 'a up\<^sub>i \<Rightarrow> bool" where
"full\<^sub>i n (T\<^sub>i t) \<longleftrightarrow> full n t" |
@@ -279,7 +277,7 @@
lemma full\<^sub>i_ins: "full n t \<Longrightarrow> full\<^sub>i n (ins a t)"
by (induct rule: full.induct) (auto split: up\<^sub>i.split)
-text \<open>The @{const insert} operation preserves balance.\<close>
+text \<open>The \<^const>\<open>insert\<close> operation preserves balance.\<close>
lemma bal_insert: "bal t \<Longrightarrow> bal (insert a t)"
unfolding bal_iff_full insert_def
--- a/src/HOL/Datatype_Examples/Compat.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Datatype_Examples/Compat.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,141 +29,141 @@
datatype 'b w = W | W' "'b w \<times> 'b list"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name w}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>w\<close>\<close>
datatype_compat w
-ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name w}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 1) \<^type_name>\<open>w\<close>\<close>
datatype ('c, 'b) s = L 'c | R 'b
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name s}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>s\<close>\<close>
datatype 'd x = X | X' "('d x list, 'd list) s"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name x}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>x\<close>\<close>
datatype_compat s
-ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name s}\<close>
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name x}\<close>
+ML \<open>get_descrs \<^theory> (1, 1, 1) \<^type_name>\<open>s\<close>\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>x\<close>\<close>
datatype_compat x
-ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name x}\<close>
+ML \<open>get_descrs \<^theory> (3, 3, 1) \<^type_name>\<open>x\<close>\<close>
thm x.induct x.rec
thm compat_x.induct compat_x.rec
datatype 'a tttre = TTTre 'a "'a tttre list list list"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tttre}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>tttre\<close>\<close>
datatype_compat tttre
-ML \<open>get_descrs @{theory} (4, 4, 1) @{type_name tttre}\<close>
+ML \<open>get_descrs \<^theory> (4, 4, 1) \<^type_name>\<open>tttre\<close>\<close>
thm tttre.induct tttre.rec
thm compat_tttre.induct compat_tttre.rec
datatype 'a ftre = FEmp | FTre "'a \<Rightarrow> 'a ftre list"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name ftre}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>ftre\<close>\<close>
datatype_compat ftre
-ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name ftre}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 1) \<^type_name>\<open>ftre\<close>\<close>
thm ftre.induct ftre.rec
thm compat_ftre.induct compat_ftre.rec
datatype 'a btre = BTre 'a "'a btre list" "'a btre list"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name btre}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>btre\<close>\<close>
datatype_compat btre
-ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name btre}\<close>
+ML \<open>get_descrs \<^theory> (3, 3, 1) \<^type_name>\<open>btre\<close>\<close>
thm btre.induct btre.rec
thm compat_btre.induct compat_btre.rec
datatype 'a foo = Foo | Foo' 'a "'a bar" and 'a bar = Bar | Bar' 'a "'a foo"
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name foo}\<close>
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name bar}\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>foo\<close>\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>bar\<close>\<close>
datatype_compat foo bar
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name foo}\<close>
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name bar}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>foo\<close>\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>bar\<close>\<close>
datatype 'a tre = Tre 'a "'a tre list"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tre}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>tre\<close>\<close>
datatype_compat tre
-ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name tre}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 1) \<^type_name>\<open>tre\<close>\<close>
thm tre.induct tre.rec
thm compat_tre.induct compat_tre.rec
datatype 'a f = F 'a and 'a g = G 'a
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name f}\<close>
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name g}\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>f\<close>\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>g\<close>\<close>
datatype h = H "h f" | H'
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name h}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>h\<close>\<close>
datatype_compat f g
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name f}\<close>
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name g}\<close>
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name h}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>f\<close>\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>g\<close>\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>h\<close>\<close>
datatype_compat h
-ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name h}\<close>
+ML \<open>get_descrs \<^theory> (3, 3, 1) \<^type_name>\<open>h\<close>\<close>
thm h.induct h.rec
thm compat_h.induct compat_h.rec
datatype myunit = MyUnity
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name myunit}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>myunit\<close>\<close>
datatype_compat myunit
-ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name myunit}\<close>
+ML \<open>get_descrs \<^theory> (1, 1, 1) \<^type_name>\<open>myunit\<close>\<close>
datatype mylist = MyNil | MyCons nat mylist
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name mylist}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>mylist\<close>\<close>
datatype_compat mylist
-ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name mylist}\<close>
+ML \<open>get_descrs \<^theory> (1, 1, 1) \<^type_name>\<open>mylist\<close>\<close>
datatype foo' = FooNil | FooCons bar' foo' and bar' = Bar
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name foo'}\<close>
-ML \<open>get_descrs @{theory} (0, 2, 2) @{type_name bar'}\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>foo'\<close>\<close>
+ML \<open>get_descrs \<^theory> (0, 2, 2) \<^type_name>\<open>bar'\<close>\<close>
datatype_compat bar' foo'
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name foo'}\<close>
-ML \<open>get_descrs @{theory} (2, 2, 2) @{type_name bar'}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>foo'\<close>\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 2) \<^type_name>\<open>bar'\<close>\<close>
datatype tree = Tree "tree foo"
-ML \<open>get_descrs @{theory} (0, 1, 1) @{type_name tree}\<close>
+ML \<open>get_descrs \<^theory> (0, 1, 1) \<^type_name>\<open>tree\<close>\<close>
datatype_compat tree
-ML \<open>get_descrs @{theory} (3, 3, 1) @{type_name tree}\<close>
+ML \<open>get_descrs \<^theory> (3, 3, 1) \<^type_name>\<open>tree\<close>\<close>
thm tree.induct tree.rec
thm compat_tree.induct compat_tree.rec
@@ -173,44 +173,44 @@
ML \<open>
val l_specs =
- [((@{binding l}, [("'a", @{sort type})], NoSyn),
- [(@{binding N}, [], NoSyn),
- (@{binding C}, [@{typ 'a}, Type (Sign.full_name @{theory} @{binding l}, [@{typ 'a}])],
+ [((\<^binding>\<open>l\<close>, [("'a", \<^sort>\<open>type\<close>)], NoSyn),
+ [(\<^binding>\<open>N\<close>, [], NoSyn),
+ (\<^binding>\<open>C\<close>, [\<^typ>\<open>'a\<close>, Type (Sign.full_name \<^theory> \<^binding>\<open>l\<close>, [\<^typ>\<open>'a\<close>])],
NoSyn)])];
Theory.setup (snd o BNF_LFP_Compat.add_datatype [] l_specs);
\<close>
-ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name l}\<close>
+ML \<open>get_descrs \<^theory> (1, 1, 1) \<^type_name>\<open>l\<close>\<close>
thm l.exhaust l.map l.induct l.rec l.size
ML \<open>
val t_specs =
- [((@{binding t}, [("'b", @{sort type})], NoSyn),
- [(@{binding T}, [@{typ 'b},
- Type (@{type_name l}, [Type (Sign.full_name @{theory} @{binding t}, [@{typ 'b}])])],
+ [((\<^binding>\<open>t\<close>, [("'b", \<^sort>\<open>type\<close>)], NoSyn),
+ [(\<^binding>\<open>T\<close>, [\<^typ>\<open>'b\<close>,
+ Type (\<^type_name>\<open>l\<close>, [Type (Sign.full_name \<^theory> \<^binding>\<open>t\<close>, [\<^typ>\<open>'b\<close>])])],
NoSyn)])];
Theory.setup (snd o BNF_LFP_Compat.add_datatype [] t_specs);
\<close>
-ML \<open>get_descrs @{theory} (2, 2, 1) @{type_name t}\<close>
+ML \<open>get_descrs \<^theory> (2, 2, 1) \<^type_name>\<open>t\<close>\<close>
thm t.exhaust t.map t.induct t.rec t.size
thm compat_t.induct compat_t.rec
ML \<open>
val ft_specs =
- [((@{binding ft}, [("'a", @{sort type})], NoSyn),
- [(@{binding FT0}, [], NoSyn),
- (@{binding FT}, [@{typ 'a} --> Type (Sign.full_name @{theory} @{binding ft}, [@{typ 'a}])],
+ [((\<^binding>\<open>ft\<close>, [("'a", \<^sort>\<open>type\<close>)], NoSyn),
+ [(\<^binding>\<open>FT0\<close>, [], NoSyn),
+ (\<^binding>\<open>FT\<close>, [\<^typ>\<open>'a\<close> --> Type (Sign.full_name \<^theory> \<^binding>\<open>ft\<close>, [\<^typ>\<open>'a\<close>])],
NoSyn)])];
Theory.setup (snd o BNF_LFP_Compat.add_datatype [] ft_specs);
\<close>
-ML \<open>get_descrs @{theory} (1, 1, 1) @{type_name ft}\<close>
+ML \<open>get_descrs \<^theory> (1, 1, 1) \<^type_name>\<open>ft\<close>\<close>
thm ft.exhaust ft.induct ft.rec ft.size
thm compat_ft.induct compat_ft.rec
--- a/src/HOL/Datatype_Examples/Stream_Processor.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Datatype_Examples/Stream_Processor.thy Sat Jan 05 17:24:33 2019 +0100
@@ -137,7 +137,7 @@
by (tactic \<open>
let
val ks = 1 upto 2;
- val ctxt = @{context};
+ val ctxt = \<^context>;
in
BNF_Tactics.unfold_thms_tac ctxt
@{thms sp\<^sub>\<mu>.rel_compp sp\<^sub>\<mu>.rel_conversep sp\<^sub>\<mu>.rel_Grp vimage2p_Grp} THEN
--- a/src/HOL/Decision_Procs/Approximation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Approximation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1453,7 +1453,7 @@
thus ?thesis unfolding f_def by auto
qed (insert assms, auto simp add: approx_tse_form_def)
-text \<open>@{term approx_form_eval} is only used for the {\tt value}-command.\<close>
+text \<open>\<^term>\<open>approx_form_eval\<close> is only used for the {\tt value}-command.\<close>
fun approx_form_eval :: "nat \<Rightarrow> form \<Rightarrow> (float * float) option list \<Rightarrow> (float * float) option list" where
"approx_form_eval prec (Bound (Var n) a b f) bs =
@@ -1477,95 +1477,95 @@
let
fun bad t = error ("Bad term: " ^ Syntax.string_of_term_global thy t);
- fun term_of_bool true = @{term True}
- | term_of_bool false = @{term False};
+ fun term_of_bool true = \<^term>\<open>True\<close>
+ | term_of_bool false = \<^term>\<open>False\<close>;
- val mk_int = HOLogic.mk_number @{typ int} o @{code integer_of_int};
- fun dest_int (@{term int_of_integer} $ j) = @{code int_of_integer} (snd (HOLogic.dest_number j))
+ val mk_int = HOLogic.mk_number \<^typ>\<open>int\<close> o @{code integer_of_int};
+ fun dest_int (\<^term>\<open>int_of_integer\<close> $ j) = @{code int_of_integer} (snd (HOLogic.dest_number j))
| dest_int i = @{code int_of_integer} (snd (HOLogic.dest_number i));
fun term_of_float (@{code Float} (k, l)) =
- @{term Float} $ mk_int k $ mk_int l;
+ \<^term>\<open>Float\<close> $ mk_int k $ mk_int l;
- fun term_of_float_float_option NONE = @{term "None :: (float \<times> float) option"}
- | term_of_float_float_option (SOME ff) = @{term "Some :: float \<times> float \<Rightarrow> _"}
+ fun term_of_float_float_option NONE = \<^term>\<open>None :: (float \<times> float) option\<close>
+ | term_of_float_float_option (SOME ff) = \<^term>\<open>Some :: float \<times> float \<Rightarrow> _\<close>
$ HOLogic.mk_prod (apply2 term_of_float ff);
val term_of_float_float_option_list =
- HOLogic.mk_list @{typ "(float \<times> float) option"} o map term_of_float_float_option;
+ HOLogic.mk_list \<^typ>\<open>(float \<times> float) option\<close> o map term_of_float_float_option;
fun nat_of_term t = @{code nat_of_integer}
(HOLogic.dest_nat t handle TERM _ => snd (HOLogic.dest_number t));
- fun float_of_term (@{term Float} $ k $ l) =
+ fun float_of_term (\<^term>\<open>Float\<close> $ k $ l) =
@{code Float} (dest_int k, dest_int l)
| float_of_term t = bad t;
- fun floatarith_of_term (@{term Add} $ a $ b) = @{code Add} (floatarith_of_term a, floatarith_of_term b)
- | floatarith_of_term (@{term Minus} $ a) = @{code Minus} (floatarith_of_term a)
- | floatarith_of_term (@{term Mult} $ a $ b) = @{code Mult} (floatarith_of_term a, floatarith_of_term b)
- | floatarith_of_term (@{term Inverse} $ a) = @{code Inverse} (floatarith_of_term a)
- | floatarith_of_term (@{term Cos} $ a) = @{code Cos} (floatarith_of_term a)
- | floatarith_of_term (@{term Arctan} $ a) = @{code Arctan} (floatarith_of_term a)
- | floatarith_of_term (@{term Abs} $ a) = @{code Abs} (floatarith_of_term a)
- | floatarith_of_term (@{term Max} $ a $ b) = @{code Max} (floatarith_of_term a, floatarith_of_term b)
- | floatarith_of_term (@{term Min} $ a $ b) = @{code Min} (floatarith_of_term a, floatarith_of_term b)
- | floatarith_of_term @{term Pi} = @{code Pi}
- | floatarith_of_term (@{term Sqrt} $ a) = @{code Sqrt} (floatarith_of_term a)
- | floatarith_of_term (@{term Exp} $ a) = @{code Exp} (floatarith_of_term a)
- | floatarith_of_term (@{term Powr} $ a $ b) = @{code Powr} (floatarith_of_term a, floatarith_of_term b)
- | floatarith_of_term (@{term Ln} $ a) = @{code Ln} (floatarith_of_term a)
- | floatarith_of_term (@{term Power} $ a $ n) =
+ fun floatarith_of_term (\<^term>\<open>Add\<close> $ a $ b) = @{code Add} (floatarith_of_term a, floatarith_of_term b)
+ | floatarith_of_term (\<^term>\<open>Minus\<close> $ a) = @{code Minus} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Mult\<close> $ a $ b) = @{code Mult} (floatarith_of_term a, floatarith_of_term b)
+ | floatarith_of_term (\<^term>\<open>Inverse\<close> $ a) = @{code Inverse} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Cos\<close> $ a) = @{code Cos} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Arctan\<close> $ a) = @{code Arctan} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Abs\<close> $ a) = @{code Abs} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Max\<close> $ a $ b) = @{code Max} (floatarith_of_term a, floatarith_of_term b)
+ | floatarith_of_term (\<^term>\<open>Min\<close> $ a $ b) = @{code Min} (floatarith_of_term a, floatarith_of_term b)
+ | floatarith_of_term \<^term>\<open>Pi\<close> = @{code Pi}
+ | floatarith_of_term (\<^term>\<open>Sqrt\<close> $ a) = @{code Sqrt} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Exp\<close> $ a) = @{code Exp} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Powr\<close> $ a $ b) = @{code Powr} (floatarith_of_term a, floatarith_of_term b)
+ | floatarith_of_term (\<^term>\<open>Ln\<close> $ a) = @{code Ln} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Power\<close> $ a $ n) =
@{code Power} (floatarith_of_term a, nat_of_term n)
- | floatarith_of_term (@{term Floor} $ a) = @{code Floor} (floatarith_of_term a)
- | floatarith_of_term (@{term Var} $ n) = @{code Var} (nat_of_term n)
- | floatarith_of_term (@{term Num} $ m) = @{code Num} (float_of_term m)
+ | floatarith_of_term (\<^term>\<open>Floor\<close> $ a) = @{code Floor} (floatarith_of_term a)
+ | floatarith_of_term (\<^term>\<open>Var\<close> $ n) = @{code Var} (nat_of_term n)
+ | floatarith_of_term (\<^term>\<open>Num\<close> $ m) = @{code Num} (float_of_term m)
| floatarith_of_term t = bad t;
- fun form_of_term (@{term Bound} $ a $ b $ c $ p) = @{code Bound}
+ fun form_of_term (\<^term>\<open>Bound\<close> $ a $ b $ c $ p) = @{code Bound}
(floatarith_of_term a, floatarith_of_term b, floatarith_of_term c, form_of_term p)
- | form_of_term (@{term Assign} $ a $ b $ p) = @{code Assign}
+ | form_of_term (\<^term>\<open>Assign\<close> $ a $ b $ p) = @{code Assign}
(floatarith_of_term a, floatarith_of_term b, form_of_term p)
- | form_of_term (@{term Less} $ a $ b) = @{code Less}
+ | form_of_term (\<^term>\<open>Less\<close> $ a $ b) = @{code Less}
(floatarith_of_term a, floatarith_of_term b)
- | form_of_term (@{term LessEqual} $ a $ b) = @{code LessEqual}
+ | form_of_term (\<^term>\<open>LessEqual\<close> $ a $ b) = @{code LessEqual}
(floatarith_of_term a, floatarith_of_term b)
- | form_of_term (@{term Conj} $ a $ b) = @{code Conj}
+ | form_of_term (\<^term>\<open>Conj\<close> $ a $ b) = @{code Conj}
(form_of_term a, form_of_term b)
- | form_of_term (@{term Disj} $ a $ b) = @{code Disj}
+ | form_of_term (\<^term>\<open>Disj\<close> $ a $ b) = @{code Disj}
(form_of_term a, form_of_term b)
- | form_of_term (@{term AtLeastAtMost} $ a $ b $ c) = @{code AtLeastAtMost}
+ | form_of_term (\<^term>\<open>AtLeastAtMost\<close> $ a $ b $ c) = @{code AtLeastAtMost}
(floatarith_of_term a, floatarith_of_term b, floatarith_of_term c)
| form_of_term t = bad t;
- fun float_float_option_of_term @{term "None :: (float \<times> float) option"} = NONE
- | float_float_option_of_term (@{term "Some :: float \<times> float \<Rightarrow> _"} $ ff) =
+ fun float_float_option_of_term \<^term>\<open>None :: (float \<times> float) option\<close> = NONE
+ | float_float_option_of_term (\<^term>\<open>Some :: float \<times> float \<Rightarrow> _\<close> $ ff) =
SOME (apply2 float_of_term (HOLogic.dest_prod ff))
- | float_float_option_of_term (@{term approx'} $ n $ a $ ffs) = @{code approx'}
+ | float_float_option_of_term (\<^term>\<open>approx'\<close> $ n $ a $ ffs) = @{code approx'}
(nat_of_term n) (floatarith_of_term a) (float_float_option_list_of_term ffs)
| float_float_option_of_term t = bad t
and float_float_option_list_of_term
- (@{term "replicate :: _ \<Rightarrow> (float \<times> float) option \<Rightarrow> _"} $ n $ @{term "None :: (float \<times> float) option"}) =
+ (\<^term>\<open>replicate :: _ \<Rightarrow> (float \<times> float) option \<Rightarrow> _\<close> $ n $ \<^term>\<open>None :: (float \<times> float) option\<close>) =
@{code replicate} (nat_of_term n) NONE
- | float_float_option_list_of_term (@{term approx_form_eval} $ n $ p $ ffs) =
+ | float_float_option_list_of_term (\<^term>\<open>approx_form_eval\<close> $ n $ p $ ffs) =
@{code approx_form_eval} (nat_of_term n) (form_of_term p) (float_float_option_list_of_term ffs)
| float_float_option_list_of_term t = map float_float_option_of_term
(HOLogic.dest_list t);
val nat_list_of_term = map nat_of_term o HOLogic.dest_list ;
- fun bool_of_term (@{term approx_form} $ n $ p $ ffs $ ms) = @{code approx_form}
+ fun bool_of_term (\<^term>\<open>approx_form\<close> $ n $ p $ ffs $ ms) = @{code approx_form}
(nat_of_term n) (form_of_term p) (float_float_option_list_of_term ffs) (nat_list_of_term ms)
- | bool_of_term (@{term approx_tse_form} $ m $ n $ q $ p) =
+ | bool_of_term (\<^term>\<open>approx_tse_form\<close> $ m $ n $ q $ p) =
@{code approx_tse_form} (nat_of_term m) (nat_of_term n) (nat_of_term q) (form_of_term p)
| bool_of_term t = bad t;
fun eval t = case fastype_of t
- of @{typ bool} =>
+ of \<^typ>\<open>bool\<close> =>
(term_of_bool o bool_of_term) t
- | @{typ "(float \<times> float) option"} =>
+ | \<^typ>\<open>(float \<times> float) option\<close> =>
(term_of_float_float_option o float_float_option_of_term) t
- | @{typ "(float \<times> float) option list"} =>
+ | \<^typ>\<open>(float \<times> float) option list\<close> =>
(term_of_float_float_option_list o float_float_option_list_of_term) t
| _ => bad t;
--- a/src/HOL/Decision_Procs/Approximation_Bounds.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Approximation_Bounds.thy Sat Jan 05 17:24:33 2019 +0100
@@ -82,8 +82,8 @@
text \<open>
-Here @{term_type "f :: nat \<Rightarrow> nat"} is the sequence defining the Taylor series, the coefficients are
-all alternating and reciprocs. We use @{term G} and @{term F} to describe the computation of @{term f}.
+Here \<^term_type>\<open>f :: nat \<Rightarrow> nat\<close> is the sequence defining the Taylor series, the coefficients are
+all alternating and reciprocals. We use \<^term>\<open>G\<close> and \<^term>\<open>F\<close> to describe the computation of \<^term>\<open>f\<close>.
\<close>
@@ -141,7 +141,7 @@
text \<open>
The horner scheme computes alternating series. To get the upper and lower bounds we need to
-guarantee to access a even or odd member. To do this we use @{term get_odd} and @{term get_even}.
+guarantee to access an even or odd member. To do this we use \<^term>\<open>get_odd\<close> and \<^term>\<open>get_even\<close>.
\<close>
definition get_odd :: "nat \<Rightarrow> nat" where
@@ -463,7 +463,7 @@
text \<open>
As first step we implement the computation of the arcus tangens series. This is only valid in the range
-@{term "{-1 :: real .. 1}"}. This is used to compute \<pi> and then the entire arcus tangens.
+\<^term>\<open>{-1 :: real .. 1}\<close>. This is used to compute \<pi> and then the entire arcus tangens.
\<close>
fun ub_arctan_horner :: "nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> float \<Rightarrow> float"
--- a/src/HOL/Decision_Procs/Commutative_Ring.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Commutative_Ring.thy Sat Jan 05 17:24:33 2019 +0100
@@ -649,21 +649,21 @@
by (induct l) (simp_all add: cring_class.in_carrier_def carrier_class)
ML \<open>
-val term_of_nat = HOLogic.mk_number @{typ nat} o @{code integer_of_nat};
+val term_of_nat = HOLogic.mk_number \<^typ>\<open>nat\<close> o @{code integer_of_nat};
-val term_of_int = HOLogic.mk_number @{typ int} o @{code integer_of_int};
+val term_of_int = HOLogic.mk_number \<^typ>\<open>int\<close> o @{code integer_of_int};
-fun term_of_pol (@{code Pc} k) = @{term Pc} $ term_of_int k
- | term_of_pol (@{code Pinj} (n, p)) = @{term Pinj} $ term_of_nat n $ term_of_pol p
- | term_of_pol (@{code PX} (p, n, q)) = @{term PX} $ term_of_pol p $ term_of_nat n $ term_of_pol q;
+fun term_of_pol (@{code Pc} k) = \<^term>\<open>Pc\<close> $ term_of_int k
+ | term_of_pol (@{code Pinj} (n, p)) = \<^term>\<open>Pinj\<close> $ term_of_nat n $ term_of_pol p
+ | term_of_pol (@{code PX} (p, n, q)) = \<^term>\<open>PX\<close> $ term_of_pol p $ term_of_nat n $ term_of_pol q;
local
-fun pol (ctxt, ct, t) = Thm.mk_binop @{cterm "Pure.eq :: pol \<Rightarrow> pol \<Rightarrow> prop"}
+fun pol (ctxt, ct, t) = Thm.mk_binop \<^cterm>\<open>Pure.eq :: pol \<Rightarrow> pol \<Rightarrow> prop\<close>
ct (Thm.cterm_of ctxt t);
val (_, raw_pol_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding pnsubstl}, pol)));
+ (Thm.add_oracle (\<^binding>\<open>pnsubstl\<close>, pol)));
fun pol_oracle ctxt ct t = raw_pol_oracle (ctxt, ct, t);
@@ -742,57 +742,57 @@
in (map tr ths1, map tr ths2, map tr ths3, map tr ths4, tr th5, tr th) end
| NONE => error "get_ring_simps: lookup failed");
-fun ring_struct (Const (@{const_name Ring.ring.add}, _) $ R $ _ $ _) = SOME R
- | ring_struct (Const (@{const_name Ring.a_minus}, _) $ R $ _ $ _) = SOME R
- | ring_struct (Const (@{const_name Group.monoid.mult}, _) $ R $ _ $ _) = SOME R
- | ring_struct (Const (@{const_name Ring.a_inv}, _) $ R $ _) = SOME R
- | ring_struct (Const (@{const_name Group.pow}, _) $ R $ _ $ _) = SOME R
- | ring_struct (Const (@{const_name Ring.ring.zero}, _) $ R) = SOME R
- | ring_struct (Const (@{const_name Group.monoid.one}, _) $ R) = SOME R
- | ring_struct (Const (@{const_name Algebra_Aux.of_integer}, _) $ R $ _) = SOME R
+fun ring_struct (Const (\<^const_name>\<open>Ring.ring.add\<close>, _) $ R $ _ $ _) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Ring.a_minus\<close>, _) $ R $ _ $ _) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Group.monoid.mult\<close>, _) $ R $ _ $ _) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Ring.a_inv\<close>, _) $ R $ _) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Group.pow\<close>, _) $ R $ _ $ _) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Ring.ring.zero\<close>, _) $ R) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Group.monoid.one\<close>, _) $ R) = SOME R
+ | ring_struct (Const (\<^const_name>\<open>Algebra_Aux.of_integer\<close>, _) $ R $ _) = SOME R
| ring_struct _ = NONE;
-fun reif_polex vs (Const (@{const_name Ring.ring.add}, _) $ _ $ a $ b) =
- @{const Add} $ reif_polex vs a $ reif_polex vs b
- | reif_polex vs (Const (@{const_name Ring.a_minus}, _) $ _ $ a $ b) =
- @{const Sub} $ reif_polex vs a $ reif_polex vs b
- | reif_polex vs (Const (@{const_name Group.monoid.mult}, _) $ _ $ a $ b) =
- @{const Mul} $ reif_polex vs a $ reif_polex vs b
- | reif_polex vs (Const (@{const_name Ring.a_inv}, _) $ _ $ a) =
- @{const Neg} $ reif_polex vs a
- | reif_polex vs (Const (@{const_name Group.pow}, _) $ _ $ a $ n) =
- @{const Pow} $ reif_polex vs a $ n
+fun reif_polex vs (Const (\<^const_name>\<open>Ring.ring.add\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>Add\<close> $ reif_polex vs a $ reif_polex vs b
+ | reif_polex vs (Const (\<^const_name>\<open>Ring.a_minus\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>Sub\<close> $ reif_polex vs a $ reif_polex vs b
+ | reif_polex vs (Const (\<^const_name>\<open>Group.monoid.mult\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>Mul\<close> $ reif_polex vs a $ reif_polex vs b
+ | reif_polex vs (Const (\<^const_name>\<open>Ring.a_inv\<close>, _) $ _ $ a) =
+ \<^const>\<open>Neg\<close> $ reif_polex vs a
+ | reif_polex vs (Const (\<^const_name>\<open>Group.pow\<close>, _) $ _ $ a $ n) =
+ \<^const>\<open>Pow\<close> $ reif_polex vs a $ n
| reif_polex vs (Free x) =
- @{const Var} $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
- | reif_polex vs (Const (@{const_name Ring.ring.zero}, _) $ _) =
- @{term "Const 0"}
- | reif_polex vs (Const (@{const_name Group.monoid.one}, _) $ _) =
- @{term "Const 1"}
- | reif_polex vs (Const (@{const_name Algebra_Aux.of_integer}, _) $ _ $ n) =
- @{const Const} $ n
+ \<^const>\<open>Var\<close> $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
+ | reif_polex vs (Const (\<^const_name>\<open>Ring.ring.zero\<close>, _) $ _) =
+ \<^term>\<open>Const 0\<close>
+ | reif_polex vs (Const (\<^const_name>\<open>Group.monoid.one\<close>, _) $ _) =
+ \<^term>\<open>Const 1\<close>
+ | reif_polex vs (Const (\<^const_name>\<open>Algebra_Aux.of_integer\<close>, _) $ _ $ n) =
+ \<^const>\<open>Const\<close> $ n
| reif_polex _ _ = error "reif_polex: bad expression";
-fun reif_polex' vs (Const (@{const_name Groups.plus}, _) $ a $ b) =
- @{const Add} $ reif_polex' vs a $ reif_polex' vs b
- | reif_polex' vs (Const (@{const_name Groups.minus}, _) $ a $ b) =
- @{const Sub} $ reif_polex' vs a $ reif_polex' vs b
- | reif_polex' vs (Const (@{const_name Groups.times}, _) $ a $ b) =
- @{const Mul} $ reif_polex' vs a $ reif_polex' vs b
- | reif_polex' vs (Const (@{const_name Groups.uminus}, _) $ a) =
- @{const Neg} $ reif_polex' vs a
- | reif_polex' vs (Const (@{const_name Power.power}, _) $ a $ n) =
- @{const Pow} $ reif_polex' vs a $ n
+fun reif_polex' vs (Const (\<^const_name>\<open>Groups.plus\<close>, _) $ a $ b) =
+ \<^const>\<open>Add\<close> $ reif_polex' vs a $ reif_polex' vs b
+ | reif_polex' vs (Const (\<^const_name>\<open>Groups.minus\<close>, _) $ a $ b) =
+ \<^const>\<open>Sub\<close> $ reif_polex' vs a $ reif_polex' vs b
+ | reif_polex' vs (Const (\<^const_name>\<open>Groups.times\<close>, _) $ a $ b) =
+ \<^const>\<open>Mul\<close> $ reif_polex' vs a $ reif_polex' vs b
+ | reif_polex' vs (Const (\<^const_name>\<open>Groups.uminus\<close>, _) $ a) =
+ \<^const>\<open>Neg\<close> $ reif_polex' vs a
+ | reif_polex' vs (Const (\<^const_name>\<open>Power.power\<close>, _) $ a $ n) =
+ \<^const>\<open>Pow\<close> $ reif_polex' vs a $ n
| reif_polex' vs (Free x) =
- @{const Var} $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
- | reif_polex' vs (Const (@{const_name numeral}, _) $ b) =
- @{const Const} $ (@{const numeral (int)} $ b)
- | reif_polex' vs (Const (@{const_name zero_class.zero}, _)) = @{term "Const 0"}
- | reif_polex' vs (Const (@{const_name one_class.one}, _)) = @{term "Const 1"}
+ \<^const>\<open>Var\<close> $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
+ | reif_polex' vs (Const (\<^const_name>\<open>numeral\<close>, _) $ b) =
+ \<^const>\<open>Const\<close> $ (@{const numeral (int)} $ b)
+ | reif_polex' vs (Const (\<^const_name>\<open>zero_class.zero\<close>, _)) = \<^term>\<open>Const 0\<close>
+ | reif_polex' vs (Const (\<^const_name>\<open>one_class.one\<close>, _)) = \<^term>\<open>Const 1\<close>
| reif_polex' vs t = error "reif_polex: bad expression";
fun head_conv (_, _, _, _, head_simp, _) ys =
(case strip_app ys of
- (@{const_name Cons}, [y, xs]) => inst [] [y, xs] head_simp);
+ (\<^const_name>\<open>Cons\<close>, [y, xs]) => inst [] [y, xs] head_simp);
fun Ipol_conv (rls as
([Ipol_simps_1, Ipol_simps_2, Ipol_simps_3,
@@ -803,17 +803,17 @@
val drop_conv_a = drop_conv a;
fun conv l p = (case strip_app p of
- (@{const_name Pc}, [c]) => (case strip_numeral c of
- (@{const_name zero_class.zero}, _) => inst [] [l] Ipol_simps_4
- | (@{const_name one_class.one}, _) => inst [] [l] Ipol_simps_5
- | (@{const_name numeral}, [m]) => inst [] [l, m] Ipol_simps_6
- | (@{const_name uminus}, [m]) => inst [] [l, m] Ipol_simps_7
+ (\<^const_name>\<open>Pc\<close>, [c]) => (case strip_numeral c of
+ (\<^const_name>\<open>zero_class.zero\<close>, _) => inst [] [l] Ipol_simps_4
+ | (\<^const_name>\<open>one_class.one\<close>, _) => inst [] [l] Ipol_simps_5
+ | (\<^const_name>\<open>numeral\<close>, [m]) => inst [] [l, m] Ipol_simps_6
+ | (\<^const_name>\<open>uminus\<close>, [m]) => inst [] [l, m] Ipol_simps_7
| _ => inst [] [l, c] Ipol_simps_1)
- | (@{const_name Pinj}, [i, P]) =>
+ | (\<^const_name>\<open>Pinj\<close>, [i, P]) =>
transitive'
(inst [] [l, i, P] Ipol_simps_2)
(cong2' conv (args2 drop_conv_a) Thm.reflexive)
- | (@{const_name PX}, [P, x, Q]) =>
+ | (\<^const_name>\<open>PX\<close>, [P, x, Q]) =>
transitive'
(inst [] [l, P, x, Q] Ipol_simps_3)
(cong2
@@ -833,32 +833,32 @@
val drop_conv_a = drop_conv a;
fun conv l r = (case strip_app r of
- (@{const_name Var}, [n]) =>
+ (\<^const_name>\<open>Var\<close>, [n]) =>
transitive'
(inst [] [l, n] Ipolex_Var)
(cong1' (head_conv rls) (args2 drop_conv_a))
- | (@{const_name Const}, [i]) => (case strip_app i of
- (@{const_name zero_class.zero}, _) => inst [] [l] Ipolex_Const_0
- | (@{const_name one_class.one}, _) => inst [] [l] Ipolex_Const_1
- | (@{const_name numeral}, [m]) => inst [] [l, m] Ipolex_Const_numeral
+ | (\<^const_name>\<open>Const\<close>, [i]) => (case strip_app i of
+ (\<^const_name>\<open>zero_class.zero\<close>, _) => inst [] [l] Ipolex_Const_0
+ | (\<^const_name>\<open>one_class.one\<close>, _) => inst [] [l] Ipolex_Const_1
+ | (\<^const_name>\<open>numeral\<close>, [m]) => inst [] [l, m] Ipolex_Const_numeral
| _ => inst [] [l, i] Ipolex_Const)
- | (@{const_name Add}, [P, Q]) =>
+ | (\<^const_name>\<open>Add\<close>, [P, Q]) =>
transitive'
(inst [] [l, P, Q] Ipolex_Add)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name Sub}, [P, Q]) =>
+ | (\<^const_name>\<open>Sub\<close>, [P, Q]) =>
transitive'
(inst [] [l, P, Q] Ipolex_Sub)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name Mul}, [P, Q]) =>
+ | (\<^const_name>\<open>Mul\<close>, [P, Q]) =>
transitive'
(inst [] [l, P, Q] Ipolex_Mul)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name Pow}, [P, n]) =>
+ | (\<^const_name>\<open>Pow\<close>, [P, n]) =>
transitive'
(inst [] [l, P, n] Ipolex_Pow)
(cong2 (args2 conv) Thm.reflexive)
- | (@{const_name Neg}, [P]) =>
+ | (\<^const_name>\<open>Neg\<close>, [P]) =>
transitive'
(inst [] [l, P] Ipolex_Neg)
(cong1 (args2 conv)))
@@ -868,9 +868,9 @@
(_, _,
[Ipolex_polex_list_Nil, Ipolex_polex_list_Cons], _, _, _)) l pps =
(case strip_app pps of
- (@{const_name Nil}, []) => inst [] [l] Ipolex_polex_list_Nil
- | (@{const_name Cons}, [p, pps']) => (case strip_app p of
- (@{const_name Pair}, [P, Q]) =>
+ (\<^const_name>\<open>Nil\<close>, []) => inst [] [l] Ipolex_polex_list_Nil
+ | (\<^const_name>\<open>Cons\<close>, [p, pps']) => (case strip_app p of
+ (\<^const_name>\<open>Pair\<close>, [P, Q]) =>
transitive'
(inst [] [l, P, Q, pps'] Ipolex_polex_list_Cons)
(cong2
@@ -892,9 +892,9 @@
val props = map fst (Facts.props (Proof_Context.facts_of ctxt)) @ maps dest_conj prems;
val ths = map (fn p as (x, _) =>
(case find_first
- ((fn Const (@{const_name Trueprop}, _) $
- (Const (@{const_name Set.member}, _) $
- Free (y, _) $ (Const (@{const_name carrier}, _) $ S)) =>
+ ((fn Const (\<^const_name>\<open>Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>Set.member\<close>, _) $
+ Free (y, _) $ (Const (\<^const_name>\<open>carrier\<close>, _) $ S)) =>
x = y andalso R aconv S
| _ => false) o Thm.prop_of) props of
SOME th => th
@@ -906,13 +906,13 @@
end;
fun mk_ring T =
- Const (@{const_name cring_class_ops},
- Type (@{type_name partial_object_ext}, [T,
- Type (@{type_name monoid_ext}, [T,
- Type (@{type_name ring_ext}, [T, @{typ unit}])])]));
+ Const (\<^const_name>\<open>cring_class_ops\<close>,
+ Type (\<^type_name>\<open>partial_object_ext\<close>, [T,
+ Type (\<^type_name>\<open>monoid_ext\<close>, [T,
+ Type (\<^type_name>\<open>ring_ext\<close>, [T, \<^typ>\<open>unit\<close>])])]));
-val iterations = @{cterm "1000::nat"};
-val Trueprop_cong = Thm.combination (Thm.reflexive @{cterm Trueprop});
+val iterations = \<^cterm>\<open>1000::nat\<close>;
+val Trueprop_cong = Thm.combination (Thm.reflexive \<^cterm>\<open>Trueprop\<close>);
fun commutative_ring_conv ctxt prems eqs ct =
let
@@ -926,7 +926,7 @@
| NONE => (mk_ring T, SOME cT, @{thm in_carrier_trivial}, reif_polex' xs));
val rls as (_, _, _, _, _, norm_subst_correct) = get_ring_simps ctxt optcT R;
val cxs = Thm.cterm_of ctxt (HOLogic.mk_list T (map Free xs));
- val ceqs = Thm.cterm_of ctxt (HOLogic.mk_list @{typ "polex * polex"}
+ val ceqs = Thm.cterm_of ctxt (HOLogic.mk_list \<^typ>\<open>polex * polex\<close>
(map (HOLogic.mk_prod o apply2 reif) eqs'));
val cp = Thm.cterm_of ctxt (reif (Thm.term_of ct));
val prem = Thm.equal_elim
@@ -960,7 +960,7 @@
local_setup \<open>
Local_Theory.declaration {syntax = false, pervasive = false}
(fn phi => Ring_Tac.Ring_Simps.map (Ring_Tac.insert_rules Ring_Tac.eq_ring_simps
- (Morphism.term phi @{term R},
+ (Morphism.term phi \<^term>\<open>R\<close>,
(Morphism.fact phi @{thms Ipol.simps [meta] Ipol_Pc [meta]},
Morphism.fact phi @{thms Ipolex.simps [meta] Ipolex_Const [meta]},
Morphism.fact phi @{thms Ipolex_polex_list.simps [meta]},
--- a/src/HOL/Decision_Procs/Conversions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Conversions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -79,8 +79,8 @@
ML \<open>
fun strip_numeral ct = (case strip_app ct of
- (@{const_name uminus}, [n]) => (case strip_app n of
- (@{const_name numeral}, [b]) => (@{const_name uminus}, [b])
+ (\<^const_name>\<open>uminus\<close>, [n]) => (case strip_app n of
+ (\<^const_name>\<open>numeral\<close>, [b]) => (\<^const_name>\<open>uminus\<close>, [b])
| _ => ("", []))
| x => x);
\<close>
@@ -90,43 +90,43 @@
ML \<open>
fun nat_conv i = (case strip_app i of
- (@{const_name zero_class.zero}, []) => @{thm nat_0 [meta]}
- | (@{const_name one_class.one}, []) => @{thm nat_one_as_int [meta, symmetric]}
- | (@{const_name numeral}, [b]) => inst [] [b] @{thm nat_numeral [meta]}
- | (@{const_name uminus}, [b]) => (case strip_app b of
- (@{const_name one_class.one}, []) => @{thm nat_minus1_eq [meta]}
- | (@{const_name numeral}, [b']) => inst [] [b'] @{thm nat_neg_numeral [meta]}));
+ (\<^const_name>\<open>zero_class.zero\<close>, []) => @{thm nat_0 [meta]}
+ | (\<^const_name>\<open>one_class.one\<close>, []) => @{thm nat_one_as_int [meta, symmetric]}
+ | (\<^const_name>\<open>numeral\<close>, [b]) => inst [] [b] @{thm nat_numeral [meta]}
+ | (\<^const_name>\<open>uminus\<close>, [b]) => (case strip_app b of
+ (\<^const_name>\<open>one_class.one\<close>, []) => @{thm nat_minus1_eq [meta]}
+ | (\<^const_name>\<open>numeral\<close>, [b']) => inst [] [b'] @{thm nat_neg_numeral [meta]}));
\<close>
ML \<open>
fun add_num_conv b b' = (case (strip_app b, strip_app b') of
- ((@{const_name Num.One}, []), (@{const_name Num.One}, [])) =>
+ ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.One\<close>, [])) =>
@{thm add_num_simps(1) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
inst [] [n] @{thm add_num_simps(2) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
transitive'
(inst [] [n] @{thm add_num_simps(3) [meta]})
(cong1 (args2 add_num_conv))
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm add_num_simps(4) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm add_num_simps(5) [meta]})
(cong1 (args2 add_num_conv))
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm add_num_simps(6) [meta]})
(cong1 (args2 add_num_conv))
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
transitive'
(inst [] [m] @{thm add_num_simps(7) [meta]})
(cong1 (args2 add_num_conv))
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm add_num_simps(8) [meta]})
(cong1 (args2 add_num_conv))
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm add_num_simps(9) [meta]})
(cong1 (cong2' add_num_conv (args2 add_num_conv) Thm.reflexive)));
@@ -134,12 +134,12 @@
ML \<open>
fun BitM_conv m = (case strip_app m of
- (@{const_name Num.One}, []) => @{thm BitM.simps(1) [meta]}
- | (@{const_name Num.Bit0}, [n]) =>
+ (\<^const_name>\<open>Num.One\<close>, []) => @{thm BitM.simps(1) [meta]}
+ | (\<^const_name>\<open>Num.Bit0\<close>, [n]) =>
transitive'
(inst [] [n] @{thm BitM.simps(2) [meta]})
(cong1 (args1 BitM_conv))
- | (@{const_name Num.Bit1}, [n]) =>
+ | (\<^const_name>\<open>Num.Bit1\<close>, [n]) =>
inst [] [n] @{thm BitM.simps(3) [meta]});
\<close>
@@ -156,9 +156,9 @@
in
fn n =>
case strip_numeral n of
- (@{const_name zero_class.zero}, []) => dbl_0_a
- | (@{const_name numeral}, [k]) => inst [] [k] dbl_numeral_a
- | (@{const_name uminus}, [k]) => inst [] [k] dbl_neg_numeral_a
+ (\<^const_name>\<open>zero_class.zero\<close>, []) => dbl_0_a
+ | (\<^const_name>\<open>numeral\<close>, [k]) => inst [] [k] dbl_numeral_a
+ | (\<^const_name>\<open>uminus\<close>, [k]) => inst [] [k] dbl_neg_numeral_a
end;
\<close>
@@ -175,9 +175,9 @@
in
fn n =>
case strip_numeral n of
- (@{const_name zero_class.zero}, []) => dbl_inc_0_a
- | (@{const_name numeral}, [k]) => inst [] [k] dbl_inc_numeral_a
- | (@{const_name uminus}, [k]) =>
+ (\<^const_name>\<open>zero_class.zero\<close>, []) => dbl_inc_0_a
+ | (\<^const_name>\<open>numeral\<close>, [k]) => inst [] [k] dbl_inc_numeral_a
+ | (\<^const_name>\<open>uminus\<close>, [k]) =>
transitive'
(inst [] [k] dbl_inc_neg_numeral_a)
(cong1 (cong1 (args1 BitM_conv)))
@@ -197,9 +197,9 @@
in
fn n =>
case strip_numeral n of
- (@{const_name zero_class.zero}, []) => dbl_dec_0_a
- | (@{const_name uminus}, [k]) => inst [] [k] dbl_dec_neg_numeral_a
- | (@{const_name numeral}, [k]) =>
+ (\<^const_name>\<open>zero_class.zero\<close>, []) => dbl_dec_0_a
+ | (\<^const_name>\<open>uminus\<close>, [k]) => inst [] [k] dbl_dec_neg_numeral_a
+ | (\<^const_name>\<open>numeral\<close>, [k]) =>
transitive'
(inst [] [k] dbl_dec_numeral_a)
(cong1 (args1 BitM_conv))
@@ -218,33 +218,33 @@
val dbl_dec_conv_a = dbl_dec_conv a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name Num.One}, []), (@{const_name Num.One}, [])) =>
+ ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.One\<close>, [])) =>
sub_One_One
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit0}, [l])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit0\<close>, [l])) =>
transitive'
(inst [] [l] sub_One_Bit0)
(cong1 (cong1 (args1 BitM_conv)))
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit1}, [l])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit1\<close>, [l])) =>
inst [] [l] sub_One_Bit1
- | ((@{const_name Num.Bit0}, [k]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [k]), (\<^const_name>\<open>Num.One\<close>, [])) =>
transitive'
(inst [] [k] sub_Bit0_One)
(cong1 (args1 BitM_conv))
- | ((@{const_name Num.Bit1}, [k]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [k]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [k] sub_Bit1_One
- | ((@{const_name Num.Bit0}, [k]), (@{const_name Num.Bit0}, [l])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [k]), (\<^const_name>\<open>Num.Bit0\<close>, [l])) =>
transitive'
(inst [] [k, l] sub_Bit0_Bit0)
(cong1' dbl_conv_a (args2 conv))
- | ((@{const_name Num.Bit0}, [k]), (@{const_name Num.Bit1}, [l])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [k]), (\<^const_name>\<open>Num.Bit1\<close>, [l])) =>
transitive'
(inst [] [k, l] sub_Bit0_Bit1)
(cong1' dbl_dec_conv_a (args2 conv))
- | ((@{const_name Num.Bit1}, [k]), (@{const_name Num.Bit0}, [l])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [k]), (\<^const_name>\<open>Num.Bit0\<close>, [l])) =>
transitive'
(inst [] [k, l] sub_Bit1_Bit0)
(cong1' dbl_inc_conv_a (args2 conv))
- | ((@{const_name Num.Bit1}, [k]), (@{const_name Num.Bit1}, [l])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [k]), (\<^const_name>\<open>Num.Bit1\<close>, [l])) =>
transitive'
(inst [] [k, l] sub_Bit1_Bit1)
(cong1' dbl_conv_a (args2 conv)))
@@ -257,13 +257,13 @@
in
fn n =>
case Thm.term_of n of
- Const (@{const_name one_class.one}, _) => numeral_1_eq_1_a
- | Const (@{const_name uminus}, _) $ Const (@{const_name one_class.one}, _) =>
+ Const (\<^const_name>\<open>one_class.one\<close>, _) => numeral_1_eq_1_a
+ | Const (\<^const_name>\<open>uminus\<close>, _) $ Const (\<^const_name>\<open>one_class.one\<close>, _) =>
Thm.combination (Thm.reflexive (Thm.dest_fun n)) numeral_1_eq_1_a
- | Const (@{const_name zero_class.zero}, _) => Thm.reflexive n
- | Const (@{const_name numeral}, _) $ _ => Thm.reflexive n
- | Const (@{const_name uminus}, _) $
- (Const (@{const_name numeral}, _) $ _) => Thm.reflexive n
+ | Const (\<^const_name>\<open>zero_class.zero\<close>, _) => Thm.reflexive n
+ | Const (\<^const_name>\<open>numeral\<close>, _) $ _ => Thm.reflexive n
+ | Const (\<^const_name>\<open>uminus\<close>, _) $
+ (Const (\<^const_name>\<open>numeral\<close>, _) $ _) => Thm.reflexive n
| _ => err "expand1" n
end;
@@ -272,10 +272,10 @@
in
fn eq =>
case Thm.term_of (Thm.rhs_of eq) of
- Const (@{const_name Num.numeral}, _) $ Const (@{const_name Num.One}, _) =>
+ Const (\<^const_name>\<open>Num.numeral\<close>, _) $ Const (\<^const_name>\<open>Num.One\<close>, _) =>
Thm.transitive eq numeral_1_eq_1_a
- | Const (@{const_name uminus}, _) $
- (Const (@{const_name Num.numeral}, _) $ Const (@{const_name Num.One}, _)) =>
+ | Const (\<^const_name>\<open>uminus\<close>, _) $
+ (Const (\<^const_name>\<open>Num.numeral\<close>, _) $ Const (\<^const_name>\<open>Num.One\<close>, _)) =>
Thm.transitive eq
(Thm.combination (Thm.reflexive (Thm.dest_fun (Thm.rhs_of eq)))
numeral_1_eq_1_a)
@@ -292,16 +292,16 @@
val expand1_a = expand1 a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), _) => inst [] [n] add_0_a
- | (_, (@{const_name zero_class.zero}, [])) => inst [] [m] add_0_right_a
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), _) => inst [] [n] add_0_a
+ | (_, (\<^const_name>\<open>zero_class.zero\<close>, [])) => inst [] [m] add_0_right_a
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
transitive'
(inst [] [m, n] numeral_plus_numeral_a)
(cong1 (args2 add_num_conv))
| _ => cong2'' (f conv) (expand1_a m) (expand1_a n))
in f conv end;
-val nat_plus_conv = plus_conv I @{ctyp nat};
+val nat_plus_conv = plus_conv I \<^ctyp>\<open>nat\<close>;
\<close>
lemma neg_numeral_plus_neg_numeral:
@@ -321,15 +321,15 @@
in
fn conv => fn m => fn n =>
case (strip_numeral m, strip_numeral n) of
- ((@{const_name Num.numeral}, [m]), (@{const_name uminus}, [n])) =>
+ ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] numeral_plus_neg_numeral_a)
(sub_conv_a m n)
- | ((@{const_name uminus}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] neg_numeral_plus_numeral_a)
(sub_conv_a n m)
- | ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
transitive'
(inst [] [m, n] neg_numeral_plus_neg_numeral_a)
(cong1 (cong1 (args2 add_num_conv)))
@@ -338,7 +338,7 @@
fun plus_conv' a = norm1_eq a oo plus_conv (plus_neg_conv a) a;
-val int_plus_conv = plus_conv' @{ctyp int};
+val int_plus_conv = plus_conv' \<^ctyp>\<open>int\<close>;
\<close>
lemma minus_one: "- 1 = - 1" by simp
@@ -354,13 +354,13 @@
in
fn n =>
case strip_app n of
- (@{const_name zero_class.zero}, []) => minus_zero_a
- | (@{const_name one_class.one}, []) => minus_one_a
- | (@{const_name Num.numeral}, [m]) => inst [] [m] minus_numeral_a
- | (@{const_name uminus}, [m]) => inst [] [m] minus_minus_a
+ (\<^const_name>\<open>zero_class.zero\<close>, []) => minus_zero_a
+ | (\<^const_name>\<open>one_class.one\<close>, []) => minus_one_a
+ | (\<^const_name>\<open>Num.numeral\<close>, [m]) => inst [] [m] minus_numeral_a
+ | (\<^const_name>\<open>uminus\<close>, [m]) => inst [] [m] minus_minus_a
end;
-val int_neg_conv = uminus_conv @{ctyp int};
+val int_neg_conv = uminus_conv \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
@@ -377,44 +377,44 @@
val norm1_eq_a = norm1_eq a;
fun conv m n = (case (strip_numeral m, strip_numeral n) of
- ((@{const_name zero_class.zero}, []), _) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), _) =>
Thm.transitive (inst [] [n] diff_0_a) (uminus_conv_a n)
- | (_, (@{const_name zero_class.zero}, [])) => inst [] [m] diff_0_right_a
- | ((@{const_name Num.numeral}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | (_, (\<^const_name>\<open>zero_class.zero\<close>, [])) => inst [] [m] diff_0_right_a
+ | ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] numeral_minus_numeral_a)
(sub_conv_a m n)
- | ((@{const_name Num.numeral}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
transitive'
(inst [] [m, n] numeral_minus_neg_numeral_a)
(cong1 (args2 add_num_conv))
- | ((@{const_name uminus}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
transitive'
(inst [] [m, n] neg_numeral_minus_numeral_a)
(cong1 (cong1 (args2 add_num_conv)))
- | ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] neg_numeral_minus_neg_numeral_a)
(sub_conv_a n m)
| _ => cong2'' conv (expand1_a m) (expand1_a n))
in norm1_eq_a oo conv end;
-val int_minus_conv = minus_conv @{ctyp int};
+val int_minus_conv = minus_conv \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
-val int_numeral = Thm.apply @{cterm "numeral :: num \<Rightarrow> int"};
+val int_numeral = Thm.apply \<^cterm>\<open>numeral :: num \<Rightarrow> int\<close>;
-val nat_minus_refl = Thm.reflexive @{cterm "minus :: nat \<Rightarrow> nat \<Rightarrow> nat"};
+val nat_minus_refl = Thm.reflexive \<^cterm>\<open>minus :: nat \<Rightarrow> nat \<Rightarrow> nat\<close>;
-val expand1_nat = expand1 @{ctyp nat};
+val expand1_nat = expand1 \<^ctyp>\<open>nat\<close>;
fun nat_minus_conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), _) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), _) =>
inst [] [n] @{thm diff_0_eq_0 [meta]}
- | (_, (@{const_name zero_class.zero}, [])) =>
+ | (_, (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] @{thm minus_nat.diff_0 [meta]}
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm diff_nat_numeral [meta]})
(cong1' nat_conv (args2 int_minus_conv))
@@ -423,23 +423,23 @@
ML \<open>
fun mult_num_conv m n = (case (strip_app m, strip_app n) of
- (_, (@{const_name Num.One}, [])) =>
+ (_, (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm mult_num_simps(1) [meta]}
- | ((@{const_name Num.One}, []), _) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), _) =>
inst [] [n] @{thm mult_num_simps(2) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm mult_num_simps(3) [meta]})
(cong1 (cong1 (args2 mult_num_conv)))
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit1}, [n'])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n'])) =>
transitive'
(inst [] [m, n'] @{thm mult_num_simps(4) [meta]})
(cong1 (args2 mult_num_conv))
- | ((@{const_name Num.Bit1}, [m']), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m']), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
transitive'
(inst [] [m', n] @{thm mult_num_simps(5) [meta]})
(cong1 (args2 mult_num_conv))
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
transitive'
(inst [] [m, n] @{thm mult_num_simps(6) [meta]})
(cong1 (cong2' add_num_conv
@@ -457,16 +457,16 @@
val norm1_eq_a = norm1_eq a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), _) => inst [] [n] mult_zero_left_a
- | (_, (@{const_name zero_class.zero}, [])) => inst [] [m] mult_zero_right_a
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), _) => inst [] [n] mult_zero_left_a
+ | (_, (\<^const_name>\<open>zero_class.zero\<close>, [])) => inst [] [m] mult_zero_right_a
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
transitive'
(inst [] [m, n] numeral_times_numeral_a)
(cong1 (args2 mult_num_conv))
| _ => cong2'' (f conv) (expand1_a m) (expand1_a n))
in norm1_eq_a oo f conv end;
-val nat_mult_conv = mult_conv I @{ctyp nat};
+val nat_mult_conv = mult_conv I \<^ctyp>\<open>nat\<close>;
\<close>
ML \<open>
@@ -478,15 +478,15 @@
in
fn conv => fn m => fn n =>
case (strip_numeral m, strip_numeral n) of
- ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
transitive'
(inst [] [m, n] neg_numeral_times_neg_numeral_a)
(cong1 (args2 mult_num_conv))
- | ((@{const_name uminus}, [m]), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
transitive'
(inst [] [m, n] neg_numeral_times_numeral_a)
(cong1 (cong1 (args2 mult_num_conv)))
- | ((@{const_name numeral}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
transitive'
(inst [] [m, n] numeral_times_neg_numeral_a)
(cong1 (cong1 (args2 mult_num_conv)))
@@ -495,30 +495,30 @@
fun mult_conv' a = mult_conv (mult_neg_conv a) a;
-val int_mult_conv = mult_conv' @{ctyp int};
+val int_mult_conv = mult_conv' \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
fun eq_num_conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name Num.One}, []), (@{const_name Num.One}, [])) =>
+ ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.One\<close>, [])) =>
@{thm eq_num_simps(1) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
inst [] [n] @{thm eq_num_simps(2) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
inst [] [n] @{thm eq_num_simps(3) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm eq_num_simps(4) [meta]}
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm eq_num_simps(5) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm eq_num_simps(6) [meta]})
(eq_num_conv m n)
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
inst [] [m, n] @{thm eq_num_simps(7) [meta]}
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
inst [] [m, n] @{thm eq_num_simps(8) [meta]}
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm eq_num_simps(9) [meta]})
(eq_num_conv m n));
@@ -536,20 +536,20 @@
val expand1_a = expand1 a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
zero_eq_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>numeral\<close>, [n])) =>
inst [] [n] zero_neq_numeral_a
- | ((@{const_name numeral}, [m]), (@{const_name zero_class.zero}, [])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] numeral_neq_zero_a
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] numeral_eq_iff_a)
(eq_num_conv m n)
| _ => cong2'' (f conv) (expand1_a m) (expand1_a n))
in f conv end;
-val nat_eq_conv = eq_conv I @{ctyp nat};
+val nat_eq_conv = eq_conv I \<^ctyp>\<open>nat\<close>;
\<close>
ML \<open>
@@ -567,15 +567,15 @@
in
fn conv => fn m => fn n =>
case (strip_numeral m, strip_numeral n) of
- ((@{const_name uminus}, [m]), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] neg_numeral_neq_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [n] zero_neq_neg_numeral_a
- | ((@{const_name Num.numeral}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [m, n] numeral_neq_neg_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
inst [] [m, n] neg_numeral_neq_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] neg_numeral_eq_iff_a)
(eq_num_conv m n)
@@ -584,54 +584,54 @@
fun eq_conv' a = eq_conv (eq_neg_conv a) a;
-val int_eq_conv = eq_conv' @{ctyp int};
+val int_eq_conv = eq_conv' \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
fun le_num_conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name Num.One}, []), _) =>
+ ((\<^const_name>\<open>Num.One\<close>, []), _) =>
inst [] [n] @{thm le_num_simps(1) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm le_num_simps(2) [meta]}
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.One}, [])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm le_num_simps(3) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm le_num_simps(4) [meta]})
(le_num_conv m n)
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm le_num_simps(5) [meta]})
(le_num_conv m n)
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm le_num_simps(6) [meta]})
(le_num_conv m n)
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm le_num_simps(7) [meta]})
(less_num_conv m n))
and less_num_conv m n = (case (strip_app m, strip_app n) of
- (_, (@{const_name Num.One}, [])) =>
+ (_, (\<^const_name>\<open>Num.One\<close>, [])) =>
inst [] [m] @{thm less_num_simps(1) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
inst [] [n] @{thm less_num_simps(2) [meta]}
- | ((@{const_name Num.One}, []), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.One\<close>, []), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
inst [] [n] @{thm less_num_simps(3) [meta]}
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm less_num_simps(4) [meta]})
(less_num_conv m n)
- | ((@{const_name Num.Bit0}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit0\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm less_num_simps(5) [meta]})
(le_num_conv m n)
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit1}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit1\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm less_num_simps(6) [meta]})
(less_num_conv m n)
- | ((@{const_name Num.Bit1}, [m]), (@{const_name Num.Bit0}, [n])) =>
+ | ((\<^const_name>\<open>Num.Bit1\<close>, [m]), (\<^const_name>\<open>Num.Bit0\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] @{thm less_num_simps(7) [meta]})
(less_num_conv m n));
@@ -649,20 +649,20 @@
val expand1_a = expand1 a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
zero_le_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>numeral\<close>, [n])) =>
inst [] [n] zero_le_numeral_a
- | ((@{const_name numeral}, [m]), (@{const_name zero_class.zero}, [])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] not_numeral_le_zero_a
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] numeral_le_iff_a)
(le_num_conv m n)
| _ => cong2'' (f conv) (expand1_a m) (expand1_a n))
in f conv end;
-val nat_le_conv = le_conv I @{ctyp nat};
+val nat_le_conv = le_conv I \<^ctyp>\<open>nat\<close>;
\<close>
ML \<open>
@@ -680,15 +680,15 @@
in
fn conv => fn m => fn n =>
case (strip_numeral m, strip_numeral n) of
- ((@{const_name uminus}, [m]), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] neg_numeral_le_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [n] not_zero_le_neg_numeral_a
- | ((@{const_name Num.numeral}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [m, n] not_numeral_le_neg_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
inst [] [m, n] neg_numeral_le_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] neg_numeral_le_iff_a)
(le_num_conv n m)
@@ -697,7 +697,7 @@
fun le_conv' a = le_conv (le_neg_conv a) a;
-val int_le_conv = le_conv' @{ctyp int};
+val int_le_conv = le_conv' \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
@@ -712,20 +712,20 @@
val expand1_a = expand1 a;
fun conv m n = (case (strip_app m, strip_app n) of
- ((@{const_name zero_class.zero}, []), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
not_zero_less_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>numeral\<close>, [n])) =>
inst [] [n] zero_less_numeral_a
- | ((@{const_name numeral}, [m]), (@{const_name zero_class.zero}, [])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] not_numeral_less_zero_a
- | ((@{const_name numeral}, [m]), (@{const_name numeral}, [n])) =>
+ | ((\<^const_name>\<open>numeral\<close>, [m]), (\<^const_name>\<open>numeral\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] numeral_less_iff_a)
(less_num_conv m n)
| _ => cong2'' (f conv) (expand1_a m) (expand1_a n))
in f conv end;
-val nat_less_conv = less_conv I @{ctyp nat};
+val nat_less_conv = less_conv I \<^ctyp>\<open>nat\<close>;
\<close>
ML \<open>
@@ -743,15 +743,15 @@
in
fn conv => fn m => fn n =>
case (strip_numeral m, strip_numeral n) of
- ((@{const_name uminus}, [m]), (@{const_name zero_class.zero}, [])) =>
+ ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>zero_class.zero\<close>, [])) =>
inst [] [m] neg_numeral_less_zero_a
- | ((@{const_name zero_class.zero}, []), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>zero_class.zero\<close>, []), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [n] not_zero_less_neg_numeral_a
- | ((@{const_name Num.numeral}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>Num.numeral\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
inst [] [m, n] not_numeral_less_neg_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name Num.numeral}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>Num.numeral\<close>, [n])) =>
inst [] [m, n] neg_numeral_less_numeral_a
- | ((@{const_name uminus}, [m]), (@{const_name uminus}, [n])) =>
+ | ((\<^const_name>\<open>uminus\<close>, [m]), (\<^const_name>\<open>uminus\<close>, [n])) =>
Thm.transitive
(inst [] [m, n] neg_numeral_less_iff_a)
(less_num_conv n m)
@@ -760,7 +760,7 @@
fun less_conv' a = less_conv (less_neg_conv a) a;
-val int_less_conv = less_conv' @{ctyp int};
+val int_less_conv = less_conv' \<^ctyp>\<open>int\<close>;
\<close>
ML \<open>
@@ -771,13 +771,13 @@
in
fn p => fn x => fn y => fn ct =>
case strip_app ct of
- (@{const_name If}, [cb, cx, cy]) =>
+ (\<^const_name>\<open>If\<close>, [cb, cx, cy]) =>
let
val p_eq = p cb
val eq = Thm.combination (Thm.reflexive (Thm.dest_fun (Thm.dest_fun2 ct))) p_eq
in
case Thm.term_of (Thm.rhs_of p_eq) of
- Const (@{const_name True}, _) =>
+ Const (\<^const_name>\<open>True\<close>, _) =>
let
val x_eq = x cx;
val cx = Thm.rhs_of x_eq;
@@ -788,7 +788,7 @@
(Thm.reflexive cy))
(inst [] [cx, cy] if_True)
end
- | Const (@{const_name False}, _) =>
+ | Const (\<^const_name>\<open>False\<close>, _) =>
let
val y_eq = y cy;
val cy = Thm.rhs_of y_eq;
@@ -812,9 +812,9 @@
val If_conv_a = If_conv (type_of_eqn drop_0_a);
fun conv n ys = (case Thm.term_of n of
- Const (@{const_name zero_class.zero}, _) => inst [] [ys] drop_0_a
+ Const (\<^const_name>\<open>zero_class.zero\<close>, _) => inst [] [ys] drop_0_a
| _ => (case strip_app ys of
- (@{const_name Cons}, [x, xs]) =>
+ (\<^const_name>\<open>Cons\<close>, [x, xs]) =>
transitive'
(inst [] [n, x, xs] drop_Cons_a)
(If_conv_a (args2 nat_eq_conv)
@@ -830,7 +830,7 @@
val If_conv_a = If_conv a;
fun conv ys n = (case strip_app ys of
- (@{const_name Cons}, [x, xs]) =>
+ (\<^const_name>\<open>Cons\<close>, [x, xs]) =>
transitive'
(inst [] [x, xs, n] nth_Cons_a)
(If_conv_a (args2 nat_eq_conv)
--- a/src/HOL/Decision_Procs/Cooper.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Cooper.thy Sat Jan 05 17:24:33 2019 +0100
@@ -2389,93 +2389,93 @@
(case AList.lookup (=) vs t of
NONE => error "Variable not found in the list!"
| SOME n => @{code Bound} (@{code nat_of_integer} n))
- | num_of_term vs @{term "0::int"} = @{code C} (@{code int_of_integer} 0)
- | num_of_term vs @{term "1::int"} = @{code C} (@{code int_of_integer} 1)
- | num_of_term vs @{term "- 1::int"} = @{code C} (@{code int_of_integer} (~ 1))
- | num_of_term vs (@{term "numeral :: _ \<Rightarrow> int"} $ t) =
+ | num_of_term vs \<^term>\<open>0::int\<close> = @{code C} (@{code int_of_integer} 0)
+ | num_of_term vs \<^term>\<open>1::int\<close> = @{code C} (@{code int_of_integer} 1)
+ | num_of_term vs \<^term>\<open>- 1::int\<close> = @{code C} (@{code int_of_integer} (~ 1))
+ | num_of_term vs (\<^term>\<open>numeral :: _ \<Rightarrow> int\<close> $ t) =
@{code C} (@{code int_of_integer} (HOLogic.dest_numeral t))
- | num_of_term vs (@{term "- numeral :: _ \<Rightarrow> int"} $ t) =
+ | num_of_term vs (\<^term>\<open>- numeral :: _ \<Rightarrow> int\<close> $ t) =
@{code C} (@{code int_of_integer} (~(HOLogic.dest_numeral t)))
| num_of_term vs (Bound i) = @{code Bound} (@{code nat_of_integer} i)
- | num_of_term vs (@{term "uminus :: int \<Rightarrow> int"} $ t') = @{code Neg} (num_of_term vs t')
- | num_of_term vs (@{term "(+) :: int \<Rightarrow> int \<Rightarrow> int"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>uminus :: int \<Rightarrow> int\<close> $ t') = @{code Neg} (num_of_term vs t')
+ | num_of_term vs (\<^term>\<open>(+) :: int \<Rightarrow> int \<Rightarrow> int\<close> $ t1 $ t2) =
@{code Add} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(-) :: int \<Rightarrow> int \<Rightarrow> int"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>(-) :: int \<Rightarrow> int \<Rightarrow> int\<close> $ t1 $ t2) =
@{code Sub} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(*) :: int \<Rightarrow> int \<Rightarrow> int"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>(*) :: int \<Rightarrow> int \<Rightarrow> int\<close> $ t1 $ t2) =
(case try HOLogic.dest_number t1 of
SOME (_, i) => @{code Mul} (@{code int_of_integer} i, num_of_term vs t2)
| NONE =>
(case try HOLogic.dest_number t2 of
SOME (_, i) => @{code Mul} (@{code int_of_integer} i, num_of_term vs t1)
| NONE => error "num_of_term: unsupported multiplication"))
- | num_of_term vs t = error ("num_of_term: unknown term " ^ Syntax.string_of_term @{context} t);
+ | num_of_term vs t = error ("num_of_term: unknown term " ^ Syntax.string_of_term \<^context> t);
-fun fm_of_term ps vs @{term True} = @{code T}
- | fm_of_term ps vs @{term False} = @{code F}
- | fm_of_term ps vs (@{term "(<) :: int \<Rightarrow> int \<Rightarrow> bool"} $ t1 $ t2) =
+fun fm_of_term ps vs \<^term>\<open>True\<close> = @{code T}
+ | fm_of_term ps vs \<^term>\<open>False\<close> = @{code F}
+ | fm_of_term ps vs (\<^term>\<open>(<) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Lt} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term ps vs (@{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Le} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term ps vs (@{term "(=) :: int \<Rightarrow> int \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>(=) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Eq} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term ps vs (@{term "(dvd) :: int \<Rightarrow> int \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>(dvd) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ t1 $ t2) =
(case try HOLogic.dest_number t1 of
SOME (_, i) => @{code Dvd} (@{code int_of_integer} i, num_of_term vs t2)
| NONE => error "num_of_term: unsupported dvd")
- | fm_of_term ps vs (@{term "(=) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>(=) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Iff} (fm_of_term ps vs t1, fm_of_term ps vs t2)
- | fm_of_term ps vs (@{term HOL.conj} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>HOL.conj\<close> $ t1 $ t2) =
@{code And} (fm_of_term ps vs t1, fm_of_term ps vs t2)
- | fm_of_term ps vs (@{term HOL.disj} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>HOL.disj\<close> $ t1 $ t2) =
@{code Or} (fm_of_term ps vs t1, fm_of_term ps vs t2)
- | fm_of_term ps vs (@{term HOL.implies} $ t1 $ t2) =
+ | fm_of_term ps vs (\<^term>\<open>HOL.implies\<close> $ t1 $ t2) =
@{code Imp} (fm_of_term ps vs t1, fm_of_term ps vs t2)
- | fm_of_term ps vs (@{term "Not"} $ t') =
+ | fm_of_term ps vs (\<^term>\<open>Not\<close> $ t') =
@{code NOT} (fm_of_term ps vs t')
- | fm_of_term ps vs (Const (@{const_name Ex}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term ps vs (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (xn, xT, p)) =
let
val (xn', p') = Syntax_Trans.variant_abs (xn, xT, p); (* FIXME !? *)
val vs' = (Free (xn', xT), 0) :: map (fn (v, n) => (v, n + 1)) vs;
in @{code E} (fm_of_term ps vs' p) end
- | fm_of_term ps vs (Const (@{const_name All}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term ps vs (Const (\<^const_name>\<open>All\<close>, _) $ Abs (xn, xT, p)) =
let
val (xn', p') = Syntax_Trans.variant_abs (xn, xT, p); (* FIXME !? *)
val vs' = (Free (xn', xT), 0) :: map (fn (v, n) => (v, n + 1)) vs;
in @{code A} (fm_of_term ps vs' p) end
- | fm_of_term ps vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term @{context} t);
+ | fm_of_term ps vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term \<^context> t);
fun term_of_num vs (@{code C} i) = HOLogic.mk_number HOLogic.intT (@{code integer_of_int} i)
| term_of_num vs (@{code Bound} n) =
let
val q = @{code integer_of_nat} n
in fst (the (find_first (fn (_, m) => q = m) vs)) end
- | term_of_num vs (@{code Neg} t') = @{term "uminus :: int \<Rightarrow> int"} $ term_of_num vs t'
- | term_of_num vs (@{code Add} (t1, t2)) = @{term "(+) :: int \<Rightarrow> int \<Rightarrow> int"} $
+ | term_of_num vs (@{code Neg} t') = \<^term>\<open>uminus :: int \<Rightarrow> int\<close> $ term_of_num vs t'
+ | term_of_num vs (@{code Add} (t1, t2)) = \<^term>\<open>(+) :: int \<Rightarrow> int \<Rightarrow> int\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Sub} (t1, t2)) = @{term "(-) :: int \<Rightarrow> int \<Rightarrow> int"} $
+ | term_of_num vs (@{code Sub} (t1, t2)) = \<^term>\<open>(-) :: int \<Rightarrow> int \<Rightarrow> int\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Mul} (i, t2)) = @{term "(*) :: int \<Rightarrow> int \<Rightarrow> int"} $
+ | term_of_num vs (@{code Mul} (i, t2)) = \<^term>\<open>(*) :: int \<Rightarrow> int \<Rightarrow> int\<close> $
term_of_num vs (@{code C} i) $ term_of_num vs t2
| term_of_num vs (@{code CN} (n, i, t)) =
term_of_num vs (@{code Add} (@{code Mul} (i, @{code Bound} n), t));
-fun term_of_fm ps vs @{code T} = @{term True}
- | term_of_fm ps vs @{code F} = @{term False}
+fun term_of_fm ps vs @{code T} = \<^term>\<open>True\<close>
+ | term_of_fm ps vs @{code F} = \<^term>\<open>False\<close>
| term_of_fm ps vs (@{code Lt} t) =
- @{term "(<) :: int \<Rightarrow> int \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::int"}
+ \<^term>\<open>(<) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::int\<close>
| term_of_fm ps vs (@{code Le} t) =
- @{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::int"}
+ \<^term>\<open>(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::int\<close>
| term_of_fm ps vs (@{code Gt} t) =
- @{term "(<) :: int \<Rightarrow> int \<Rightarrow> bool"} $ @{term "0::int"} $ term_of_num vs t
+ \<^term>\<open>(<) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ \<^term>\<open>0::int\<close> $ term_of_num vs t
| term_of_fm ps vs (@{code Ge} t) =
- @{term "(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool"} $ @{term "0::int"} $ term_of_num vs t
+ \<^term>\<open>(\<le>) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ \<^term>\<open>0::int\<close> $ term_of_num vs t
| term_of_fm ps vs (@{code Eq} t) =
- @{term "(=) :: int \<Rightarrow> int \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::int"}
+ \<^term>\<open>(=) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::int\<close>
| term_of_fm ps vs (@{code NEq} t) =
term_of_fm ps vs (@{code NOT} (@{code Eq} t))
| term_of_fm ps vs (@{code Dvd} (i, t)) =
- @{term "(dvd) :: int \<Rightarrow> int \<Rightarrow> bool"} $ term_of_num vs (@{code C} i) $ term_of_num vs t
+ \<^term>\<open>(dvd) :: int \<Rightarrow> int \<Rightarrow> bool\<close> $ term_of_num vs (@{code C} i) $ term_of_num vs t
| term_of_fm ps vs (@{code NDvd} (i, t)) =
term_of_fm ps vs (@{code NOT} (@{code Dvd} (i, t)))
| term_of_fm ps vs (@{code NOT} t') =
@@ -2487,7 +2487,7 @@
| term_of_fm ps vs (@{code Imp} (t1, t2)) =
HOLogic.imp $ term_of_fm ps vs t1 $ term_of_fm ps vs t2
| term_of_fm ps vs (@{code Iff} (t1, t2)) =
- @{term "(=) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $ term_of_fm ps vs t1 $ term_of_fm ps vs t2
+ \<^term>\<open>(=) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $ term_of_fm ps vs t1 $ term_of_fm ps vs t2
| term_of_fm ps vs (@{code Closed} n) =
let
val q = @{code integer_of_nat} n
@@ -2497,11 +2497,11 @@
fun term_bools acc t =
let
val is_op =
- member (=) [@{term HOL.conj}, @{term HOL.disj}, @{term HOL.implies},
- @{term "(=) :: bool \<Rightarrow> _"},
- @{term "(=) :: int \<Rightarrow> _"}, @{term "(<) :: int \<Rightarrow> _"},
- @{term "(\<le>) :: int \<Rightarrow> _"}, @{term "Not"}, @{term "All :: (int \<Rightarrow> _) \<Rightarrow> _"},
- @{term "Ex :: (int \<Rightarrow> _) \<Rightarrow> _"}, @{term "True"}, @{term "False"}]
+ member (=) [\<^term>\<open>HOL.conj\<close>, \<^term>\<open>HOL.disj\<close>, \<^term>\<open>HOL.implies\<close>,
+ \<^term>\<open>(=) :: bool \<Rightarrow> _\<close>,
+ \<^term>\<open>(=) :: int \<Rightarrow> _\<close>, \<^term>\<open>(<) :: int \<Rightarrow> _\<close>,
+ \<^term>\<open>(\<le>) :: int \<Rightarrow> _\<close>, \<^term>\<open>Not\<close>, \<^term>\<open>All :: (int \<Rightarrow> _) \<Rightarrow> _\<close>,
+ \<^term>\<open>Ex :: (int \<Rightarrow> _) \<Rightarrow> _\<close>, \<^term>\<open>True\<close>, \<^term>\<open>False\<close>]
fun is_ty t = not (fastype_of t = HOLogic.boolT)
in
(case t of
--- a/src/HOL/Decision_Procs/Dense_Linear_Order.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Dense_Linear_Order.thy Sat Jan 05 17:24:33 2019 +0100
@@ -716,13 +716,13 @@
fun simps phi = map (Morphism.thm phi) [@{thm "not_less"}, @{thm "not_le"}]
fun generic_whatis phi =
let
- val [lt, le] = map (Morphism.term phi) [@{term "(\<sqsubset>)"}, @{term "(\<sqsubseteq>)"}]
+ val [lt, le] = map (Morphism.term phi) [\<^term>\<open>(\<sqsubset>)\<close>, \<^term>\<open>(\<sqsubseteq>)\<close>]
fun h x t =
case Thm.term_of t of
- Const(@{const_name HOL.eq}, _)$y$z =>
+ Const(\<^const_name>\<open>HOL.eq\<close>, _)$y$z =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.Eq
else Ferrante_Rackoff_Data.Nox
- | @{term "Not"}$(Const(@{const_name HOL.eq}, _)$y$z) =>
+ | \<^term>\<open>Not\<close>$(Const(\<^const_name>\<open>HOL.eq\<close>, _)$y$z) =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.NEq
else Ferrante_Rackoff_Data.Nox
| b$y$z => if Term.could_unify (b, lt) then
@@ -901,19 +901,19 @@
fun dest_frac ct =
case Thm.term_of ct of
- Const (@{const_name Rings.divide},_) $ a $ b=>
+ Const (\<^const_name>\<open>Rings.divide\<close>,_) $ a $ b=>
Rat.make (snd (HOLogic.dest_number a), snd (HOLogic.dest_number b))
- | Const(@{const_name inverse}, _)$a => Rat.make(1, HOLogic.dest_number a |> snd)
+ | Const(\<^const_name>\<open>inverse\<close>, _)$a => Rat.make(1, HOLogic.dest_number a |> snd)
| t => Rat.of_int (snd (HOLogic.dest_number t))
fun whatis x ct = case Thm.term_of ct of
- Const(@{const_name Groups.plus}, _)$(Const(@{const_name Groups.times},_)$_$y)$_ =>
+ Const(\<^const_name>\<open>Groups.plus\<close>, _)$(Const(\<^const_name>\<open>Groups.times\<close>,_)$_$y)$_ =>
if y aconv Thm.term_of x then ("c*x+t",[(funpow 2 Thm.dest_arg1) ct, Thm.dest_arg ct])
else ("Nox",[])
-| Const(@{const_name Groups.plus}, _)$y$_ =>
+| Const(\<^const_name>\<open>Groups.plus\<close>, _)$y$_ =>
if y aconv Thm.term_of x then ("x+t",[Thm.dest_arg ct])
else ("Nox",[])
-| Const(@{const_name Groups.times}, _)$_$y =>
+| Const(\<^const_name>\<open>Groups.times\<close>, _)$_$y =>
if y aconv Thm.term_of x then ("c*x",[Thm.dest_arg1 ct])
else ("Nox",[])
| t => if t aconv Thm.term_of x then ("x",[]) else ("Nox",[]);
@@ -921,7 +921,7 @@
fun xnormalize_conv ctxt [] ct = Thm.reflexive ct
| xnormalize_conv ctxt (vs as (x::_)) ct =
case Thm.term_of ct of
- Const(@{const_name Orderings.less},_)$_$Const(@{const_name Groups.zero},_) =>
+ Const(\<^const_name>\<open>Orderings.less\<close>,_)$_$Const(\<^const_name>\<open>Groups.zero\<close>,_) =>
(case whatis x (Thm.dest_arg1 ct) of
("c*x+t",[c,t]) =>
let
@@ -930,7 +930,7 @@
val cz = Thm.dest_arg ct
val neg = cr < @0
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
(if neg then Thm.apply (Thm.apply clt c) cz
else Thm.apply (Thm.apply clt cz) c))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
@@ -953,7 +953,7 @@
val cz = Thm.dest_arg ct
val neg = cr < @0
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
(if neg then Thm.apply (Thm.apply clt c) cz
else Thm.apply (Thm.apply clt cz) c))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
@@ -964,18 +964,18 @@
| _ => Thm.reflexive ct)
-| Const(@{const_name Orderings.less_eq},_)$_$Const(@{const_name Groups.zero},_) =>
+| Const(\<^const_name>\<open>Orderings.less_eq\<close>,_)$_$Const(\<^const_name>\<open>Groups.zero\<close>,_) =>
(case whatis x (Thm.dest_arg1 ct) of
("c*x+t",[c,t]) =>
let
val T = Thm.typ_of_cterm x
val cT = Thm.ctyp_of_cterm x
val cr = dest_frac c
- val clt = Thm.cterm_of ctxt (Const (@{const_name ord_class.less}, T --> T --> @{typ bool}))
+ val clt = Thm.cterm_of ctxt (Const (\<^const_name>\<open>ord_class.less\<close>, T --> T --> \<^typ>\<open>bool\<close>))
val cz = Thm.dest_arg ct
val neg = cr < @0
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
(if neg then Thm.apply (Thm.apply clt c) cz
else Thm.apply (Thm.apply clt cz) c))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
@@ -996,11 +996,11 @@
val T = Thm.typ_of_cterm x
val cT = Thm.ctyp_of_cterm x
val cr = dest_frac c
- val clt = Thm.cterm_of ctxt (Const (@{const_name ord_class.less}, T --> T --> @{typ bool}))
+ val clt = Thm.cterm_of ctxt (Const (\<^const_name>\<open>ord_class.less\<close>, T --> T --> \<^typ>\<open>bool\<close>))
val cz = Thm.dest_arg ct
val neg = cr < @0
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
(if neg then Thm.apply (Thm.apply clt c) cz
else Thm.apply (Thm.apply clt cz) c))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
@@ -1010,7 +1010,7 @@
in rth end
| _ => Thm.reflexive ct)
-| Const(@{const_name HOL.eq},_)$_$Const(@{const_name Groups.zero},_) =>
+| Const(\<^const_name>\<open>HOL.eq\<close>,_)$_$Const(\<^const_name>\<open>Groups.zero\<close>,_) =>
(case whatis x (Thm.dest_arg1 ct) of
("c*x+t",[c,t]) =>
let
@@ -1019,8 +1019,8 @@
val ceq = Thm.dest_fun2 ct
val cz = Thm.dest_arg ct
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
- (Thm.apply @{cterm "Not"} (Thm.apply (Thm.apply ceq c) cz)))
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
+ (Thm.apply \<^cterm>\<open>Not\<close> (Thm.apply (Thm.apply ceq c) cz)))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
val th = Thm.implies_elim
(Thm.instantiate' [SOME T] (map SOME [c,x,t]) @{thm nz_prod_sum_eq}) cth
@@ -1041,8 +1041,8 @@
val ceq = Thm.dest_fun2 ct
val cz = Thm.dest_arg ct
val cthp = Simplifier.rewrite ctxt
- (Thm.apply @{cterm "Trueprop"}
- (Thm.apply @{cterm "Not"} (Thm.apply (Thm.apply ceq c) cz)))
+ (Thm.apply \<^cterm>\<open>Trueprop\<close>
+ (Thm.apply \<^cterm>\<open>Not\<close> (Thm.apply (Thm.apply ceq c) cz)))
val cth = Thm.equal_elim (Thm.symmetric cthp) TrueI
val rth = Thm.implies_elim
(Thm.instantiate' [SOME T] (map SOME [c,x]) @{thm nz_prod_eq}) cth
@@ -1053,10 +1053,10 @@
val less_iff_diff_less_0 = mk_meta_eq @{thm "less_iff_diff_less_0"}
val le_iff_diff_le_0 = mk_meta_eq @{thm "le_iff_diff_le_0"}
val eq_iff_diff_eq_0 = mk_meta_eq @{thm "eq_iff_diff_eq_0"}
- val ss = simpset_of @{context}
+ val ss = simpset_of \<^context>
in
fun field_isolate_conv phi ctxt vs ct = case Thm.term_of ct of
- Const(@{const_name Orderings.less},_)$a$b =>
+ Const(\<^const_name>\<open>Orderings.less\<close>,_)$a$b =>
let val (ca,cb) = Thm.dest_binop ct
val T = Thm.ctyp_of_cterm ca
val th = Thm.instantiate' [SOME T] [SOME ca, SOME cb] less_iff_diff_less_0
@@ -1065,7 +1065,7 @@
(Semiring_Normalizer.semiring_normalize_ord_conv (put_simpset ss ctxt) (earlier_ord vs)))) th
val rth = Thm.transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
in rth end
-| Const(@{const_name Orderings.less_eq},_)$a$b =>
+| Const(\<^const_name>\<open>Orderings.less_eq\<close>,_)$a$b =>
let val (ca,cb) = Thm.dest_binop ct
val T = Thm.ctyp_of_cterm ca
val th = Thm.instantiate' [SOME T] [SOME ca, SOME cb] le_iff_diff_le_0
@@ -1075,7 +1075,7 @@
val rth = Thm.transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
in rth end
-| Const(@{const_name HOL.eq},_)$a$b =>
+| Const(\<^const_name>\<open>HOL.eq\<close>,_)$a$b =>
let val (ca,cb) = Thm.dest_binop ct
val T = Thm.ctyp_of_cterm ca
val th = Thm.instantiate' [SOME T] [SOME ca, SOME cb] eq_iff_diff_eq_0
@@ -1084,7 +1084,7 @@
(Semiring_Normalizer.semiring_normalize_ord_conv (put_simpset ss ctxt) (earlier_ord vs)))) th
val rth = Thm.transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
in rth end
-| @{term "Not"} $(Const(@{const_name HOL.eq},_)$a$b) => Conv.arg_conv (field_isolate_conv phi ctxt vs) ct
+| \<^term>\<open>Not\<close> $(Const(\<^const_name>\<open>HOL.eq\<close>,_)$a$b) => Conv.arg_conv (field_isolate_conv phi ctxt vs) ct
| _ => Thm.reflexive ct
end;
@@ -1092,17 +1092,17 @@
let
fun h x t =
case Thm.term_of t of
- Const(@{const_name HOL.eq}, _)$y$z =>
+ Const(\<^const_name>\<open>HOL.eq\<close>, _)$y$z =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.Eq
else Ferrante_Rackoff_Data.Nox
- | @{term "Not"}$(Const(@{const_name HOL.eq}, _)$y$z) =>
+ | \<^term>\<open>Not\<close>$(Const(\<^const_name>\<open>HOL.eq\<close>, _)$y$z) =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.NEq
else Ferrante_Rackoff_Data.Nox
- | Const(@{const_name Orderings.less},_)$y$z =>
+ | Const(\<^const_name>\<open>Orderings.less\<close>,_)$y$z =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.Lt
else if Thm.term_of x aconv z then Ferrante_Rackoff_Data.Gt
else Ferrante_Rackoff_Data.Nox
- | Const (@{const_name Orderings.less_eq},_)$y$z =>
+ | Const (\<^const_name>\<open>Orderings.less_eq\<close>,_)$y$z =>
if Thm.term_of x aconv y then Ferrante_Rackoff_Data.Le
else if Thm.term_of x aconv z then Ferrante_Rackoff_Data.Ge
else Ferrante_Rackoff_Data.Nox
--- a/src/HOL/Decision_Procs/Ferrack.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Ferrack.thy Sat Jan 05 17:24:33 2019 +0100
@@ -2466,76 +2466,76 @@
val mk_Bound = @{code Bound} o @{code nat_of_integer};
fun num_of_term vs (Free vT) = mk_Bound (find_index (fn vT' => vT = vT') vs)
- | num_of_term vs @{term "real_of_int (0::int)"} = mk_C 0
- | num_of_term vs @{term "real_of_int (1::int)"} = mk_C 1
- | num_of_term vs @{term "0::real"} = mk_C 0
- | num_of_term vs @{term "1::real"} = mk_C 1
+ | num_of_term vs \<^term>\<open>real_of_int (0::int)\<close> = mk_C 0
+ | num_of_term vs \<^term>\<open>real_of_int (1::int)\<close> = mk_C 1
+ | num_of_term vs \<^term>\<open>0::real\<close> = mk_C 0
+ | num_of_term vs \<^term>\<open>1::real\<close> = mk_C 1
| num_of_term vs (Bound i) = mk_Bound i
- | num_of_term vs (@{term "uminus :: real \<Rightarrow> real"} $ t') = @{code Neg} (num_of_term vs t')
- | num_of_term vs (@{term "(+) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>uminus :: real \<Rightarrow> real\<close> $ t') = @{code Neg} (num_of_term vs t')
+ | num_of_term vs (\<^term>\<open>(+) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) =
@{code Add} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(-) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>(-) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) =
@{code Sub} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(*) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) = (case num_of_term vs t1
+ | num_of_term vs (\<^term>\<open>(*) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) = (case num_of_term vs t1
of @{code C} i => @{code Mul} (i, num_of_term vs t2)
| _ => error "num_of_term: unsupported multiplication")
- | num_of_term vs (@{term "real_of_int :: int \<Rightarrow> real"} $ t') =
+ | num_of_term vs (\<^term>\<open>real_of_int :: int \<Rightarrow> real\<close> $ t') =
(mk_C (snd (HOLogic.dest_number t'))
handle TERM _ => error ("num_of_term: unknown term"))
| num_of_term vs t' =
(mk_C (snd (HOLogic.dest_number t'))
handle TERM _ => error ("num_of_term: unknown term"));
-fun fm_of_term vs @{term True} = @{code T}
- | fm_of_term vs @{term False} = @{code F}
- | fm_of_term vs (@{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+fun fm_of_term vs \<^term>\<open>True\<close> = @{code T}
+ | fm_of_term vs \<^term>\<open>False\<close> = @{code F}
+ | fm_of_term vs (\<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Lt} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Le} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(=) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(=) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Eq} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(\<longleftrightarrow>) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(\<longleftrightarrow>) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Iff} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.conj} $ t1 $ t2) = @{code And} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.disj} $ t1 $ t2) = @{code Or} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.implies} $ t1 $ t2) = @{code Imp} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term "Not"} $ t') = @{code NOT} (fm_of_term vs t')
- | fm_of_term vs (Const (@{const_name Ex}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term vs (\<^term>\<open>HOL.conj\<close> $ t1 $ t2) = @{code And} (fm_of_term vs t1, fm_of_term vs t2)
+ | fm_of_term vs (\<^term>\<open>HOL.disj\<close> $ t1 $ t2) = @{code Or} (fm_of_term vs t1, fm_of_term vs t2)
+ | fm_of_term vs (\<^term>\<open>HOL.implies\<close> $ t1 $ t2) = @{code Imp} (fm_of_term vs t1, fm_of_term vs t2)
+ | fm_of_term vs (\<^term>\<open>Not\<close> $ t') = @{code NOT} (fm_of_term vs t')
+ | fm_of_term vs (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (xn, xT, p)) =
@{code E} (fm_of_term (("", dummyT) :: vs) p)
- | fm_of_term vs (Const (@{const_name All}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term vs (Const (\<^const_name>\<open>All\<close>, _) $ Abs (xn, xT, p)) =
@{code A} (fm_of_term (("", dummyT) :: vs) p)
- | fm_of_term vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term @{context} t);
+ | fm_of_term vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term \<^context> t);
-fun term_of_num vs (@{code C} i) = @{term "real_of_int :: int \<Rightarrow> real"} $
+fun term_of_num vs (@{code C} i) = \<^term>\<open>real_of_int :: int \<Rightarrow> real\<close> $
HOLogic.mk_number HOLogic.intT (@{code integer_of_int} i)
| term_of_num vs (@{code Bound} n) = Free (nth vs (@{code integer_of_nat} n))
- | term_of_num vs (@{code Neg} t') = @{term "uminus :: real \<Rightarrow> real"} $ term_of_num vs t'
- | term_of_num vs (@{code Add} (t1, t2)) = @{term "(+) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ | term_of_num vs (@{code Neg} t') = \<^term>\<open>uminus :: real \<Rightarrow> real\<close> $ term_of_num vs t'
+ | term_of_num vs (@{code Add} (t1, t2)) = \<^term>\<open>(+) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Sub} (t1, t2)) = @{term "(-) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ | term_of_num vs (@{code Sub} (t1, t2)) = \<^term>\<open>(-) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Mul} (i, t2)) = @{term "(*) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ | term_of_num vs (@{code Mul} (i, t2)) = \<^term>\<open>(*) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs (@{code C} i) $ term_of_num vs t2
| term_of_num vs (@{code CN} (n, i, t)) = term_of_num vs (@{code Add} (@{code Mul} (i, @{code Bound} n), t));
-fun term_of_fm vs @{code T} = @{term True}
- | term_of_fm vs @{code F} = @{term False}
- | term_of_fm vs (@{code Lt} t) = @{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $
- term_of_num vs t $ @{term "0::real"}
- | term_of_fm vs (@{code Le} t) = @{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $
- term_of_num vs t $ @{term "0::real"}
- | term_of_fm vs (@{code Gt} t) = @{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $
- @{term "0::real"} $ term_of_num vs t
- | term_of_fm vs (@{code Ge} t) = @{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $
- @{term "0::real"} $ term_of_num vs t
- | term_of_fm vs (@{code Eq} t) = @{term "(=) :: real \<Rightarrow> real \<Rightarrow> bool"} $
- term_of_num vs t $ @{term "0::real"}
+fun term_of_fm vs @{code T} = \<^term>\<open>True\<close>
+ | term_of_fm vs @{code F} = \<^term>\<open>False\<close>
+ | term_of_fm vs (@{code Lt} t) = \<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $
+ term_of_num vs t $ \<^term>\<open>0::real\<close>
+ | term_of_fm vs (@{code Le} t) = \<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $
+ term_of_num vs t $ \<^term>\<open>0::real\<close>
+ | term_of_fm vs (@{code Gt} t) = \<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $
+ \<^term>\<open>0::real\<close> $ term_of_num vs t
+ | term_of_fm vs (@{code Ge} t) = \<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $
+ \<^term>\<open>0::real\<close> $ term_of_num vs t
+ | term_of_fm vs (@{code Eq} t) = \<^term>\<open>(=) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $
+ term_of_num vs t $ \<^term>\<open>0::real\<close>
| term_of_fm vs (@{code NEq} t) = term_of_fm vs (@{code NOT} (@{code Eq} t))
| term_of_fm vs (@{code NOT} t') = HOLogic.Not $ term_of_fm vs t'
| term_of_fm vs (@{code And} (t1, t2)) = HOLogic.conj $ term_of_fm vs t1 $ term_of_fm vs t2
| term_of_fm vs (@{code Or} (t1, t2)) = HOLogic.disj $ term_of_fm vs t1 $ term_of_fm vs t2
| term_of_fm vs (@{code Imp} (t1, t2)) = HOLogic.imp $ term_of_fm vs t1 $ term_of_fm vs t2
- | term_of_fm vs (@{code Iff} (t1, t2)) = @{term "(\<longleftrightarrow>) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $
+ | term_of_fm vs (@{code Iff} (t1, t2)) = \<^term>\<open>(\<longleftrightarrow>) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $
term_of_fm vs t1 $ term_of_fm vs t2;
in fn (ctxt, t) =>
--- a/src/HOL/Decision_Procs/MIR.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/MIR.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5575,98 +5575,98 @@
fun num_of_term vs (t as Free (xn, xT)) = (case AList.lookup (=) vs t
of NONE => error "Variable not found in the list!"
| SOME n => mk_Bound n)
- | num_of_term vs @{term "of_int (0::int)"} = mk_C 0
- | num_of_term vs @{term "of_int (1::int)"} = mk_C 1
- | num_of_term vs @{term "0::real"} = mk_C 0
- | num_of_term vs @{term "1::real"} = mk_C 1
- | num_of_term vs @{term "- 1::real"} = mk_C (~ 1)
+ | num_of_term vs \<^term>\<open>of_int (0::int)\<close> = mk_C 0
+ | num_of_term vs \<^term>\<open>of_int (1::int)\<close> = mk_C 1
+ | num_of_term vs \<^term>\<open>0::real\<close> = mk_C 0
+ | num_of_term vs \<^term>\<open>1::real\<close> = mk_C 1
+ | num_of_term vs \<^term>\<open>- 1::real\<close> = mk_C (~ 1)
| num_of_term vs (Bound i) = mk_Bound i
- | num_of_term vs (@{term "uminus :: real \<Rightarrow> real"} $ t') = @{code Neg} (num_of_term vs t')
- | num_of_term vs (@{term "(+) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>uminus :: real \<Rightarrow> real\<close> $ t') = @{code Neg} (num_of_term vs t')
+ | num_of_term vs (\<^term>\<open>(+) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) =
@{code Add} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(-) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>(-) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) =
@{code Sub} (num_of_term vs t1, num_of_term vs t2)
- | num_of_term vs (@{term "(*) :: real \<Rightarrow> real \<Rightarrow> real"} $ t1 $ t2) =
+ | num_of_term vs (\<^term>\<open>(*) :: real \<Rightarrow> real \<Rightarrow> real\<close> $ t1 $ t2) =
(case (num_of_term vs t1)
of @{code C} i => @{code Mul} (i, num_of_term vs t2)
| _ => error "num_of_term: unsupported Multiplication")
- | num_of_term vs (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "numeral :: _ \<Rightarrow> int"} $ t')) =
+ | num_of_term vs (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>numeral :: _ \<Rightarrow> int\<close> $ t')) =
mk_C (HOLogic.dest_numeral t')
- | num_of_term vs (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "- numeral :: _ \<Rightarrow> int"} $ t')) =
+ | num_of_term vs (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>- numeral :: _ \<Rightarrow> int\<close> $ t')) =
mk_C (~ (HOLogic.dest_numeral t'))
- | num_of_term vs (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "floor :: real \<Rightarrow> int"} $ t')) =
+ | num_of_term vs (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>floor :: real \<Rightarrow> int\<close> $ t')) =
@{code Floor} (num_of_term vs t')
- | num_of_term vs (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "ceiling :: real \<Rightarrow> int"} $ t')) =
+ | num_of_term vs (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>ceiling :: real \<Rightarrow> int\<close> $ t')) =
@{code Neg} (@{code Floor} (@{code Neg} (num_of_term vs t')))
- | num_of_term vs (@{term "numeral :: _ \<Rightarrow> real"} $ t') =
+ | num_of_term vs (\<^term>\<open>numeral :: _ \<Rightarrow> real\<close> $ t') =
mk_C (HOLogic.dest_numeral t')
- | num_of_term vs (@{term "- numeral :: _ \<Rightarrow> real"} $ t') =
+ | num_of_term vs (\<^term>\<open>- numeral :: _ \<Rightarrow> real\<close> $ t') =
mk_C (~ (HOLogic.dest_numeral t'))
- | num_of_term vs t = error ("num_of_term: unknown term " ^ Syntax.string_of_term @{context} t);
-
-fun fm_of_term vs @{term True} = @{code T}
- | fm_of_term vs @{term False} = @{code F}
- | fm_of_term vs (@{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+ | num_of_term vs t = error ("num_of_term: unknown term " ^ Syntax.string_of_term \<^context> t);
+
+fun fm_of_term vs \<^term>\<open>True\<close> = @{code T}
+ | fm_of_term vs \<^term>\<open>False\<close> = @{code F}
+ | fm_of_term vs (\<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Lt} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Le} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(=) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(=) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Eq} (@{code Sub} (num_of_term vs t1, num_of_term vs t2))
- | fm_of_term vs (@{term "(rdvd)"} $ (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "numeral :: _ \<Rightarrow> int"} $ t1)) $ t2) =
+ | fm_of_term vs (\<^term>\<open>(rdvd)\<close> $ (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>numeral :: _ \<Rightarrow> int\<close> $ t1)) $ t2) =
mk_Dvd (HOLogic.dest_numeral t1, num_of_term vs t2)
- | fm_of_term vs (@{term "(rdvd)"} $ (@{term "of_int :: int \<Rightarrow> real"} $ (@{term "- numeral :: _ \<Rightarrow> int"} $ t1)) $ t2) =
+ | fm_of_term vs (\<^term>\<open>(rdvd)\<close> $ (\<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>- numeral :: _ \<Rightarrow> int\<close> $ t1)) $ t2) =
mk_Dvd (~ (HOLogic.dest_numeral t1), num_of_term vs t2)
- | fm_of_term vs (@{term "(=) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>(=) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $ t1 $ t2) =
@{code Iff} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.conj} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>HOL.conj\<close> $ t1 $ t2) =
@{code And} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.disj} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>HOL.disj\<close> $ t1 $ t2) =
@{code Or} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term HOL.implies} $ t1 $ t2) =
+ | fm_of_term vs (\<^term>\<open>HOL.implies\<close> $ t1 $ t2) =
@{code Imp} (fm_of_term vs t1, fm_of_term vs t2)
- | fm_of_term vs (@{term "Not"} $ t') =
+ | fm_of_term vs (\<^term>\<open>Not\<close> $ t') =
@{code NOT} (fm_of_term vs t')
- | fm_of_term vs (Const (@{const_name Ex}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term vs (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (xn, xT, p)) =
@{code E} (fm_of_term (map (fn (v, n) => (v, n + 1)) vs) p)
- | fm_of_term vs (Const (@{const_name All}, _) $ Abs (xn, xT, p)) =
+ | fm_of_term vs (Const (\<^const_name>\<open>All\<close>, _) $ Abs (xn, xT, p)) =
@{code A} (fm_of_term (map (fn (v, n) => (v, n + 1)) vs) p)
- | fm_of_term vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term @{context} t);
-
-fun term_of_num vs (@{code C} i) = @{term "of_int :: int \<Rightarrow> real"} $
+ | fm_of_term vs t = error ("fm_of_term : unknown term " ^ Syntax.string_of_term \<^context> t);
+
+fun term_of_num vs (@{code C} i) = \<^term>\<open>of_int :: int \<Rightarrow> real\<close> $
HOLogic.mk_number HOLogic.intT (@{code integer_of_int} i)
| term_of_num vs (@{code Bound} n) =
let
val m = @{code integer_of_nat} n;
in fst (the (find_first (fn (_, q) => m = q) vs)) end
| term_of_num vs (@{code Neg} (@{code Floor} (@{code Neg} t'))) =
- @{term "of_int :: int \<Rightarrow> real"} $ (@{term "ceiling :: real \<Rightarrow> int"} $ term_of_num vs t')
- | term_of_num vs (@{code Neg} t') = @{term "uminus :: real \<Rightarrow> real"} $ term_of_num vs t'
- | term_of_num vs (@{code Add} (t1, t2)) = @{term "(+) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ \<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>ceiling :: real \<Rightarrow> int\<close> $ term_of_num vs t')
+ | term_of_num vs (@{code Neg} t') = \<^term>\<open>uminus :: real \<Rightarrow> real\<close> $ term_of_num vs t'
+ | term_of_num vs (@{code Add} (t1, t2)) = \<^term>\<open>(+) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Sub} (t1, t2)) = @{term "(-) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ | term_of_num vs (@{code Sub} (t1, t2)) = \<^term>\<open>(-) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs t1 $ term_of_num vs t2
- | term_of_num vs (@{code Mul} (i, t2)) = @{term "(*) :: real \<Rightarrow> real \<Rightarrow> real"} $
+ | term_of_num vs (@{code Mul} (i, t2)) = \<^term>\<open>(*) :: real \<Rightarrow> real \<Rightarrow> real\<close> $
term_of_num vs (@{code C} i) $ term_of_num vs t2
- | term_of_num vs (@{code Floor} t) = @{term "of_int :: int \<Rightarrow> real"} $ (@{term "floor :: real \<Rightarrow> int"} $ term_of_num vs t)
+ | term_of_num vs (@{code Floor} t) = \<^term>\<open>of_int :: int \<Rightarrow> real\<close> $ (\<^term>\<open>floor :: real \<Rightarrow> int\<close> $ term_of_num vs t)
| term_of_num vs (@{code CN} (n, i, t)) = term_of_num vs (@{code Add} (@{code Mul} (i, @{code Bound} n), t))
| term_of_num vs (@{code CF} (c, t, s)) = term_of_num vs (@{code Add} (@{code Mul} (c, @{code Floor} t), s));
-fun term_of_fm vs @{code T} = @{term True}
- | term_of_fm vs @{code F} = @{term False}
+fun term_of_fm vs @{code T} = \<^term>\<open>True\<close>
+ | term_of_fm vs @{code F} = \<^term>\<open>False\<close>
| term_of_fm vs (@{code Lt} t) =
- @{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::real"}
+ \<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::real\<close>
| term_of_fm vs (@{code Le} t) =
- @{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::real"}
+ \<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::real\<close>
| term_of_fm vs (@{code Gt} t) =
- @{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ @{term "0::real"} $ term_of_num vs t
+ \<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ \<^term>\<open>0::real\<close> $ term_of_num vs t
| term_of_fm vs (@{code Ge} t) =
- @{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ @{term "0::real"} $ term_of_num vs t
+ \<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ \<^term>\<open>0::real\<close> $ term_of_num vs t
| term_of_fm vs (@{code Eq} t) =
- @{term "(=) :: real \<Rightarrow> real \<Rightarrow> bool"} $ term_of_num vs t $ @{term "0::real"}
+ \<^term>\<open>(=) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ term_of_num vs t $ \<^term>\<open>0::real\<close>
| term_of_fm vs (@{code NEq} t) =
term_of_fm vs (@{code NOT} (@{code Eq} t))
| term_of_fm vs (@{code Dvd} (i, t)) =
- @{term "(rdvd)"} $ term_of_num vs (@{code C} i) $ term_of_num vs t
+ \<^term>\<open>(rdvd)\<close> $ term_of_num vs (@{code C} i) $ term_of_num vs t
| term_of_fm vs (@{code NDvd} (i, t)) =
term_of_fm vs (@{code NOT} (@{code Dvd} (i, t)))
| term_of_fm vs (@{code NOT} t') =
@@ -5678,7 +5678,7 @@
| term_of_fm vs (@{code Imp} (t1, t2)) =
HOLogic.imp $ term_of_fm vs t1 $ term_of_fm vs t2
| term_of_fm vs (@{code Iff} (t1, t2)) =
- @{term "(=) :: bool \<Rightarrow> bool \<Rightarrow> bool"} $ term_of_fm vs t1 $ term_of_fm vs t2;
+ \<^term>\<open>(=) :: bool \<Rightarrow> bool \<Rightarrow> bool\<close> $ term_of_fm vs t1 $ term_of_fm vs t2;
in
fn (ctxt, t) =>
--- a/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Sat Jan 05 17:24:33 2019 +0100
@@ -3923,7 +3923,7 @@
let
fun binopT T = T --> T --> T;
-fun relT T = T --> T --> @{typ bool};
+fun relT T = T --> T --> \<^typ>\<open>bool\<close>;
val mk_C = @{code C} o apply2 @{code int_of_integer};
val mk_poly_Bound = @{code poly.Bound} o @{code nat_of_integer};
@@ -3934,7 +3934,7 @@
fun try_dest_num t = SOME ((snd o HOLogic.dest_number) t)
handle TERM _ => NONE;
-fun dest_nat (t as Const (@{const_name Suc}, _)) = HOLogic.dest_nat t
+fun dest_nat (t as Const (\<^const_name>\<open>Suc\<close>, _)) = HOLogic.dest_nat t
| dest_nat t = dest_num t;
fun the_index ts t =
@@ -3942,58 +3942,58 @@
val k = find_index (fn t' => t aconv t') ts;
in if k < 0 then raise General.Subscript else k end;
-fun num_of_term ps (Const (@{const_name Groups.uminus}, _) $ t) =
+fun num_of_term ps (Const (\<^const_name>\<open>Groups.uminus\<close>, _) $ t) =
@{code poly.Neg} (num_of_term ps t)
- | num_of_term ps (Const (@{const_name Groups.plus}, _) $ a $ b) =
+ | num_of_term ps (Const (\<^const_name>\<open>Groups.plus\<close>, _) $ a $ b) =
@{code poly.Add} (num_of_term ps a, num_of_term ps b)
- | num_of_term ps (Const (@{const_name Groups.minus}, _) $ a $ b) =
+ | num_of_term ps (Const (\<^const_name>\<open>Groups.minus\<close>, _) $ a $ b) =
@{code poly.Sub} (num_of_term ps a, num_of_term ps b)
- | num_of_term ps (Const (@{const_name Groups.times}, _) $ a $ b) =
+ | num_of_term ps (Const (\<^const_name>\<open>Groups.times\<close>, _) $ a $ b) =
@{code poly.Mul} (num_of_term ps a, num_of_term ps b)
- | num_of_term ps (Const (@{const_name Power.power}, _) $ a $ n) =
+ | num_of_term ps (Const (\<^const_name>\<open>Power.power\<close>, _) $ a $ n) =
@{code poly.Pw} (num_of_term ps a, @{code nat_of_integer} (dest_nat n))
- | num_of_term ps (Const (@{const_name Rings.divide}, _) $ a $ b) =
+ | num_of_term ps (Const (\<^const_name>\<open>Rings.divide\<close>, _) $ a $ b) =
mk_C (dest_num a, dest_num b)
| num_of_term ps t =
(case try_dest_num t of
SOME k => mk_C (k, 1)
| NONE => mk_poly_Bound (the_index ps t));
-fun tm_of_term fs ps (Const(@{const_name Groups.uminus}, _) $ t) =
+fun tm_of_term fs ps (Const(\<^const_name>\<open>Groups.uminus\<close>, _) $ t) =
@{code Neg} (tm_of_term fs ps t)
- | tm_of_term fs ps (Const(@{const_name Groups.plus}, _) $ a $ b) =
+ | tm_of_term fs ps (Const(\<^const_name>\<open>Groups.plus\<close>, _) $ a $ b) =
@{code Add} (tm_of_term fs ps a, tm_of_term fs ps b)
- | tm_of_term fs ps (Const(@{const_name Groups.minus}, _) $ a $ b) =
+ | tm_of_term fs ps (Const(\<^const_name>\<open>Groups.minus\<close>, _) $ a $ b) =
@{code Sub} (tm_of_term fs ps a, tm_of_term fs ps b)
- | tm_of_term fs ps (Const(@{const_name Groups.times}, _) $ a $ b) =
+ | tm_of_term fs ps (Const(\<^const_name>\<open>Groups.times\<close>, _) $ a $ b) =
@{code Mul} (num_of_term ps a, tm_of_term fs ps b)
| tm_of_term fs ps t = (@{code CP} (num_of_term ps t)
handle TERM _ => mk_Bound (the_index fs t)
| General.Subscript => mk_Bound (the_index fs t));
-fun fm_of_term fs ps @{term True} = @{code T}
- | fm_of_term fs ps @{term False} = @{code F}
- | fm_of_term fs ps (Const (@{const_name Not}, _) $ p) =
+fun fm_of_term fs ps \<^term>\<open>True\<close> = @{code T}
+ | fm_of_term fs ps \<^term>\<open>False\<close> = @{code F}
+ | fm_of_term fs ps (Const (\<^const_name>\<open>Not\<close>, _) $ p) =
@{code NOT} (fm_of_term fs ps p)
- | fm_of_term fs ps (Const (@{const_name HOL.conj}, _) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ p $ q) =
@{code And} (fm_of_term fs ps p, fm_of_term fs ps q)
- | fm_of_term fs ps (Const (@{const_name HOL.disj}, _) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>HOL.disj\<close>, _) $ p $ q) =
@{code Or} (fm_of_term fs ps p, fm_of_term fs ps q)
- | fm_of_term fs ps (Const (@{const_name HOL.implies}, _) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>HOL.implies\<close>, _) $ p $ q) =
@{code Imp} (fm_of_term fs ps p, fm_of_term fs ps q)
- | fm_of_term fs ps (@{term HOL.iff} $ p $ q) =
+ | fm_of_term fs ps (\<^term>\<open>HOL.iff\<close> $ p $ q) =
@{code Iff} (fm_of_term fs ps p, fm_of_term fs ps q)
- | fm_of_term fs ps (Const (@{const_name HOL.eq}, T) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>HOL.eq\<close>, T) $ p $ q) =
@{code Eq} (@{code Sub} (tm_of_term fs ps p, tm_of_term fs ps q))
- | fm_of_term fs ps (Const (@{const_name Orderings.less}, _) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>Orderings.less\<close>, _) $ p $ q) =
@{code Lt} (@{code Sub} (tm_of_term fs ps p, tm_of_term fs ps q))
- | fm_of_term fs ps (Const (@{const_name Orderings.less_eq}, _) $ p $ q) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>Orderings.less_eq\<close>, _) $ p $ q) =
@{code Le} (@{code Sub} (tm_of_term fs ps p, tm_of_term fs ps q))
- | fm_of_term fs ps (Const (@{const_name Ex}, _) $ Abs (abs as (_, xT, _))) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (abs as (_, xT, _))) =
let
val (xn', p') = Syntax_Trans.variant_abs abs; (* FIXME !? *)
in @{code E} (fm_of_term (Free (xn', xT) :: fs) ps p') end
- | fm_of_term fs ps (Const (@{const_name All},_) $ Abs (abs as (_, xT, _))) =
+ | fm_of_term fs ps (Const (\<^const_name>\<open>All\<close>,_) $ Abs (abs as (_, xT, _))) =
let
val (xn', p') = Syntax_Trans.variant_abs abs; (* FIXME !? *)
in @{code A} (fm_of_term (Free (xn', xT) :: fs) ps p') end
@@ -4004,22 +4004,22 @@
val (c, d) = apply2 (@{code integer_of_int}) (a, b)
in
(if d = 1 then HOLogic.mk_number T c
- else if d = 0 then Const (@{const_name Groups.zero}, T)
+ else if d = 0 then Const (\<^const_name>\<open>Groups.zero\<close>, T)
else
- Const (@{const_name Rings.divide}, binopT T) $
+ Const (\<^const_name>\<open>Rings.divide\<close>, binopT T) $
HOLogic.mk_number T c $ HOLogic.mk_number T d)
end
| term_of_num T ps (@{code poly.Bound} i) = nth ps (@{code integer_of_nat} i)
| term_of_num T ps (@{code poly.Add} (a, b)) =
- Const (@{const_name Groups.plus}, binopT T) $ term_of_num T ps a $ term_of_num T ps b
+ Const (\<^const_name>\<open>Groups.plus\<close>, binopT T) $ term_of_num T ps a $ term_of_num T ps b
| term_of_num T ps (@{code poly.Mul} (a, b)) =
- Const (@{const_name Groups.times}, binopT T) $ term_of_num T ps a $ term_of_num T ps b
+ Const (\<^const_name>\<open>Groups.times\<close>, binopT T) $ term_of_num T ps a $ term_of_num T ps b
| term_of_num T ps (@{code poly.Sub} (a, b)) =
- Const (@{const_name Groups.minus}, binopT T) $ term_of_num T ps a $ term_of_num T ps b
+ Const (\<^const_name>\<open>Groups.minus\<close>, binopT T) $ term_of_num T ps a $ term_of_num T ps b
| term_of_num T ps (@{code poly.Neg} a) =
- Const (@{const_name Groups.uminus}, T --> T) $ term_of_num T ps a
+ Const (\<^const_name>\<open>Groups.uminus\<close>, T --> T) $ term_of_num T ps a
| term_of_num T ps (@{code poly.Pw} (a, n)) =
- Const (@{const_name Power.power}, T --> @{typ nat} --> T) $
+ Const (\<^const_name>\<open>Power.power\<close>, T --> \<^typ>\<open>nat\<close> --> T) $
term_of_num T ps a $ HOLogic.mk_number HOLogic.natT (@{code integer_of_nat} n)
| term_of_num T ps (@{code poly.CN} (c, n, p)) =
term_of_num T ps (@{code poly.Add} (c, @{code poly.Mul} (@{code poly.Bound} n, p)));
@@ -4027,40 +4027,40 @@
fun term_of_tm T fs ps (@{code CP} p) = term_of_num T ps p
| term_of_tm T fs ps (@{code Bound} i) = nth fs (@{code integer_of_nat} i)
| term_of_tm T fs ps (@{code Add} (a, b)) =
- Const (@{const_name Groups.plus}, binopT T) $ term_of_tm T fs ps a $ term_of_tm T fs ps b
+ Const (\<^const_name>\<open>Groups.plus\<close>, binopT T) $ term_of_tm T fs ps a $ term_of_tm T fs ps b
| term_of_tm T fs ps (@{code Mul} (a, b)) =
- Const (@{const_name Groups.times}, binopT T) $ term_of_num T ps a $ term_of_tm T fs ps b
+ Const (\<^const_name>\<open>Groups.times\<close>, binopT T) $ term_of_num T ps a $ term_of_tm T fs ps b
| term_of_tm T fs ps (@{code Sub} (a, b)) =
- Const (@{const_name Groups.minus}, binopT T) $ term_of_tm T fs ps a $ term_of_tm T fs ps b
+ Const (\<^const_name>\<open>Groups.minus\<close>, binopT T) $ term_of_tm T fs ps a $ term_of_tm T fs ps b
| term_of_tm T fs ps (@{code Neg} a) =
- Const (@{const_name Groups.uminus}, T --> T) $ term_of_tm T fs ps a
+ Const (\<^const_name>\<open>Groups.uminus\<close>, T --> T) $ term_of_tm T fs ps a
| term_of_tm T fs ps (@{code CNP} (n, c, p)) =
term_of_tm T fs ps (@{code Add} (@{code Mul} (c, @{code Bound} n), p));
-fun term_of_fm T fs ps @{code T} = @{term True}
- | term_of_fm T fs ps @{code F} = @{term False}
- | term_of_fm T fs ps (@{code NOT} p) = @{term Not} $ term_of_fm T fs ps p
+fun term_of_fm T fs ps @{code T} = \<^term>\<open>True\<close>
+ | term_of_fm T fs ps @{code F} = \<^term>\<open>False\<close>
+ | term_of_fm T fs ps (@{code NOT} p) = \<^term>\<open>Not\<close> $ term_of_fm T fs ps p
| term_of_fm T fs ps (@{code And} (p, q)) =
- @{term HOL.conj} $ term_of_fm T fs ps p $ term_of_fm T fs ps q
+ \<^term>\<open>HOL.conj\<close> $ term_of_fm T fs ps p $ term_of_fm T fs ps q
| term_of_fm T fs ps (@{code Or} (p, q)) =
- @{term HOL.disj} $ term_of_fm T fs ps p $ term_of_fm T fs ps q
+ \<^term>\<open>HOL.disj\<close> $ term_of_fm T fs ps p $ term_of_fm T fs ps q
| term_of_fm T fs ps (@{code Imp} (p, q)) =
- @{term HOL.implies} $ term_of_fm T fs ps p $ term_of_fm T fs ps q
+ \<^term>\<open>HOL.implies\<close> $ term_of_fm T fs ps p $ term_of_fm T fs ps q
| term_of_fm T fs ps (@{code Iff} (p, q)) =
- @{term HOL.iff} $ term_of_fm T fs ps p $ term_of_fm T fs ps q
+ \<^term>\<open>HOL.iff\<close> $ term_of_fm T fs ps p $ term_of_fm T fs ps q
| term_of_fm T fs ps (@{code Lt} p) =
- Const (@{const_name Orderings.less}, relT T) $
- term_of_tm T fs ps p $ Const (@{const_name Groups.zero}, T)
+ Const (\<^const_name>\<open>Orderings.less\<close>, relT T) $
+ term_of_tm T fs ps p $ Const (\<^const_name>\<open>Groups.zero\<close>, T)
| term_of_fm T fs ps (@{code Le} p) =
- Const (@{const_name Orderings.less_eq}, relT T) $
- term_of_tm T fs ps p $ Const (@{const_name Groups.zero}, T)
+ Const (\<^const_name>\<open>Orderings.less_eq\<close>, relT T) $
+ term_of_tm T fs ps p $ Const (\<^const_name>\<open>Groups.zero\<close>, T)
| term_of_fm T fs ps (@{code Eq} p) =
- Const (@{const_name HOL.eq}, relT T) $
- term_of_tm T fs ps p $ Const (@{const_name Groups.zero}, T)
+ Const (\<^const_name>\<open>HOL.eq\<close>, relT T) $
+ term_of_tm T fs ps p $ Const (\<^const_name>\<open>Groups.zero\<close>, T)
| term_of_fm T fs ps (@{code NEq} p) =
- @{term Not} $
- (Const (@{const_name HOL.eq}, relT T) $
- term_of_tm T fs ps p $ Const (@{const_name Groups.zero}, T))
+ \<^term>\<open>Not\<close> $
+ (Const (\<^const_name>\<open>HOL.eq\<close>, relT T) $
+ term_of_tm T fs ps p $ Const (\<^const_name>\<open>Groups.zero\<close>, T))
| term_of_fm T fs ps _ = error "term_of_fm: quantifiers";
fun frpar_procedure alternative T ps fm =
--- a/src/HOL/Decision_Procs/Polynomial_List.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Polynomial_List.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,18 +19,18 @@
subsection \<open>Arithmetic Operations on Polynomials\<close>
text \<open>Addition\<close>
-primrec (in semiring_0) padd :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl "+++" 65)
+primrec (in semiring_0) padd :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl \<open>+++\<close> 65)
where
padd_Nil: "[] +++ l2 = l2"
| padd_Cons: "(h # t) +++ l2 = (if l2 = [] then h # t else (h + hd l2) # (t +++ tl l2))"
text \<open>Multiplication by a constant\<close>
-primrec (in semiring_0) cmult :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl "%*" 70) where
+primrec (in semiring_0) cmult :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl \<open>%*\<close> 70) where
cmult_Nil: "c %* [] = []"
| cmult_Cons: "c %* (h#t) = (c * h)#(c %* t)"
text \<open>Multiplication by a polynomial\<close>
-primrec (in semiring_0) pmult :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl "***" 70)
+primrec (in semiring_0) pmult :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixl \<open>***\<close> 70)
where
pmult_Nil: "[] *** l2 = []"
| pmult_Cons: "(h # t) *** l2 = (if t = [] then h %* l2 else (h %* l2) +++ (0 # (t *** l2)))"
@@ -42,7 +42,7 @@
| mulexp_Suc: "mulexp (Suc n) p q = p *** mulexp n p q"
text \<open>Exponential\<close>
-primrec (in semiring_1) pexp :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl "%^" 80)
+primrec (in semiring_1) pexp :: "'a list \<Rightarrow> nat \<Rightarrow> 'a list" (infixl \<open>%^\<close> 80)
where
pexp_0: "p %^ 0 = [1]"
| pexp_Suc: "p %^ (Suc n) = p *** (p %^ n)"
@@ -67,10 +67,10 @@
text \<open>Other definitions.\<close>
-definition (in ring_1) poly_minus :: "'a list \<Rightarrow> 'a list" ("-- _" [80] 80)
+definition (in ring_1) poly_minus :: "'a list \<Rightarrow> 'a list" (\<open>-- _\<close> [80] 80)
where "-- p = (- 1) %* p"
-definition (in semiring_0) divides :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool" (infixl "divides" 70)
+definition (in semiring_0) divides :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool" (infixl \<open>divides\<close> 70)
where "p1 divides p2 \<longleftrightarrow> (\<exists>q. poly p2 = poly(p1 *** q))"
lemma (in semiring_0) dividesI: "poly p2 = poly (p1 *** q) \<Longrightarrow> p1 divides p2"
@@ -226,7 +226,7 @@
by (induct n) (auto simp add: poly_mult mult.assoc)
-subsection \<open>Key Property: if @{term "f a = 0"} then @{term "(x - a)"} divides @{term "p(x)"}.\<close>
+subsection \<open>Key Property: if \<^term>\<open>f a = 0\<close> then \<^term>\<open>(x - a)\<close> divides \<^term>\<open>p(x)\<close>.\<close>
lemma (in comm_ring_1) lemma_poly_linear_rem: "\<exists>q r. h#t = [r] +++ [-a, 1] *** q"
proof (induct t arbitrary: h)
--- a/src/HOL/Decision_Procs/Rat_Pair.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Rat_Pair.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,10 +10,10 @@
type_synonym Num = "int \<times> int"
-abbreviation Num0_syn :: Num ("0\<^sub>N")
+abbreviation Num0_syn :: Num (\<open>0\<^sub>N\<close>)
where "0\<^sub>N \<equiv> (0, 0)"
-abbreviation Numi_syn :: "int \<Rightarrow> Num" ("'((_)')\<^sub>N")
+abbreviation Numi_syn :: "int \<Rightarrow> Num" (\<open>'((_)')\<^sub>N\<close>)
where "(i)\<^sub>N \<equiv> (i, 1)"
definition isnormNum :: "Num \<Rightarrow> bool"
@@ -78,29 +78,29 @@
text \<open>Arithmetic over Num\<close>
-definition Nadd :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl "+\<^sub>N" 60)
+definition Nadd :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl \<open>+\<^sub>N\<close> 60)
where
"Nadd = (\<lambda>(a, b) (a', b').
if a = 0 \<or> b = 0 then normNum (a', b')
else if a' = 0 \<or> b' = 0 then normNum (a, b)
else normNum (a * b' + b * a', b * b'))"
-definition Nmul :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl "*\<^sub>N" 60)
+definition Nmul :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl \<open>*\<^sub>N\<close> 60)
where
"Nmul = (\<lambda>(a, b) (a', b').
let g = gcd (a * a') (b * b')
in (a * a' div g, b * b' div g))"
-definition Nneg :: "Num \<Rightarrow> Num" ("~\<^sub>N")
+definition Nneg :: "Num \<Rightarrow> Num" (\<open>~\<^sub>N\<close>)
where "Nneg = (\<lambda>(a, b). (- a, b))"
-definition Nsub :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl "-\<^sub>N" 60)
+definition Nsub :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl \<open>-\<^sub>N\<close> 60)
where "Nsub = (\<lambda>a b. a +\<^sub>N ~\<^sub>N b)"
definition Ninv :: "Num \<Rightarrow> Num"
where "Ninv = (\<lambda>(a, b). if a < 0 then (- b, \<bar>a\<bar>) else (b, a))"
-definition Ndiv :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl "\<div>\<^sub>N" 60)
+definition Ndiv :: "Num \<Rightarrow> Num \<Rightarrow> Num" (infixl \<open>\<div>\<^sub>N\<close> 60)
where "Ndiv = (\<lambda>a b. a *\<^sub>N Ninv b)"
lemma Nneg_normN[simp]: "isnormNum x \<Longrightarrow> isnormNum (~\<^sub>N x)"
@@ -151,22 +151,22 @@
text \<open>Relations over Num\<close>
-definition Nlt0:: "Num \<Rightarrow> bool" ("0>\<^sub>N")
+definition Nlt0:: "Num \<Rightarrow> bool" (\<open>0>\<^sub>N\<close>)
where "Nlt0 = (\<lambda>(a, b). a < 0)"
-definition Nle0:: "Num \<Rightarrow> bool" ("0\<ge>\<^sub>N")
+definition Nle0:: "Num \<Rightarrow> bool" (\<open>0\<ge>\<^sub>N\<close>)
where "Nle0 = (\<lambda>(a, b). a \<le> 0)"
-definition Ngt0:: "Num \<Rightarrow> bool" ("0<\<^sub>N")
+definition Ngt0:: "Num \<Rightarrow> bool" (\<open>0<\<^sub>N\<close>)
where "Ngt0 = (\<lambda>(a, b). a > 0)"
-definition Nge0:: "Num \<Rightarrow> bool" ("0\<le>\<^sub>N")
+definition Nge0:: "Num \<Rightarrow> bool" (\<open>0\<le>\<^sub>N\<close>)
where "Nge0 = (\<lambda>(a, b). a \<ge> 0)"
-definition Nlt :: "Num \<Rightarrow> Num \<Rightarrow> bool" (infix "<\<^sub>N" 55)
+definition Nlt :: "Num \<Rightarrow> Num \<Rightarrow> bool" (infix \<open><\<^sub>N\<close> 55)
where "Nlt = (\<lambda>a b. 0>\<^sub>N (a -\<^sub>N b))"
-definition Nle :: "Num \<Rightarrow> Num \<Rightarrow> bool" (infix "\<le>\<^sub>N" 55)
+definition Nle :: "Num \<Rightarrow> Num \<Rightarrow> bool" (infix \<open>\<le>\<^sub>N\<close> 55)
where "Nle = (\<lambda>a b. 0\<ge>\<^sub>N (a -\<^sub>N b))"
definition "INum = (\<lambda>(a, b). of_int a / of_int b)"
--- a/src/HOL/Decision_Procs/Reflective_Field.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/Reflective_Field.thy Sat Jan 05 17:24:33 2019 +0100
@@ -619,31 +619,31 @@
qed
ML \<open>
-val term_of_nat = HOLogic.mk_number @{typ nat} o @{code integer_of_nat};
+val term_of_nat = HOLogic.mk_number \<^typ>\<open>nat\<close> o @{code integer_of_nat};
-val term_of_int = HOLogic.mk_number @{typ int} o @{code integer_of_int};
+val term_of_int = HOLogic.mk_number \<^typ>\<open>int\<close> o @{code integer_of_int};
-fun term_of_pexpr (@{code PExpr1} x) = @{term PExpr1} $ term_of_pexpr1 x
- | term_of_pexpr (@{code PExpr2} x) = @{term PExpr2} $ term_of_pexpr2 x
-and term_of_pexpr1 (@{code PCnst} k) = @{term PCnst} $ term_of_int k
- | term_of_pexpr1 (@{code PVar} n) = @{term PVar} $ term_of_nat n
- | term_of_pexpr1 (@{code PAdd} (x, y)) = @{term PAdd} $ term_of_pexpr x $ term_of_pexpr y
- | term_of_pexpr1 (@{code PSub} (x, y)) = @{term PSub} $ term_of_pexpr x $ term_of_pexpr y
- | term_of_pexpr1 (@{code PNeg} x) = @{term PNeg} $ term_of_pexpr x
-and term_of_pexpr2 (@{code PMul} (x, y)) = @{term PMul} $ term_of_pexpr x $ term_of_pexpr y
- | term_of_pexpr2 (@{code PPow} (x, n)) = @{term PPow} $ term_of_pexpr x $ term_of_nat n
+fun term_of_pexpr (@{code PExpr1} x) = \<^term>\<open>PExpr1\<close> $ term_of_pexpr1 x
+ | term_of_pexpr (@{code PExpr2} x) = \<^term>\<open>PExpr2\<close> $ term_of_pexpr2 x
+and term_of_pexpr1 (@{code PCnst} k) = \<^term>\<open>PCnst\<close> $ term_of_int k
+ | term_of_pexpr1 (@{code PVar} n) = \<^term>\<open>PVar\<close> $ term_of_nat n
+ | term_of_pexpr1 (@{code PAdd} (x, y)) = \<^term>\<open>PAdd\<close> $ term_of_pexpr x $ term_of_pexpr y
+ | term_of_pexpr1 (@{code PSub} (x, y)) = \<^term>\<open>PSub\<close> $ term_of_pexpr x $ term_of_pexpr y
+ | term_of_pexpr1 (@{code PNeg} x) = \<^term>\<open>PNeg\<close> $ term_of_pexpr x
+and term_of_pexpr2 (@{code PMul} (x, y)) = \<^term>\<open>PMul\<close> $ term_of_pexpr x $ term_of_pexpr y
+ | term_of_pexpr2 (@{code PPow} (x, n)) = \<^term>\<open>PPow\<close> $ term_of_pexpr x $ term_of_nat n
fun term_of_result (x, (y, zs)) =
HOLogic.mk_prod (term_of_pexpr x, HOLogic.mk_prod
- (term_of_pexpr y, HOLogic.mk_list @{typ pexpr} (map term_of_pexpr zs)));
+ (term_of_pexpr y, HOLogic.mk_list \<^typ>\<open>pexpr\<close> (map term_of_pexpr zs)));
local
-fun fnorm (ctxt, ct, t) = Thm.mk_binop @{cterm "Pure.eq :: pexpr \<times> pexpr \<times> pexpr list \<Rightarrow> pexpr \<times> pexpr \<times> pexpr list \<Rightarrow> prop"}
+fun fnorm (ctxt, ct, t) = Thm.mk_binop \<^cterm>\<open>Pure.eq :: pexpr \<times> pexpr \<times> pexpr list \<Rightarrow> pexpr \<times> pexpr \<times> pexpr list \<Rightarrow> prop\<close>
ct (Thm.cterm_of ctxt t);
val (_, raw_fnorm_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding fnorm}, fnorm)));
+ (Thm.add_oracle (\<^binding>\<open>fnorm\<close>, fnorm)));
fun fnorm_oracle ctxt ct t = raw_fnorm_oracle (ctxt, ct, t);
@@ -680,59 +680,59 @@
open Ring_Tac;
-fun field_struct (Const (@{const_name Ring.ring.add}, _) $ R $ _ $ _) = SOME R
- | field_struct (Const (@{const_name Ring.a_minus}, _) $ R $ _ $ _) = SOME R
- | field_struct (Const (@{const_name Group.monoid.mult}, _) $ R $ _ $ _) = SOME R
- | field_struct (Const (@{const_name Ring.a_inv}, _) $ R $ _) = SOME R
- | field_struct (Const (@{const_name Group.pow}, _) $ R $ _ $ _) = SOME R
- | field_struct (Const (@{const_name Algebra_Aux.m_div}, _) $ R $ _ $ _) = SOME R
- | field_struct (Const (@{const_name Ring.ring.zero}, _) $ R) = SOME R
- | field_struct (Const (@{const_name Group.monoid.one}, _) $ R) = SOME R
- | field_struct (Const (@{const_name Algebra_Aux.of_integer}, _) $ R $ _) = SOME R
+fun field_struct (Const (\<^const_name>\<open>Ring.ring.add\<close>, _) $ R $ _ $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Ring.a_minus\<close>, _) $ R $ _ $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Group.monoid.mult\<close>, _) $ R $ _ $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Ring.a_inv\<close>, _) $ R $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Group.pow\<close>, _) $ R $ _ $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Algebra_Aux.m_div\<close>, _) $ R $ _ $ _) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Ring.ring.zero\<close>, _) $ R) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Group.monoid.one\<close>, _) $ R) = SOME R
+ | field_struct (Const (\<^const_name>\<open>Algebra_Aux.of_integer\<close>, _) $ R $ _) = SOME R
| field_struct _ = NONE;
-fun reif_fexpr vs (Const (@{const_name Ring.ring.add}, _) $ _ $ a $ b) =
- @{const FAdd} $ reif_fexpr vs a $ reif_fexpr vs b
- | reif_fexpr vs (Const (@{const_name Ring.a_minus}, _) $ _ $ a $ b) =
- @{const FSub} $ reif_fexpr vs a $ reif_fexpr vs b
- | reif_fexpr vs (Const (@{const_name Group.monoid.mult}, _) $ _ $ a $ b) =
- @{const FMul} $ reif_fexpr vs a $ reif_fexpr vs b
- | reif_fexpr vs (Const (@{const_name Ring.a_inv}, _) $ _ $ a) =
- @{const FNeg} $ reif_fexpr vs a
- | reif_fexpr vs (Const (@{const_name Group.pow}, _) $ _ $ a $ n) =
- @{const FPow} $ reif_fexpr vs a $ n
- | reif_fexpr vs (Const (@{const_name Algebra_Aux.m_div}, _) $ _ $ a $ b) =
- @{const FDiv} $ reif_fexpr vs a $ reif_fexpr vs b
+fun reif_fexpr vs (Const (\<^const_name>\<open>Ring.ring.add\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>FAdd\<close> $ reif_fexpr vs a $ reif_fexpr vs b
+ | reif_fexpr vs (Const (\<^const_name>\<open>Ring.a_minus\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>FSub\<close> $ reif_fexpr vs a $ reif_fexpr vs b
+ | reif_fexpr vs (Const (\<^const_name>\<open>Group.monoid.mult\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>FMul\<close> $ reif_fexpr vs a $ reif_fexpr vs b
+ | reif_fexpr vs (Const (\<^const_name>\<open>Ring.a_inv\<close>, _) $ _ $ a) =
+ \<^const>\<open>FNeg\<close> $ reif_fexpr vs a
+ | reif_fexpr vs (Const (\<^const_name>\<open>Group.pow\<close>, _) $ _ $ a $ n) =
+ \<^const>\<open>FPow\<close> $ reif_fexpr vs a $ n
+ | reif_fexpr vs (Const (\<^const_name>\<open>Algebra_Aux.m_div\<close>, _) $ _ $ a $ b) =
+ \<^const>\<open>FDiv\<close> $ reif_fexpr vs a $ reif_fexpr vs b
| reif_fexpr vs (Free x) =
- @{const FVar} $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
- | reif_fexpr vs (Const (@{const_name Ring.ring.zero}, _) $ _) =
- @{term "FCnst 0"}
- | reif_fexpr vs (Const (@{const_name Group.monoid.one}, _) $ _) =
- @{term "FCnst 1"}
- | reif_fexpr vs (Const (@{const_name Algebra_Aux.of_integer}, _) $ _ $ n) =
- @{const FCnst} $ n
+ \<^const>\<open>FVar\<close> $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
+ | reif_fexpr vs (Const (\<^const_name>\<open>Ring.ring.zero\<close>, _) $ _) =
+ \<^term>\<open>FCnst 0\<close>
+ | reif_fexpr vs (Const (\<^const_name>\<open>Group.monoid.one\<close>, _) $ _) =
+ \<^term>\<open>FCnst 1\<close>
+ | reif_fexpr vs (Const (\<^const_name>\<open>Algebra_Aux.of_integer\<close>, _) $ _ $ n) =
+ \<^const>\<open>FCnst\<close> $ n
| reif_fexpr _ _ = error "reif_fexpr: bad expression";
-fun reif_fexpr' vs (Const (@{const_name Groups.plus}, _) $ a $ b) =
- @{const FAdd} $ reif_fexpr' vs a $ reif_fexpr' vs b
- | reif_fexpr' vs (Const (@{const_name Groups.minus}, _) $ a $ b) =
- @{const FSub} $ reif_fexpr' vs a $ reif_fexpr' vs b
- | reif_fexpr' vs (Const (@{const_name Groups.times}, _) $ a $ b) =
- @{const FMul} $ reif_fexpr' vs a $ reif_fexpr' vs b
- | reif_fexpr' vs (Const (@{const_name Groups.uminus}, _) $ a) =
- @{const FNeg} $ reif_fexpr' vs a
- | reif_fexpr' vs (Const (@{const_name Power.power}, _) $ a $ n) =
- @{const FPow} $ reif_fexpr' vs a $ n
- | reif_fexpr' vs (Const (@{const_name divide}, _) $ a $ b) =
- @{const FDiv} $ reif_fexpr' vs a $ reif_fexpr' vs b
+fun reif_fexpr' vs (Const (\<^const_name>\<open>Groups.plus\<close>, _) $ a $ b) =
+ \<^const>\<open>FAdd\<close> $ reif_fexpr' vs a $ reif_fexpr' vs b
+ | reif_fexpr' vs (Const (\<^const_name>\<open>Groups.minus\<close>, _) $ a $ b) =
+ \<^const>\<open>FSub\<close> $ reif_fexpr' vs a $ reif_fexpr' vs b
+ | reif_fexpr' vs (Const (\<^const_name>\<open>Groups.times\<close>, _) $ a $ b) =
+ \<^const>\<open>FMul\<close> $ reif_fexpr' vs a $ reif_fexpr' vs b
+ | reif_fexpr' vs (Const (\<^const_name>\<open>Groups.uminus\<close>, _) $ a) =
+ \<^const>\<open>FNeg\<close> $ reif_fexpr' vs a
+ | reif_fexpr' vs (Const (\<^const_name>\<open>Power.power\<close>, _) $ a $ n) =
+ \<^const>\<open>FPow\<close> $ reif_fexpr' vs a $ n
+ | reif_fexpr' vs (Const (\<^const_name>\<open>divide\<close>, _) $ a $ b) =
+ \<^const>\<open>FDiv\<close> $ reif_fexpr' vs a $ reif_fexpr' vs b
| reif_fexpr' vs (Free x) =
- @{const FVar} $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
- | reif_fexpr' vs (Const (@{const_name zero_class.zero}, _)) =
- @{term "FCnst 0"}
- | reif_fexpr' vs (Const (@{const_name one_class.one}, _)) =
- @{term "FCnst 1"}
- | reif_fexpr' vs (Const (@{const_name numeral}, _) $ b) =
- @{const FCnst} $ (@{const numeral (int)} $ b)
+ \<^const>\<open>FVar\<close> $ HOLogic.mk_number HOLogic.natT (find_index (equal x) vs)
+ | reif_fexpr' vs (Const (\<^const_name>\<open>zero_class.zero\<close>, _)) =
+ \<^term>\<open>FCnst 0\<close>
+ | reif_fexpr' vs (Const (\<^const_name>\<open>one_class.one\<close>, _)) =
+ \<^term>\<open>FCnst 1\<close>
+ | reif_fexpr' vs (Const (\<^const_name>\<open>numeral\<close>, _) $ b) =
+ \<^const>\<open>FCnst\<close> $ (@{const numeral (int)} $ b)
| reif_fexpr' _ _ = error "reif_fexpr: bad expression";
fun eq_field_simps
@@ -768,7 +768,7 @@
val If_conv_a = If_conv a;
fun conv ys n = (case strip_app ys of
- (@{const_name Cons}, [x, xs]) =>
+ (\<^const_name>\<open>Cons\<close>, [x, xs]) =>
transitive'
(inst [] [x, xs, n] nth_el_Cons)
(If_conv_a (args2 nat_eq_conv)
@@ -786,29 +786,29 @@
val nth_el_conv' = nth_el_conv rls;
fun conv xs x = (case strip_app x of
- (@{const_name FCnst}, [c]) => (case strip_app c of
- (@{const_name zero_class.zero}, _) => inst [] [xs] feval_simps_9
- | (@{const_name one_class.one}, _) => inst [] [xs] feval_simps_10
- | (@{const_name numeral}, [n]) => inst [] [xs, n] feval_simps_11
+ (\<^const_name>\<open>FCnst\<close>, [c]) => (case strip_app c of
+ (\<^const_name>\<open>zero_class.zero\<close>, _) => inst [] [xs] feval_simps_9
+ | (\<^const_name>\<open>one_class.one\<close>, _) => inst [] [xs] feval_simps_10
+ | (\<^const_name>\<open>numeral\<close>, [n]) => inst [] [xs, n] feval_simps_11
| _ => inst [] [xs, c] feval_simps_1)
- | (@{const_name FVar}, [n]) =>
+ | (\<^const_name>\<open>FVar\<close>, [n]) =>
transitive' (inst [] [xs, n] feval_simps_2) (args2 nth_el_conv')
- | (@{const_name FAdd}, [a, b]) =>
+ | (\<^const_name>\<open>FAdd\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] feval_simps_3)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name FSub}, [a, b]) =>
+ | (\<^const_name>\<open>FSub\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] feval_simps_4)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name FMul}, [a, b]) =>
+ | (\<^const_name>\<open>FMul\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] feval_simps_5)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name FNeg}, [a]) =>
+ | (\<^const_name>\<open>FNeg\<close>, [a]) =>
transitive' (inst [] [xs, a] feval_simps_6)
(cong1 (args2 conv))
- | (@{const_name FDiv}, [a, b]) =>
+ | (\<^const_name>\<open>FDiv\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] feval_simps_7)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name FPow}, [a, n]) =>
+ | (\<^const_name>\<open>FPow\<close>, [a, n]) =>
transitive' (inst [] [xs, a, n] feval_simps_8)
(cong2 (args2 conv) Thm.reflexive))
in conv end;
@@ -824,29 +824,29 @@
val nth_el_conv' = nth_el_conv rls;
fun conv xs x = (case strip_app x of
- (@{const_name PExpr1}, [e]) => (case strip_app e of
- (@{const_name PCnst}, [c]) => (case strip_numeral c of
- (@{const_name zero_class.zero}, _) => inst [] [xs] peval_simps_8
- | (@{const_name one_class.one}, _) => inst [] [xs] peval_simps_9
- | (@{const_name numeral}, [n]) => inst [] [xs, n] peval_simps_10
- | (@{const_name uminus}, [n]) => inst [] [xs, n] peval_simps_11
+ (\<^const_name>\<open>PExpr1\<close>, [e]) => (case strip_app e of
+ (\<^const_name>\<open>PCnst\<close>, [c]) => (case strip_numeral c of
+ (\<^const_name>\<open>zero_class.zero\<close>, _) => inst [] [xs] peval_simps_8
+ | (\<^const_name>\<open>one_class.one\<close>, _) => inst [] [xs] peval_simps_9
+ | (\<^const_name>\<open>numeral\<close>, [n]) => inst [] [xs, n] peval_simps_10
+ | (\<^const_name>\<open>uminus\<close>, [n]) => inst [] [xs, n] peval_simps_11
| _ => inst [] [xs, c] peval_simps_1)
- | (@{const_name PVar}, [n]) =>
+ | (\<^const_name>\<open>PVar\<close>, [n]) =>
transitive' (inst [] [xs, n] peval_simps_2) (args2 nth_el_conv')
- | (@{const_name PAdd}, [a, b]) =>
+ | (\<^const_name>\<open>PAdd\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] peval_simps_3)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name PSub}, [a, b]) =>
+ | (\<^const_name>\<open>PSub\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] peval_simps_4)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name PNeg}, [a]) =>
+ | (\<^const_name>\<open>PNeg\<close>, [a]) =>
transitive' (inst [] [xs, a] peval_simps_5)
(cong1 (args2 conv)))
- | (@{const_name PExpr2}, [e]) => (case strip_app e of
- (@{const_name PMul}, [a, b]) =>
+ | (\<^const_name>\<open>PExpr2\<close>, [e]) => (case strip_app e of
+ (\<^const_name>\<open>PMul\<close>, [a, b]) =>
transitive' (inst [] [xs, a, b] peval_simps_6)
(cong2 (args2 conv) (args2 conv))
- | (@{const_name PPow}, [a, n]) =>
+ | (\<^const_name>\<open>PPow\<close>, [a, n]) =>
transitive' (inst [] [xs, a, n] peval_simps_7)
(cong2 (args2 conv) Thm.reflexive)))
in conv end;
@@ -859,9 +859,9 @@
val peval_conv' = peval_conv rls;
fun conv xs qs = (case strip_app qs of
- (@{const_name Nil}, []) => inst [] [xs] nonzero_Nil
- | (@{const_name Cons}, [p, ps]) => (case Thm.term_of ps of
- Const (@{const_name Nil}, _) =>
+ (\<^const_name>\<open>Nil\<close>, []) => inst [] [xs] nonzero_Nil
+ | (\<^const_name>\<open>Cons\<close>, [p, ps]) => (case Thm.term_of ps of
+ Const (\<^const_name>\<open>Nil\<close>, _) =>
transitive' (inst [] [xs, p] nonzero_singleton)
(cong1 (cong2 (args2 peval_conv') Thm.reflexive))
| _ => transitive' (inst [] [xs, p, ps] nonzero_Cons)
@@ -873,12 +873,12 @@
let
val (prems, concl) = Logic.strip_horn g;
fun find_eq s = (case s of
- (_ $ (Const (@{const_name HOL.eq}, Type (_, [T, _])) $ t $ u)) =>
+ (_ $ (Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [T, _])) $ t $ u)) =>
(case (field_struct t, field_struct u) of
(SOME R, _) => SOME ((t, u), R, T, NONE, mk_in_carrier ctxt R [], reif_fexpr)
| (_, SOME R) => SOME ((t, u), R, T, NONE, mk_in_carrier ctxt R [], reif_fexpr)
| _ =>
- if Sign.of_sort (Proof_Context.theory_of ctxt) (T, @{sort field})
+ if Sign.of_sort (Proof_Context.theory_of ctxt) (T, \<^sort>\<open>field\<close>)
then SOME ((t, u), mk_ring T, T, SOME T, K @{thm in_carrier_trivial}, reif_fexpr')
else NONE)
| _ => NONE);
@@ -894,7 +894,7 @@
val ce = Thm.cterm_of ctxt (reif xs t);
val ce' = Thm.cterm_of ctxt (reif xs u);
val fnorm = cv ctxt
- (Thm.apply @{cterm fnorm} (Thm.apply (Thm.apply @{cterm FSub} ce) ce'));
+ (Thm.apply \<^cterm>\<open>fnorm\<close> (Thm.apply (Thm.apply \<^cterm>\<open>FSub\<close> ce) ce'));
val (_, [n, dc]) = strip_app (Thm.rhs_of fnorm);
val (_, [_, c]) = strip_app dc;
val th =
@@ -925,7 +925,7 @@
local_setup \<open>
Local_Theory.declaration {syntax = false, pervasive = false}
(fn phi => Field_Tac.Field_Simps.map (Ring_Tac.insert_rules Field_Tac.eq_field_simps
- (Morphism.term phi @{term R},
+ (Morphism.term phi \<^term>\<open>R\<close>,
(Morphism.fact phi @{thms feval.simps [meta] feval_Cnst [meta]},
Morphism.fact phi @{thms peval.simps [meta] peval_Cnst [meta]},
Morphism.fact phi @{thms nonzero.simps [meta] nonzero_singleton [meta]},
--- a/src/HOL/Decision_Procs/approximation.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/approximation.ML Sat Jan 05 17:24:33 2019 +0100
@@ -15,11 +15,11 @@
fun reorder_bounds_tac ctxt prems i =
let
- fun variable_of_bound (Const (@{const_name Trueprop}, _) $
- (Const (@{const_name Set.member}, _) $
+ fun variable_of_bound (Const (\<^const_name>\<open>Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>Set.member\<close>, _) $
Free (name, _) $ _)) = name
- | variable_of_bound (Const (@{const_name Trueprop}, _) $
- (Const (@{const_name HOL.eq}, _) $
+ | variable_of_bound (Const (\<^const_name>\<open>Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $
Free (name, _) $ _)) = name
| variable_of_bound t = raise TERM ("variable_of_bound", [t])
@@ -58,8 +58,8 @@
fun rewrite_interpret_form_tac ctxt prec splitting taylor i st = let
fun lookup_splitting (Free (name, _)) =
(case AList.lookup (op =) splitting name
- of SOME s => HOLogic.mk_number @{typ nat} s
- | NONE => @{term "0 :: nat"})
+ of SOME s => HOLogic.mk_number \<^typ>\<open>nat\<close> s
+ | NONE => \<^term>\<open>0 :: nat\<close>)
| lookup_splitting t = raise TERM ("lookup_splitting", [t])
val vs = nth (Thm.prems_of st) (i - 1)
|> Logic.strip_imp_concl
@@ -67,23 +67,23 @@
|> Term.strip_comb |> snd |> List.last
|> HOLogic.dest_list
val p = prec
- |> HOLogic.mk_number @{typ nat}
+ |> HOLogic.mk_number \<^typ>\<open>nat\<close>
|> Thm.cterm_of ctxt
in case taylor
of NONE => let
val n = vs |> length
- |> HOLogic.mk_number @{typ nat}
+ |> HOLogic.mk_number \<^typ>\<open>nat\<close>
|> Thm.cterm_of ctxt
val s = vs
|> map lookup_splitting
- |> HOLogic.mk_list @{typ nat}
+ |> HOLogic.mk_list \<^typ>\<open>nat\<close>
|> Thm.cterm_of ctxt
in
- (resolve_tac ctxt [Thm.instantiate ([], [((("n", 0), @{typ nat}), n),
- ((("prec", 0), @{typ nat}), p),
- ((("ss", 0), @{typ "nat list"}), s)])
+ (resolve_tac ctxt [Thm.instantiate ([], [((("n", 0), \<^typ>\<open>nat\<close>), n),
+ ((("prec", 0), \<^typ>\<open>nat\<close>), p),
+ ((("ss", 0), \<^typ>\<open>nat list\<close>), s)])
@{thm approx_form}] i
- THEN simp_tac (put_simpset (simpset_of @{context}) ctxt) i) st
+ THEN simp_tac (put_simpset (simpset_of \<^context>) ctxt) i) st
end
| SOME t =>
@@ -91,51 +91,51 @@
then raise (TERM ("More than one variable used for taylor series expansion", [Thm.prop_of st]))
else let
val t = t
- |> HOLogic.mk_number @{typ nat}
+ |> HOLogic.mk_number \<^typ>\<open>nat\<close>
|> Thm.cterm_of ctxt
val s = vs |> map lookup_splitting |> hd
|> Thm.cterm_of ctxt
in
- resolve_tac ctxt [Thm.instantiate ([], [((("s", 0), @{typ nat}), s),
- ((("t", 0), @{typ nat}), t),
- ((("prec", 0), @{typ nat}), p)])
+ resolve_tac ctxt [Thm.instantiate ([], [((("s", 0), \<^typ>\<open>nat\<close>), s),
+ ((("t", 0), \<^typ>\<open>nat\<close>), t),
+ ((("prec", 0), \<^typ>\<open>nat\<close>), p)])
@{thm approx_tse_form}] i st
end
end
-fun calculated_subterms (@{const Trueprop} $ t) = calculated_subterms t
- | calculated_subterms (@{const HOL.implies} $ _ $ t) = calculated_subterms t
- | calculated_subterms (@{term "(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) = [t1, t2]
- | calculated_subterms (@{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ t1 $ t2) = [t1, t2]
- | calculated_subterms (@{term "(\<in>) :: real \<Rightarrow> real set \<Rightarrow> bool"} $ t1 $
- (@{term "atLeastAtMost :: real \<Rightarrow> real \<Rightarrow> real set"} $ t2 $ t3)) = [t1, t2, t3]
+fun calculated_subterms (\<^const>\<open>Trueprop\<close> $ t) = calculated_subterms t
+ | calculated_subterms (\<^const>\<open>HOL.implies\<close> $ _ $ t) = calculated_subterms t
+ | calculated_subterms (\<^term>\<open>(\<le>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) = [t1, t2]
+ | calculated_subterms (\<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ t1 $ t2) = [t1, t2]
+ | calculated_subterms (\<^term>\<open>(\<in>) :: real \<Rightarrow> real set \<Rightarrow> bool\<close> $ t1 $
+ (\<^term>\<open>atLeastAtMost :: real \<Rightarrow> real \<Rightarrow> real set\<close> $ t2 $ t3)) = [t1, t2, t3]
| calculated_subterms t = raise TERM ("calculated_subterms", [t])
-fun dest_interpret_form (@{const "interpret_form"} $ b $ xs) = (b, xs)
+fun dest_interpret_form (\<^const>\<open>interpret_form\<close> $ b $ xs) = (b, xs)
| dest_interpret_form t = raise TERM ("dest_interpret_form", [t])
-fun dest_interpret (@{const "interpret_floatarith"} $ b $ xs) = (b, xs)
+fun dest_interpret (\<^const>\<open>interpret_floatarith\<close> $ b $ xs) = (b, xs)
| dest_interpret t = raise TERM ("dest_interpret", [t])
-fun dest_interpret_env (@{const "interpret_form"} $ _ $ xs) = xs
- | dest_interpret_env (@{const "interpret_floatarith"} $ _ $ xs) = xs
+fun dest_interpret_env (\<^const>\<open>interpret_form\<close> $ _ $ xs) = xs
+ | dest_interpret_env (\<^const>\<open>interpret_floatarith\<close> $ _ $ xs) = xs
| dest_interpret_env t = raise TERM ("dest_interpret_env", [t])
-fun dest_float (@{const "Float"} $ m $ e) = (snd (HOLogic.dest_number m), snd (HOLogic.dest_number e))
+fun dest_float (\<^const>\<open>Float\<close> $ m $ e) = (snd (HOLogic.dest_number m), snd (HOLogic.dest_number e))
| dest_float t = raise TERM ("dest_float", [t])
-fun dest_ivl (Const (@{const_name "Some"}, _) $
- (Const (@{const_name Pair}, _) $ u $ l)) = SOME (dest_float u, dest_float l)
- | dest_ivl (Const (@{const_name "None"}, _)) = NONE
+fun dest_ivl (Const (\<^const_name>\<open>Some\<close>, _) $
+ (Const (\<^const_name>\<open>Pair\<close>, _) $ u $ l)) = SOME (dest_float u, dest_float l)
+ | dest_ivl (Const (\<^const_name>\<open>None\<close>, _)) = NONE
| dest_ivl t = raise TERM ("dest_result", [t])
-fun mk_approx' prec t = (@{const "approx'"}
- $ HOLogic.mk_number @{typ nat} prec
- $ t $ @{term "[] :: (float * float) option list"})
+fun mk_approx' prec t = (\<^const>\<open>approx'\<close>
+ $ HOLogic.mk_number \<^typ>\<open>nat\<close> prec
+ $ t $ \<^term>\<open>[] :: (float * float) option list\<close>)
-fun mk_approx_form_eval prec t xs = (@{const "approx_form_eval"}
- $ HOLogic.mk_number @{typ nat} prec
+fun mk_approx_form_eval prec t xs = (\<^const>\<open>approx_form_eval\<close>
+ $ HOLogic.mk_number \<^typ>\<open>nat\<close> prec
$ t $ xs)
fun float2_float10 prec round_down (m, e) = (
@@ -170,21 +170,21 @@
fun mk_result prec (SOME (l, u)) =
(let
fun mk_float10 rnd x = (let val (m, e) = float2_float10 prec rnd x
- in if e = 0 then HOLogic.mk_number @{typ real} m
- else if e = 1 then @{term "divide :: real \<Rightarrow> real \<Rightarrow> real"} $
- HOLogic.mk_number @{typ real} m $
- @{term "10"}
- else @{term "divide :: real \<Rightarrow> real \<Rightarrow> real"} $
- HOLogic.mk_number @{typ real} m $
- (@{term "power 10 :: nat \<Rightarrow> real"} $
- HOLogic.mk_number @{typ nat} (~e)) end)
- in @{term "atLeastAtMost :: real \<Rightarrow> real \<Rightarrow> real set"} $ mk_float10 true l $ mk_float10 false u end)
- | mk_result _ NONE = @{term "UNIV :: real set"}
+ in if e = 0 then HOLogic.mk_number \<^typ>\<open>real\<close> m
+ else if e = 1 then \<^term>\<open>divide :: real \<Rightarrow> real \<Rightarrow> real\<close> $
+ HOLogic.mk_number \<^typ>\<open>real\<close> m $
+ \<^term>\<open>10\<close>
+ else \<^term>\<open>divide :: real \<Rightarrow> real \<Rightarrow> real\<close> $
+ HOLogic.mk_number \<^typ>\<open>real\<close> m $
+ (\<^term>\<open>power 10 :: nat \<Rightarrow> real\<close> $
+ HOLogic.mk_number \<^typ>\<open>nat\<close> (~e)) end)
+ in \<^term>\<open>atLeastAtMost :: real \<Rightarrow> real \<Rightarrow> real set\<close> $ mk_float10 true l $ mk_float10 false u end)
+ | mk_result _ NONE = \<^term>\<open>UNIV :: real set\<close>
fun realify t =
let
val t = Logic.varify_global t
- val m = map (fn (name, _) => (name, @{typ real})) (Term.add_tvars t [])
+ val m = map (fn (name, _) => (name, \<^typ>\<open>real\<close>)) (Term.add_tvars t [])
val t = Term.subst_TVars m t
in t end
@@ -197,7 +197,7 @@
fun preproc_form_conv ctxt =
Simplifier.rewrite
(put_simpset HOL_basic_ss ctxt addsimps
- (Named_Theorems.get ctxt @{named_theorems approximation_preproc}))
+ (Named_Theorems.get ctxt \<^named_theorems>\<open>approximation_preproc\<close>))
fun reify_form_conv ctxt ct =
let
@@ -241,12 +241,12 @@
|> HOLogic.dest_Trueprop
|> dest_interpret_form
|> (fn (data, xs) =>
- mk_approx_form_eval prec data (HOLogic.mk_list @{typ "(float * float) option"}
- (map (fn _ => @{term "None :: (float * float) option"}) (HOLogic.dest_list xs)))
+ mk_approx_form_eval prec data (HOLogic.mk_list \<^typ>\<open>(float * float) option\<close>
+ (map (fn _ => \<^term>\<open>None :: (float * float) option\<close>) (HOLogic.dest_list xs)))
|> approximate ctxt
|> HOLogic.dest_list
|> curry ListPair.zip (HOLogic.dest_list xs @ calculated_subterms arith_term)
- |> map (fn (elem, s) => @{term "(\<in>) :: real \<Rightarrow> real set \<Rightarrow> bool"} $ elem $ mk_result prec (dest_ivl s))
+ |> map (fn (elem, s) => \<^term>\<open>(\<in>) :: real \<Rightarrow> real set \<Rightarrow> bool\<close> $ elem $ mk_result prec (dest_ivl s))
|> foldr1 HOLogic.mk_conj))
fun approx_arith prec ctxt t = realify t
@@ -261,8 +261,8 @@
|> mk_result prec
fun approx prec ctxt t =
- if type_of t = @{typ prop} then approx_form prec ctxt t
- else if type_of t = @{typ bool} then approx_form prec ctxt (@{const Trueprop} $ t)
+ if type_of t = \<^typ>\<open>prop\<close> then approx_form prec ctxt t
+ else if type_of t = \<^typ>\<open>bool\<close> then approx_form prec ctxt (\<^const>\<open>Trueprop\<close> $ t)
else approx_arith prec ctxt t
fun approximate_cmd modes raw_t state =
@@ -279,10 +279,10 @@
end |> Pretty.writeln;
val opt_modes =
- Scan.optional (@{keyword "("} |-- Parse.!!! (Scan.repeat1 Parse.name --| @{keyword ")"})) [];
+ Scan.optional (\<^keyword>\<open>(\<close> |-- Parse.!!! (Scan.repeat1 Parse.name --| \<^keyword>\<open>)\<close>)) [];
val _ =
- Outer_Syntax.command @{command_keyword approximate} "print approximation of term"
+ Outer_Syntax.command \<^command_keyword>\<open>approximate\<close> "print approximation of term"
(opt_modes -- Parse.term
>> (fn (modes, t) => Toplevel.keep (approximate_cmd modes t)));
--- a/src/HOL/Decision_Procs/approximation_generator.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/approximation_generator.ML Sat Jan 05 17:24:33 2019 +0100
@@ -17,11 +17,11 @@
structure Approximation_Generator : APPROXIMATION_GENERATOR =
struct
-val custom_seed = Attrib.setup_config_int @{binding quickcheck_approximation_custom_seed} (K ~1)
+val custom_seed = Attrib.setup_config_int \<^binding>\<open>quickcheck_approximation_custom_seed\<close> (K ~1)
-val precision = Attrib.setup_config_int @{binding quickcheck_approximation_precision} (K 30)
+val precision = Attrib.setup_config_int \<^binding>\<open>quickcheck_approximation_precision\<close> (K 30)
-val epsilon = Attrib.setup_config_real @{binding quickcheck_approximation_epsilon} (K 0.0)
+val epsilon = Attrib.setup_config_real \<^binding>\<open>quickcheck_approximation_epsilon\<close> (K 0.0)
val random_float = @{code "random_class.random::_ \<Rightarrow> _ \<Rightarrow> (float \<times> (unit \<Rightarrow> term)) \<times> _"}
@@ -33,33 +33,33 @@
fun real_of_man_exp m e = Real.fromManExp {man = Real.fromInt m, exp = e}
-fun mapprox_float (@{term Float} $ m $ e) = real_of_man_exp (int_of_term m) (int_of_term e)
+fun mapprox_float (\<^term>\<open>Float\<close> $ m $ e) = real_of_man_exp (int_of_term m) (int_of_term e)
| mapprox_float t = Real.fromInt (snd (HOLogic.dest_number t))
handle TERM _ => raise TERM ("mapprox_float", [t]);
(* TODO: define using compiled terms? *)
-fun mapprox_floatarith (@{term Add} $ a $ b) xs = mapprox_floatarith a xs + mapprox_floatarith b xs
- | mapprox_floatarith (@{term Minus} $ a) xs = ~ (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Mult} $ a $ b) xs = mapprox_floatarith a xs * mapprox_floatarith b xs
- | mapprox_floatarith (@{term Inverse} $ a) xs = 1.0 / mapprox_floatarith a xs
- | mapprox_floatarith (@{term Cos} $ a) xs = Math.cos (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Arctan} $ a) xs = Math.atan (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Abs} $ a) xs = abs (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Max} $ a $ b) xs =
+fun mapprox_floatarith (\<^term>\<open>Add\<close> $ a $ b) xs = mapprox_floatarith a xs + mapprox_floatarith b xs
+ | mapprox_floatarith (\<^term>\<open>Minus\<close> $ a) xs = ~ (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Mult\<close> $ a $ b) xs = mapprox_floatarith a xs * mapprox_floatarith b xs
+ | mapprox_floatarith (\<^term>\<open>Inverse\<close> $ a) xs = 1.0 / mapprox_floatarith a xs
+ | mapprox_floatarith (\<^term>\<open>Cos\<close> $ a) xs = Math.cos (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Arctan\<close> $ a) xs = Math.atan (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Abs\<close> $ a) xs = abs (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Max\<close> $ a $ b) xs =
Real.max (mapprox_floatarith a xs, mapprox_floatarith b xs)
- | mapprox_floatarith (@{term Min} $ a $ b) xs =
+ | mapprox_floatarith (\<^term>\<open>Min\<close> $ a $ b) xs =
Real.min (mapprox_floatarith a xs, mapprox_floatarith b xs)
- | mapprox_floatarith @{term Pi} _ = Math.pi
- | mapprox_floatarith (@{term Sqrt} $ a) xs = Math.sqrt (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Exp} $ a) xs = Math.exp (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Powr} $ a $ b) xs =
+ | mapprox_floatarith \<^term>\<open>Pi\<close> _ = Math.pi
+ | mapprox_floatarith (\<^term>\<open>Sqrt\<close> $ a) xs = Math.sqrt (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Exp\<close> $ a) xs = Math.exp (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Powr\<close> $ a $ b) xs =
Math.pow (mapprox_floatarith a xs, mapprox_floatarith b xs)
- | mapprox_floatarith (@{term Ln} $ a) xs = Math.ln (mapprox_floatarith a xs)
- | mapprox_floatarith (@{term Power} $ a $ n) xs =
+ | mapprox_floatarith (\<^term>\<open>Ln\<close> $ a) xs = Math.ln (mapprox_floatarith a xs)
+ | mapprox_floatarith (\<^term>\<open>Power\<close> $ a $ n) xs =
Math.pow (mapprox_floatarith a xs, Real.fromInt (nat_of_term n))
- | mapprox_floatarith (@{term Floor} $ a) xs = Real.fromInt (floor (mapprox_floatarith a xs))
- | mapprox_floatarith (@{term Var} $ n) xs = nth xs (nat_of_term n)
- | mapprox_floatarith (@{term Num} $ m) _ = mapprox_float m
+ | mapprox_floatarith (\<^term>\<open>Floor\<close> $ a) xs = Real.fromInt (floor (mapprox_floatarith a xs))
+ | mapprox_floatarith (\<^term>\<open>Var\<close> $ n) xs = nth xs (nat_of_term n)
+ | mapprox_floatarith (\<^term>\<open>Num\<close> $ m) _ = mapprox_float m
| mapprox_floatarith t _ = raise TERM ("mapprox_floatarith", [t])
fun mapprox_atLeastAtMost eps x a b xs =
@@ -69,22 +69,22 @@
mapprox_floatarith a xs + eps <= x' andalso x' + eps <= mapprox_floatarith b xs
end
-fun mapprox_form eps (@{term Bound} $ x $ a $ b $ f) xs =
+fun mapprox_form eps (\<^term>\<open>Bound\<close> $ x $ a $ b $ f) xs =
(not (mapprox_atLeastAtMost eps x a b xs)) orelse mapprox_form eps f xs
-| mapprox_form eps (@{term Assign} $ x $ a $ f) xs =
+| mapprox_form eps (\<^term>\<open>Assign\<close> $ x $ a $ f) xs =
(Real.!= (mapprox_floatarith x xs, mapprox_floatarith a xs)) orelse mapprox_form eps f xs
-| mapprox_form eps (@{term Less} $ a $ b) xs = mapprox_floatarith a xs + eps < mapprox_floatarith b xs
-| mapprox_form eps (@{term LessEqual} $ a $ b) xs = mapprox_floatarith a xs + eps <= mapprox_floatarith b xs
-| mapprox_form eps (@{term AtLeastAtMost} $ x $ a $ b) xs = mapprox_atLeastAtMost eps x a b xs
-| mapprox_form eps (@{term Conj} $ f $ g) xs = mapprox_form eps f xs andalso mapprox_form eps g xs
-| mapprox_form eps (@{term Disj} $ f $ g) xs = mapprox_form eps f xs orelse mapprox_form eps g xs
+| mapprox_form eps (\<^term>\<open>Less\<close> $ a $ b) xs = mapprox_floatarith a xs + eps < mapprox_floatarith b xs
+| mapprox_form eps (\<^term>\<open>LessEqual\<close> $ a $ b) xs = mapprox_floatarith a xs + eps <= mapprox_floatarith b xs
+| mapprox_form eps (\<^term>\<open>AtLeastAtMost\<close> $ x $ a $ b) xs = mapprox_atLeastAtMost eps x a b xs
+| mapprox_form eps (\<^term>\<open>Conj\<close> $ f $ g) xs = mapprox_form eps f xs andalso mapprox_form eps g xs
+| mapprox_form eps (\<^term>\<open>Disj\<close> $ f $ g) xs = mapprox_form eps f xs orelse mapprox_form eps g xs
| mapprox_form _ t _ = raise TERM ("mapprox_form", [t])
-fun dest_interpret_form (@{const "interpret_form"} $ b $ xs) = (b, xs)
+fun dest_interpret_form (\<^const>\<open>interpret_form\<close> $ b $ xs) = (b, xs)
| dest_interpret_form t = raise TERM ("dest_interpret_form", [t])
-fun optionT t = Type (@{type_name "option"}, [t])
-fun mk_Some t = Const (@{const_name "Some"}, t --> optionT t)
+fun optionT t = Type (\<^type_name>\<open>option\<close>, [t])
+fun mk_Some t = Const (\<^const_name>\<open>Some\<close>, t --> optionT t)
fun random_float_list size xs seed =
fold (K (apsnd (random_float size) #-> (fn c => apfst (fn b => b::c)))) xs ([],seed)
@@ -92,7 +92,7 @@
fun real_of_Float (@{code Float} (m, e)) =
real_of_man_exp (@{code integer_of_int} m) (@{code integer_of_int} e)
-fun is_True @{term True} = true
+fun is_True \<^term>\<open>True\<close> = true
| is_True _ = false
val postproc_form_eqs =
@@ -126,12 +126,12 @@
let
val (rs, seed') = random_float_list size xs seed
fun mk_approx_form e ts =
- @{const "approx_form"} $
- HOLogic.mk_number @{typ nat} prec $
+ \<^const>\<open>approx_form\<close> $
+ HOLogic.mk_number \<^typ>\<open>nat\<close> prec $
e $
- (HOLogic.mk_list @{typ "(float * float) option"}
- (map (fn t => mk_Some @{typ "float * float"} $ HOLogic.mk_prod (t, t)) ts)) $
- @{term "[] :: nat list"}
+ (HOLogic.mk_list \<^typ>\<open>(float * float) option\<close>
+ (map (fn t => mk_Some \<^typ>\<open>float * float\<close> $ HOLogic.mk_prod (t, t)) ts)) $
+ \<^term>\<open>[] :: nat list\<close>
in
(if
mapprox_form eps e (map (real_of_Float o fst) rs)
@@ -146,7 +146,7 @@
val ts' = map
(AList.lookup op = (map dest_Free xs ~~ ts)
#> the_default Term.dummy
- #> curry op $ @{term "real_of_float::float\<Rightarrow>_"}
+ #> curry op $ \<^term>\<open>real_of_float::float\<Rightarrow>_\<close>
#> conv_term ctxt (rewrite_with ctxt postproc_form_eqs))
frees
in
@@ -207,7 +207,7 @@
Quickcheck_Common.generator_test_goal_terms
("approximation", (fn _ => fn _ => false, approximation_generator))
-val active = Attrib.setup_config_bool @{binding quickcheck_approximation_active} (K false)
+val active = Attrib.setup_config_bool \<^binding>\<open>quickcheck_approximation_active\<close> (K false)
val setup = Context.theory_map (Quickcheck.add_tester ("approximation", (active, test_goals)))
--- a/src/HOL/Decision_Procs/cooper_tac.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/cooper_tac.ML Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
structure Cooper_Tac: COOPER_TAC =
struct
-val cooper_ss = simpset_of @{context};
+val cooper_ss = simpset_of \<^context>;
fun prepare_for_linz q fm =
let
@@ -26,7 +26,7 @@
val np = length ps
val (fm',np) = List.foldr (fn ((x, T), (fm,n)) => mk_all ((x, T), (fm,n)))
(List.foldr HOLogic.mk_imp c rhs, np) ps
- val (vs, _) = List.partition (fn t => q orelse (type_of t) = @{typ nat})
+ val (vs, _) = List.partition (fn t => q orelse (type_of t) = \<^typ>\<open>nat\<close>)
(Misc_Legacy.term_frees fm' @ Misc_Legacy.term_vars fm');
val fm2 = List.foldr mk_all2 fm' vs
in (fm2, np + length vs, length rhs) end;
@@ -53,7 +53,7 @@
div_by_1 mod_by_1 div_by_Suc_0 mod_by_Suc_0
Suc_eq_plus1}
addsimps @{thms ac_simps}
- addsimprocs [@{simproc cancel_div_mod_nat}, @{simproc cancel_div_mod_int}]
+ addsimprocs [\<^simproc>\<open>cancel_div_mod_nat\<close>, \<^simproc>\<open>cancel_div_mod_int\<close>]
val simpset0 =
put_simpset HOL_basic_ss ctxt
addsimps @{thms minus_div_mult_eq_mod [symmetric] Suc_eq_plus1 simp_thms}
@@ -84,7 +84,7 @@
(* The result of the quantifier elimination *)
val (th, tac) =
(case Thm.prop_of pre_thm of
- Const (@{const_name Pure.imp}, _) $ (Const (@{const_name Trueprop}, _) $ t1) $ _ =>
+ Const (\<^const_name>\<open>Pure.imp\<close>, _) $ (Const (\<^const_name>\<open>Trueprop\<close>, _) $ t1) $ _ =>
let
val pth = linzqe_oracle (ctxt, Envir.eta_long [] t1)
in
--- a/src/HOL/Decision_Procs/ex/Approximation_Ex.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/ex/Approximation_Ex.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,14 +17,14 @@
variables can be used, but each one need to be bounded by an upper and lower
bound.
-To specify the bounds either @{term "l\<^sub>1 \<le> x \<and> x \<le> u\<^sub>1"},
-@{term "x \<in> { l\<^sub>1 .. u\<^sub>1 }"} or @{term "x = bnd"} can be used. Where the
+To specify the bounds either \<^term>\<open>l\<^sub>1 \<le> x \<and> x \<le> u\<^sub>1\<close>,
+\<^term>\<open>x \<in> { l\<^sub>1 .. u\<^sub>1 }\<close> or \<^term>\<open>x = bnd\<close> can be used. Where the
bound specification are again arithmetic formulas containing variables. They can
be connected using either meta level or HOL equivalence.
To use interval splitting add for each variable whos interval should be splitted
to the "splitting:" parameter. The parameter specifies how often each interval
-should be divided, e.g. when x = 16 is specified, there will be @{term "65536 = 2^16"}
+should be divided, e.g. when x = 16 is specified, there will be \<^term>\<open>65536 = 2^16\<close>
intervals to be calculated.
To use taylor series expansion specify the variable to derive. You need to
--- a/src/HOL/Decision_Procs/ex/Approximation_Quickcheck_Ex.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/ex/Approximation_Quickcheck_Ex.thy Sat Jan 05 17:24:33 2019 +0100
@@ -31,7 +31,7 @@
shows "x > 1 \<Longrightarrow> x \<le> 2 ^ 20 * log 2 x + 1 \<and> (sin x)\<^sup>2 + (cos x)\<^sup>2 = 1"
using [[quickcheck_approximation_custom_seed = 1]]
using [[quickcheck_approximation_epsilon = 0.00000001]]
- \<comment> \<open>avoids spurious counterexamples in approximate computation of @{term "(sin x)\<^sup>2 + (cos x)\<^sup>2"}
+ \<comment> \<open>avoids spurious counterexamples in approximate computation of \<^term>\<open>(sin x)\<^sup>2 + (cos x)\<^sup>2\<close>
and therefore avoids expensive failing attempts for certification\<close>
quickcheck[approximation, expect=counterexample, size=20]
oops
--- a/src/HOL/Decision_Procs/ferrack_tac.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/ferrack_tac.ML Sat Jan 05 17:24:33 2019 +0100
@@ -12,7 +12,7 @@
val ferrack_ss = let val ths = [@{thm of_int_eq_iff}, @{thm of_int_less_iff},
@{thm of_int_le_iff}]
- in @{context} delsimps ths addsimps (map (fn th => th RS sym) ths)
+ in \<^context> delsimps ths addsimps (map (fn th => th RS sym) ths)
end |> simpset_of;
val binarith = @{thms arith_simps}
@@ -63,7 +63,7 @@
fun assm_tac i = REPEAT_DETERM_N nh (assume_tac ctxt i)
(* The result of the quantifier elimination *)
val (th, tac) = case Thm.prop_of pre_thm of
- Const (@{const_name Pure.imp}, _) $ (Const (@{const_name Trueprop}, _) $ t1) $ _ =>
+ Const (\<^const_name>\<open>Pure.imp\<close>, _) $ (Const (\<^const_name>\<open>Trueprop\<close>, _) $ t1) $ _ =>
let val pth = linr_oracle (ctxt, Envir.eta_long [] t1)
in
((pth RS iffD2) RS pre_thm,
--- a/src/HOL/Decision_Procs/ferrante_rackoff.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/ferrante_rackoff.ML Sat Jan 05 17:24:33 2019 +0100
@@ -33,12 +33,12 @@
{isolate_conv = icv, whatis = wi, simpset = simpset}):entry) =
let
fun uset (vars as (x::vs)) p = case Thm.term_of p of
- Const(@{const_name HOL.conj}, _)$ _ $ _ =>
+ Const(\<^const_name>\<open>HOL.conj\<close>, _)$ _ $ _ =>
let
val ((b,l),r) = Thm.dest_comb p |>> Thm.dest_comb
val (lS,lth) = uset vars l val (rS, rth) = uset vars r
in (lS@rS, Drule.binop_cong_rule b lth rth) end
- | Const(@{const_name HOL.disj}, _)$ _ $ _ =>
+ | Const(\<^const_name>\<open>HOL.disj\<close>, _)$ _ $ _ =>
let
val ((b,l),r) = Thm.dest_comb p |>> Thm.dest_comb
val (lS,lth) = uset vars l val (rS, rth) = uset vars r
@@ -78,7 +78,7 @@
fun main vs p =
let
val ((xn,ce),(x,fm)) = (case Thm.term_of p of
- Const(@{const_name Ex},_)$Abs(xn,xT,_) =>
+ Const(\<^const_name>\<open>Ex\<close>,_)$Abs(xn,xT,_) =>
Thm.dest_comb p ||> Thm.dest_abs (SOME xn) |>> pair xn
| _ => raise CTERM ("main QE only treats existential quantifiers!", [p]))
val cT = Thm.ctyp_of_cterm x
@@ -99,8 +99,8 @@
in
fun provein x S =
case Thm.term_of S of
- Const(@{const_name Orderings.bot}, _) => raise CTERM ("provein : not a member!", [S])
- | Const(@{const_name insert}, _) $ y $_ =>
+ Const(\<^const_name>\<open>Orderings.bot\<close>, _) => raise CTERM ("provein : not a member!", [S])
+ | Const(\<^const_name>\<open>insert\<close>, _) $ y $_ =>
let val (cy,S') = Thm.dest_binop S
in if Thm.term_of x aconv y then Thm.instantiate' [] [SOME x, SOME S'] insI1
else Thm.implies_elim (Thm.instantiate' [] [SOME x, SOME S', SOME cy] insI2)
@@ -123,12 +123,12 @@
fun decomp_mpinf fm =
case Thm.term_of fm of
- Const(@{const_name HOL.conj},_)$_$_ =>
+ Const(\<^const_name>\<open>HOL.conj\<close>,_)$_$_ =>
let val (p,q) = Thm.dest_binop fm
in ([p,q], myfwd (minf_conj,pinf_conj, nmi_conj, npi_conj,ld_conj)
(Thm.lambda x p) (Thm.lambda x q))
end
- | Const(@{const_name HOL.disj},_)$_$_ =>
+ | Const(\<^const_name>\<open>HOL.disj\<close>,_)$_$_ =>
let val (p,q) = Thm.dest_binop fm
in ([p,q],myfwd (minf_disj, pinf_disj, nmi_disj, npi_disj,ld_disj)
(Thm.lambda x p) (Thm.lambda x q))
@@ -176,19 +176,19 @@
let
fun h bounds tm =
(case Thm.term_of tm of
- Const (@{const_name HOL.eq}, T) $ _ $ _ =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, T) $ _ $ _ =>
if domain_type T = HOLogic.boolT then find_args bounds tm
else Thm.dest_fun2 tm
- | Const (@{const_name Not}, _) $ _ => h bounds (Thm.dest_arg tm)
- | Const (@{const_name All}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name Ex}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name HOL.conj}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name HOL.disj}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name HOL.implies}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Pure.imp}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Pure.eq}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Pure.all}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name Trueprop}, _) $ _ => h bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Not\<close>, _) $ _ => h bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>All\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Ex\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>HOL.conj\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>HOL.disj\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>HOL.implies\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Pure.imp\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Pure.eq\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Pure.all\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Trueprop\<close>, _) $ _ => h bounds (Thm.dest_arg tm)
| _ => Thm.dest_fun2 tm)
and find_args bounds tm =
(h bounds (Thm.dest_arg tm) handle CTERM _ => Thm.dest_arg1 tm)
--- a/src/HOL/Decision_Procs/ferrante_rackoff_data.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/ferrante_rackoff_data.ML Sat Jan 05 17:24:33 2019 +0100
@@ -120,7 +120,7 @@
val _ =
Theory.setup
- (Attrib.setup @{binding ferrack}
+ (Attrib.setup \<^binding>\<open>ferrack\<close>
((keyword minfN |-- thms)
-- (keyword pinfN |-- thms)
-- (keyword nmiN |-- thms)
--- a/src/HOL/Decision_Procs/langford.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/langford.ML Sat Jan 05 17:24:33 2019 +0100
@@ -15,8 +15,8 @@
let
fun h acc ct =
(case Thm.term_of ct of
- Const (@{const_name Orderings.bot}, _) => acc
- | Const (@{const_name insert}, _) $ _ $ t => h (Thm.dest_arg1 ct :: acc) (Thm.dest_arg ct));
+ Const (\<^const_name>\<open>Orderings.bot\<close>, _) => acc
+ | Const (\<^const_name>\<open>insert\<close>, _) $ _ $ t => h (Thm.dest_arg1 ct :: acc) (Thm.dest_arg ct));
in h [] end;
fun prove_finite cT u =
@@ -34,7 +34,7 @@
fun basic_dloqe ctxt stupid dlo_qeth dlo_qeth_nolb dlo_qeth_noub gather ep =
(case Thm.term_of ep of
- Const (@{const_name Ex}, _) $ _ =>
+ Const (\<^const_name>\<open>Ex\<close>, _) $ _ =>
let
val p = Thm.dest_arg ep
val ths =
@@ -53,10 +53,10 @@
val qe =
(case (Thm.term_of L, Thm.term_of U) of
- (Const (@{const_name Orderings.bot}, _),_) =>
+ (Const (\<^const_name>\<open>Orderings.bot\<close>, _),_) =>
let val (neU, fU) = proveneF U
in simp_rule ctxt (Thm.transitive ths (dlo_qeth_nolb OF [neU, fU])) end
- | (_, Const (@{const_name Orderings.bot}, _)) =>
+ | (_, Const (\<^const_name>\<open>Orderings.bot\<close>, _)) =>
let val (neL,fL) = proveneF L
in simp_rule ctxt (Thm.transitive ths (dlo_qeth_noub OF [neL, fL])) end
| _ =>
@@ -71,30 +71,30 @@
let
fun h acc ct =
(case Thm.term_of ct of
- @{term HOL.conj} $ _ $ _ => h (h acc (Thm.dest_arg ct)) (Thm.dest_arg1 ct)
+ \<^term>\<open>HOL.conj\<close> $ _ $ _ => h (h acc (Thm.dest_arg ct)) (Thm.dest_arg1 ct)
| _ => ct :: acc)
in h [] end;
fun conjuncts ct =
(case Thm.term_of ct of
- @{term HOL.conj} $ _ $ _ => Thm.dest_arg1 ct :: conjuncts (Thm.dest_arg ct)
+ \<^term>\<open>HOL.conj\<close> $ _ $ _ => Thm.dest_arg1 ct :: conjuncts (Thm.dest_arg ct)
| _ => [ct]);
fun fold1 f = foldr1 (uncurry f); (* FIXME !? *)
val list_conj =
- fold1 (fn c => fn c' => Thm.apply (Thm.apply @{cterm HOL.conj} c) c');
+ fold1 (fn c => fn c' => Thm.apply (Thm.apply \<^cterm>\<open>HOL.conj\<close> c) c');
fun mk_conj_tab th =
let
fun h acc th =
(case Thm.prop_of th of
- @{term "Trueprop"} $ (@{term HOL.conj} $ p $ q) =>
+ \<^term>\<open>Trueprop\<close> $ (\<^term>\<open>HOL.conj\<close> $ p $ q) =>
h (h acc (th RS conjunct2)) (th RS conjunct1)
- | @{term "Trueprop"} $ p => (p, th) :: acc)
+ | \<^term>\<open>Trueprop\<close> $ p => (p, th) :: acc)
in fold (Termtab.insert Thm.eq_thm) (h [] th) Termtab.empty end;
-fun is_conj (@{term HOL.conj}$_$_) = true
+fun is_conj (\<^term>\<open>HOL.conj\<close>$_$_) = true
| is_conj _ = false;
fun prove_conj tab cjs =
@@ -122,7 +122,7 @@
fun is_eqx x eq =
(case Thm.term_of eq of
- Const (@{const_name HOL.eq}, _) $ l $ r =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, _) $ l $ r =>
l aconv Thm.term_of x orelse r aconv Thm.term_of x
| _ => false);
@@ -130,11 +130,11 @@
fun proc ctxt ct =
(case Thm.term_of ct of
- Const (@{const_name Ex}, _) $ Abs (xn, _, _) =>
+ Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (xn, _, _) =>
let
val e = Thm.dest_fun ct
val (x,p) = Thm.dest_abs (SOME xn) (Thm.dest_arg ct)
- val Pp = Thm.apply @{cterm Trueprop} p
+ val Pp = Thm.apply \<^cterm>\<open>Trueprop\<close> p
val (eqs,neqs) = List.partition (is_eqx x) (all_conjuncts p)
in
(case eqs of
@@ -145,8 +145,8 @@
case ndx of
[] => NONE
| _ =>
- conj_aci_rule (Thm.mk_binop @{cterm "(\<equiv>) :: prop => _"} Pp
- (Thm.apply @{cterm Trueprop} (list_conj (ndx @ dx))))
+ conj_aci_rule (Thm.mk_binop \<^cterm>\<open>(\<equiv>) :: prop => _\<close> Pp
+ (Thm.apply \<^cterm>\<open>Trueprop\<close> (list_conj (ndx @ dx))))
|> Thm.abstract_rule xn x
|> Drule.arg_cong_rule e
|> Conv.fconv_rule
@@ -156,8 +156,8 @@
|> SOME
end
| _ =>
- conj_aci_rule (Thm.mk_binop @{cterm "(\<equiv>) :: prop => _"} Pp
- (Thm.apply @{cterm Trueprop} (list_conj (eqs @ neqs))))
+ conj_aci_rule (Thm.mk_binop \<^cterm>\<open>(\<equiv>) :: prop => _\<close> Pp
+ (Thm.apply \<^cterm>\<open>Trueprop\<close> (list_conj (eqs @ neqs))))
|> Thm.abstract_rule xn x |> Drule.arg_cong_rule e
|> Conv.fconv_rule
(Conv.arg_conv
@@ -170,8 +170,8 @@
in
val reduce_ex_simproc =
- Simplifier.make_simproc @{context} "reduce_ex_simproc"
- {lhss = [@{term "\<exists>x. P x"}], proc = K proc};
+ Simplifier.make_simproc \<^context> "reduce_ex_simproc"
+ {lhss = [\<^term>\<open>\<exists>x. P x\<close>], proc = K proc};
end;
@@ -196,19 +196,19 @@
let
fun h bounds tm =
(case Thm.term_of tm of
- Const (@{const_name HOL.eq}, T) $ _ $ _ =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, T) $ _ $ _ =>
if domain_type T = HOLogic.boolT then find_args bounds tm
else Thm.dest_fun2 tm
- | Const (@{const_name Not}, _) $ _ => h bounds (Thm.dest_arg tm)
- | Const (@{const_name All}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name Pure.all}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name Ex}, _) $ _ => find_body bounds (Thm.dest_arg tm)
- | Const (@{const_name HOL.conj}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name HOL.disj}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name HOL.implies}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Pure.imp}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Pure.eq}, _) $ _ $ _ => find_args bounds tm
- | Const (@{const_name Trueprop}, _) $ _ => h bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Not\<close>, _) $ _ => h bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>All\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Pure.all\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>Ex\<close>, _) $ _ => find_body bounds (Thm.dest_arg tm)
+ | Const (\<^const_name>\<open>HOL.conj\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>HOL.disj\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>HOL.implies\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Pure.imp\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Pure.eq\<close>, _) $ _ $ _ => find_args bounds tm
+ | Const (\<^const_name>\<open>Trueprop\<close>, _) $ _ => h bounds (Thm.dest_arg tm)
| _ => Thm.dest_fun2 tm)
and find_args bounds tm =
(h bounds (Thm.dest_arg tm) handle CTERM _ => h bounds (Thm.dest_arg1 tm))
--- a/src/HOL/Decision_Procs/langford_data.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/langford_data.ML Sat Jan 05 17:24:33 2019 +0100
@@ -90,7 +90,7 @@
val _ =
Theory.setup
- (Attrib.setup @{binding langford}
+ (Attrib.setup \<^binding>\<open>langford\<close>
((keyword qeN |-- thms) --
(keyword gatherN |-- thms) --
(keyword atomsN |-- terms) >> (fn ((qes, gs), atoms) =>
@@ -108,6 +108,6 @@
val _ =
Theory.setup
- (Attrib.setup @{binding langfordsimp} (Attrib.add_del add_simp del_simp) "Langford simpset");
+ (Attrib.setup \<^binding>\<open>langfordsimp\<close> (Attrib.add_del add_simp del_simp) "Langford simpset");
end;
--- a/src/HOL/Decision_Procs/mir_tac.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Decision_Procs/mir_tac.ML Sat Jan 05 17:24:33 2019 +0100
@@ -11,7 +11,7 @@
struct
val mir_ss =
- simpset_of (@{context} delsimps [@{thm "of_int_eq_iff"}, @{thm "of_int_less_iff"}, @{thm "of_int_le_iff"}]
+ simpset_of (\<^context> delsimps [@{thm "of_int_eq_iff"}, @{thm "of_int_less_iff"}, @{thm "of_int_le_iff"}]
addsimps @{thms "iff_real_of_int"});
val nT = HOLogic.natT;
@@ -75,7 +75,7 @@
@{thm div_by_1}, @{thm mod_by_1}, @{thm div_by_Suc_0}, @{thm mod_by_Suc_0},
@{thm "Suc_eq_plus1"}]
addsimps @{thms add.assoc add.commute add.left_commute}
- addsimprocs [@{simproc cancel_div_mod_nat}, @{simproc cancel_div_mod_int}]
+ addsimprocs [\<^simproc>\<open>cancel_div_mod_nat\<close>, \<^simproc>\<open>cancel_div_mod_int\<close>]
val simpset0 = put_simpset HOL_basic_ss ctxt
addsimps @{thms minus_div_mult_eq_mod [symmetric] Suc_eq_plus1}
addsimps comp_ths
@@ -105,7 +105,7 @@
(* The result of the quantifier elimination *)
val (th, tac) =
case Thm.prop_of pre_thm of
- Const (@{const_name Pure.imp}, _) $ (Const (@{const_name Trueprop}, _) $ t1) $ _ =>
+ Const (\<^const_name>\<open>Pure.imp\<close>, _) $ (Const (\<^const_name>\<open>Trueprop\<close>, _) $ t1) $ _ =>
let
val pth = mirfr_oracle (ctxt, Envir.eta_long [] t1)
in
--- a/src/HOL/Eisbach/Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Eisbach/Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -231,9 +231,8 @@
text \<open>
Eisbach_Tools provides the curry and uncurry attributes. This is useful
when the number of premises of a thm isn't known statically. The pattern
- @{term "P \<Longrightarrow> Q"} matches P against the major premise of a thm, and Q is the
- rest of the premises with the conclusion. If we first uncurry, then @{term
- "P \<Longrightarrow> Q"} will match P with the conjunction of all the premises, and Q with
+ \<^term>\<open>P \<Longrightarrow> Q\<close> matches P against the major premise of a thm, and Q is the
+ rest of the premises with the conclusion. If we first uncurry, then \<^term>\<open>P \<Longrightarrow> Q\<close> will match P with the conjunction of all the premises, and Q with
the final conclusion of the rule.
\<close>
--- a/src/HOL/Eisbach/Tests.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Eisbach/Tests.thy Sat Jan 05 17:24:33 2019 +0100
@@ -257,10 +257,10 @@
lemma assumes A shows A by (uses_test\<^sub>1 uses_test\<^sub>1_uses: assms)
-ML \<open>test_internal_fact @{context} "uses_test\<^sub>1_uses"\<close>
+ML \<open>test_internal_fact \<^context> "uses_test\<^sub>1_uses"\<close>
-ML \<open>test_internal_fact @{context} "Tests.uses_test\<^sub>1_uses"\<close>
-ML \<open>test_internal_fact @{context} "Tests.uses_test\<^sub>1.uses_test\<^sub>1_uses"\<close>
+ML \<open>test_internal_fact \<^context> "Tests.uses_test\<^sub>1_uses"\<close>
+ML \<open>test_internal_fact \<^context> "Tests.uses_test\<^sub>1.uses_test\<^sub>1_uses"\<close>
subsection \<open>Basic fact passing\<close>
@@ -422,7 +422,7 @@
);
\<close>
-local_setup \<open>Local_Theory.add_thms_dynamic (@{binding test_dyn}, Data.get)\<close>
+local_setup \<open>Local_Theory.add_thms_dynamic (\<^binding>\<open>test_dyn\<close>, Data.get)\<close>
setup \<open>Context.theory_map (Data.put @{thms TrueI})\<close>
@@ -530,7 +530,7 @@
Args.term -- Args.term --
(Scan.lift (Args.$$$ "rule" -- Args.colon) |-- Attrib.thms) >>
(fn ((x, y), r) => fn ctxt =>
- Method_Closure.apply_method ctxt @{method test_method} [x, y] [r] [] ctxt)
+ Method_Closure.apply_method ctxt \<^method>\<open>test_method\<close> [x, y] [r] [] ctxt)
\<close>
lemma
--- a/src/HOL/HOL.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOL.thy Sat Jan 05 17:24:33 2019 +0100
@@ -898,7 +898,7 @@
structure Blast = Blast
(
structure Classical = Classical
- val Trueprop_const = dest_Const @{const Trueprop}
+ val Trueprop_const = dest_Const \<^const>\<open>Trueprop\<close>
val equality_name = \<^const_name>\<open>HOL.eq\<close>
val not_name = \<^const_name>\<open>Not\<close>
val notE = @{thm notE}
@@ -1508,7 +1508,7 @@
{lhss = [\<^term>\<open>induct_false \<Longrightarrow> PROP P \<Longrightarrow> PROP Q\<close>],
proc = fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- _ $ (P as _ $ @{const induct_false}) $ (_ $ Q $ _) =>
+ _ $ (P as _ $ \<^const>\<open>induct_false\<close>) $ (_ $ Q $ _) =>
if P <> Q then SOME Drule.swap_prems_eq else NONE
| _ => NONE)},
Simplifier.make_simproc \<^context> "induct_equal_conj_curry"
@@ -1517,11 +1517,11 @@
(case Thm.term_of ct of
_ $ (_ $ P) $ _ =>
let
- fun is_conj (@{const induct_conj} $ P $ Q) =
+ fun is_conj (\<^const>\<open>induct_conj\<close> $ P $ Q) =
is_conj P andalso is_conj Q
| is_conj (Const (\<^const_name>\<open>induct_equal\<close>, _) $ _ $ _) = true
- | is_conj @{const induct_true} = true
- | is_conj @{const induct_false} = true
+ | is_conj \<^const>\<open>induct_true\<close> = true
+ | is_conj \<^const>\<open>induct_false\<close> = true
| is_conj _ = false
in if is_conj P then SOME @{thm induct_conj_curry} else NONE end
| _ => NONE)}]
--- a/src/HOL/HOLCF/Bifinite.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Bifinite.thy Sat Jan 05 17:24:33 2019 +0100
@@ -168,7 +168,7 @@
by standard (rule profinite)
text \<open>
- Types @{typ "'a \<rightarrow> 'b"} and @{typ "'a u \<rightarrow>! 'b"} are isomorphic.
+ Types \<^typ>\<open>'a \<rightarrow> 'b\<close> and \<^typ>\<open>'a u \<rightarrow>! 'b\<close> are isomorphic.
\<close>
definition "encode_cfun = (\<Lambda> f. sfun_abs\<cdot>(fup\<cdot>f))"
@@ -198,7 +198,7 @@
qed
text \<open>
- Types @{typ "('a * 'b) u"} and @{typ "'a u \<otimes> 'b u"} are isomorphic.
+ Types \<^typ>\<open>('a * 'b) u\<close> and \<^typ>\<open>'a u \<otimes> 'b u\<close> are isomorphic.
\<close>
definition "encode_prod_u = (\<Lambda>(up\<cdot>(x, y)). (:up\<cdot>x, up\<cdot>y:))"
--- a/src/HOL/HOLCF/Cfun.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Cfun.thy Sat Jan 05 17:24:33 2019 +0100
@@ -35,13 +35,13 @@
parse_translation \<open>
(* rewrite (_cabs x t) => (Abs_cfun (%x. t)) *)
- [Syntax_Trans.mk_binder_tr (@{syntax_const "_cabs"}, @{const_syntax Abs_cfun})]
+ [Syntax_Trans.mk_binder_tr (\<^syntax_const>\<open>_cabs\<close>, \<^const_syntax>\<open>Abs_cfun\<close>)]
\<close>
print_translation \<open>
- [(@{const_syntax Abs_cfun}, fn _ => fn [Abs abs] =>
+ [(\<^const_syntax>\<open>Abs_cfun\<close>, fn _ => fn [Abs abs] =>
let val (x, t) = Syntax_Trans.atomic_abs_tr' abs
- in Syntax.const @{syntax_const "_cabs"} $ x $ t end)]
+ in Syntax.const \<^syntax_const>\<open>_cabs\<close> $ x $ t end)]
\<close> \<comment> \<open>To avoid eta-contraction of body\<close>
text \<open>Syntax for nested abstractions\<close>
@@ -57,10 +57,10 @@
(* cf. Syntax.lambda_ast_tr from src/Pure/Syntax/syn_trans.ML *)
let
fun Lambda_ast_tr [pats, body] =
- Ast.fold_ast_p @{syntax_const "_cabs"}
- (Ast.unfold_ast @{syntax_const "_cargs"} (Ast.strip_positions pats), body)
+ Ast.fold_ast_p \<^syntax_const>\<open>_cabs\<close>
+ (Ast.unfold_ast \<^syntax_const>\<open>_cargs\<close> (Ast.strip_positions pats), body)
| Lambda_ast_tr asts = raise Ast.AST ("Lambda_ast_tr", asts);
- in [(@{syntax_const "_Lambda"}, K Lambda_ast_tr)] end
+ in [(\<^syntax_const>\<open>_Lambda\<close>, K Lambda_ast_tr)] end
\<close>
print_ast_translation \<open>
@@ -68,13 +68,13 @@
(* cf. Syntax.abs_ast_tr' from src/Pure/Syntax/syn_trans.ML *)
let
fun cabs_ast_tr' asts =
- (case Ast.unfold_ast_p @{syntax_const "_cabs"}
- (Ast.Appl (Ast.Constant @{syntax_const "_cabs"} :: asts)) of
+ (case Ast.unfold_ast_p \<^syntax_const>\<open>_cabs\<close>
+ (Ast.Appl (Ast.Constant \<^syntax_const>\<open>_cabs\<close> :: asts)) of
([], _) => raise Ast.AST ("cabs_ast_tr'", asts)
| (xs, body) => Ast.Appl
- [Ast.Constant @{syntax_const "_Lambda"},
- Ast.fold_ast @{syntax_const "_cargs"} xs, body]);
- in [(@{syntax_const "_cabs"}, K cabs_ast_tr')] end
+ [Ast.Constant \<^syntax_const>\<open>_Lambda\<close>,
+ Ast.fold_ast \<^syntax_const>\<open>_cargs\<close> xs, body]);
+ in [(\<^syntax_const>\<open>_cabs\<close>, K cabs_ast_tr')] end
\<close>
text \<open>Dummy patterns for continuous abstraction\<close>
@@ -126,8 +126,8 @@
subsubsection \<open>Beta-reduction simproc\<close>
text \<open>
- Given the term @{term "(\<Lambda> x. f x)\<cdot>y"}, the procedure tries to
- construct the theorem @{term "(\<Lambda> x. f x)\<cdot>y \<equiv> f y"}. If this
+ Given the term \<^term>\<open>(\<Lambda> x. f x)\<cdot>y\<close>, the procedure tries to
+ construct the theorem \<^term>\<open>(\<Lambda> x. f x)\<cdot>y \<equiv> f y\<close>. If this
theorem cannot be completely solved by the cont2cont rules, then
the procedure returns the ordinary conditional \<open>beta_cfun\<close>
rule.
@@ -198,7 +198,7 @@
lemmas monofun_Rep_cfun1 = cont_Rep_cfun1 [THEN cont2mono]
lemmas monofun_Rep_cfun2 = cont_Rep_cfun2 [THEN cont2mono]
-text \<open>contlub, cont properties of @{term Rep_cfun} in each argument\<close>
+text \<open>contlub, cont properties of \<^term>\<open>Rep_cfun\<close> in each argument\<close>
lemma contlub_cfun_arg: "chain Y \<Longrightarrow> f\<cdot>(\<Squnion>i. Y i) = (\<Squnion>i. f\<cdot>(Y i))"
by (rule cont_Rep_cfun2 [THEN cont2contlubE])
@@ -217,7 +217,7 @@
lemma monofun_cfun: "f \<sqsubseteq> g \<Longrightarrow> x \<sqsubseteq> y \<Longrightarrow> f\<cdot>x \<sqsubseteq> g\<cdot>y"
by (rule below_trans [OF monofun_cfun_fun monofun_cfun_arg])
-text \<open>ch2ch - rules for the type @{typ "'a \<rightarrow> 'b"}\<close>
+text \<open>ch2ch - rules for the type \<^typ>\<open>'a \<rightarrow> 'b\<close>\<close>
lemma chain_monofun: "chain Y \<Longrightarrow> chain (\<lambda>i. f\<cdot>(Y i))"
by (erule monofun_Rep_cfun2 [THEN ch2ch_monofun])
@@ -235,7 +235,7 @@
"(\<And>x. chain (\<lambda>i. S i x)) \<Longrightarrow> (\<And>i. cont (\<lambda>x. S i x)) \<Longrightarrow> chain (\<lambda>i. \<Lambda> x. S i x)"
by (simp add: chain_def cfun_below_iff)
-text \<open>contlub, cont properties of @{term Rep_cfun} in both arguments\<close>
+text \<open>contlub, cont properties of \<^term>\<open>Rep_cfun\<close> in both arguments\<close>
lemma lub_APP: "chain F \<Longrightarrow> chain Y \<Longrightarrow> (\<Squnion>i. F i\<cdot>(Y i)) = (\<Squnion>i. F i)\<cdot>(\<Squnion>i. Y i)"
by (simp add: contlub_cfun_fun contlub_cfun_arg diag_lub)
@@ -256,7 +256,7 @@
apply (rule minimal [THEN monofun_cfun_arg])
done
-text \<open>type @{typ "'a \<rightarrow> 'b"} is chain complete\<close>
+text \<open>type \<^typ>\<open>'a \<rightarrow> 'b\<close> is chain complete\<close>
lemma lub_cfun: "chain F \<Longrightarrow> (\<Squnion>i. F i) = (\<Lambda> x. \<Squnion>i. F i\<cdot>x)"
by (simp add: lub_cfun lub_fun ch2ch_lambda)
@@ -264,7 +264,7 @@
subsection \<open>Continuity simplification procedure\<close>
-text \<open>cont2cont lemma for @{term Rep_cfun}\<close>
+text \<open>cont2cont lemma for \<^term>\<open>Rep_cfun\<close>\<close>
lemma cont2cont_APP [simp, cont2cont]:
assumes f: "cont (\<lambda>x. f x)"
@@ -279,7 +279,7 @@
text \<open>
Two specific lemmas for the combination of LCF and HOL terms.
- These lemmas are needed in theories that use types like @{typ "'a \<rightarrow> 'b \<Rightarrow> 'c"}.
+ These lemmas are needed in theories that use types like \<^typ>\<open>'a \<rightarrow> 'b \<Rightarrow> 'c\<close>.
\<close>
lemma cont_APP_app [simp]: "cont f \<Longrightarrow> cont g \<Longrightarrow> cont (\<lambda>x. ((f x)\<cdot>(g x)) s)"
@@ -289,14 +289,14 @@
by (rule cont_APP_app [THEN cont2cont_fun])
-text \<open>cont2mono Lemma for @{term "\<lambda>x. LAM y. c1(x)(y)"}\<close>
+text \<open>cont2mono Lemma for \<^term>\<open>\<lambda>x. LAM y. c1(x)(y)\<close>\<close>
lemma cont2mono_LAM:
"\<lbrakk>\<And>x. cont (\<lambda>y. f x y); \<And>y. monofun (\<lambda>x. f x y)\<rbrakk>
\<Longrightarrow> monofun (\<lambda>x. \<Lambda> y. f x y)"
by (simp add: monofun_def cfun_below_iff)
-text \<open>cont2cont Lemma for @{term "\<lambda>x. LAM y. f x y"}\<close>
+text \<open>cont2cont Lemma for \<^term>\<open>\<lambda>x. LAM y. f x y\<close>\<close>
text \<open>
Not suitable as a cont2cont rule, because on nested lambdas
@@ -332,7 +332,7 @@
subsection \<open>Miscellaneous\<close>
-text \<open>Monotonicity of @{term Abs_cfun}\<close>
+text \<open>Monotonicity of \<^term>\<open>Abs_cfun\<close>\<close>
lemma monofun_LAM: "cont f \<Longrightarrow> cont g \<Longrightarrow> (\<And>x. f x \<sqsubseteq> g x) \<Longrightarrow> (\<Lambda> x. f x) \<sqsubseteq> (\<Lambda> x. g x)"
by (simp add: cfun_below_iff)
@@ -435,8 +435,8 @@
text \<open>
Show that interpretation of (pcpo, \<open>_\<rightarrow>_\<close>) is a category.
\<^item> The class of objects is interpretation of syntactical class pcpo.
- \<^item> The class of arrows between objects @{typ 'a} and @{typ 'b} is interpret. of @{typ "'a \<rightarrow> 'b"}.
- \<^item> The identity arrow is interpretation of @{term ID}.
+ \<^item> The class of arrows between objects \<^typ>\<open>'a\<close> and \<^typ>\<open>'b\<close> is interpret. of \<^typ>\<open>'a \<rightarrow> 'b\<close>.
+ \<^item> The identity arrow is interpretation of \<^term>\<open>ID\<close>.
\<^item> The composition of f and g is interpretation of \<open>oo\<close>.
\<close>
--- a/src/HOL/HOLCF/Cpodef.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Cpodef.thy Sat Jan 05 17:24:33 2019 +0100
@@ -131,7 +131,7 @@
subsubsection \<open>Continuity of \emph{Rep} and \emph{Abs}\<close>
-text \<open>For any sub-cpo, the @{term Rep} function is continuous.\<close>
+text \<open>For any sub-cpo, the \<^term>\<open>Rep\<close> function is continuous.\<close>
theorem typedef_cont_Rep:
fixes Abs :: "'a::cpo \<Rightarrow> 'b::cpo"
@@ -148,7 +148,7 @@
done
text \<open>
- For a sub-cpo, we can make the @{term Abs} function continuous
+ For a sub-cpo, we can make the \<^term>\<open>Abs\<close> function continuous
only if we restrict its domain to the defining subset by
composing it with another continuous function.
\<close>
@@ -205,7 +205,7 @@
text \<open>
As a special case, a subtype of a pcpo has a least element
- if the defining subset contains @{term \<bottom>}.
+ if the defining subset contains \<^term>\<open>\<bottom>\<close>.
\<close>
theorem typedef_pcpo:
@@ -220,8 +220,8 @@
subsubsection \<open>Strictness of \emph{Rep} and \emph{Abs}\<close>
text \<open>
- For a sub-pcpo where @{term \<bottom>} is a member of the defining
- subset, @{term Rep} and @{term Abs} are both strict.
+ For a sub-pcpo where \<^term>\<open>\<bottom>\<close> is a member of the defining
+ subset, \<^term>\<open>Rep\<close> and \<^term>\<open>Abs\<close> are both strict.
\<close>
theorem typedef_Abs_strict:
--- a/src/HOL/HOLCF/Domain.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Domain.thy Sat Jan 05 17:24:33 2019 +0100
@@ -84,12 +84,12 @@
setup \<open>
fold Sign.add_const_constraint
- [ (@{const_name defl}, SOME @{typ "'a::pcpo itself \<Rightarrow> udom defl"})
- , (@{const_name emb}, SOME @{typ "'a::pcpo \<rightarrow> udom"})
- , (@{const_name prj}, SOME @{typ "udom \<rightarrow> 'a::pcpo"})
- , (@{const_name liftdefl}, SOME @{typ "'a::pcpo itself \<Rightarrow> udom u defl"})
- , (@{const_name liftemb}, SOME @{typ "'a::pcpo u \<rightarrow> udom u"})
- , (@{const_name liftprj}, SOME @{typ "udom u \<rightarrow> 'a::pcpo u"}) ]
+ [ (\<^const_name>\<open>defl\<close>, SOME \<^typ>\<open>'a::pcpo itself \<Rightarrow> udom defl\<close>)
+ , (\<^const_name>\<open>emb\<close>, SOME \<^typ>\<open>'a::pcpo \<rightarrow> udom\<close>)
+ , (\<^const_name>\<open>prj\<close>, SOME \<^typ>\<open>udom \<rightarrow> 'a::pcpo\<close>)
+ , (\<^const_name>\<open>liftdefl\<close>, SOME \<^typ>\<open>'a::pcpo itself \<Rightarrow> udom u defl\<close>)
+ , (\<^const_name>\<open>liftemb\<close>, SOME \<^typ>\<open>'a::pcpo u \<rightarrow> udom u\<close>)
+ , (\<^const_name>\<open>liftprj\<close>, SOME \<^typ>\<open>udom u \<rightarrow> 'a::pcpo u\<close>) ]
\<close>
lemma typedef_domain_class:
@@ -142,12 +142,12 @@
setup \<open>
fold Sign.add_const_constraint
- [(@{const_name defl}, SOME @{typ "'a::domain itself \<Rightarrow> udom defl"}),
- (@{const_name emb}, SOME @{typ "'a::domain \<rightarrow> udom"}),
- (@{const_name prj}, SOME @{typ "udom \<rightarrow> 'a::domain"}),
- (@{const_name liftdefl}, SOME @{typ "'a::predomain itself \<Rightarrow> udom u defl"}),
- (@{const_name liftemb}, SOME @{typ "'a::predomain u \<rightarrow> udom u"}),
- (@{const_name liftprj}, SOME @{typ "udom u \<rightarrow> 'a::predomain u"})]
+ [(\<^const_name>\<open>defl\<close>, SOME \<^typ>\<open>'a::domain itself \<Rightarrow> udom defl\<close>),
+ (\<^const_name>\<open>emb\<close>, SOME \<^typ>\<open>'a::domain \<rightarrow> udom\<close>),
+ (\<^const_name>\<open>prj\<close>, SOME \<^typ>\<open>udom \<rightarrow> 'a::domain\<close>),
+ (\<^const_name>\<open>liftdefl\<close>, SOME \<^typ>\<open>'a::predomain itself \<Rightarrow> udom u defl\<close>),
+ (\<^const_name>\<open>liftemb\<close>, SOME \<^typ>\<open>'a::predomain u \<rightarrow> udom u\<close>),
+ (\<^const_name>\<open>liftprj\<close>, SOME \<^typ>\<open>udom u \<rightarrow> 'a::predomain u\<close>)]
\<close>
ML_file "Tools/domaindef.ML"
--- a/src/HOL/HOLCF/FOCUS/Buffer_adm.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/FOCUS/Buffer_adm.thy Sat Jan 05 17:24:33 2019 +0100
@@ -199,7 +199,7 @@
apply (drule spec, erule impE)
apply (erule BufAC_Asm_antiton [THEN antitonPD])
apply (erule is_ub_thelub)
-apply (tactic "smp_tac @{context} 3 1")
+apply (tactic "smp_tac \<^context> 3 1")
apply (drule is_ub_thelub)
apply (drule (1) mp)
apply (drule (1) mp)
--- a/src/HOL/HOLCF/FOCUS/Fstream.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/FOCUS/Fstream.thy Sat Jan 05 17:24:33 2019 +0100
@@ -152,12 +152,12 @@
lemma slen_fscons_eq_rev:
"(#x < enat (Suc (Suc n))) = (\<forall>a y. x \<noteq> a~> y \<or> #y < enat (Suc n))"
apply (simp add: fscons_def2 slen_scons_eq_rev)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
-apply (tactic \<open>step_tac (put_claset HOL_cs @{context} addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
+apply (tactic \<open>step_tac (put_claset HOL_cs \<^context> addSEs @{thms DefE}) 1\<close>)
apply (erule contrapos_np)
apply (fast dest: not_Undef_is_Def [THEN iffD1] elim: DefE)
done
--- a/src/HOL/HOLCF/Fix.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Fix.thy Sat Jan 05 17:24:33 2019 +0100
@@ -46,7 +46,7 @@
definition "fix" :: "('a \<rightarrow> 'a) \<rightarrow> 'a"
where "fix = (\<Lambda> F. \<Squnion>i. iterate i\<cdot>F\<cdot>\<bottom>)"
-text \<open>Binder syntax for @{term fix}\<close>
+text \<open>Binder syntax for \<^term>\<open>fix\<close>\<close>
abbreviation fix_syn :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a" (binder "\<mu> " 10)
where "fix_syn (\<lambda>x. f x) \<equiv> fix\<cdot>(\<Lambda> x. f x)"
@@ -54,9 +54,9 @@
notation (ASCII)
fix_syn (binder "FIX " 10)
-text \<open>Properties of @{term fix}\<close>
+text \<open>Properties of \<^term>\<open>fix\<close>\<close>
-text \<open>direct connection between @{term fix} and iteration\<close>
+text \<open>direct connection between \<^term>\<open>fix\<close> and iteration\<close>
lemma fix_def2: "fix\<cdot>F = (\<Squnion>i. iterate i\<cdot>F\<cdot>\<bottom>)"
by (simp add: fix_def)
@@ -114,7 +114,7 @@
lemma fix_eq5: "f = fix\<cdot>F \<Longrightarrow> f\<cdot>x = F\<cdot>f\<cdot>x"
by (erule fix_eq4 [THEN cfun_fun_cong])
-text \<open>strictness of @{term fix}\<close>
+text \<open>strictness of \<^term>\<open>fix\<close>\<close>
lemma fix_bottom_iff: "fix\<cdot>F = \<bottom> \<longleftrightarrow> F\<cdot>\<bottom> = \<bottom>"
apply (rule iffI)
@@ -129,7 +129,7 @@
lemma fix_defined: "F\<cdot>\<bottom> \<noteq> \<bottom> \<Longrightarrow> fix\<cdot>F \<noteq> \<bottom>"
by (simp add: fix_bottom_iff)
-text \<open>@{term fix} applied to identity and constant functions\<close>
+text \<open>\<^term>\<open>fix\<close> applied to identity and constant functions\<close>
lemma fix_id: "(\<mu> x. x) = \<bottom>"
by (simp add: fix_strict)
--- a/src/HOL/HOLCF/Fixrec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Fixrec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -233,15 +233,15 @@
setup \<open>
Fixrec.add_matchers
- [ (@{const_name up}, @{const_name match_up}),
- (@{const_name sinl}, @{const_name match_sinl}),
- (@{const_name sinr}, @{const_name match_sinr}),
- (@{const_name spair}, @{const_name match_spair}),
- (@{const_name Pair}, @{const_name match_Pair}),
- (@{const_name ONE}, @{const_name match_ONE}),
- (@{const_name TT}, @{const_name match_TT}),
- (@{const_name FF}, @{const_name match_FF}),
- (@{const_name bottom}, @{const_name match_bottom}) ]
+ [ (\<^const_name>\<open>up\<close>, \<^const_name>\<open>match_up\<close>),
+ (\<^const_name>\<open>sinl\<close>, \<^const_name>\<open>match_sinl\<close>),
+ (\<^const_name>\<open>sinr\<close>, \<^const_name>\<open>match_sinr\<close>),
+ (\<^const_name>\<open>spair\<close>, \<^const_name>\<open>match_spair\<close>),
+ (\<^const_name>\<open>Pair\<close>, \<^const_name>\<open>match_Pair\<close>),
+ (\<^const_name>\<open>ONE\<close>, \<^const_name>\<open>match_ONE\<close>),
+ (\<^const_name>\<open>TT\<close>, \<^const_name>\<open>match_TT\<close>),
+ (\<^const_name>\<open>FF\<close>, \<^const_name>\<open>match_FF\<close>),
+ (\<^const_name>\<open>bottom\<close>, \<^const_name>\<open>match_bottom\<close>) ]
\<close>
hide_const (open) succeed fail run
--- a/src/HOL/HOLCF/Fun_Cpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Fun_Cpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -57,7 +57,7 @@
lemma ch2ch_lambda: "(\<And>x. chain (\<lambda>i. S i x)) \<Longrightarrow> chain S"
by (simp add: chain_def below_fun_def)
-text \<open>Type @{typ "'a::type \<Rightarrow> 'b::cpo"} is chain complete\<close>
+text \<open>Type \<^typ>\<open>'a::type \<Rightarrow> 'b::cpo\<close> is chain complete\<close>
lemma is_lub_lambda: "(\<And>x. range (\<lambda>i. Y i x) <<| f x) \<Longrightarrow> range Y <<| f"
by (simp add: is_lub_def is_ub_def below_fun_def)
--- a/src/HOL/HOLCF/IOA/ABP/Correctness.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/ABP/Correctness.thy Sat Jan 05 17:24:33 2019 +0100
@@ -84,7 +84,7 @@
lemma last_ind_on_first:
"l ~= [] ==> hd (reverse (reduce (a # l))) = hd (reverse (reduce l))"
apply simp
- apply (tactic \<open>auto_tac (put_simpset HOL_ss @{context}
+ apply (tactic \<open>auto_tac (put_simpset HOL_ss \<^context>
addsimps (@{thms reverse.simps} @ [@{thm hd_append}, @{thm rev_red_not_nil}])
|> Splitter.add_split @{thm list.split})\<close>)
done
@@ -165,7 +165,7 @@
lemma sender_abstraction: "is_weak_ref_map reduce srch_ioa srch_fin_ioa"
apply (tactic \<open>
- simp_tac (put_simpset HOL_ss @{context}
+ simp_tac (put_simpset HOL_ss \<^context>
addsimps [@{thm srch_fin_ioa_def}, @{thm rsch_fin_ioa_def},
@{thm srch_ioa_def}, @{thm rsch_ioa_def}, @{thm rename_through_pmap},
@{thm channel_abstraction}]) 1\<close>)
@@ -173,7 +173,7 @@
lemma receiver_abstraction: "is_weak_ref_map reduce rsch_ioa rsch_fin_ioa"
apply (tactic \<open>
- simp_tac (put_simpset HOL_ss @{context}
+ simp_tac (put_simpset HOL_ss \<^context>
addsimps [@{thm srch_fin_ioa_def}, @{thm rsch_fin_ioa_def},
@{thm srch_ioa_def}, @{thm rsch_ioa_def}, @{thm rename_through_pmap},
@{thm channel_abstraction}]) 1\<close>)
--- a/src/HOL/HOLCF/IOA/Automata.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/Automata.thy Sat Jan 05 17:24:33 2019 +0100
@@ -316,7 +316,7 @@
"compatible A B \<Longrightarrow> input_enabled A \<Longrightarrow> input_enabled B \<Longrightarrow> input_enabled (A \<parallel> B)"
apply (unfold input_enabled_def)
apply (simp add: Let_def inputs_of_par trans_of_par)
- apply (tactic "safe_tac (Context.raw_transfer @{theory} @{theory_context Fun})")
+ apply (tactic "safe_tac (Context.raw_transfer \<^theory> \<^theory_context>\<open>Fun\<close>)")
apply (simp add: inp_is_act)
prefer 2
apply (simp add: inp_is_act)
--- a/src/HOL/HOLCF/IOA/CompoScheds.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/CompoScheds.thy Sat Jan 05 17:24:33 2019 +0100
@@ -277,7 +277,7 @@
@{thms Filter_def Forall_def sforall_def mkex_def stutter_def},
asm_full_simp_tac ctxt,
SELECT_GOAL
- (safe_tac (Context.raw_transfer (Proof_Context.theory_of ctxt) @{theory_context Fun})),
+ (safe_tac (Context.raw_transfer (Proof_Context.theory_of ctxt) \<^theory_context>\<open>Fun\<close>)),
Seq_case_simp_tac ctxt exA,
Seq_case_simp_tac ctxt exB,
asm_full_simp_tac ctxt,
--- a/src/HOL/HOLCF/IOA/CompoTraces.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/CompoTraces.thy Sat Jan 05 17:24:33 2019 +0100
@@ -448,7 +448,7 @@
apply (rule_tac x = "schA" in spec)
apply (rule_tac x = "schB" in spec)
apply (rule_tac x = "tr" in spec)
- apply (tactic "thin_tac' @{context} 5 1")
+ apply (tactic "thin_tac' \<^context> 5 1")
apply (rule nat_less_induct)
apply (rule allI)+
apply (rename_tac tr schB schA)
@@ -458,7 +458,7 @@
apply (case_tac "Forall (\<lambda>x. x \<in> act B \<and> x \<notin> act A) tr")
apply (rule seq_take_lemma [THEN iffD2, THEN spec])
- apply (tactic "thin_tac' @{context} 5 1")
+ apply (tactic "thin_tac' \<^context> 5 1")
apply (case_tac "Finite tr")
@@ -666,7 +666,7 @@
apply (rule_tac x = "schA" in spec)
apply (rule_tac x = "schB" in spec)
apply (rule_tac x = "tr" in spec)
- apply (tactic "thin_tac' @{context} 5 1")
+ apply (tactic "thin_tac' \<^context> 5 1")
apply (rule nat_less_induct)
apply (rule allI)+
apply (rename_tac tr schB schA)
@@ -676,7 +676,7 @@
apply (case_tac "Forall (\<lambda>x. x \<in> act A \<and> x \<notin> act B) tr")
apply (rule seq_take_lemma [THEN iffD2, THEN spec])
- apply (tactic "thin_tac' @{context} 5 1")
+ apply (tactic "thin_tac' \<^context> 5 1")
apply (case_tac "Finite tr")
--- a/src/HOL/HOLCF/IOA/NTP/Impl.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/NTP/Impl.thy Sat Jan 05 17:24:33 2019 +0100
@@ -102,8 +102,8 @@
3) renname_ss unfolds transitions and the abstract channel *)
ML \<open>
-val ss = simpset_of (@{context} addsimps @{thms "transitions"});
-val rename_ss = simpset_of (put_simpset ss @{context} addsimps @{thms unfold_renaming});
+val ss = simpset_of (\<^context> addsimps @{thms "transitions"});
+val rename_ss = simpset_of (put_simpset ss \<^context> addsimps @{thms unfold_renaming});
fun tac ctxt =
asm_simp_tac (put_simpset ss ctxt
@@ -131,34 +131,34 @@
apply (simp add: Impl.inv1_def split del: if_split)
apply (induct_tac a)
-apply (tactic "EVERY1[tac @{context}, tac @{context}, tac @{context}, tac @{context}]")
-apply (tactic "tac @{context} 1")
-apply (tactic "tac_ren @{context} 1")
+apply (tactic "EVERY1[tac \<^context>, tac \<^context>, tac \<^context>, tac \<^context>]")
+apply (tactic "tac \<^context> 1")
+apply (tactic "tac_ren \<^context> 1")
txt \<open>5 + 1\<close>
-apply (tactic "tac @{context} 1")
-apply (tactic "tac_ren @{context} 1")
+apply (tactic "tac \<^context> 1")
+apply (tactic "tac_ren \<^context> 1")
txt \<open>4 + 1\<close>
-apply (tactic \<open>EVERY1[tac @{context}, tac @{context}, tac @{context}, tac @{context}]\<close>)
+apply (tactic \<open>EVERY1[tac \<^context>, tac \<^context>, tac \<^context>, tac \<^context>]\<close>)
txt \<open>Now the other half\<close>
apply (simp add: Impl.inv1_def split del: if_split)
apply (induct_tac a)
-apply (tactic "EVERY1 [tac @{context}, tac @{context}]")
+apply (tactic "EVERY1 [tac \<^context>, tac \<^context>]")
txt \<open>detour 1\<close>
-apply (tactic "tac @{context} 1")
-apply (tactic "tac_ren @{context} 1")
+apply (tactic "tac \<^context> 1")
+apply (tactic "tac_ren \<^context> 1")
apply (rule impI)
apply (erule conjE)+
apply (simp (no_asm_simp) add: hdr_sum_def Multiset.count_def Multiset.countm_nonempty_def
split: if_split)
txt \<open>detour 2\<close>
-apply (tactic "tac @{context} 1")
-apply (tactic "tac_ren @{context} 1")
+apply (tactic "tac \<^context> 1")
+apply (tactic "tac_ren \<^context> 1")
apply (rule impI)
apply (erule conjE)+
apply (simp add: Impl.hdr_sum_def Multiset.count_def Multiset.countm_nonempty_def
@@ -183,8 +183,8 @@
apply (rule countm_spurious_delm)
apply (simp (no_asm))
-apply (tactic "EVERY1 [tac @{context}, tac @{context}, tac @{context},
- tac @{context}, tac @{context}, tac @{context}]")
+apply (tactic "EVERY1 [tac \<^context>, tac \<^context>, tac \<^context>,
+ tac \<^context>, tac \<^context>, tac \<^context>]")
done
@@ -203,34 +203,34 @@
txt \<open>10 cases. First 4 are simple, since state doesn't change\<close>
- ML_prf \<open>val tac2 = asm_full_simp_tac (put_simpset ss @{context} addsimps [@{thm inv2_def}])\<close>
+ ML_prf \<open>val tac2 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv2_def}])\<close>
txt \<open>10 - 7\<close>
apply (tactic "EVERY1 [tac2,tac2,tac2,tac2]")
txt \<open>6\<close>
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
txt \<open>6 - 5\<close>
apply (tactic "EVERY1 [tac2,tac2]")
txt \<open>4\<close>
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
apply (tactic "tac2 1")
txt \<open>3\<close>
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE})] 1\<close>)
apply (tactic "tac2 1")
- apply (tactic \<open>fold_goals_tac @{context} [rewrite_rule @{context} [@{thm Packet.hdr_def}]
+ apply (tactic \<open>fold_goals_tac \<^context> [rewrite_rule \<^context> [@{thm Packet.hdr_def}]
(@{thm Impl.hdr_sum_def})]\<close>)
apply arith
txt \<open>2\<close>
apply (tactic "tac2 1")
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct1] 1\<close>)
apply (intro strip)
apply (erule conjE)+
@@ -238,12 +238,12 @@
txt \<open>1\<close>
apply (tactic "tac2 1")
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct2] 1\<close>)
apply (intro strip)
apply (erule conjE)+
- apply (tactic \<open>fold_goals_tac @{context}
- [rewrite_rule @{context} [@{thm Packet.hdr_def}] (@{thm Impl.hdr_sum_def})]\<close>)
+ apply (tactic \<open>fold_goals_tac \<^context>
+ [rewrite_rule \<^context> [@{thm Packet.hdr_def}] (@{thm Impl.hdr_sum_def})]\<close>)
apply simp
done
@@ -260,13 +260,13 @@
apply (simp (no_asm_simp) add: impl_ioas split del: if_split)
apply (induct_tac "a")
- ML_prf \<open>val tac3 = asm_full_simp_tac (put_simpset ss @{context} addsimps [@{thm inv3_def}])\<close>
+ ML_prf \<open>val tac3 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv3_def}])\<close>
txt \<open>10 - 8\<close>
apply (tactic "EVERY1[tac3,tac3,tac3]")
- apply (tactic "tac_ren @{context} 1")
+ apply (tactic "tac_ren \<^context> 1")
apply (intro strip, (erule conjE)+)
apply hypsubst
apply (erule exE)
@@ -274,7 +274,7 @@
txt \<open>7\<close>
apply (tactic "tac3 1")
- apply (tactic "tac_ren @{context} 1")
+ apply (tactic "tac_ren \<^context> 1")
apply force
txt \<open>6 - 3\<close>
@@ -282,18 +282,18 @@
apply (tactic "EVERY1[tac3,tac3,tac3,tac3]")
txt \<open>2\<close>
- apply (tactic "asm_full_simp_tac (put_simpset ss @{context}) 1")
+ apply (tactic "asm_full_simp_tac (put_simpset ss \<^context>) 1")
apply (simp (no_asm) add: inv3_def)
apply (intro strip, (erule conjE)+)
apply (rule imp_disjL [THEN iffD1])
apply (rule impI)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv2_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
apply (erule conjE)+
apply (rule_tac j = "count (ssent (sen s)) (~sbit (sen s))" and
k = "count (rsent (rec s)) (sbit (sen s))" in le_trans)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm inv1_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm inv1_def}]
(@{thm raw_inv1} RS @{thm invariantE}) RS conjunct2] 1\<close>)
apply (simp add: hdr_sum_def Multiset.count_def)
apply (rule add_le_mono)
@@ -308,7 +308,7 @@
apply (intro strip, (erule conjE)+)
apply (rule imp_disjL [THEN iffD1])
apply (rule impI)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv2_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
done
@@ -325,7 +325,7 @@
apply (simp (no_asm_simp) add: impl_ioas split del: if_split)
apply (induct_tac "a")
- ML_prf \<open>val tac4 = asm_full_simp_tac (put_simpset ss @{context} addsimps [@{thm inv4_def}])\<close>
+ ML_prf \<open>val tac4 = asm_full_simp_tac (put_simpset ss \<^context> addsimps [@{thm inv4_def}])\<close>
txt \<open>10 - 2\<close>
@@ -334,7 +334,7 @@
txt \<open>2 b\<close>
apply (intro strip, (erule conjE)+)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv2_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
apply simp
@@ -342,9 +342,9 @@
apply (tactic "tac4 1")
apply (intro strip, (erule conjE)+)
apply (rule ccontr)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv2_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv2_def}]
(@{thm raw_inv2} RS @{thm invariantE})] 1\<close>)
- apply (tactic \<open>forward_tac @{context} [rewrite_rule @{context} [@{thm Impl.inv3_def}]
+ apply (tactic \<open>forward_tac \<^context> [rewrite_rule \<^context> [@{thm Impl.inv3_def}]
(@{thm raw_inv3} RS @{thm invariantE})] 1\<close>)
apply simp
apply (rename_tac m, erule_tac x = "m" in allE)
--- a/src/HOL/HOLCF/IOA/ShortExecutions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/IOA/ShortExecutions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -171,7 +171,7 @@
apply (rule mp)
prefer 2
apply assumption
- apply (tactic "thin_tac' @{context} 1 1")
+ apply (tactic "thin_tac' \<^context> 1 1")
apply (rule_tac x = "s" in spec)
apply (rule nat_less_induct)
apply (intro strip)
--- a/src/HOL/HOLCF/Library/Bool_Discrete.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Bool_Discrete.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports HOLCF
begin
-text \<open>Discrete cpo instance for @{typ bool}.\<close>
+text \<open>Discrete cpo instance for \<^typ>\<open>bool\<close>.\<close>
instantiation bool :: discrete_cpo
begin
--- a/src/HOL/HOLCF/Library/Char_Discrete.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Char_Discrete.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports HOLCF
begin
-subsection \<open>Discrete cpo instance for @{typ char}.\<close>
+subsection \<open>Discrete cpo instance for \<^typ>\<open>char\<close>.\<close>
instantiation char :: discrete_cpo
begin
--- a/src/HOL/HOLCF/Library/Int_Discrete.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Int_Discrete.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports HOLCF
begin
-text \<open>Discrete cpo instance for @{typ int}.\<close>
+text \<open>Discrete cpo instance for \<^typ>\<open>int\<close>.\<close>
instantiation int :: discrete_cpo
begin
--- a/src/HOL/HOLCF/Library/List_Cpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/List_Cpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -329,8 +329,8 @@
setup \<open>
Fixrec.add_matchers
- [ (@{const_name Nil}, @{const_name match_Nil}),
- (@{const_name Cons}, @{const_name match_Cons}) ]
+ [ (\<^const_name>\<open>Nil\<close>, \<^const_name>\<open>match_Nil\<close>),
+ (\<^const_name>\<open>Cons\<close>, \<^const_name>\<open>match_Cons\<close>) ]
\<close>
end
--- a/src/HOL/HOLCF/Library/List_Predomain.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/List_Predomain.thy Sat Jan 05 17:24:33 2019 +0100
@@ -61,7 +61,7 @@
done
text \<open>
- Types @{typ "'a list u"}. and @{typ "'a u slist"} are isomorphic.
+ Types \<^typ>\<open>'a list u\<close> and \<^typ>\<open>'a u slist\<close> are isomorphic.
\<close>
fixrec encode_list_u where
@@ -168,7 +168,7 @@
done
setup \<open>
- Domain_Take_Proofs.add_rec_type (@{type_name "list"}, [true])
+ Domain_Take_Proofs.add_rec_type (\<^type_name>\<open>list\<close>, [true])
\<close>
end
--- a/src/HOL/HOLCF/Library/Nat_Discrete.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Nat_Discrete.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports HOLCF
begin
-text \<open>Discrete cpo instance for @{typ nat}.\<close>
+text \<open>Discrete cpo instance for \<^typ>\<open>nat\<close>.\<close>
instantiation nat :: discrete_cpo
begin
--- a/src/HOL/HOLCF/Library/Option_Cpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Option_Cpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -210,8 +210,8 @@
setup \<open>
Fixrec.add_matchers
- [ (@{const_name None}, @{const_name match_None}),
- (@{const_name Some}, @{const_name match_Some}) ]
+ [ (\<^const_name>\<open>None\<close>, \<^const_name>\<open>match_None\<close>),
+ (\<^const_name>\<open>Some\<close>, \<^const_name>\<open>match_Some\<close>) ]
\<close>
subsection \<open>Option type is a predomain\<close>
@@ -286,7 +286,7 @@
by (simp add: cfcomp1 u_map_map encode_option_option_map)
setup \<open>
- Domain_Take_Proofs.add_rec_type (@{type_name "option"}, [true])
+ Domain_Take_Proofs.add_rec_type (\<^type_name>\<open>option\<close>, [true])
\<close>
end
--- a/src/HOL/HOLCF/Library/Sum_Cpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Library/Sum_Cpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -248,8 +248,8 @@
setup \<open>
Fixrec.add_matchers
- [ (@{const_name Inl}, @{const_name match_Inl}),
- (@{const_name Inr}, @{const_name match_Inr}) ]
+ [ (\<^const_name>\<open>Inl\<close>, \<^const_name>\<open>match_Inl\<close>),
+ (\<^const_name>\<open>Inr\<close>, \<^const_name>\<open>match_Inr\<close>) ]
\<close>
subsection \<open>Disjoint sum is a predomain\<close>
@@ -366,7 +366,7 @@
done
setup \<open>
- Domain_Take_Proofs.add_rec_type (@{type_name "sum"}, [true, true])
+ Domain_Take_Proofs.add_rec_type (\<^type_name>\<open>sum\<close>, [true, true])
\<close>
end
--- a/src/HOL/HOLCF/Lift.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Lift.thy Sat Jan 05 17:24:33 2019 +0100
@@ -32,7 +32,7 @@
old_rep_datatype "\<bottom>::'a lift" Def
by (erule lift_induct) (simp_all add: Def_def Abs_lift_inject inst_lift_pcpo)
-text \<open>@{term bottom} and @{term Def}\<close>
+text \<open>\<^term>\<open>bottom\<close> and \<^term>\<open>Def\<close>\<close>
lemma not_Undef_is_Def: "(x \<noteq> \<bottom>) = (\<exists>y. x = Def y)"
by (cases x) simp_all
@@ -41,7 +41,7 @@
by (cases x) simp_all
text \<open>
- For @{term "x ~= \<bottom>"} in assumptions \<open>defined\<close> replaces \<open>x\<close> by \<open>Def a\<close> in conclusion.\<close>
+ For \<^term>\<open>x ~= \<bottom>\<close> in assumptions \<open>defined\<close> replaces \<open>x\<close> by \<open>Def a\<close> in conclusion.\<close>
method_setup defined = \<open>
Scan.succeed (fn ctxt => SIMPLE_METHOD'
@@ -70,7 +70,7 @@
by (induct x) auto
qed
-subsection \<open>Continuity of @{const case_lift}\<close>
+subsection \<open>Continuity of \<^const>\<open>case_lift\<close>\<close>
lemma case_lift_eq: "case_lift \<bottom> f x = fup\<cdot>(\<Lambda> y. f (undiscr y))\<cdot>(Rep_lift x)"
apply (induct x, unfold lift.case)
--- a/src/HOL/HOLCF/One.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/One.thy Sat Jan 05 17:24:33 2019 +0100
@@ -16,7 +16,7 @@
definition ONE :: "one"
where "ONE \<equiv> Def ()"
-text \<open>Exhaustion and Elimination for type @{typ one}\<close>
+text \<open>Exhaustion and Elimination for type \<^typ>\<open>one\<close>\<close>
lemma Exh_one: "t = \<bottom> \<or> t = ONE"
by (induct t) (simp_all add: ONE_def)
@@ -49,7 +49,7 @@
lemma compact_ONE: "compact ONE"
by (rule compact_chfin)
-text \<open>Case analysis function for type @{typ one}\<close>
+text \<open>Case analysis function for type \<^typ>\<open>one\<close>\<close>
definition one_case :: "'a::pcpo \<rightarrow> one \<rightarrow> 'a"
where "one_case = (\<Lambda> a x. seq\<cdot>x\<cdot>a)"
--- a/src/HOL/HOLCF/Pcpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Pcpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -155,11 +155,11 @@
syntax UU :: logic
translations "UU" \<rightharpoonup> "CONST bottom"
-text \<open>Simproc to rewrite @{term "\<bottom> = x"} to @{term "x = \<bottom>"}.\<close>
+text \<open>Simproc to rewrite \<^term>\<open>\<bottom> = x\<close> to \<^term>\<open>x = \<bottom>\<close>.\<close>
setup \<open>Reorient_Proc.add (fn Const(\<^const_name>\<open>bottom\<close>, _) => true | _ => false)\<close>
simproc_setup reorient_bottom ("\<bottom> = x") = Reorient_Proc.proc
-text \<open>useful lemmas about @{term \<bottom>}\<close>
+text \<open>useful lemmas about \<^term>\<open>\<bottom>\<close>\<close>
lemma below_bottom_iff [simp]: "x \<sqsubseteq> \<bottom> \<longleftrightarrow> x = \<bottom>"
by (simp add: po_eq_conv)
--- a/src/HOL/HOLCF/Porder.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Porder.thy Sat Jan 05 17:24:33 2019 +0100
@@ -149,7 +149,7 @@
lemma is_lub_unique: "S <<| x \<Longrightarrow> S <<| y \<Longrightarrow> x = y"
unfolding is_lub_def is_ub_def by (blast intro: below_antisym)
-text \<open>technical lemmas about @{term lub} and @{term is_lub}\<close>
+text \<open>technical lemmas about \<^term>\<open>lub\<close> and \<^term>\<open>is_lub\<close>\<close>
lemma is_lub_lub: "M <<| x \<Longrightarrow> M <<| lub M"
unfolding lub_def by (rule theI [OF _ is_lub_unique])
--- a/src/HOL/HOLCF/Powerdomains.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Powerdomains.thy Sat Jan 05 17:24:33 2019 +0100
@@ -204,9 +204,9 @@
setup \<open>
fold Domain_Take_Proofs.add_rec_type
- [(@{type_name "upper_pd"}, [true]),
- (@{type_name "lower_pd"}, [true]),
- (@{type_name "convex_pd"}, [true])]
+ [(\<^type_name>\<open>upper_pd\<close>, [true]),
+ (\<^type_name>\<open>lower_pd\<close>, [true]),
+ (\<^type_name>\<open>convex_pd\<close>, [true])]
\<close>
end
--- a/src/HOL/HOLCF/Product_Cpo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Product_Cpo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -80,7 +80,7 @@
lemma ch2ch_Pair [simp]: "chain X \<Longrightarrow> chain Y \<Longrightarrow> chain (\<lambda>i. (X i, Y i))"
by (rule chainI, simp add: chainE)
-text \<open>@{term fst} and @{term snd} are monotone\<close>
+text \<open>\<^term>\<open>fst\<close> and \<^term>\<open>snd\<close> are monotone\<close>
lemma fst_monofun: "x \<sqsubseteq> y \<Longrightarrow> fst x \<sqsubseteq> fst y"
by (simp add: below_prod_def)
--- a/src/HOL/HOLCF/Representable.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Representable.thy Sat Jan 05 17:24:33 2019 +0100
@@ -64,14 +64,14 @@
qed
text \<open>
- Constants @{const liftemb} and @{const liftprj} imply class predomain.
+ Constants \<^const>\<open>liftemb\<close> and \<^const>\<open>liftprj\<close> imply class predomain.
\<close>
setup \<open>
fold Sign.add_const_constraint
- [(@{const_name liftemb}, SOME @{typ "'a::predomain u \<rightarrow> udom u"}),
- (@{const_name liftprj}, SOME @{typ "udom u \<rightarrow> 'a::predomain u"}),
- (@{const_name liftdefl}, SOME @{typ "'a::predomain itself \<Rightarrow> udom u defl"})]
+ [(\<^const_name>\<open>liftemb\<close>, SOME \<^typ>\<open>'a::predomain u \<rightarrow> udom u\<close>),
+ (\<^const_name>\<open>liftprj\<close>, SOME \<^typ>\<open>udom u \<rightarrow> 'a::predomain u\<close>),
+ (\<^const_name>\<open>liftdefl\<close>, SOME \<^typ>\<open>'a::predomain itself \<Rightarrow> udom u defl\<close>)]
\<close>
interpretation predomain: pcpo_ep_pair liftemb liftprj
--- a/src/HOL/HOLCF/Tools/Domain/domain.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain.ML Sat Jan 05 17:24:33 2019 +0100
@@ -188,8 +188,8 @@
Domain_Isomorphism.domain_isomorphism (map prep spec)
end
-fun pcpo_arg lazy = if lazy then @{sort cpo} else @{sort pcpo}
-fun rep_arg lazy = if lazy then @{sort predomain} else @{sort "domain"}
+fun pcpo_arg lazy = if lazy then \<^sort>\<open>cpo\<close> else \<^sort>\<open>pcpo\<close>
+fun rep_arg lazy = if lazy then \<^sort>\<open>predomain\<close> else \<^sort>\<open>domain\<close>
fun read_sort thy (SOME s) = Syntax.read_sort_global thy s
| read_sort thy NONE = Sign.defaultS thy
@@ -228,9 +228,9 @@
(** outer syntax **)
val dest_decl : (bool * binding option * string) parser =
- @{keyword "("} |-- Scan.optional (@{keyword "lazy"} >> K true) false --
- (Parse.binding >> SOME) -- (@{keyword "::"} |-- Parse.typ) --| @{keyword ")"} >> Scan.triple1
- || @{keyword "("} |-- @{keyword "lazy"} |-- Parse.typ --| @{keyword ")"}
+ \<^keyword>\<open>(\<close> |-- Scan.optional (\<^keyword>\<open>lazy\<close> >> K true) false --
+ (Parse.binding >> SOME) -- (\<^keyword>\<open>::\<close> |-- Parse.typ) --| \<^keyword>\<open>)\<close> >> Scan.triple1
+ || \<^keyword>\<open>(\<close> |-- \<^keyword>\<open>lazy\<close> |-- Parse.typ --| \<^keyword>\<open>)\<close>
>> (fn t => (true, NONE, t))
|| Parse.typ >> (fn t => (false, NONE, t))
@@ -239,10 +239,10 @@
val domain_decl =
(Parse.type_args_constrained -- Parse.binding -- Parse.opt_mixfix) --
- (@{keyword "="} |-- Parse.enum1 "|" cons_decl)
+ (\<^keyword>\<open>=\<close> |-- Parse.enum1 "|" cons_decl)
val domains_decl =
- Scan.optional (@{keyword "("} |-- (@{keyword "unsafe"} >> K true) --| @{keyword ")"}) false --
+ Scan.optional (\<^keyword>\<open>(\<close> |-- (\<^keyword>\<open>unsafe\<close> >> K true) --| \<^keyword>\<open>)\<close>) false --
Parse.and_list1 domain_decl
fun mk_domain
@@ -261,7 +261,7 @@
end
val _ =
- Outer_Syntax.command @{command_keyword domain} "define recursive domains (HOLCF)"
+ Outer_Syntax.command \<^command_keyword>\<open>domain\<close> "define recursive domains (HOLCF)"
(domains_decl >> (Toplevel.theory o mk_domain))
end
--- a/src/HOL/HOLCF/Tools/Domain/domain_axioms.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain_axioms.ML Sat Jan 05 17:24:33 2019 +0100
@@ -103,7 +103,7 @@
(* axiomatize type constructor arities *)
fun thy_arity (_, _, (lhsT, _)) =
let val (dname, tvars) = dest_Type lhsT
- in (dname, map (snd o dest_TFree) tvars, @{sort pcpo}) end
+ in (dname, map (snd o dest_TFree) tvars, \<^sort>\<open>pcpo\<close>) end
val thy = fold (Axclass.arity_axiomatization o thy_arity) dom_eqns thy
(* declare and axiomatize abs/rep *)
--- a/src/HOL/HOLCF/Tools/Domain/domain_constructors.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain_constructors.ML Sat Jan 05 17:24:33 2019 +0100
@@ -65,14 +65,14 @@
(************************** miscellaneous functions ***************************)
val simple_ss =
- simpset_of (put_simpset HOL_basic_ss @{context} addsimps @{thms simp_thms})
+ simpset_of (put_simpset HOL_basic_ss \<^context> addsimps @{thms simp_thms})
val beta_rules =
@{thms beta_cfun cont_id cont_const cont2cont_APP cont2cont_LAM'} @
@{thms cont2cont_fst cont2cont_snd cont2cont_Pair}
val beta_ss =
- simpset_of (put_simpset HOL_basic_ss @{context} addsimps (@{thms simp_thms} @ beta_rules))
+ simpset_of (put_simpset HOL_basic_ss \<^context> addsimps (@{thms simp_thms} @ beta_rules))
fun define_consts
(specs : (binding * term * mixfix) list)
@@ -196,8 +196,8 @@
(* prove exhaustiveness of constructors *)
local
- fun arg2typ n (true, _) = (n+1, mk_upT (TVar (("'a", n), @{sort cpo})))
- | arg2typ n (false, _) = (n+1, TVar (("'a", n), @{sort pcpo}))
+ fun arg2typ n (true, _) = (n+1, mk_upT (TVar (("'a", n), \<^sort>\<open>cpo\<close>)))
+ | arg2typ n (false, _) = (n+1, TVar (("'a", n), \<^sort>\<open>pcpo\<close>))
fun args2typ n [] = (n, oneT)
| args2typ n [arg] = arg2typ n arg
| args2typ n (arg::args) =
@@ -408,7 +408,7 @@
(* calculate function arguments of case combinator *)
val tns = map fst (Term.add_tfreesT lhsT [])
- val resultT = TFree (singleton (Name.variant_list tns) "'t", @{sort pcpo})
+ val resultT = TFree (singleton (Name.variant_list tns) "'t", \<^sort>\<open>pcpo\<close>)
fun fTs T = map (fn (_, args) => map snd args -->> T) spec
val fns = Old_Datatype_Prop.indexify_names (map (K "f") spec)
val fs = map Free (fns ~~ fTs resultT)
@@ -456,26 +456,26 @@
fun argvars n args = map_index (argvar n) args
fun app s (l, r) = Ast.mk_appl (Ast.Constant s) [l, r]
val cabs = app "_cabs"
- val capp = app @{const_syntax Rep_cfun}
+ val capp = app \<^const_syntax>\<open>Rep_cfun\<close>
val capps = Library.foldl capp
fun con1 authentic n (con, args) =
Library.foldl capp (c_ast authentic con, argvars n args)
fun con1_constraint authentic n (con, args) =
Library.foldl capp
(Ast.Appl
- [Ast.Constant @{syntax_const "_constrain"}, c_ast authentic con,
+ [Ast.Constant \<^syntax_const>\<open>_constrain\<close>, c_ast authentic con,
Ast.Variable ("'a" ^ string_of_int n)],
argvars n args)
fun case1 constraint authentic (n, c) =
- app @{syntax_const "_case1"}
+ app \<^syntax_const>\<open>_case1\<close>
((if constraint then con1_constraint else con1) authentic n c, expvar n)
fun arg1 (n, (_, args)) = List.foldr cabs (expvar n) (argvars n args)
- fun when1 n (m, c) = if n = m then arg1 (n, c) else Ast.Constant @{const_syntax bottom}
+ fun when1 n (m, c) = if n = m then arg1 (n, c) else Ast.Constant \<^const_syntax>\<open>bottom\<close>
val case_constant = Ast.Constant (syntax (case_const dummyT))
fun case_trans constraint authentic =
(app "_case_syntax"
(Ast.Variable "x",
- foldr1 (app @{syntax_const "_case2"}) (map_index (case1 constraint authentic) spec)),
+ foldr1 (app \<^syntax_const>\<open>_case2\<close>) (map_index (case1 constraint authentic) spec)),
capp (capps (case_constant, map_index arg1 spec), Ast.Variable "x"))
fun one_abscon_trans authentic (n, c) =
(if authentic then Syntax.Parse_Print_Rule else Syntax.Parse_Rule)
@@ -690,7 +690,7 @@
fun dis_fun i (j, (_, args)) =
let
val (vs, _) = get_vars args
- val tr = if i = j then @{term TT} else @{term FF}
+ val tr = if i = j then \<^term>\<open>TT\<close> else \<^term>\<open>FF\<close>
in
big_lambdas vs tr
end
@@ -724,7 +724,7 @@
let
val (vs, nonlazy) = get_vars args
val lhs = dis ` list_ccomb (con, vs)
- val rhs = if i = j then @{term TT} else @{term FF}
+ val rhs = if i = j then \<^term>\<open>TT\<close> else \<^term>\<open>FF\<close>
val assms = map (mk_trp o mk_defined) nonlazy
val concl = mk_trp (mk_eq (lhs, rhs))
val goal = Logic.list_implies (assms, concl)
@@ -775,7 +775,7 @@
let
val ts : string list = map fst (Term.add_tfreesT lhsT [])
val t : string = singleton (Name.variant_list ts) "'t"
- in TFree (t, @{sort pcpo}) end
+ in TFree (t, \<^sort>\<open>pcpo\<close>) end
(* define match combinators *)
local
--- a/src/HOL/HOLCF/Tools/Domain/domain_induction.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain_induction.ML Sat Jan 05 17:24:33 2019 +0100
@@ -41,13 +41,13 @@
val {take_consts, take_Suc_thms, deflation_take_thms, ...} = take_info
val deflation_thms = Domain_Take_Proofs.get_deflation_thms thy
- val n = Free ("n", @{typ nat})
- val n' = @{const Suc} $ n
+ val n = Free ("n", \<^typ>\<open>nat\<close>)
+ val n' = \<^const>\<open>Suc\<close> $ n
local
val newTs = map (#absT o #iso_info) constr_infos
val subs = newTs ~~ map (fn t => t $ n) take_consts
- fun is_ID (Const (c, _)) = (c = @{const_name ID})
+ fun is_ID (Const (c, _)) = (c = \<^const_name>\<open>ID\<close>)
| is_ID _ = false
in
fun map_of_arg thy v T =
--- a/src/HOL/HOLCF/Tools/Domain/domain_isomorphism.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain_isomorphism.ML Sat Jan 05 17:24:33 2019 +0100
@@ -34,10 +34,10 @@
struct
val beta_ss =
- simpset_of (put_simpset HOL_basic_ss @{context}
- addsimps @{thms simp_thms} addsimprocs [@{simproc beta_cfun_proc}])
+ simpset_of (put_simpset HOL_basic_ss \<^context>
+ addsimps @{thms simp_thms} addsimprocs [\<^simproc>\<open>beta_cfun_proc\<close>])
-fun is_cpo thy T = Sign.of_sort thy (T, @{sort cpo})
+fun is_cpo thy T = Sign.of_sort thy (T, \<^sort>\<open>cpo\<close>)
(******************************************************************************)
@@ -49,36 +49,36 @@
infixr 6 ->>
infixr -->>
-val udomT = @{typ udom}
-val deflT = @{typ "udom defl"}
-val udeflT = @{typ "udom u defl"}
+val udomT = \<^typ>\<open>udom\<close>
+val deflT = \<^typ>\<open>udom defl\<close>
+val udeflT = \<^typ>\<open>udom u defl\<close>
fun mk_DEFL T =
- Const (@{const_name defl}, Term.itselfT T --> deflT) $ Logic.mk_type T
+ Const (\<^const_name>\<open>defl\<close>, Term.itselfT T --> deflT) $ Logic.mk_type T
-fun dest_DEFL (Const (@{const_name defl}, _) $ t) = Logic.dest_type t
+fun dest_DEFL (Const (\<^const_name>\<open>defl\<close>, _) $ t) = Logic.dest_type t
| dest_DEFL t = raise TERM ("dest_DEFL", [t])
fun mk_LIFTDEFL T =
- Const (@{const_name liftdefl}, Term.itselfT T --> udeflT) $ Logic.mk_type T
+ Const (\<^const_name>\<open>liftdefl\<close>, Term.itselfT T --> udeflT) $ Logic.mk_type T
-fun dest_LIFTDEFL (Const (@{const_name liftdefl}, _) $ t) = Logic.dest_type t
+fun dest_LIFTDEFL (Const (\<^const_name>\<open>liftdefl\<close>, _) $ t) = Logic.dest_type t
| dest_LIFTDEFL t = raise TERM ("dest_LIFTDEFL", [t])
-fun mk_u_defl t = mk_capply (@{const "u_defl"}, t)
+fun mk_u_defl t = mk_capply (\<^const>\<open>u_defl\<close>, t)
-fun emb_const T = Const (@{const_name emb}, T ->> udomT)
-fun prj_const T = Const (@{const_name prj}, udomT ->> T)
+fun emb_const T = Const (\<^const_name>\<open>emb\<close>, T ->> udomT)
+fun prj_const T = Const (\<^const_name>\<open>prj\<close>, udomT ->> T)
fun coerce_const (T, U) = mk_cfcomp (prj_const U, emb_const T)
fun isodefl_const T =
- Const (@{const_name isodefl}, (T ->> T) --> deflT --> HOLogic.boolT)
+ Const (\<^const_name>\<open>isodefl\<close>, (T ->> T) --> deflT --> HOLogic.boolT)
fun isodefl'_const T =
- Const (@{const_name isodefl'}, (T ->> T) --> udeflT --> HOLogic.boolT)
+ Const (\<^const_name>\<open>isodefl'\<close>, (T ->> T) --> udeflT --> HOLogic.boolT)
fun mk_deflation t =
- Const (@{const_name deflation}, Term.fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>deflation\<close>, Term.fastype_of t --> boolT) $ t
(* splits a cterm into the right and lefthand sides of equality *)
fun dest_eqs t = HOLogic.dest_eq (HOLogic.dest_Trueprop t)
@@ -121,9 +121,9 @@
(* convert parameters to lambda abstractions *)
fun mk_eqn (lhs, rhs) =
case lhs of
- Const (@{const_name Rep_cfun}, _) $ f $ (x as Free _) =>
+ Const (\<^const_name>\<open>Rep_cfun\<close>, _) $ f $ (x as Free _) =>
mk_eqn (f, big_lambda x rhs)
- | f $ Const (@{const_name Pure.type}, T) =>
+ | f $ Const (\<^const_name>\<open>Pure.type\<close>, T) =>
mk_eqn (f, Abs ("t", T, rhs))
| Const _ => Logic.mk_equals (lhs, rhs)
| _ => raise TERM ("lhs not of correct form", [lhs, rhs])
@@ -150,7 +150,7 @@
val cont_thm =
let
val prop = mk_trp (mk_cont functional)
- val rules = Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems cont2cont}
+ val rules = Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>cont2cont\<close>
fun tac ctxt = REPEAT_ALL_NEW (match_tac ctxt (rev rules)) 1
in
Goal.prove_global thy [] [] prop (tac o #context)
@@ -188,7 +188,7 @@
(T : typ) : term =
let
val defl_simps =
- Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems domain_defl_simps}
+ Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>domain_defl_simps\<close>
val rules = map (Thm.concl_of #> HOLogic.dest_Trueprop #> HOLogic.dest_eq) (rev defl_simps)
val rules' = map (apfst mk_DEFL) tab1 @ map (apfst mk_LIFTDEFL) tab2
fun proc1 t =
@@ -405,14 +405,14 @@
val tmp_thy =
let
fun arity (vs, tbind, _, _, _) =
- (Sign.full_name thy tbind, map the_sort vs, @{sort "domain"})
+ (Sign.full_name thy tbind, map the_sort vs, \<^sort>\<open>domain\<close>)
in
fold Axclass.arity_axiomatization (map arity doms) tmp_thy
end
(* check bifiniteness of right-hand sides *)
fun check_rhs (_, _, _, rhs, _) =
- if Sign.of_sort tmp_thy (rhs, @{sort "domain"}) then ()
+ if Sign.of_sort tmp_thy (rhs, \<^sort>\<open>domain\<close>) then ()
else error ("Type not of sort domain: " ^
quote (Syntax.string_of_typ_global tmp_thy rhs))
val _ = map check_rhs doms
@@ -502,7 +502,7 @@
Domaindef.add_domaindef spec defl NONE thy
(* declare domain_defl_simps rules *)
val thy =
- Context.theory_map (Named_Theorems.add_thm @{named_theorems domain_defl_simps} DEFL) thy
+ Context.theory_map (Named_Theorems.add_thm \<^named_theorems>\<open>domain_defl_simps\<close> DEFL) thy
in
(DEFL, thy)
end
@@ -513,7 +513,7 @@
let
val goal = mk_eqs (mk_DEFL lhsT, mk_DEFL rhsT)
val DEFL_simps =
- Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems domain_defl_simps}
+ Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>domain_defl_simps\<close>
fun tac ctxt =
rewrite_goals_tac ctxt (map mk_meta_eq (rev DEFL_simps))
THEN TRY (resolve_tac ctxt defl_unfold_thms 1)
@@ -618,7 +618,7 @@
val isodefl_rules =
@{thms conjI isodefl_ID_DEFL isodefl_LIFTDEFL}
@ isodefl_abs_rep_thms
- @ rev (Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems domain_isodefl})
+ @ rev (Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>domain_isodefl\<close>)
in
Goal.prove_global thy [] assms goal (fn {prems, context = ctxt} =>
EVERY
@@ -642,7 +642,7 @@
(Global_Theory.add_thms o map (Thm.no_attributes o apsnd Drule.zero_var_indexes))
(conjuncts isodefl_binds isodefl_thm)
val thy =
- fold (Context.theory_map o Named_Theorems.add_thm @{named_theorems domain_isodefl})
+ fold (Context.theory_map o Named_Theorems.add_thm \<^named_theorems>\<open>domain_isodefl\<close>)
isodefl_thms thy
(* prove map_ID theorems *)
@@ -650,7 +650,7 @@
(((map_const, (lhsT, _)), DEFL_thm), isodefl_thm) =
let
val Ts = snd (dest_Type lhsT)
- fun is_cpo T = Sign.of_sort thy (T, @{sort cpo})
+ fun is_cpo T = Sign.of_sort thy (T, \<^sort>\<open>cpo\<close>)
val lhs = list_ccomb (map_const, map mk_ID (filter is_cpo Ts))
val goal = mk_eqs (lhs, mk_ID lhsT)
fun tac ctxt = EVERY
@@ -680,7 +680,7 @@
val lub_take_lemma =
let
val lhs = mk_tuple (map mk_lub take_consts)
- fun is_cpo T = Sign.of_sort thy (T, @{sort cpo})
+ fun is_cpo T = Sign.of_sort thy (T, \<^sort>\<open>cpo\<close>)
fun mk_map_ID (map_const, (lhsT, _)) =
list_ccomb (map_const, map mk_ID (filter is_cpo (snd (dest_Type lhsT))))
val rhs = mk_tuple (map mk_map_ID (map_consts ~~ dom_eqns))
@@ -746,8 +746,8 @@
val parse_domain_iso :
(string list * binding * mixfix * string * (binding * binding) option)
parser =
- (Parse.type_args -- Parse.binding -- Parse.opt_mixfix -- (@{keyword "="} |-- Parse.typ) --
- Scan.option (@{keyword "morphisms"} |-- Parse.!!! (Parse.binding -- Parse.binding)))
+ (Parse.type_args -- Parse.binding -- Parse.opt_mixfix -- (\<^keyword>\<open>=\<close> |-- Parse.typ) --
+ Scan.option (\<^keyword>\<open>morphisms\<close> |-- Parse.!!! (Parse.binding -- Parse.binding)))
>> (fn ((((vs, t), mx), rhs), morphs) => (vs, t, mx, rhs, morphs))
val parse_domain_isos = Parse.and_list1 parse_domain_iso
@@ -755,7 +755,7 @@
in
val _ =
- Outer_Syntax.command @{command_keyword domain_isomorphism} "define domain isomorphisms (HOLCF)"
+ Outer_Syntax.command \<^command_keyword>\<open>domain_isomorphism\<close> "define domain isomorphisms (HOLCF)"
(parse_domain_isos >> (Toplevel.theory o domain_isomorphism_cmd))
end
--- a/src/HOL/HOLCF/Tools/Domain/domain_take_proofs.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/Domain/domain_take_proofs.ML Sat Jan 05 17:24:33 2019 +0100
@@ -107,8 +107,8 @@
}
val beta_ss =
- simpset_of (put_simpset HOL_basic_ss @{context}
- addsimps @{thms simp_thms} addsimprocs [@{simproc beta_cfun_proc}])
+ simpset_of (put_simpset HOL_basic_ss \<^context>
+ addsimps @{thms simp_thms} addsimprocs [\<^simproc>\<open>beta_cfun_proc\<close>])
(******************************************************************************)
(******************************** theory data *********************************)
@@ -127,15 +127,15 @@
Rec_Data.map (Symtab.insert (K true) (tname, bs))
fun add_deflation_thm thm =
- Context.theory_map (Named_Theorems.add_thm @{named_theorems domain_deflation} thm)
+ Context.theory_map (Named_Theorems.add_thm \<^named_theorems>\<open>domain_deflation\<close> thm)
val get_rec_tab = Rec_Data.get
fun get_deflation_thms thy =
- rev (Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems domain_deflation})
+ rev (Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>domain_deflation\<close>)
-val map_ID_add = Named_Theorems.add @{named_theorems domain_map_ID}
+val map_ID_add = Named_Theorems.add \<^named_theorems>\<open>domain_map_ID\<close>
fun get_map_ID_thms thy =
- rev (Named_Theorems.get (Proof_Context.init_global thy) @{named_theorems domain_map_ID})
+ rev (Named_Theorems.get (Proof_Context.init_global thy) \<^named_theorems>\<open>domain_map_ID\<close>)
(******************************************************************************)
@@ -149,7 +149,7 @@
infix 9 `
fun mk_deflation t =
- Const (@{const_name deflation}, Term.fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>deflation\<close>, Term.fastype_of t --> boolT) $ t
fun mk_eqs (t, u) = HOLogic.mk_Trueprop (HOLogic.mk_eq (t, u))
@@ -272,7 +272,7 @@
(* prove take_0 lemmas *)
fun prove_take_0 ((take_const, dbind), (lhsT, _)) thy =
let
- val lhs = take_const $ @{term "0::nat"}
+ val lhs = take_const $ \<^term>\<open>0::nat\<close>
val goal = mk_eqs (lhs, mk_bottom (lhsT ->> lhsT))
val rules = take_defs @ @{thms iterate_0 fst_strict snd_strict}
fun tac ctxt = simp_tac (put_simpset HOL_basic_ss ctxt addsimps rules) 1
@@ -289,7 +289,7 @@
fun prove_take_Suc
(((take_const, rep_abs), dbind), (_, rhsT)) thy =
let
- val lhs = take_const $ (@{term Suc} $ n)
+ val lhs = take_const $ (\<^term>\<open>Suc\<close> $ n)
val body = map_of_typ thy (newTs ~~ take_is) rhsT
val rhs = mk_cfcomp2 (rep_abs, body)
val goal = mk_eqs (lhs, rhs)
@@ -434,9 +434,9 @@
val iso_locale_thms = map iso_locale iso_infos
val decisive_abs_rep_thms =
map (fn x => @{thm decisive_abs_rep} OF [x]) iso_locale_thms
- val n = Free ("n", @{typ nat})
+ val n = Free ("n", \<^typ>\<open>nat\<close>)
fun mk_decisive t =
- Const (@{const_name decisive}, fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>decisive\<close>, fastype_of t --> boolT) $ t
fun f take_const = mk_decisive (take_const $ n)
val goal = mk_trp (foldr1 mk_conj (map f take_consts))
val rules0 = @{thm decisive_bottom} :: take_0_thms
@@ -529,7 +529,7 @@
(* test for finiteness of domain definitions *)
local
- val types = [@{type_name ssum}, @{type_name sprod}]
+ val types = [\<^type_name>\<open>ssum\<close>, \<^type_name>\<open>sprod\<close>]
fun finite d T = if member (op =) absTs T then d else finite' d T
and finite' d (Type (c, Ts)) =
let val d' = d andalso member (op =) types c
--- a/src/HOL/HOLCF/Tools/cont_consts.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/cont_consts.ML Sat Jan 05 17:24:33 2019 +0100
@@ -29,7 +29,7 @@
[Syntax.Parse_Print_Rule
(Ast.mk_appl (Ast.Constant name2) (map Ast.Variable vnames),
fold (fn a => fn t =>
- Ast.mk_appl (Ast.Constant @{const_syntax Rep_cfun}) [t, Ast.Variable a])
+ Ast.mk_appl (Ast.Constant \<^const_syntax>\<open>Rep_cfun\<close>) [t, Ast.Variable a])
vnames (Ast.Constant name1))] @
(case mx of
Infix _ => [extra_parse_rule]
@@ -57,7 +57,7 @@
trans_rules (syntax c2) (syntax c1) n mx)
end
-fun cfun_arity (Type (n, [_, T])) = if n = @{type_name cfun} then 1 + cfun_arity T else 0
+fun cfun_arity (Type (n, [_, T])) = if n = \<^type_name>\<open>cfun\<close> then 1 + cfun_arity T else 0
| cfun_arity _ = 0
fun is_contconst (_, _, NoSyn) = false
--- a/src/HOL/HOLCF/Tools/cont_proc.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/cont_proc.ML Sat Jan 05 17:24:33 2019 +0100
@@ -24,11 +24,11 @@
val cont_R = @{thm cont_Rep_cfun2}
(* checks whether a term is written entirely in the LCF sublanguage *)
-fun is_lcf_term (Const (@{const_name Rep_cfun}, _) $ t $ u) =
+fun is_lcf_term (Const (\<^const_name>\<open>Rep_cfun\<close>, _) $ t $ u) =
is_lcf_term t andalso is_lcf_term u
- | is_lcf_term (Const (@{const_name Abs_cfun}, _) $ Abs (_, _, t)) =
+ | is_lcf_term (Const (\<^const_name>\<open>Abs_cfun\<close>, _) $ Abs (_, _, t)) =
is_lcf_term t
- | is_lcf_term (Const (@{const_name Abs_cfun}, _) $ t) =
+ | is_lcf_term (Const (\<^const_name>\<open>Abs_cfun\<close>, _) $ t) =
is_lcf_term (Term.incr_boundvars 1 t $ Bound 0)
| is_lcf_term (Bound _) = true
| is_lcf_term t = not (Term.is_open t)
@@ -64,17 +64,17 @@
(* first list: cont thm for each dangling bound variable *)
(* second list: cont thm for each LAM in t *)
(* if b = false, only return cont thm for outermost LAMs *)
- fun cont_thms1 b (Const (@{const_name Rep_cfun}, _) $ f $ t) =
+ fun cont_thms1 b (Const (\<^const_name>\<open>Rep_cfun\<close>, _) $ f $ t) =
let
val (cs1,ls1) = cont_thms1 b f
val (cs2,ls2) = cont_thms1 b t
in (zip cs1 cs2, if b then ls1 @ ls2 else []) end
- | cont_thms1 b (Const (@{const_name Abs_cfun}, _) $ Abs (_, _, t)) =
+ | cont_thms1 b (Const (\<^const_name>\<open>Abs_cfun\<close>, _) $ Abs (_, _, t)) =
let
val (cs, ls) = cont_thms1 b t
val (cs', l) = lam cs
in (cs', l::ls) end
- | cont_thms1 b (Const (@{const_name Abs_cfun}, _) $ t) =
+ | cont_thms1 b (Const (\<^const_name>\<open>Abs_cfun\<close>, _) $ t) =
let
val t' = Term.incr_boundvars 1 t $ Bound 0
val (cs, ls) = cont_thms1 b t'
@@ -104,9 +104,9 @@
[] => no_tac
| (c::_) => resolve_tac ctxt [c] i
- fun cont_tac_of_term (Const (@{const_name cont}, _) $ f) =
+ fun cont_tac_of_term (Const (\<^const_name>\<open>cont\<close>, _) $ f) =
let
- val f' = Const (@{const_name Abs_cfun}, dummyT) $ f
+ val f' = Const (\<^const_name>\<open>Abs_cfun\<close>, dummyT) $ f
in
if is_lcf_term f'
then new_cont_tac f'
@@ -126,8 +126,8 @@
in Option.map fst (Seq.pull (cont_tac ctxt 1 tr)) end
in
val cont_proc =
- Simplifier.make_simproc @{context} "cont_proc"
- {lhss = [@{term "cont f"}], proc = K solve_cont}
+ Simplifier.make_simproc \<^context> "cont_proc"
+ {lhss = [\<^term>\<open>cont f\<close>], proc = K solve_cont}
end
val setup = map_theory_simpset (fn ctxt => ctxt addsimprocs [cont_proc])
--- a/src/HOL/HOLCF/Tools/cpodef.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/cpodef.ML Sat Jan 05 17:24:33 2019 +0100
@@ -53,10 +53,10 @@
(* building terms *)
-fun adm_const T = Const (@{const_name adm}, (T --> HOLogic.boolT) --> HOLogic.boolT)
+fun adm_const T = Const (\<^const_name>\<open>adm\<close>, (T --> HOLogic.boolT) --> HOLogic.boolT)
fun mk_adm (x, T, P) = adm_const T $ absfree (x, T) P
-fun below_const T = Const (@{const_name below}, T --> T --> HOLogic.boolT)
+fun below_const T = Const (\<^const_name>\<open>below\<close>, T --> T --> HOLogic.boolT)
(* proving class instances *)
@@ -75,7 +75,7 @@
val (full_tname, Ts) = dest_Type newT
val lhs_sorts = map (snd o dest_TFree) Ts
fun tac ctxt = resolve_tac ctxt [@{thm typedef_cpo} OF cpo_thms] 1
- val thy = Axclass.prove_arity (full_tname, lhs_sorts, @{sort cpo}) tac thy
+ val thy = Axclass.prove_arity (full_tname, lhs_sorts, \<^sort>\<open>cpo\<close>) tac thy
(* transfer thms so that they will know about the new cpo instance *)
val cpo_thms' = map (Thm.transfer thy) cpo_thms
fun make thm = Drule.zero_var_indexes (thm OF cpo_thms')
@@ -115,7 +115,7 @@
val (full_tname, Ts) = dest_Type newT
val lhs_sorts = map (snd o dest_TFree) Ts
fun tac ctxt = resolve_tac ctxt [@{thm typedef_pcpo} OF pcpo_thms] 1
- val thy = Axclass.prove_arity (full_tname, lhs_sorts, @{sort pcpo}) tac thy
+ val thy = Axclass.prove_arity (full_tname, lhs_sorts, \<^sort>\<open>pcpo\<close>) tac thy
val pcpo_thms' = map (Thm.transfer thy) pcpo_thms
fun make thm = Drule.zero_var_indexes (thm OF pcpo_thms')
val Rep_strict = make @{thm typedef_Rep_strict}
@@ -174,7 +174,7 @@
val below_eqn = Logic.mk_equals (below_const newT,
Abs ("x", newT, Abs ("y", newT, below_const oldT $ (RepC $ Bound 1) $ (RepC $ Bound 0))))
val ((_, (_, below_ldef)), lthy) = thy
- |> Class.instantiation ([full_tname], lhs_tfrees, @{sort po})
+ |> Class.instantiation ([full_tname], lhs_tfrees, \<^sort>\<open>po\<close>)
|> Specification.definition NONE [] []
((Binding.prefix_name "below_" (Thm.def_binding name), []), below_eqn)
val ctxt_thy = Proof_Context.init_global (Proof_Context.theory_of lthy)
@@ -227,7 +227,7 @@
val (newT, oldT, set) = prepare prep_term name typ raw_set thy
val goal_bottom_mem =
- HOLogic.mk_Trueprop (HOLogic.mk_mem (Const (@{const_name bottom}, oldT), set))
+ HOLogic.mk_Trueprop (HOLogic.mk_mem (Const (\<^const_name>\<open>bottom\<close>, oldT), set))
val goal_admissible =
HOLogic.mk_Trueprop (mk_adm ("x", oldT, HOLogic.mk_mem (Free ("x", oldT), set)))
@@ -322,9 +322,9 @@
fun cpodef pcpo =
(Parse.type_args_constrained -- Parse.binding) -- Parse.opt_mixfix --
- (@{keyword "="} |-- Parse.term) --
+ (\<^keyword>\<open>=\<close> |-- Parse.term) --
Scan.option
- (@{keyword "morphisms"} |-- Parse.!!! (Parse.binding -- Parse.binding))
+ (\<^keyword>\<open>morphisms\<close> |-- Parse.!!! (Parse.binding -- Parse.binding))
>> (fn ((((args, t), mx), A), morphs) =>
Toplevel.theory_to_proof
((if pcpo then pcpodef_proof_cmd else cpodef_proof_cmd)
@@ -333,12 +333,12 @@
in
val _ =
- Outer_Syntax.command @{command_keyword pcpodef}
+ Outer_Syntax.command \<^command_keyword>\<open>pcpodef\<close>
"HOLCF type definition (requires admissibility proof)"
(cpodef true)
val _ =
- Outer_Syntax.command @{command_keyword cpodef}
+ Outer_Syntax.command \<^command_keyword>\<open>cpodef\<close>
"HOLCF type definition (requires admissibility proof)"
(cpodef false)
--- a/src/HOL/HOLCF/Tools/domaindef.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/domaindef.ML Sat Jan 05 17:24:33 2019 +0100
@@ -48,28 +48,28 @@
(* building types and terms *)
-val udomT = @{typ udom}
-val deflT = @{typ "udom defl"}
-val udeflT = @{typ "udom u defl"}
-fun emb_const T = Const (@{const_name emb}, T ->> udomT)
-fun prj_const T = Const (@{const_name prj}, udomT ->> T)
-fun defl_const T = Const (@{const_name defl}, Term.itselfT T --> deflT)
-fun liftemb_const T = Const (@{const_name liftemb}, mk_upT T ->> mk_upT udomT)
-fun liftprj_const T = Const (@{const_name liftprj}, mk_upT udomT ->> mk_upT T)
-fun liftdefl_const T = Const (@{const_name liftdefl}, Term.itselfT T --> udeflT)
+val udomT = \<^typ>\<open>udom\<close>
+val deflT = \<^typ>\<open>udom defl\<close>
+val udeflT = \<^typ>\<open>udom u defl\<close>
+fun emb_const T = Const (\<^const_name>\<open>emb\<close>, T ->> udomT)
+fun prj_const T = Const (\<^const_name>\<open>prj\<close>, udomT ->> T)
+fun defl_const T = Const (\<^const_name>\<open>defl\<close>, Term.itselfT T --> deflT)
+fun liftemb_const T = Const (\<^const_name>\<open>liftemb\<close>, mk_upT T ->> mk_upT udomT)
+fun liftprj_const T = Const (\<^const_name>\<open>liftprj\<close>, mk_upT udomT ->> mk_upT T)
+fun liftdefl_const T = Const (\<^const_name>\<open>liftdefl\<close>, Term.itselfT T --> udeflT)
fun mk_u_map t =
let
val (T, U) = dest_cfunT (fastype_of t)
val u_map_type = (T ->> U) ->> (mk_upT T ->> mk_upT U)
- val u_map_const = Const (@{const_name u_map}, u_map_type)
+ val u_map_const = Const (\<^const_name>\<open>u_map\<close>, u_map_type)
in
mk_capply (u_map_const, t)
end
fun mk_cast (t, x) =
capply_const (udomT, udomT)
- $ (capply_const (deflT, udomT ->> udomT) $ @{term "cast :: udom defl -> udom -> udom"} $ t)
+ $ (capply_const (deflT, udomT ->> udomT) $ \<^term>\<open>cast :: udom defl -> udom -> udom\<close> $ t)
$ x
(* manipulating theorems *)
@@ -92,7 +92,7 @@
val tmp_ctxt = tmp_ctxt |> Variable.declare_constraints defl
val deflT = Term.fastype_of defl
- val _ = if deflT = @{typ "udom defl"} then ()
+ val _ = if deflT = \<^typ>\<open>udom defl\<close> then ()
else error ("Not type defl: " ^ quote (Syntax.string_of_typ tmp_ctxt deflT))
(*lhs*)
@@ -101,7 +101,7 @@
val newT = Type (full_tname, map TFree lhs_tfrees)
(*set*)
- val set = @{term "defl_set :: udom defl => udom set"} $ defl
+ val set = \<^term>\<open>defl_set :: udom defl => udom set\<close> $ defl
(*pcpodef*)
fun tac1 ctxt = resolve_tac ctxt @{thms defl_set_bottom} 1
@@ -124,7 +124,7 @@
val liftdefl_eqn =
Logic.mk_equals (liftdefl_const newT,
Abs ("t", Term.itselfT newT,
- mk_capply (@{const liftdefl_of}, defl_const newT $ Logic.mk_type newT)))
+ mk_capply (\<^const>\<open>liftdefl_of\<close>, defl_const newT $ Logic.mk_type newT)))
val name_def = Thm.def_binding tname
val emb_bind = (Binding.prefix_name "emb_" name_def, [])
@@ -136,7 +136,7 @@
(*instantiate class rep*)
val lthy = thy
- |> Class.instantiation ([full_tname], lhs_tfrees, @{sort domain})
+ |> Class.instantiation ([full_tname], lhs_tfrees, \<^sort>\<open>domain\<close>)
val ((_, (_, emb_ldef)), lthy) =
Specification.definition NONE [] [] (emb_bind, emb_eqn) lthy
val ((_, (_, prj_ldef)), lthy) =
@@ -196,11 +196,11 @@
(** outer syntax **)
val _ =
- Outer_Syntax.command @{command_keyword domaindef} "HOLCF definition of domains from deflations"
+ Outer_Syntax.command \<^command_keyword>\<open>domaindef\<close> "HOLCF definition of domains from deflations"
((Parse.type_args_constrained -- Parse.binding) --
- Parse.opt_mixfix -- (@{keyword "="} |-- Parse.term) --
+ Parse.opt_mixfix -- (\<^keyword>\<open>=\<close> |-- Parse.term) --
Scan.option
- (@{keyword "morphisms"} |-- Parse.!!! (Parse.binding -- Parse.binding)) >>
+ (\<^keyword>\<open>morphisms\<close> |-- Parse.!!! (Parse.binding -- Parse.binding)) >>
(fn (((((args, t)), mx), A), morphs) =>
Toplevel.theory (domaindef_cmd ((t, args, mx), A, SOME (Typedef.make_morphisms t morphs)))));
--- a/src/HOL/HOLCF/Tools/fixrec.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/fixrec.ML Sat Jan 05 17:24:33 2019 +0100
@@ -34,12 +34,12 @@
local
-fun binder_cfun (Type(@{type_name cfun},[T, U])) = T :: binder_cfun U
- | binder_cfun (Type(@{type_name "fun"},[T, U])) = T :: binder_cfun U
+fun binder_cfun (Type(\<^type_name>\<open>cfun\<close>,[T, U])) = T :: binder_cfun U
+ | binder_cfun (Type(\<^type_name>\<open>fun\<close>,[T, U])) = T :: binder_cfun U
| binder_cfun _ = []
-fun body_cfun (Type(@{type_name cfun},[_, U])) = body_cfun U
- | body_cfun (Type(@{type_name "fun"},[_, U])) = body_cfun U
+fun body_cfun (Type(\<^type_name>\<open>cfun\<close>,[_, U])) = body_cfun U
+ | body_cfun (Type(\<^type_name>\<open>fun\<close>,[_, U])) = body_cfun U
| body_cfun T = T
in
@@ -59,24 +59,24 @@
fun dest_eqs t = HOLogic.dest_eq (HOLogic.dest_Trueprop t)
(* similar to Thm.head_of, but for continuous application *)
-fun chead_of (Const(@{const_name Rep_cfun},_)$f$_) = chead_of f
+fun chead_of (Const(\<^const_name>\<open>Rep_cfun\<close>,_)$f$_) = chead_of f
| chead_of u = u
infix 1 === val (op ===) = HOLogic.mk_eq
fun mk_mplus (t, u) =
let val mT = Term.fastype_of t
- in Const(@{const_name Fixrec.mplus}, mT ->> mT ->> mT) ` t ` u end
+ in Const(\<^const_name>\<open>Fixrec.mplus\<close>, mT ->> mT ->> mT) ` t ` u end
fun mk_run t =
let
val mT = Term.fastype_of t
val T = dest_matchT mT
- val run = Const(@{const_name Fixrec.run}, mT ->> T)
+ val run = Const(\<^const_name>\<open>Fixrec.run\<close>, mT ->> T)
in
case t of
- Const(@{const_name Rep_cfun}, _) $
- Const(@{const_name Fixrec.succeed}, _) $ u => u
+ Const(\<^const_name>\<open>Rep_cfun\<close>, _) $
+ Const(\<^const_name>\<open>Fixrec.succeed\<close>, _) $ u => u
| _ => run ` t
end
@@ -130,7 +130,7 @@
"or simp rules are configured for all non-HOLCF constants.\n" ^
"The error occurred for the goal statement:\n" ^
Syntax.string_of_term lthy prop)
- val rules = Named_Theorems.get lthy @{named_theorems cont2cont}
+ val rules = Named_Theorems.get lthy \<^named_theorems>\<open>cont2cont\<close>
val fast_tac = SOLVED' (REPEAT_ALL_NEW (match_tac lthy (rev rules)))
val slow_tac = SOLVED' (simp_tac lthy)
val tac = fast_tac 1 ORELSE slow_tac 1 ORELSE err
@@ -219,7 +219,7 @@
(* compiles a monadic term for a constructor pattern *)
and comp_con T p rhs vs taken =
case p of
- Const(@{const_name Rep_cfun},_) $ f $ x =>
+ Const(\<^const_name>\<open>Rep_cfun\<close>,_) $ f $ x =>
let val (rhs', v, taken') = comp_pat x rhs taken
in comp_con T f rhs' (v::vs) taken' end
| f $ x =>
@@ -243,7 +243,7 @@
(* returns (constant, (vars, matcher)) *)
fun compile_lhs match_name pat rhs vs taken =
case pat of
- Const(@{const_name Rep_cfun}, _) $ f $ x =>
+ Const(\<^const_name>\<open>Rep_cfun\<close>, _) $ f $ x =>
let val (rhs', v, taken') = compile_pat match_name x rhs taken
in compile_lhs match_name f rhs' (v::vs) taken' end
| Free(_,_) => (pat, (vs, rhs))
@@ -388,18 +388,18 @@
(*************************************************************************)
val opt_thm_name' : (bool * Attrib.binding) parser =
- @{keyword "("} -- @{keyword "unchecked"} -- @{keyword ")"} >> K (true, Binding.empty_atts)
+ \<^keyword>\<open>(\<close> -- \<^keyword>\<open>unchecked\<close> -- \<^keyword>\<open>)\<close> >> K (true, Binding.empty_atts)
|| Parse_Spec.opt_thm_name ":" >> pair false
val spec' : (bool * (Attrib.binding * string)) parser =
opt_thm_name' -- Parse.prop >> (fn ((a, b), c) => (a, (b, c)))
val multi_specs' : (bool * (Attrib.binding * string)) list parser =
- let val unexpected = Scan.ahead (Parse.name || @{keyword "["} || @{keyword "("})
- in Parse.enum1 "|" (spec' --| Scan.option (unexpected -- Parse.!!! @{keyword "|"})) end
+ let val unexpected = Scan.ahead (Parse.name || \<^keyword>\<open>[\<close> || \<^keyword>\<open>(\<close>)
+ in Parse.enum1 "|" (spec' --| Scan.option (unexpected -- Parse.!!! \<^keyword>\<open>|\<close>)) end
val _ =
- Outer_Syntax.local_theory @{command_keyword fixrec} "define recursive functions (HOLCF)"
+ Outer_Syntax.local_theory \<^command_keyword>\<open>fixrec\<close> "define recursive functions (HOLCF)"
(Parse.vars -- (Parse.where_ |-- Parse.!!! multi_specs')
>> (fn (vars, specs) => add_fixrec_cmd vars specs))
--- a/src/HOL/HOLCF/Tools/holcf_library.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tools/holcf_library.ML Sat Jan 05 17:24:33 2019 +0100
@@ -33,9 +33,9 @@
(*** Basic HOLCF concepts ***)
-fun mk_bottom T = Const (@{const_name bottom}, T)
+fun mk_bottom T = Const (\<^const_name>\<open>bottom\<close>, T)
-fun below_const T = Const (@{const_name below}, [T, T] ---> boolT)
+fun below_const T = Const (\<^const_name>\<open>below\<close>, [T, T] ---> boolT)
fun mk_below (t, u) = below_const (fastype_of t) $ t $ u
fun mk_undef t = mk_eq (t, mk_bottom (fastype_of t))
@@ -43,24 +43,24 @@
fun mk_defined t = mk_not (mk_undef t)
fun mk_adm t =
- Const (@{const_name adm}, fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>adm\<close>, fastype_of t --> boolT) $ t
fun mk_compact t =
- Const (@{const_name compact}, fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>compact\<close>, fastype_of t --> boolT) $ t
fun mk_cont t =
- Const (@{const_name cont}, fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>cont\<close>, fastype_of t --> boolT) $ t
fun mk_chain t =
- Const (@{const_name chain}, Term.fastype_of t --> boolT) $ t
+ Const (\<^const_name>\<open>chain\<close>, Term.fastype_of t --> boolT) $ t
fun mk_lub t =
let
val T = Term.range_type (Term.fastype_of t)
- val lub_const = Const (@{const_name lub}, mk_setT T --> T)
- val UNIV_const = @{term "UNIV :: nat set"}
+ val lub_const = Const (\<^const_name>\<open>lub\<close>, mk_setT T --> T)
+ val UNIV_const = \<^term>\<open>UNIV :: nat set\<close>
val image_type = (natT --> T) --> mk_setT natT --> mk_setT T
- val image_const = Const (@{const_name image}, image_type)
+ val image_const = Const (\<^const_name>\<open>image\<close>, image_type)
in
lub_const $ (image_const $ t $ UNIV_const)
end
@@ -68,19 +68,19 @@
(*** Continuous function space ***)
-fun mk_cfunT (T, U) = Type(@{type_name cfun}, [T, U])
+fun mk_cfunT (T, U) = Type(\<^type_name>\<open>cfun\<close>, [T, U])
val (op ->>) = mk_cfunT
val (op -->>) = Library.foldr mk_cfunT
-fun dest_cfunT (Type(@{type_name cfun}, [T, U])) = (T, U)
+fun dest_cfunT (Type(\<^type_name>\<open>cfun\<close>, [T, U])) = (T, U)
| dest_cfunT T = raise TYPE ("dest_cfunT", [T], [])
fun capply_const (S, T) =
- Const(@{const_name Rep_cfun}, (S ->> T) --> (S --> T))
+ Const(\<^const_name>\<open>Rep_cfun\<close>, (S ->> T) --> (S --> T))
fun cabs_const (S, T) =
- Const(@{const_name Abs_cfun}, (S --> T) --> (S ->> T))
+ Const(\<^const_name>\<open>Abs_cfun\<close>, (S --> T) --> (S ->> T))
fun mk_cabs t =
let val T = fastype_of t
@@ -101,7 +101,7 @@
fun mk_capply (t, u) =
let val (S, T) =
case fastype_of t of
- Type(@{type_name cfun}, [S, T]) => (S, T)
+ Type(\<^type_name>\<open>cfun\<close>, [S, T]) => (S, T)
| _ => raise TERM ("mk_capply " ^ ML_Syntax.print_list ML_Syntax.print_term [t, u], [t, u])
in capply_const (S, T) $ t $ u end
@@ -109,10 +109,10 @@
val list_ccomb : term * term list -> term = Library.foldl mk_capply
-fun mk_ID T = Const (@{const_name ID}, T ->> T)
+fun mk_ID T = Const (\<^const_name>\<open>ID\<close>, T ->> T)
fun cfcomp_const (T, U, V) =
- Const (@{const_name cfcomp}, (U ->> V) ->> (T ->> U) ->> (T ->> V))
+ Const (\<^const_name>\<open>cfcomp\<close>, (U ->> V) ->> (T ->> U) ->> (T ->> V))
fun mk_cfcomp (f, g) =
let
@@ -124,7 +124,7 @@
else raise TYPE ("mk_cfcomp", [U, U'], [f, g])
end
-fun strictify_const T = Const (@{const_name strictify}, T ->> T)
+fun strictify_const T = Const (\<^const_name>\<open>strictify\<close>, T ->> T)
fun mk_strictify t = strictify_const (fastype_of t) ` t
fun mk_strict t =
@@ -154,17 +154,17 @@
(*** Lifted cpo type ***)
-fun mk_upT T = Type(@{type_name "u"}, [T])
+fun mk_upT T = Type(\<^type_name>\<open>u\<close>, [T])
-fun dest_upT (Type(@{type_name "u"}, [T])) = T
+fun dest_upT (Type(\<^type_name>\<open>u\<close>, [T])) = T
| dest_upT T = raise TYPE ("dest_upT", [T], [])
-fun up_const T = Const(@{const_name up}, T ->> mk_upT T)
+fun up_const T = Const(\<^const_name>\<open>up\<close>, T ->> mk_upT T)
fun mk_up t = up_const (fastype_of t) ` t
fun fup_const (T, U) =
- Const(@{const_name fup}, (T ->> U) ->> mk_upT T ->> U)
+ Const(\<^const_name>\<open>fup\<close>, (T ->> U) ->> mk_upT T ->> U)
fun mk_fup t = fup_const (dest_cfunT (fastype_of t)) ` t
@@ -173,39 +173,39 @@
(*** Lifted unit type ***)
-val oneT = @{typ "one"}
+val oneT = \<^typ>\<open>one\<close>
-fun one_case_const T = Const (@{const_name one_case}, T ->> oneT ->> T)
+fun one_case_const T = Const (\<^const_name>\<open>one_case\<close>, T ->> oneT ->> T)
fun mk_one_case t = one_case_const (fastype_of t) ` t
(*** Strict product type ***)
-fun mk_sprodT (T, U) = Type(@{type_name sprod}, [T, U])
+fun mk_sprodT (T, U) = Type(\<^type_name>\<open>sprod\<close>, [T, U])
-fun dest_sprodT (Type(@{type_name sprod}, [T, U])) = (T, U)
+fun dest_sprodT (Type(\<^type_name>\<open>sprod\<close>, [T, U])) = (T, U)
| dest_sprodT T = raise TYPE ("dest_sprodT", [T], [])
fun spair_const (T, U) =
- Const(@{const_name spair}, T ->> U ->> mk_sprodT (T, U))
+ Const(\<^const_name>\<open>spair\<close>, T ->> U ->> mk_sprodT (T, U))
(* builds the expression (:t, u:) *)
fun mk_spair (t, u) =
spair_const (fastype_of t, fastype_of u) ` t ` u
(* builds the expression (:t1,t2,..,tn:) *)
-fun mk_stuple [] = @{term "ONE"}
+fun mk_stuple [] = \<^term>\<open>ONE\<close>
| mk_stuple (t::[]) = t
| mk_stuple (t::ts) = mk_spair (t, mk_stuple ts)
fun sfst_const (T, U) =
- Const(@{const_name sfst}, mk_sprodT (T, U) ->> T)
+ Const(\<^const_name>\<open>sfst\<close>, mk_sprodT (T, U) ->> T)
fun ssnd_const (T, U) =
- Const(@{const_name ssnd}, mk_sprodT (T, U) ->> U)
+ Const(\<^const_name>\<open>ssnd\<close>, mk_sprodT (T, U) ->> U)
fun ssplit_const (T, U, V) =
- Const (@{const_name ssplit}, (T ->> U ->> V) ->> mk_sprodT (T, U) ->> V)
+ Const (\<^const_name>\<open>ssplit\<close>, (T ->> U ->> V) ->> mk_sprodT (T, U) ->> V)
fun mk_ssplit t =
let val (T, (U, V)) = apsnd dest_cfunT (dest_cfunT (fastype_of t))
@@ -214,13 +214,13 @@
(*** Strict sum type ***)
-fun mk_ssumT (T, U) = Type(@{type_name ssum}, [T, U])
+fun mk_ssumT (T, U) = Type(\<^type_name>\<open>ssum\<close>, [T, U])
-fun dest_ssumT (Type(@{type_name ssum}, [T, U])) = (T, U)
+fun dest_ssumT (Type(\<^type_name>\<open>ssum\<close>, [T, U])) = (T, U)
| dest_ssumT T = raise TYPE ("dest_ssumT", [T], [])
-fun sinl_const (T, U) = Const(@{const_name sinl}, T ->> mk_ssumT (T, U))
-fun sinr_const (T, U) = Const(@{const_name sinr}, U ->> mk_ssumT (T, U))
+fun sinl_const (T, U) = Const(\<^const_name>\<open>sinl\<close>, T ->> mk_ssumT (T, U))
+fun sinr_const (T, U) = Const(\<^const_name>\<open>sinr\<close>, U ->> mk_ssumT (T, U))
(* builds the list [sinl(t1), sinl(sinr(t2)), ... sinr(...sinr(tn))] *)
fun mk_sinjects ts =
@@ -241,7 +241,7 @@
end
fun sscase_const (T, U, V) =
- Const(@{const_name sscase},
+ Const(\<^const_name>\<open>sscase\<close>,
(T ->> V) ->> (U ->> V) ->> mk_ssumT (T, U) ->> V)
fun mk_sscase (t, u) =
@@ -258,30 +258,30 @@
(*** pattern match monad type ***)
-fun mk_matchT T = Type (@{type_name "match"}, [T])
+fun mk_matchT T = Type (\<^type_name>\<open>match\<close>, [T])
-fun dest_matchT (Type(@{type_name "match"}, [T])) = T
+fun dest_matchT (Type(\<^type_name>\<open>match\<close>, [T])) = T
| dest_matchT T = raise TYPE ("dest_matchT", [T], [])
-fun mk_fail T = Const (@{const_name "Fixrec.fail"}, mk_matchT T)
+fun mk_fail T = Const (\<^const_name>\<open>Fixrec.fail\<close>, mk_matchT T)
-fun succeed_const T = Const (@{const_name "Fixrec.succeed"}, T ->> mk_matchT T)
+fun succeed_const T = Const (\<^const_name>\<open>Fixrec.succeed\<close>, T ->> mk_matchT T)
fun mk_succeed t = succeed_const (fastype_of t) ` t
(*** lifted boolean type ***)
-val trT = @{typ "tr"}
+val trT = \<^typ>\<open>tr\<close>
(*** theory of fixed points ***)
fun mk_fix t =
let val (T, _) = dest_cfunT (fastype_of t)
- in mk_capply (Const(@{const_name fix}, (T ->> T) ->> T), t) end
+ in mk_capply (Const(\<^const_name>\<open>fix\<close>, (T ->> T) ->> T), t) end
fun iterate_const T =
- Const (@{const_name iterate}, natT --> (T ->> T) ->> (T ->> T))
+ Const (\<^const_name>\<open>iterate\<close>, natT --> (T ->> T) ->> (T ->> T))
fun mk_iterate (n, f) =
let val (T, _) = dest_cfunT (Term.fastype_of f)
--- a/src/HOL/HOLCF/Tr.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tr.thy Sat Jan 05 17:24:33 2019 +0100
@@ -21,7 +21,7 @@
definition FF :: "tr"
where "FF = Def False"
-text \<open>Exhaustion and Elimination for type @{typ tr}\<close>
+text \<open>Exhaustion and Elimination for type \<^typ>\<open>tr\<close>\<close>
lemma Exh_tr: "t = \<bottom> \<or> t = TT \<or> t = FF"
by (induct t) (auto simp: FF_def TT_def)
@@ -34,7 +34,7 @@
"P \<bottom> \<Longrightarrow> P TT \<Longrightarrow> P FF \<Longrightarrow> P x"
by (cases x) simp_all
-text \<open>distinctness for type @{typ tr}\<close>
+text \<open>distinctness for type \<^typ>\<open>tr\<close>\<close>
lemma dist_below_tr [simp]:
"TT \<notsqsubseteq> \<bottom>" "FF \<notsqsubseteq> \<bottom>" "TT \<notsqsubseteq> FF" "FF \<notsqsubseteq> TT"
--- a/src/HOL/HOLCF/Tutorial/Fixrec_ex.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Tutorial/Fixrec_ex.thy Sat Jan 05 17:24:33 2019 +0100
@@ -87,7 +87,7 @@
text \<open>
If the function is already strict in that argument, then the bottom
pattern does not change the meaning of the function. For example,
- in the definition of @{term from_sinr_up}, the first equation is
+ in the definition of \<^term>\<open>from_sinr_up\<close>, the first equation is
actually redundant, and could have been proven separately by
\<open>fixrec_simp\<close>.
\<close>
@@ -197,7 +197,7 @@
[simp del]: "repeat\<cdot>x = lCons\<cdot>x\<cdot>(repeat\<cdot>x)"
text \<open>
- We can derive other non-looping simp rules for @{const repeat} by
+ We can derive other non-looping simp rules for \<^const>\<open>repeat\<close> by
using the \<open>subst\<close> method with the \<open>repeat.simps\<close> rule.
\<close>
--- a/src/HOL/HOLCF/Up.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/Up.thy Sat Jan 05 17:24:33 2019 +0100
@@ -136,7 +136,7 @@
subsection \<open>Continuity of \emph{Iup} and \emph{Ifup}\<close>
-text \<open>continuity for @{term Iup}\<close>
+text \<open>continuity for \<^term>\<open>Iup\<close>\<close>
lemma cont_Iup: "cont Iup"
apply (rule contI)
@@ -144,7 +144,7 @@
apply (erule cpo_lubI)
done
-text \<open>continuity for @{term Ifup}\<close>
+text \<open>continuity for \<^term>\<open>Ifup\<close>\<close>
lemma cont_Ifup1: "cont (\<lambda>f. Ifup f x)"
by (induct x) simp_all
@@ -189,7 +189,7 @@
"case l of (XCONST up :: 'a)\<cdot>x \<Rightarrow> t" \<rightharpoonup> "CONST fup\<cdot>(\<Lambda> x. t)\<cdot>l"
"\<Lambda>(XCONST up\<cdot>x). t" \<rightleftharpoons> "CONST fup\<cdot>(\<Lambda> x. t)"
-text \<open>continuous versions of lemmas for @{typ "('a)u"}\<close>
+text \<open>continuous versions of lemmas for \<^typ>\<open>('a)u\<close>\<close>
lemma Exh_Up: "z = \<bottom> \<or> (\<exists>x. z = up\<cdot>x)"
by (induct z) (simp add: inst_up_pcpo, simp add: up_def cont_Iup)
--- a/src/HOL/HOLCF/ex/Focus_ex.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/ex/Focus_ex.thy Sat Jan 05 17:24:33 2019 +0100
@@ -178,7 +178,7 @@
done
lemma lemma3: "def_g g \<longrightarrow> is_g g"
-apply (tactic \<open>simp_tac (put_simpset HOL_ss @{context}
+apply (tactic \<open>simp_tac (put_simpset HOL_ss \<^context>
addsimps [@{thm def_g_def}, @{thm lemma1}, @{thm lemma2}]) 1\<close>)
apply (rule impI)
apply (erule exE)
@@ -203,7 +203,7 @@
done
lemma lemma4: "is_g g \<longrightarrow> def_g g"
-apply (tactic \<open>simp_tac (put_simpset HOL_ss @{context}
+apply (tactic \<open>simp_tac (put_simpset HOL_ss \<^context>
delsimps (@{thms HOL.ex_simps} @ @{thms HOL.all_simps})
addsimps [@{thm lemma1}, @{thm lemma2}, @{thm def_g_def}]) 1\<close>)
apply (rule impI)
--- a/src/HOL/HOLCF/ex/Pattern_Match.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/HOLCF/ex/Pattern_Match.thy Sat Jan 05 17:24:33 2019 +0100
@@ -131,8 +131,8 @@
parse_translation \<open>
(* rewrite (_pat x) => (succeed) *)
(* rewrite (_variable x t) => (Abs_cfun (%x. t)) *)
- [(@{syntax_const "_pat"}, fn _ => fn _ => Syntax.const @{const_syntax Fixrec.succeed}),
- Syntax_Trans.mk_binder_tr (@{syntax_const "_variable"}, @{const_syntax Abs_cfun})]
+ [(\<^syntax_const>\<open>_pat\<close>, fn _ => fn _ => Syntax.const \<^const_syntax>\<open>Fixrec.succeed\<close>),
+ Syntax_Trans.mk_binder_tr (\<^syntax_const>\<open>_variable\<close>, \<^const_syntax>\<open>Abs_cfun\<close>)]
\<close>
text \<open>Printing Case expressions\<close>
@@ -142,29 +142,29 @@
print_translation \<open>
let
- fun dest_LAM (Const (@{const_syntax Rep_cfun},_) $ Const (@{const_syntax unit_when},_) $ t) =
- (Syntax.const @{syntax_const "_noargs"}, t)
- | dest_LAM (Const (@{const_syntax Rep_cfun},_) $ Const (@{const_syntax csplit},_) $ t) =
+ fun dest_LAM (Const (\<^const_syntax>\<open>Rep_cfun\<close>,_) $ Const (\<^const_syntax>\<open>unit_when\<close>,_) $ t) =
+ (Syntax.const \<^syntax_const>\<open>_noargs\<close>, t)
+ | dest_LAM (Const (\<^const_syntax>\<open>Rep_cfun\<close>,_) $ Const (\<^const_syntax>\<open>csplit\<close>,_) $ t) =
let
val (v1, t1) = dest_LAM t;
val (v2, t2) = dest_LAM t1;
- in (Syntax.const @{syntax_const "_args"} $ v1 $ v2, t2) end
- | dest_LAM (Const (@{const_syntax Abs_cfun},_) $ t) =
+ in (Syntax.const \<^syntax_const>\<open>_args\<close> $ v1 $ v2, t2) end
+ | dest_LAM (Const (\<^const_syntax>\<open>Abs_cfun\<close>,_) $ t) =
let
val abs =
case t of Abs abs => abs
| _ => ("x", dummyT, incr_boundvars 1 t $ Bound 0);
val (x, t') = Syntax_Trans.atomic_abs_tr' abs;
- in (Syntax.const @{syntax_const "_variable"} $ x, t') end
+ in (Syntax.const \<^syntax_const>\<open>_variable\<close> $ x, t') end
| dest_LAM _ = raise Match; (* too few vars: abort translation *)
- fun Case1_tr' [Const(@{const_syntax branch},_) $ p, r] =
+ fun Case1_tr' [Const(\<^const_syntax>\<open>branch\<close>,_) $ p, r] =
let val (v, t) = dest_LAM r in
- Syntax.const @{syntax_const "_Case1"} $
- (Syntax.const @{syntax_const "_match"} $ p $ v) $ t
+ Syntax.const \<^syntax_const>\<open>_Case1\<close> $
+ (Syntax.const \<^syntax_const>\<open>_match\<close> $ p $ v) $ t
end;
- in [(@{const_syntax Rep_cfun}, K Case1_tr')] end
+ in [(\<^const_syntax>\<open>Rep_cfun\<close>, K Case1_tr')] end
\<close>
translations
@@ -382,7 +382,7 @@
@{thms cont2cont_fst cont2cont_snd cont2cont_Pair};
val beta_ss =
- simpset_of (put_simpset HOL_basic_ss @{context} addsimps (@{thms simp_thms} @ beta_rules));
+ simpset_of (put_simpset HOL_basic_ss \<^context> addsimps (@{thms simp_thms} @ beta_rules));
fun define_consts
(specs : (binding * term * mixfix) list)
@@ -453,14 +453,14 @@
val (U2, V2) = apsnd dest_matchT (dest_cfunT T2);
val pat_typ = [T1, T2] --->
(mk_prodT (U1, U2) ->> mk_matchT (mk_prodT (V1, V2)));
- val pat_const = Const (@{const_name cpair_pat}, pat_typ);
+ val pat_const = Const (\<^const_name>\<open>cpair_pat\<close>, pat_typ);
in
pat_const $ p1 $ p2
end;
fun mk_tuple_pat [] = succeed_const HOLogic.unitT
| mk_tuple_pat ps = foldr1 mk_pair_pat ps;
fun branch_const (T,U,V) =
- Const (@{const_name branch},
+ Const (\<^const_name>\<open>branch\<close>,
(T ->> mk_matchT U) --> (U ->> V) ->> T ->> mk_matchT V);
(* define pattern combinators *)
@@ -475,7 +475,7 @@
(map (K "'t") args)
|> Old_Datatype_Prop.indexify_names
|> Name.variant_list tns
- |> map (fn t => TFree (t, @{sort pcpo}));
+ |> map (fn t => TFree (t, \<^sort>\<open>pcpo\<close>));
val patNs = Old_Datatype_Prop.indexify_names (map (K "pat") args);
val patTs = map2 (fn T => fn V => T ->> mk_matchT V) Ts Vs;
val pats = map Free (patNs ~~ patTs);
@@ -500,7 +500,7 @@
local
fun syntax c = Lexicon.mark_const (fst (dest_Const c));
fun app s (l, r) = Ast.mk_appl (Ast.Constant s) [l, r];
- val capp = app @{const_syntax Rep_cfun};
+ val capp = app \<^const_syntax>\<open>Rep_cfun\<close>;
val capps = Library.foldl capp
fun app_var x = Ast.mk_appl (Ast.Constant "_variable") [x, Ast.Variable "rhs"];
@@ -533,7 +533,7 @@
local
val tns = map (fst o dest_TFree) (snd (dest_Type lhsT));
val rn = singleton (Name.variant_list tns) "'r";
- val R = TFree (rn, @{sort pcpo});
+ val R = TFree (rn, \<^sort>\<open>pcpo\<close>);
fun pat_lhs (pat, args) =
let
val Ts = map snd args;
@@ -541,7 +541,7 @@
(map (K "'t") args)
|> Old_Datatype_Prop.indexify_names
|> Name.variant_list (rn::tns)
- |> map (fn t => TFree (t, @{sort pcpo}));
+ |> map (fn t => TFree (t, \<^sort>\<open>pcpo\<close>));
val patNs = Old_Datatype_Prop.indexify_names (map (K "pat") args);
val patTs = map2 (fn T => fn V => T ->> mk_matchT V) Ts Vs;
val pats = map Free (patNs ~~ patTs);
--- a/src/HOL/Hahn_Banach/Vector_Space.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hahn_Banach/Vector_Space.thy Sat Jan 05 17:24:33 2019 +0100
@@ -11,7 +11,7 @@
subsection \<open>Signature\<close>
text \<open>
- For the definition of real vector spaces a type @{typ 'a} of the sort
+ For the definition of real vector spaces a type \<^typ>\<open>'a\<close> of the sort
\<open>{plus, minus, zero}\<close> is considered, on which a real scalar multiplication
\<open>\<cdot>\<close> is declared.
\<close>
@@ -23,7 +23,7 @@
subsection \<open>Vector space laws\<close>
text \<open>
- A \<^emph>\<open>vector space\<close> is a non-empty set \<open>V\<close> of elements from @{typ 'a} with the
+ A \<^emph>\<open>vector space\<close> is a non-empty set \<open>V\<close> of elements from \<^typ>\<open>'a\<close> with the
following vector space laws: The set \<open>V\<close> is closed under addition and scalar
multiplication, addition is associative and commutative; \<open>- x\<close> is the
inverse of \<open>x\<close> wrt.\ addition and \<open>0\<close> is the neutral element of addition.
--- a/src/HOL/Hoare/Heap.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Heap.thy Sat Jan 05 17:24:33 2019 +0100
@@ -60,11 +60,11 @@
definition distPath :: "('a \<Rightarrow> 'a ref) \<Rightarrow> 'a ref \<Rightarrow> 'a list \<Rightarrow> 'a ref \<Rightarrow> bool"
where "distPath h x as y \<longleftrightarrow> Path h x as y \<and> distinct as"
-text\<open>The term @{term"distPath h x as y"} expresses the fact that a
-non-repeating path @{term as} connects location @{term x} to location
-@{term y} by means of the @{term h} field. In the case where \<open>x
-= y\<close>, and there is a cycle from @{term x} to itself, @{term as} can
-be both @{term "[]"} and the non-repeating list of nodes in the
+text\<open>The term \<^term>\<open>distPath h x as y\<close> expresses the fact that a
+non-repeating path \<^term>\<open>as\<close> connects location \<^term>\<open>x\<close> to location
+\<^term>\<open>y\<close> by means of the \<^term>\<open>h\<close> field. In the case where \<open>x
+= y\<close>, and there is a cycle from \<^term>\<open>x\<close> to itself, \<^term>\<open>as\<close> can
+be both \<^term>\<open>[]\<close> and the non-repeating list of nodes in the
cycle.\<close>
lemma neq_dP: "p \<noteq> q \<Longrightarrow> Path h p Ps q \<Longrightarrow> distinct Ps \<Longrightarrow>
--- a/src/HOL/Hoare/HeapSyntaxAbort.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/HeapSyntaxAbort.thy Sat Jan 05 17:24:33 2019 +0100
@@ -7,8 +7,8 @@
subsection "Field access and update"
-text\<open>Heap update \<open>p^.h := e\<close> is now guarded against @{term p}
-being Null. However, @{term p} may still be illegal,
+text\<open>Heap update \<open>p^.h := e\<close> is now guarded against \<^term>\<open>p\<close>
+being Null. However, \<^term>\<open>p\<close> may still be illegal,
e.g. uninitialized or dangling. To guard against that, one needs a
more detailed model of the heap where allocated and free addresses are
distinguished, e.g. by making the heap a map, or by carrying the set
--- a/src/HOL/Hoare/Hoare_Logic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Hoare_Logic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -54,8 +54,8 @@
("{_} // _ // {_}" [0,55,0] 50)
ML_file "hoare_syntax.ML"
-parse_translation \<open>[(@{syntax_const "_hoare_vars"}, K Hoare_Syntax.hoare_vars_tr)]\<close>
-print_translation \<open>[(@{const_syntax Valid}, K (Hoare_Syntax.spec_tr' @{syntax_const "_hoare"}))]\<close>
+parse_translation \<open>[(\<^syntax_const>\<open>_hoare_vars\<close>, K Hoare_Syntax.hoare_vars_tr)]\<close>
+print_translation \<open>[(\<^const_syntax>\<open>Valid\<close>, K (Hoare_Syntax.spec_tr' \<^syntax_const>\<open>_hoare\<close>))]\<close>
lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) q"
--- a/src/HOL/Hoare/Hoare_Logic_Abort.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Hoare_Logic_Abort.thy Sat Jan 05 17:24:33 2019 +0100
@@ -56,9 +56,9 @@
("{_} // _ // {_}" [0,55,0] 50)
ML_file "hoare_syntax.ML"
-parse_translation \<open>[(@{syntax_const "_hoare_abort_vars"}, K Hoare_Syntax.hoare_vars_tr)]\<close>
+parse_translation \<open>[(\<^syntax_const>\<open>_hoare_abort_vars\<close>, K Hoare_Syntax.hoare_vars_tr)]\<close>
print_translation
- \<open>[(@{const_syntax Valid}, K (Hoare_Syntax.spec_tr' @{syntax_const "_hoare_abort"}))]\<close>
+ \<open>[(\<^const_syntax>\<open>Valid\<close>, K (Hoare_Syntax.spec_tr' \<^syntax_const>\<open>_hoare_abort\<close>))]\<close>
section \<open>The proof rules\<close>
--- a/src/HOL/Hoare/Pointer_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Pointer_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
apply fastforce
done
-text\<open>And now with ghost variables @{term ps} and @{term qs}. Even
+text\<open>And now with ghost variables \<^term>\<open>ps\<close> and \<^term>\<open>qs\<close>. Even
``more automatic''.\<close>
lemma "VARS next p ps q qs r
@@ -113,7 +113,7 @@
text\<open>What follows is a sequence of successively more intelligent proofs that
a simple loop finds an element in a linked list.
-We start with a proof based on the @{term List} predicate. This means it only
+We start with a proof based on the \<^term>\<open>List\<close> predicate. This means it only
works for acyclic lists.\<close>
lemma "VARS tl p
@@ -128,7 +128,7 @@
apply clarsimp
done
-text\<open>Using @{term Path} instead of @{term List} generalizes the correctness
+text\<open>Using \<^term>\<open>Path\<close> instead of \<^term>\<open>List\<close> generalizes the correctness
statement to cyclic lists as well:\<close>
lemma "VARS tl p
@@ -145,7 +145,7 @@
text\<open>Now it dawns on us that we do not need the list witness at all --- it
suffices to talk about reachability, i.e.\ we can use relations directly. The
-first version uses a relation on @{typ"'a ref"}:\<close>
+first version uses a relation on \<^typ>\<open>'a ref\<close>:\<close>
lemma "VARS tl p
{(p,X) \<in> {(Ref x,tl x) |x. True}\<^sup>*}
@@ -161,7 +161,7 @@
apply(fast elim:converse_rtranclE)
done
-text\<open>Finally, a version based on a relation on type @{typ 'a}:\<close>
+text\<open>Finally, a version based on a relation on type \<^typ>\<open>'a\<close>:\<close>
lemma "VARS tl p
{p \<noteq> Null \<and> (addr p,X) \<in> {(x,y). tl x = Ref y}\<^sup>*}
@@ -328,8 +328,7 @@
usually more efficient to give the witness directly than to have it
found by proof.
-Now we try a functional version of the abstraction relation @{term
-Path}. Since the result is not that convincing, we do not prove any of
+Now we try a functional version of the abstraction relation \<^term>\<open>Path\<close>. Since the result is not that convincing, we do not prove any of
the lemmas.\<close>
axiomatization
@@ -423,25 +422,24 @@
apply clarsimp
done
-text\<open>In the beginning, we are able to assert @{term"distPath next
-root as root"}, with @{term"as"} set to @{term"[]"} or
-@{term"[r,a,b,c]"}. Note that @{term"Path next root as root"} would
+text\<open>In the beginning, we are able to assert \<^term>\<open>distPath next
+root as root\<close>, with \<^term>\<open>as\<close> set to \<^term>\<open>[]\<close> or
+\<^term>\<open>[r,a,b,c]\<close>. Note that \<^term>\<open>Path next root as root\<close> would
additionally give us an infinite number of lists with the recurring
-sequence @{term"[r,a,b,c]"}.
+sequence \<^term>\<open>[r,a,b,c]\<close>.
The precondition states that there exists a non-empty non-repeating
-path \mbox{@{term "r # Ps"}} from pointer @{term root} to itself, given that
-@{term root} points to location @{term r}. Pointers @{term p} and
-@{term q} are then set to @{term root} and the successor of @{term
-root} respectively. If @{term "q = root"}, we have circled the loop,
-otherwise we set the @{term next} pointer field of @{term q} to point
-to @{term p}, and shift @{term p} and @{term q} one step forward. The
-invariant thus states that @{term p} and @{term q} point to two
-disjoint lists @{term ps} and @{term qs}, such that @{term"Ps = rev ps
-@ qs"}. After the loop terminates, one
+path \mbox{\<^term>\<open>r # Ps\<close>} from pointer \<^term>\<open>root\<close> to itself, given that
+\<^term>\<open>root\<close> points to location \<^term>\<open>r\<close>. Pointers \<^term>\<open>p\<close> and
+\<^term>\<open>q\<close> are then set to \<^term>\<open>root\<close> and the successor of \<^term>\<open>root\<close> respectively. If \<^term>\<open>q = root\<close>, we have circled the loop,
+otherwise we set the \<^term>\<open>next\<close> pointer field of \<^term>\<open>q\<close> to point
+to \<^term>\<open>p\<close>, and shift \<^term>\<open>p\<close> and \<^term>\<open>q\<close> one step forward. The
+invariant thus states that \<^term>\<open>p\<close> and \<^term>\<open>q\<close> point to two
+disjoint lists \<^term>\<open>ps\<close> and \<^term>\<open>qs\<close>, such that \<^term>\<open>Ps = rev ps
+@ qs\<close>. After the loop terminates, one
extra step is needed to close the loop. As expected, the postcondition
-states that the @{term distPath} from @{term root} to itself is now
-@{term "r # (rev Ps)"}.
+states that the \<^term>\<open>distPath\<close> from \<^term>\<open>root\<close> to itself is now
+\<^term>\<open>r # (rev Ps)\<close>.
It may come as a surprise to the reader that the simple algorithm for
acyclic list reversal, with modified annotations, works for cyclic
--- a/src/HOL/Hoare/Pointers0.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Pointers0.thy Sat Jan 05 17:24:33 2019 +0100
@@ -263,7 +263,7 @@
text\<open>What follows is a sequence of successively more intelligent proofs that
a simple loop finds an element in a linked list.
-We start with a proof based on the @{term List} predicate. This means it only
+We start with a proof based on the \<^term>\<open>List\<close> predicate. This means it only
works for acyclic lists.\<close>
lemma "VARS tl p
@@ -281,7 +281,7 @@
apply clarsimp
done
-text\<open>Using @{term Path} instead of @{term List} generalizes the correctness
+text\<open>Using \<^term>\<open>Path\<close> instead of \<^term>\<open>List\<close> generalizes the correctness
statement to cyclic lists as well:\<close>
lemma "VARS tl p
--- a/src/HOL/Hoare/SchorrWaite.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/SchorrWaite.thy Sat Jan 05 17:24:33 2019 +0100
@@ -338,10 +338,10 @@
proof (rule allI, rule impI)
fix x
assume a: "x \<in> R \<and> \<not> m x"
- \<comment> \<open>First, a disjunction on @{term"p^.r"} used later in the proof\<close>
+ \<comment> \<open>First, a disjunction on \<^term>\<open>p^.r\<close> used later in the proof\<close>
have pDisj:"p^.r = Null \<or> (p^.r \<noteq> Null \<and> p^.r^.m)" using poI1 poI2
by auto
- \<comment> \<open>@{term x} belongs to the left hand side of @{thm[source] subset}:\<close>
+ \<comment> \<open>\<^term>\<open>x\<close> belongs to the left hand side of @{thm[source] subset}:\<close>
have incl: "x \<in> ?Ra\<^sup>*``addrs ?A" using a i4 by (simp only:reachable_def, clarsimp)
have excl: "x \<notin> ?Rb\<^sup>*`` addrs ?T" using pDisj ifB1 a by (auto simp add:addrs_def)
\<comment> \<open>And therefore also belongs to the right hand side of @{thm[source]subset},\<close>
@@ -354,14 +354,14 @@
from i5 have poI5: "\<forall>x. m x \<longrightarrow> x \<in> R" .
moreover
- \<comment> \<open>If it is not on the stack, then its @{term l} and @{term r} fields are unchanged\<close>
+ \<comment> \<open>If it is not on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields are unchanged\<close>
from i7 i6 ifB2
have poI6: "\<forall>x. x \<notin> set stack_tl \<longrightarrow> (r(p \<rightarrow> t)) x = iR x \<and> l x = iL x"
by(auto simp: addr_p_eq stack_eq fun_upd_apply)
moreover
- \<comment> \<open>If it is on the stack, then its @{term l} and @{term r} fields can be reconstructed\<close>
+ \<comment> \<open>If it is on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields can be reconstructed\<close>
from p_notin_stack_tl i7 have poI7: "stkOk c l (r(p \<rightarrow> t)) iL iR p stack_tl"
by (clarsimp simp:stack_eq addr_p_eq)
@@ -454,13 +454,13 @@
have "?swI5" .
moreover
- \<comment> \<open>If it is not on the stack, then its @{term l} and @{term r} fields are unchanged\<close>
+ \<comment> \<open>If it is not on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields are unchanged\<close>
from i6 stack_eq
have "?swI6"
by clarsimp
moreover
- \<comment> \<open>If it is on the stack, then its @{term l} and @{term r} fields can be reconstructed\<close>
+ \<comment> \<open>If it is on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields can be reconstructed\<close>
from stackDist i7 nifB2
have "?swI7"
by (clarsimp simp:addr_p_eq stack_eq)
@@ -552,13 +552,13 @@
by (auto simp:addrs_def i3 reachable_def addr_t_eq fun_upd_apply intro:self_reachable)
moreover
- \<comment> \<open>If it is not on the stack, then its @{term l} and @{term r} fields are unchanged\<close>
+ \<comment> \<open>If it is not on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields are unchanged\<close>
from i6
have "?puI6"
by (simp add:new_stack_eq)
moreover
- \<comment> \<open>If it is on the stack, then its @{term l} and @{term r} fields can be reconstructed\<close>
+ \<comment> \<open>If it is on the stack, then its \<^term>\<open>l\<close> and \<^term>\<open>r\<close> fields can be reconstructed\<close>
from stackDist i6 t_notin_stack i7
have "?puI7" by (clarsimp simp:addr_t_eq new_stack_eq)
--- a/src/HOL/Hoare/Separation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/Separation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -61,24 +61,24 @@
\<^cancel>\<open>| free_tr((list as Free("List",_))$ p $ ps) = list $ Syntax.free "H" $ p $ ps\<close>
| free_tr t = t
-fun emp_tr [] = Syntax.const @{const_syntax is_empty} $ Syntax.free "H"
+fun emp_tr [] = Syntax.const \<^const_syntax>\<open>is_empty\<close> $ Syntax.free "H"
| emp_tr ts = raise TERM ("emp_tr", ts);
-fun singl_tr [p, q] = Syntax.const @{const_syntax singl} $ Syntax.free "H" $ p $ q
+fun singl_tr [p, q] = Syntax.const \<^const_syntax>\<open>singl\<close> $ Syntax.free "H" $ p $ q
| singl_tr ts = raise TERM ("singl_tr", ts);
-fun star_tr [P,Q] = Syntax.const @{const_syntax star} $
+fun star_tr [P,Q] = Syntax.const \<^const_syntax>\<open>star\<close> $
absfree ("H", dummyT) (free_tr P) $ absfree ("H", dummyT) (free_tr Q) $
Syntax.free "H"
| star_tr ts = raise TERM ("star_tr", ts);
-fun wand_tr [P, Q] = Syntax.const @{const_syntax wand} $
+fun wand_tr [P, Q] = Syntax.const \<^const_syntax>\<open>wand\<close> $
absfree ("H", dummyT) P $ absfree ("H", dummyT) Q $ Syntax.free "H"
| wand_tr ts = raise TERM ("wand_tr", ts);
\<close>
parse_translation \<open>
- [(@{syntax_const "_emp"}, K emp_tr),
- (@{syntax_const "_singl"}, K singl_tr),
- (@{syntax_const "_star"}, K star_tr),
- (@{syntax_const "_wand"}, K wand_tr)]
+ [(\<^syntax_const>\<open>_emp\<close>, K emp_tr),
+ (\<^syntax_const>\<open>_singl\<close>, K singl_tr),
+ (\<^syntax_const>\<open>_star\<close>, K star_tr),
+ (\<^syntax_const>\<open>_wand\<close>, K wand_tr)]
\<close>
text\<open>Now it looks much better:\<close>
@@ -110,24 +110,24 @@
\<^cancel>\<open>| strip (Abs(_,_,((list as Const("List",_))$ Bound 0 $ p $ ps))) = list$p$ps\<close>
| strip (Abs(_,_,(t as Const("_var",_) $ Var _) $ Bound 0)) = t
| strip (Abs(_,_,P)) = P
- | strip (Const(@{const_syntax is_empty},_)) = Syntax.const @{syntax_const "_emp"}
+ | strip (Const(\<^const_syntax>\<open>is_empty\<close>,_)) = Syntax.const \<^syntax_const>\<open>_emp\<close>
| strip t = t;
in
-fun is_empty_tr' [_] = Syntax.const @{syntax_const "_emp"}
-fun singl_tr' [_,p,q] = Syntax.const @{syntax_const "_singl"} $ p $ q
-fun star_tr' [P,Q,_] = Syntax.const @{syntax_const "_star"} $ strip P $ strip Q
-fun wand_tr' [P,Q,_] = Syntax.const @{syntax_const "_wand"} $ strip P $ strip Q
+fun is_empty_tr' [_] = Syntax.const \<^syntax_const>\<open>_emp\<close>
+fun singl_tr' [_,p,q] = Syntax.const \<^syntax_const>\<open>_singl\<close> $ p $ q
+fun star_tr' [P,Q,_] = Syntax.const \<^syntax_const>\<open>_star\<close> $ strip P $ strip Q
+fun wand_tr' [P,Q,_] = Syntax.const \<^syntax_const>\<open>_wand\<close> $ strip P $ strip Q
end
\<close>
print_translation \<open>
- [(@{const_syntax is_empty}, K is_empty_tr'),
- (@{const_syntax singl}, K singl_tr'),
- (@{const_syntax star}, K star_tr'),
- (@{const_syntax wand}, K wand_tr')]
+ [(\<^const_syntax>\<open>is_empty\<close>, K is_empty_tr'),
+ (\<^const_syntax>\<open>singl\<close>, K singl_tr'),
+ (\<^const_syntax>\<open>star\<close>, K star_tr'),
+ (\<^const_syntax>\<open>wand\<close>, K wand_tr')]
\<close>
text\<open>Now the intermediate proof states are also readable:\<close>
--- a/src/HOL/Hoare/hoare_syntax.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/hoare_syntax.ML Sat Jan 05 17:24:33 2019 +0100
@@ -18,7 +18,7 @@
local
fun idt_name (Free (x, _)) = SOME x
- | idt_name (Const (@{syntax_const "_constrain"}, _) $ t $ _) = idt_name t
+ | idt_name (Const (\<^syntax_const>\<open>_constrain\<close>, _) $ t $ _) = idt_name t
| idt_name _ = NONE;
fun eq_idt tu =
@@ -28,11 +28,11 @@
fun mk_abstuple [x] body = Syntax_Trans.abs_tr [x, body]
| mk_abstuple (x :: xs) body =
- Syntax.const @{const_syntax case_prod} $ Syntax_Trans.abs_tr [x, mk_abstuple xs body];
+ Syntax.const \<^const_syntax>\<open>case_prod\<close> $ Syntax_Trans.abs_tr [x, mk_abstuple xs body];
fun mk_fbody x e [y] = if eq_idt (x, y) then e else y
| mk_fbody x e (y :: xs) =
- Syntax.const @{const_syntax Pair} $
+ Syntax.const \<^const_syntax>\<open>Pair\<close> $
(if eq_idt (x, y) then e else y) $ mk_fbody x e xs;
fun mk_fexp x e xs = mk_abstuple xs (mk_fbody x e xs);
@@ -43,32 +43,32 @@
were boolean expressions*)
fun bexp_tr (Const ("TRUE", _)) _ = Syntax.const "TRUE" (* FIXME !? *)
- | bexp_tr b xs = Syntax.const @{const_syntax Collect} $ mk_abstuple xs b;
+ | bexp_tr b xs = Syntax.const \<^const_syntax>\<open>Collect\<close> $ mk_abstuple xs b;
-fun assn_tr r xs = Syntax.const @{const_syntax Collect} $ mk_abstuple xs r;
+fun assn_tr r xs = Syntax.const \<^const_syntax>\<open>Collect\<close> $ mk_abstuple xs r;
(* com_tr *)
-fun com_tr (Const (@{syntax_const "_assign"}, _) $ x $ e) xs =
- Syntax.const @{const_syntax Basic} $ mk_fexp x e xs
- | com_tr (Const (@{const_syntax Basic},_) $ f) _ = Syntax.const @{const_syntax Basic} $ f
- | com_tr (Const (@{const_syntax Seq},_) $ c1 $ c2) xs =
- Syntax.const @{const_syntax Seq} $ com_tr c1 xs $ com_tr c2 xs
- | com_tr (Const (@{const_syntax Cond},_) $ b $ c1 $ c2) xs =
- Syntax.const @{const_syntax Cond} $ bexp_tr b xs $ com_tr c1 xs $ com_tr c2 xs
- | com_tr (Const (@{const_syntax While},_) $ b $ I $ c) xs =
- Syntax.const @{const_syntax While} $ bexp_tr b xs $ assn_tr I xs $ com_tr c xs
+fun com_tr (Const (\<^syntax_const>\<open>_assign\<close>, _) $ x $ e) xs =
+ Syntax.const \<^const_syntax>\<open>Basic\<close> $ mk_fexp x e xs
+ | com_tr (Const (\<^const_syntax>\<open>Basic\<close>,_) $ f) _ = Syntax.const \<^const_syntax>\<open>Basic\<close> $ f
+ | com_tr (Const (\<^const_syntax>\<open>Seq\<close>,_) $ c1 $ c2) xs =
+ Syntax.const \<^const_syntax>\<open>Seq\<close> $ com_tr c1 xs $ com_tr c2 xs
+ | com_tr (Const (\<^const_syntax>\<open>Cond\<close>,_) $ b $ c1 $ c2) xs =
+ Syntax.const \<^const_syntax>\<open>Cond\<close> $ bexp_tr b xs $ com_tr c1 xs $ com_tr c2 xs
+ | com_tr (Const (\<^const_syntax>\<open>While\<close>,_) $ b $ I $ c) xs =
+ Syntax.const \<^const_syntax>\<open>While\<close> $ bexp_tr b xs $ assn_tr I xs $ com_tr c xs
| com_tr t _ = t;
-fun vars_tr (Const (@{syntax_const "_idts"}, _) $ idt $ vars) = idt :: vars_tr vars
+fun vars_tr (Const (\<^syntax_const>\<open>_idts\<close>, _) $ idt $ vars) = idt :: vars_tr vars
| vars_tr t = [t];
in
fun hoare_vars_tr [vars, pre, prg, post] =
let val xs = vars_tr vars
- in Syntax.const @{const_syntax Valid} $
+ in Syntax.const \<^const_syntax>\<open>Valid\<close> $
assn_tr pre xs $ com_tr prg xs $ assn_tr post xs
end
| hoare_vars_tr ts = raise TERM ("hoare_vars_tr", ts);
@@ -82,21 +82,21 @@
local
fun dest_abstuple
- (Const (@{const_syntax case_prod}, _) $ Abs (v, _, body)) =
+ (Const (\<^const_syntax>\<open>case_prod\<close>, _) $ Abs (v, _, body)) =
subst_bound (Syntax.free v, dest_abstuple body)
| dest_abstuple (Abs (v,_, body)) = subst_bound (Syntax.free v, body)
| dest_abstuple tm = tm;
-fun abs2list (Const (@{const_syntax case_prod}, _) $ Abs (x, T, t)) = Free (x, T) :: abs2list t
+fun abs2list (Const (\<^const_syntax>\<open>case_prod\<close>, _) $ Abs (x, T, t)) = Free (x, T) :: abs2list t
| abs2list (Abs (x, T, _)) = [Free (x, T)]
| abs2list _ = [];
-fun mk_ts (Const (@{const_syntax case_prod}, _) $ Abs (_, _, t)) = mk_ts t
+fun mk_ts (Const (\<^const_syntax>\<open>case_prod\<close>, _) $ Abs (_, _, t)) = mk_ts t
| mk_ts (Abs (_, _, t)) = mk_ts t
- | mk_ts (Const (@{const_syntax Pair}, _) $ a $ b) = a :: mk_ts b
+ | mk_ts (Const (\<^const_syntax>\<open>Pair\<close>, _) $ a $ b) = a :: mk_ts b
| mk_ts t = [t];
-fun mk_vts (Const (@{const_syntax case_prod},_) $ Abs (x, _, t)) =
+fun mk_vts (Const (\<^const_syntax>\<open>case_prod\<close>,_) $ Abs (x, _, t)) =
(Syntax.free x :: abs2list t, mk_ts t)
| mk_vts (Abs (x, _, t)) = ([Syntax.free x], [t])
| mk_vts _ = raise Match;
@@ -106,20 +106,20 @@
if t = Bound i then find_ch vts (i - 1) xs
else (true, (v, subst_bounds (xs, t)));
-fun is_f (Const (@{const_syntax case_prod}, _) $ Abs _) = true
+fun is_f (Const (\<^const_syntax>\<open>case_prod\<close>, _) $ Abs _) = true
| is_f (Abs _) = true
| is_f _ = false;
(* assn_tr' & bexp_tr'*)
-fun assn_tr' (Const (@{const_syntax Collect}, _) $ T) = dest_abstuple T
- | assn_tr' (Const (@{const_syntax inter}, _) $
- (Const (@{const_syntax Collect}, _) $ T1) $ (Const (@{const_syntax Collect}, _) $ T2)) =
- Syntax.const @{const_syntax inter} $ dest_abstuple T1 $ dest_abstuple T2
+fun assn_tr' (Const (\<^const_syntax>\<open>Collect\<close>, _) $ T) = dest_abstuple T
+ | assn_tr' (Const (\<^const_syntax>\<open>inter\<close>, _) $
+ (Const (\<^const_syntax>\<open>Collect\<close>, _) $ T1) $ (Const (\<^const_syntax>\<open>Collect\<close>, _) $ T2)) =
+ Syntax.const \<^const_syntax>\<open>inter\<close> $ dest_abstuple T1 $ dest_abstuple T2
| assn_tr' t = t;
-fun bexp_tr' (Const (@{const_syntax Collect}, _) $ T) = dest_abstuple T
+fun bexp_tr' (Const (\<^const_syntax>\<open>Collect\<close>, _) $ T) = dest_abstuple T
| bexp_tr' t = t;
@@ -131,19 +131,19 @@
val (ch, which) = find_ch (vs ~~ ts) (length vs - 1) (rev vs);
in
if ch
- then Syntax.const @{syntax_const "_assign"} $ fst which $ snd which
- else Syntax.const @{const_syntax annskip}
+ then Syntax.const \<^syntax_const>\<open>_assign\<close> $ fst which $ snd which
+ else Syntax.const \<^const_syntax>\<open>annskip\<close>
end;
-fun com_tr' (Const (@{const_syntax Basic}, _) $ f) =
+fun com_tr' (Const (\<^const_syntax>\<open>Basic\<close>, _) $ f) =
if is_f f then mk_assign f
- else Syntax.const @{const_syntax Basic} $ f
- | com_tr' (Const (@{const_syntax Seq},_) $ c1 $ c2) =
- Syntax.const @{const_syntax Seq} $ com_tr' c1 $ com_tr' c2
- | com_tr' (Const (@{const_syntax Cond}, _) $ b $ c1 $ c2) =
- Syntax.const @{const_syntax Cond} $ bexp_tr' b $ com_tr' c1 $ com_tr' c2
- | com_tr' (Const (@{const_syntax While}, _) $ b $ I $ c) =
- Syntax.const @{const_syntax While} $ bexp_tr' b $ assn_tr' I $ com_tr' c
+ else Syntax.const \<^const_syntax>\<open>Basic\<close> $ f
+ | com_tr' (Const (\<^const_syntax>\<open>Seq\<close>,_) $ c1 $ c2) =
+ Syntax.const \<^const_syntax>\<open>Seq\<close> $ com_tr' c1 $ com_tr' c2
+ | com_tr' (Const (\<^const_syntax>\<open>Cond\<close>, _) $ b $ c1 $ c2) =
+ Syntax.const \<^const_syntax>\<open>Cond\<close> $ bexp_tr' b $ com_tr' c1 $ com_tr' c2
+ | com_tr' (Const (\<^const_syntax>\<open>While\<close>, _) $ b $ I $ c) =
+ Syntax.const \<^const_syntax>\<open>While\<close> $ bexp_tr' b $ assn_tr' I $ com_tr' c
| com_tr' t = t;
in
--- a/src/HOL/Hoare/hoare_tac.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare/hoare_tac.ML Sat Jan 05 17:24:33 2019 +0100
@@ -26,12 +26,12 @@
local
(** maps (%x1 ... xn. t) to [x1,...,xn] **)
-fun abs2list (Const (@{const_name case_prod}, _) $ Abs (x, T, t)) = Free (x, T) :: abs2list t
+fun abs2list (Const (\<^const_name>\<open>case_prod\<close>, _) $ Abs (x, T, t)) = Free (x, T) :: abs2list t
| abs2list (Abs (x, T, _)) = [Free (x, T)]
| abs2list _ = [];
(** maps {(x1,...,xn). t} to [x1,...,xn] **)
-fun mk_vars (Const (@{const_name Collect},_) $ T) = abs2list T
+fun mk_vars (Const (\<^const_name>\<open>Collect\<close>,_) $ T) = abs2list T
| mk_vars _ = [];
(** abstraction of body over a tuple formed from a list of free variables.
@@ -47,7 +47,7 @@
Abs (_, T, _) => T
| Const (_, Type (_, [_, Type (_, [T, _])])) $ _ => T);
in
- Const (@{const_name case_prod},
+ Const (\<^const_name>\<open>case_prod\<close>,
(T --> T2 --> HOLogic.boolT) --> HOLogic.mk_prodT (T, T2) --> HOLogic.boolT) $
absfree (x, T) z
end;
@@ -62,8 +62,8 @@
val T2 =
(case z of
Free (_, T) => T
- | Const (@{const_name Pair}, Type ("fun", [_, Type ("fun", [_, T])])) $ _ $ _ => T);
- in Const (@{const_name Pair}, [T, T2] ---> HOLogic.mk_prodT (T, T2)) $ x $ z end;
+ | Const (\<^const_name>\<open>Pair\<close>, Type ("fun", [_, Type ("fun", [_, T])])) $ _ $ _ => T);
+ in Const (\<^const_name>\<open>Pair\<close>, [T, T2] ---> HOLogic.mk_prodT (T, T2)) $ x $ z end;
(** maps a subgoal of the form:
VARS x1 ... xn {._.} _ {._.} or to [x1,...,xn]
@@ -78,7 +78,7 @@
let val T as Type ("fun",[t,_]) = fastype_of tm;
in HOLogic.Collect_const t $ tm end;
-fun inclt ty = Const (@{const_name Orderings.less_eq}, [ty,ty] ---> HOLogic.boolT);
+fun inclt ty = Const (\<^const_name>\<open>Orderings.less_eq\<close>, [ty,ty] ---> HOLogic.boolT);
in
--- a/src/HOL/Hoare_Parallel/Gar_Coll.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/Gar_Coll.thy Sat Jan 05 17:24:33 2019 +0100
@@ -768,27 +768,27 @@
apply interfree_aux
apply(simp_all add:collector_mutator_interfree)
apply(unfold modules collector_defs Mut_init_def)
-apply(tactic \<open>TRYALL (interfree_aux_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (interfree_aux_tac \<^context>)\<close>)
\<comment> \<open>32 subgoals left\<close>
apply(simp_all add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)
\<comment> \<open>20 subgoals left\<close>
-apply(tactic\<open>TRYALL (clarify_tac @{context})\<close>)
+apply(tactic\<open>TRYALL (clarify_tac \<^context>)\<close>)
apply(simp_all add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [disjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [disjE])\<close>)
apply simp_all
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- resolve_tac @{context} [subset_trans],
- eresolve_tac @{context} @{thms Graph3},
- force_tac @{context},
- assume_tac @{context}])\<close>)
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- eresolve_tac @{context} [subset_trans],
- resolve_tac @{context} @{thms Graph9},
- force_tac @{context}])\<close>)
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI1],
- eresolve_tac @{context} @{thms psubset_subset_trans},
- resolve_tac @{context} @{thms Graph9},
- force_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [subset_trans],
+ eresolve_tac \<^context> @{thms Graph3},
+ force_tac \<^context>,
+ assume_tac \<^context>])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ eresolve_tac \<^context> [subset_trans],
+ resolve_tac \<^context> @{thms Graph9},
+ force_tac \<^context>])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI1],
+ eresolve_tac \<^context> @{thms psubset_subset_trans},
+ resolve_tac \<^context> @{thms Graph9},
+ force_tac \<^context>])\<close>)
done
subsubsection \<open>Interference freedom Mutator-Collector\<close>
@@ -799,10 +799,10 @@
apply interfree_aux
apply(simp_all add:collector_mutator_interfree)
apply(unfold modules collector_defs Mut_init_def)
-apply(tactic \<open>TRYALL (interfree_aux_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (interfree_aux_tac \<^context>)\<close>)
\<comment> \<open>64 subgoals left\<close>
apply(simp_all add:nth_list_update Invariants Append_to_free0)+
-apply(tactic\<open>TRYALL (clarify_tac @{context})\<close>)
+apply(tactic\<open>TRYALL (clarify_tac \<^context>)\<close>)
\<comment> \<open>4 subgoals left\<close>
apply force
apply(simp add:Append_to_free2)
--- a/src/HOL/Hoare_Parallel/Mul_Gar_Coll.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/Mul_Gar_Coll.thy Sat Jan 05 17:24:33 2019 +0100
@@ -130,8 +130,8 @@
apply(interfree_aux)
apply(simp_all add:mul_mutator_interfree)
apply(simp_all add: mul_mutator_defs)
-apply(tactic \<open>TRYALL (interfree_aux_tac @{context})\<close>)
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (interfree_aux_tac \<^context>)\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply (simp_all add:nth_list_update)
done
@@ -1121,7 +1121,7 @@
interfree_aux (Some(Mul_Append n),{}, Some(Mul_Redirect_Edge j n))"
apply (unfold mul_modules)
apply interfree_aux
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply(simp_all add:Graph6 Append_to_free0 Append_to_free1 mul_collector_defs mul_mutator_defs Mul_AppendInv_def)
apply(erule_tac x=j in allE, force dest:Graph3)+
done
@@ -1130,7 +1130,7 @@
interfree_aux (Some(Mul_Redirect_Edge j n),{},Some(Mul_Append n))"
apply (unfold mul_modules)
apply interfree_aux
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply(simp_all add:mul_collector_defs Append_to_free0 Mul_AppendInv_def mul_mutator_defs nth_list_update)
done
@@ -1138,7 +1138,7 @@
interfree_aux (Some(Mul_Append n),{}, Some(Mul_Color_Target j n))"
apply (unfold mul_modules)
apply interfree_aux
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply(simp_all add:mul_mutator_defs mul_collector_defs Mul_AppendInv_def Graph7 Graph8 Append_to_free0 Append_to_free1
Graph12 nth_list_update)
done
@@ -1147,7 +1147,7 @@
interfree_aux (Some(Mul_Color_Target j n),{}, Some(Mul_Append n))"
apply (unfold mul_modules)
apply interfree_aux
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply(simp_all add: mul_mutator_defs nth_list_update)
apply(simp add:Mul_AppendInv_def Append_to_free0)
done
@@ -1170,70 +1170,70 @@
apply interfree_aux
apply(simp_all add:mul_collector_mutator_interfree)
apply(unfold mul_modules mul_collector_defs mul_mutator_defs)
-apply(tactic \<open>TRYALL (interfree_aux_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (interfree_aux_tac \<^context>)\<close>)
\<comment> \<open>42 subgoals left\<close>
apply (clarify,simp add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)+
\<comment> \<open>24 subgoals left\<close>
apply(simp_all add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)
\<comment> \<open>14 subgoals left\<close>
-apply(tactic \<open>TRYALL (clarify_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (clarify_tac \<^context>)\<close>)
apply(simp_all add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)
-apply(tactic \<open>TRYALL (resolve_tac @{context} [conjI])\<close>)
-apply(tactic \<open>TRYALL (resolve_tac @{context} [impI])\<close>)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [disjE])\<close>)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [conjE])\<close>)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [disjE])\<close>)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [disjE])\<close>)
+apply(tactic \<open>TRYALL (resolve_tac \<^context> [conjI])\<close>)
+apply(tactic \<open>TRYALL (resolve_tac \<^context> [impI])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [disjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [conjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [disjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [disjE])\<close>)
\<comment> \<open>72 subgoals left\<close>
apply(simp_all add:Graph6 Graph7 Graph8 Append_to_free0 Append_to_free1 Graph12)
\<comment> \<open>35 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI1],
- resolve_tac @{context} [subset_trans],
- eresolve_tac @{context} @{thms Graph3},
- force_tac @{context},
- assume_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI1],
+ resolve_tac \<^context> [subset_trans],
+ eresolve_tac \<^context> @{thms Graph3},
+ force_tac \<^context>,
+ assume_tac \<^context>])\<close>)
\<comment> \<open>28 subgoals left\<close>
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [conjE])\<close>)
-apply(tactic \<open>TRYALL (eresolve_tac @{context} [disjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [conjE])\<close>)
+apply(tactic \<open>TRYALL (eresolve_tac \<^context> [disjE])\<close>)
\<comment> \<open>34 subgoals left\<close>
apply(rule disjI2,rule disjI1,erule le_trans,force simp add:Queue_def less_Suc_eq_le le_length_filter_update)
apply(rule disjI2,rule disjI1,erule le_trans,force simp add:Queue_def less_Suc_eq_le le_length_filter_update)
apply(case_tac [!] "M x!(T (Muts x ! j))=Black")
apply(simp_all add:Graph10)
\<comment> \<open>47 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[REPEAT o resolve_tac @{context} [disjI2],
- eresolve_tac @{context} @{thms subset_psubset_trans},
- eresolve_tac @{context} @{thms Graph11},
- force_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[REPEAT o resolve_tac \<^context> [disjI2],
+ eresolve_tac \<^context> @{thms subset_psubset_trans},
+ eresolve_tac \<^context> @{thms Graph11},
+ force_tac \<^context>])\<close>)
\<comment> \<open>41 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- resolve_tac @{context} [disjI1],
- eresolve_tac @{context} @{thms le_trans},
- force_tac (@{context} addsimps @{thms Queue_def less_Suc_eq_le le_length_filter_update})])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [disjI1],
+ eresolve_tac \<^context> @{thms le_trans},
+ force_tac (\<^context> addsimps @{thms Queue_def less_Suc_eq_le le_length_filter_update})])\<close>)
\<comment> \<open>35 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- resolve_tac @{context} [disjI1],
- eresolve_tac @{context} @{thms psubset_subset_trans},
- resolve_tac @{context} @{thms Graph9},
- force_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [disjI1],
+ eresolve_tac \<^context> @{thms psubset_subset_trans},
+ resolve_tac \<^context> @{thms Graph9},
+ force_tac \<^context>])\<close>)
\<comment> \<open>31 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- resolve_tac @{context} [disjI1],
- eresolve_tac @{context} @{thms subset_psubset_trans},
- eresolve_tac @{context} @{thms Graph11},
- force_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [disjI1],
+ eresolve_tac \<^context> @{thms subset_psubset_trans},
+ eresolve_tac \<^context> @{thms Graph11},
+ force_tac \<^context>])\<close>)
\<comment> \<open>29 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[REPEAT o resolve_tac @{context} [disjI2],
- eresolve_tac @{context} @{thms subset_psubset_trans},
- eresolve_tac @{context} @{thms subset_psubset_trans},
- eresolve_tac @{context} @{thms Graph11},
- force_tac @{context}])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[REPEAT o resolve_tac \<^context> [disjI2],
+ eresolve_tac \<^context> @{thms subset_psubset_trans},
+ eresolve_tac \<^context> @{thms subset_psubset_trans},
+ eresolve_tac \<^context> @{thms Graph11},
+ force_tac \<^context>])\<close>)
\<comment> \<open>25 subgoals left\<close>
-apply(tactic \<open>TRYALL(EVERY'[resolve_tac @{context} [disjI2],
- resolve_tac @{context} [disjI2],
- resolve_tac @{context} [disjI1],
- eresolve_tac @{context} @{thms le_trans},
- force_tac (@{context} addsimps @{thms Queue_def less_Suc_eq_le le_length_filter_update})])\<close>)
+apply(tactic \<open>TRYALL(EVERY'[resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [disjI2],
+ resolve_tac \<^context> [disjI1],
+ eresolve_tac \<^context> @{thms le_trans},
+ force_tac (\<^context> addsimps @{thms Queue_def less_Suc_eq_le le_length_filter_update})])\<close>)
\<comment> \<open>10 subgoals left\<close>
apply(rule disjI2,rule disjI2,rule conjI,erule less_le_trans,force simp add:Queue_def less_Suc_eq_le le_length_filter_update, rule disjI1, rule less_imp_le, erule less_le_trans, force simp add:Queue_def less_Suc_eq_le le_length_filter_update)+
done
@@ -1246,7 +1246,7 @@
apply interfree_aux
apply(simp_all add:mul_collector_mutator_interfree)
apply(unfold mul_modules mul_collector_defs mul_mutator_defs)
-apply(tactic \<open>TRYALL (interfree_aux_tac @{context})\<close>)
+apply(tactic \<open>TRYALL (interfree_aux_tac \<^context>)\<close>)
\<comment> \<open>76 subgoals left\<close>
apply (clarsimp simp add: nth_list_update)+
\<comment> \<open>56 subgoals left\<close>
--- a/src/HOL/Hoare_Parallel/OG_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/OG_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -170,10 +170,10 @@
\<comment> \<open>35 vc\<close>
apply simp_all
\<comment> \<open>16 vc\<close>
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
\<comment> \<open>11 vc\<close>
apply simp_all
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
\<comment> \<open>10 subgoals left\<close>
apply(erule less_SucE)
apply simp
@@ -430,13 +430,13 @@
\<lbrace> \<forall>k<length a. (a ! k)=(\<acute>b ! k)\<rbrace>"
apply oghoare
\<comment> \<open>138 vc\<close>
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
\<comment> \<open>112 subgoals left\<close>
apply(simp_all (no_asm))
\<comment> \<open>43 subgoals left\<close>
-apply(tactic \<open>ALLGOALS (conjI_Tac @{context} (K all_tac))\<close>)
+apply(tactic \<open>ALLGOALS (conjI_Tac \<^context> (K all_tac))\<close>)
\<comment> \<open>419 subgoals left\<close>
-apply(tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply(tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
\<comment> \<open>99 subgoals left\<close>
apply(simp_all only:length_0_conv [THEN sym])
\<comment> \<open>20 subgoals left\<close>
@@ -535,7 +535,7 @@
\<lbrace>\<acute>x=n\<rbrace>"
apply oghoare
apply (simp_all cong del: sum.cong_strong)
-apply (tactic \<open>ALLGOALS (clarify_tac @{context})\<close>)
+apply (tactic \<open>ALLGOALS (clarify_tac \<^context>)\<close>)
apply (simp_all cong del: sum.cong_strong)
apply(erule (1) Example2_lemma2)
apply(erule (1) Example2_lemma2)
--- a/src/HOL/Hoare_Parallel/OG_Syntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/OG_Syntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -76,52 +76,52 @@
print_translation \<open>
let
fun quote_tr' f (t :: ts) =
- Term.list_comb (f $ Syntax_Trans.quote_tr' @{syntax_const "_antiquote"} t, ts)
+ Term.list_comb (f $ Syntax_Trans.quote_tr' \<^syntax_const>\<open>_antiquote\<close> t, ts)
| quote_tr' _ _ = raise Match;
fun annquote_tr' f (r :: t :: ts) =
- Term.list_comb (f $ r $ Syntax_Trans.quote_tr' @{syntax_const "_antiquote"} t, ts)
+ Term.list_comb (f $ r $ Syntax_Trans.quote_tr' \<^syntax_const>\<open>_antiquote\<close> t, ts)
| annquote_tr' _ _ = raise Match;
- val assert_tr' = quote_tr' (Syntax.const @{syntax_const "_Assert"});
+ val assert_tr' = quote_tr' (Syntax.const \<^syntax_const>\<open>_Assert\<close>);
- fun bexp_tr' name ((Const (@{const_syntax Collect}, _) $ t) :: ts) =
+ fun bexp_tr' name ((Const (\<^const_syntax>\<open>Collect\<close>, _) $ t) :: ts) =
quote_tr' (Syntax.const name) (t :: ts)
| bexp_tr' _ _ = raise Match;
- fun annbexp_tr' name (r :: (Const (@{const_syntax Collect}, _) $ t) :: ts) =
+ fun annbexp_tr' name (r :: (Const (\<^const_syntax>\<open>Collect\<close>, _) $ t) :: ts) =
annquote_tr' (Syntax.const name) (r :: t :: ts)
| annbexp_tr' _ _ = raise Match;
fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) =
- quote_tr' (Syntax.const @{syntax_const "_Assign"} $ Syntax_Trans.update_name_tr' f)
+ quote_tr' (Syntax.const \<^syntax_const>\<open>_Assign\<close> $ Syntax_Trans.update_name_tr' f)
(Abs (x, dummyT, Syntax_Trans.const_abs_tr' k) :: ts)
| assign_tr' _ = raise Match;
fun annassign_tr' (r :: Abs (x, _, f $ k $ Bound 0) :: ts) =
- quote_tr' (Syntax.const @{syntax_const "_AnnAssign"} $ r $ Syntax_Trans.update_name_tr' f)
+ quote_tr' (Syntax.const \<^syntax_const>\<open>_AnnAssign\<close> $ r $ Syntax_Trans.update_name_tr' f)
(Abs (x, dummyT, Syntax_Trans.const_abs_tr' k) :: ts)
| annassign_tr' _ = raise Match;
- fun Parallel_PAR [(Const (@{const_syntax Cons}, _) $
- (Const (@{const_syntax Pair}, _) $ (Const (@{const_syntax Some},_) $ t1 ) $ t2) $
- Const (@{const_syntax Nil}, _))] = Syntax.const @{syntax_const "_prg"} $ t1 $ t2
- | Parallel_PAR [(Const (@{const_syntax Cons}, _) $
- (Const (@{const_syntax Pair}, _) $ (Const (@{const_syntax Some}, _) $ t1) $ t2) $ ts)] =
- Syntax.const @{syntax_const "_prgs"} $ t1 $ t2 $ Parallel_PAR [ts]
+ fun Parallel_PAR [(Const (\<^const_syntax>\<open>Cons\<close>, _) $
+ (Const (\<^const_syntax>\<open>Pair\<close>, _) $ (Const (\<^const_syntax>\<open>Some\<close>,_) $ t1 ) $ t2) $
+ Const (\<^const_syntax>\<open>Nil\<close>, _))] = Syntax.const \<^syntax_const>\<open>_prg\<close> $ t1 $ t2
+ | Parallel_PAR [(Const (\<^const_syntax>\<open>Cons\<close>, _) $
+ (Const (\<^const_syntax>\<open>Pair\<close>, _) $ (Const (\<^const_syntax>\<open>Some\<close>, _) $ t1) $ t2) $ ts)] =
+ Syntax.const \<^syntax_const>\<open>_prgs\<close> $ t1 $ t2 $ Parallel_PAR [ts]
| Parallel_PAR _ = raise Match;
- fun Parallel_tr' ts = Syntax.const @{syntax_const "_PAR"} $ Parallel_PAR ts;
+ fun Parallel_tr' ts = Syntax.const \<^syntax_const>\<open>_PAR\<close> $ Parallel_PAR ts;
in
- [(@{const_syntax Collect}, K assert_tr'),
- (@{const_syntax Basic}, K assign_tr'),
- (@{const_syntax Cond}, K (bexp_tr' @{syntax_const "_Cond"})),
- (@{const_syntax While}, K (bexp_tr' @{syntax_const "_While_inv"})),
- (@{const_syntax AnnBasic}, K annassign_tr'),
- (@{const_syntax AnnWhile}, K (annbexp_tr' @{syntax_const "_AnnWhile"})),
- (@{const_syntax AnnAwait}, K (annbexp_tr' @{syntax_const "_AnnAwait"})),
- (@{const_syntax AnnCond1}, K (annbexp_tr' @{syntax_const "_AnnCond1"})),
- (@{const_syntax AnnCond2}, K (annbexp_tr' @{syntax_const "_AnnCond2"}))]
+ [(\<^const_syntax>\<open>Collect\<close>, K assert_tr'),
+ (\<^const_syntax>\<open>Basic\<close>, K assign_tr'),
+ (\<^const_syntax>\<open>Cond\<close>, K (bexp_tr' \<^syntax_const>\<open>_Cond\<close>)),
+ (\<^const_syntax>\<open>While\<close>, K (bexp_tr' \<^syntax_const>\<open>_While_inv\<close>)),
+ (\<^const_syntax>\<open>AnnBasic\<close>, K annassign_tr'),
+ (\<^const_syntax>\<open>AnnWhile\<close>, K (annbexp_tr' \<^syntax_const>\<open>_AnnWhile\<close>)),
+ (\<^const_syntax>\<open>AnnAwait\<close>, K (annbexp_tr' \<^syntax_const>\<open>_AnnAwait\<close>)),
+ (\<^const_syntax>\<open>AnnCond1\<close>, K (annbexp_tr' \<^syntax_const>\<open>_AnnCond1\<close>)),
+ (\<^const_syntax>\<open>AnnCond2\<close>, K (annbexp_tr' \<^syntax_const>\<open>_AnnCond2\<close>))]
end
\<close>
--- a/src/HOL/Hoare_Parallel/Quote_Antiquote.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/Quote_Antiquote.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,9 +12,9 @@
parse_translation \<open>
let
- fun quote_tr [t] = Syntax_Trans.quote_tr @{syntax_const "_antiquote"} t
+ fun quote_tr [t] = Syntax_Trans.quote_tr \<^syntax_const>\<open>_antiquote\<close> t
| quote_tr ts = raise TERM ("quote_tr", ts);
- in [(@{syntax_const "_quote"}, K quote_tr)] end
+ in [(\<^syntax_const>\<open>_quote\<close>, K quote_tr)] end
\<close>
end
--- a/src/HOL/Hoare_Parallel/RG_Syntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Hoare_Parallel/RG_Syntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -58,24 +58,24 @@
print_translation \<open>
let
fun quote_tr' f (t :: ts) =
- Term.list_comb (f $ Syntax_Trans.quote_tr' @{syntax_const "_antiquote"} t, ts)
+ Term.list_comb (f $ Syntax_Trans.quote_tr' \<^syntax_const>\<open>_antiquote\<close> t, ts)
| quote_tr' _ _ = raise Match;
- val assert_tr' = quote_tr' (Syntax.const @{syntax_const "_Assert"});
+ val assert_tr' = quote_tr' (Syntax.const \<^syntax_const>\<open>_Assert\<close>);
- fun bexp_tr' name ((Const (@{const_syntax Collect}, _) $ t) :: ts) =
+ fun bexp_tr' name ((Const (\<^const_syntax>\<open>Collect\<close>, _) $ t) :: ts) =
quote_tr' (Syntax.const name) (t :: ts)
| bexp_tr' _ _ = raise Match;
fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) =
- quote_tr' (Syntax.const @{syntax_const "_Assign"} $ Syntax_Trans.update_name_tr' f)
+ quote_tr' (Syntax.const \<^syntax_const>\<open>_Assign\<close> $ Syntax_Trans.update_name_tr' f)
(Abs (x, dummyT, Syntax_Trans.const_abs_tr' k) :: ts)
| assign_tr' _ = raise Match;
in
- [(@{const_syntax Collect}, K assert_tr'),
- (@{const_syntax Basic}, K assign_tr'),
- (@{const_syntax Cond}, K (bexp_tr' @{syntax_const "_Cond"})),
- (@{const_syntax While}, K (bexp_tr' @{syntax_const "_While"}))]
+ [(\<^const_syntax>\<open>Collect\<close>, K assert_tr'),
+ (\<^const_syntax>\<open>Basic\<close>, K assign_tr'),
+ (\<^const_syntax>\<open>Cond\<close>, K (bexp_tr' \<^syntax_const>\<open>_Cond\<close>)),
+ (\<^const_syntax>\<open>While\<close>, K (bexp_tr' \<^syntax_const>\<open>_While\<close>))]
end
\<close>
--- a/src/HOL/IMP/AExp.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/AExp.thy Sat Jan 05 17:24:33 2019 +0100
@@ -96,8 +96,8 @@
"asimp (Plus a\<^sub>1 a\<^sub>2) = plus (asimp a\<^sub>1) (asimp a\<^sub>2)"
text_raw\<open>}%endsnip\<close>
-text\<open>Note that in @{const asimp_const} the optimized constructor was
-inlined. Making it a separate function @{const plus} improves modularity of
+text\<open>Note that in \<^const>\<open>asimp_const\<close> the optimized constructor was
+inlined. Making it a separate function \<^const>\<open>plus\<close> improves modularity of
the code and the proofs.\<close>
value "asimp (Plus (Plus (N 0) (N 0)) (Plus (V ''x'') (N 0)))"
--- a/src/HOL/IMP/Abs_Int0.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Abs_Int0.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,8 +8,8 @@
subsubsection "Orderings"
-text\<open>The basic type classes @{class order}, @{class semilattice_sup} and @{class order_top} are
-defined in @{theory Main}, more precisely in theories @{theory HOL.Orderings} and @{theory HOL.Lattices}.
+text\<open>The basic type classes \<^class>\<open>order\<close>, \<^class>\<open>semilattice_sup\<close> and \<^class>\<open>order_top\<close> are
+defined in \<^theory>\<open>Main\<close>, more precisely in theories \<^theory>\<open>HOL.Orderings\<close> and \<^theory>\<open>HOL.Lattices\<close>.
If you view this theory with jedit, just click on the names to get there.\<close>
class semilattice_sup_top = semilattice_sup + order_top
@@ -164,8 +164,8 @@
type_synonym 'av st = "(vname \<Rightarrow> 'av)"
text\<open>The for-clause (here and elsewhere) only serves the purpose of fixing
-the name of the type parameter @{typ 'av} which would otherwise be renamed to
-@{typ 'a}.\<close>
+the name of the type parameter \<^typ>\<open>'av\<close> which would otherwise be renamed to
+\<^typ>\<open>'a\<close>.\<close>
locale Abs_Int_fun = Val_semilattice where \<gamma>=\<gamma>
for \<gamma> :: "'av::semilattice_sup_top \<Rightarrow> val set"
@@ -354,10 +354,10 @@
begin
text\<open>The predicates \<open>top_on_ty a X\<close> that follow describe that any abstract
-state in \<open>a\<close> maps all variables in \<open>X\<close> to @{term \<top>}.
+state in \<open>a\<close> maps all variables in \<open>X\<close> to \<^term>\<open>\<top>\<close>.
This is an important invariant for the termination proof where we argue that only
the finitely many variables in the program change. That the others do not change
-follows because they remain @{term \<top>}.\<close>
+follows because they remain \<^term>\<open>\<top>\<close>.\<close>
fun top_on_st :: "'av st \<Rightarrow> vname set \<Rightarrow> bool" ("top'_on\<^sub>s") where
"top_on_st S X = (\<forall>x\<in>X. S x = \<top>)"
--- a/src/HOL/IMP/Abs_Int1_parity.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Abs_Int1_parity.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,13 +8,13 @@
datatype parity = Even | Odd | Either
-text\<open>Instantiation of class @{class order} with type @{typ parity}:\<close>
+text\<open>Instantiation of class \<^class>\<open>order\<close> with type \<^typ>\<open>parity\<close>:\<close>
instantiation parity :: order
begin
text\<open>First the definition of the interface function \<open>\<le>\<close>. Note that
-the header of the definition must refer to the ascii name @{const less_eq} of the
+the header of the definition must refer to the ascii name \<^const>\<open>less_eq\<close> of the
constants as \<open>less_eq_parity\<close> and the definition is named \<open>less_eq_parity_def\<close>. Inside the definition the symbolic names can be used.\<close>
definition less_eq_parity where
@@ -46,7 +46,7 @@
end
-text\<open>Instantiation of class @{class semilattice_sup_top} with type @{typ parity}:\<close>
+text\<open>Instantiation of class \<^class>\<open>semilattice_sup_top\<close> with type \<^typ>\<open>parity\<close>:\<close>
instantiation parity :: semilattice_sup_top
begin
@@ -99,7 +99,7 @@
"plus_parity x Either = Either"
text\<open>First we instantiate the abstract value interface and prove that the
-functions on type @{typ parity} have all the necessary properties:\<close>
+functions on type \<^typ>\<open>parity\<close> have all the necessary properties:\<close>
global_interpretation Val_semilattice
where \<gamma> = \<gamma>_parity and num' = num_parity and plus' = plus_parity
--- a/src/HOL/IMP/Abs_Int2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Abs_Int2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -85,13 +85,11 @@
(let (a1,a2) = inv_plus' a (aval'' e1 S) (aval'' e2 S)
in inv_aval' e1 a1 (inv_aval' e2 a2 S))"
-text\<open>The test for @{const bot} in the @{const V}-case is important: @{const
-bot} indicates that a variable has no possible values, i.e.\ that the current
+text\<open>The test for \<^const>\<open>bot\<close> in the \<^const>\<open>V\<close>-case is important: \<^const>\<open>bot\<close> indicates that a variable has no possible values, i.e.\ that the current
program point is unreachable. But then the abstract state should collapse to
-@{const None}. Put differently, we maintain the invariant that in an abstract
-state of the form @{term"Some s"}, all variables are mapped to non-@{const
-bot} values. Otherwise the (pointwise) sup of two abstract states, one of
-which contains @{const bot} values, may produce too large a result, thus
+\<^const>\<open>None\<close>. Put differently, we maintain the invariant that in an abstract
+state of the form \<^term>\<open>Some s\<close>, all variables are mapped to non-\<^const>\<open>bot\<close> values. Otherwise the (pointwise) sup of two abstract states, one of
+which contains \<^const>\<open>bot\<close> values, may produce too large a result, thus
making the analysis less precise.\<close>
--- a/src/HOL/IMP/Abs_Int3.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Abs_Int3.thy Sat Jan 05 17:24:33 2019 +0100
@@ -266,8 +266,8 @@
definition "step_up_ivl n = ((\<lambda>C. C \<nabla> step_ivl \<top> C)^^n)"
definition "step_down_ivl n = ((\<lambda>C. C \<triangle> step_ivl \<top> C)^^n)"
-text\<open>For @{const test3_ivl}, @{const AI_ivl} needed as many iterations as
-the loop took to execute. In contrast, @{const AI_wn_ivl} converges in a
+text\<open>For \<^const>\<open>test3_ivl\<close>, \<^const>\<open>AI_ivl\<close> needed as many iterations as
+the loop took to execute. In contrast, \<^const>\<open>AI_wn_ivl\<close> converges in a
constant number of steps:\<close>
value "show_acom (step_up_ivl 1 (bot test3_ivl))"
@@ -322,9 +322,9 @@
by(auto simp add: narrow_acom_def top_on_acom_def)(metis top_on_opt_narrow in_set_zipE)
text\<open>The assumptions for widening and narrowing differ because during
-narrowing we have the invariant @{prop"y \<le> x"} (where \<open>y\<close> is the next
+narrowing we have the invariant \<^prop>\<open>y \<le> x\<close> (where \<open>y\<close> is the next
iterate), but during widening there is no such invariant, there we only have
-that not yet @{prop"y \<le> x"}. This complicates the termination proof for
+that not yet \<^prop>\<open>y \<le> x\<close>. This complicates the termination proof for
widening.\<close>
locale Measure_wn = Measure1 where m=m
@@ -584,7 +584,7 @@
subsubsection "Counterexamples"
-text\<open>Widening is increasing by assumption, but @{prop"x \<le> f x"} is not an invariant of widening.
+text\<open>Widening is increasing by assumption, but \<^prop>\<open>x \<le> f x\<close> is not an invariant of widening.
It can already be lost after the first step:\<close>
lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y"
--- a/src/HOL/IMP/Big_Step.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Big_Step.thy Sat Jan 05 17:24:33 2019 +0100
@@ -86,8 +86,8 @@
subsection "Rule inversion"
-text\<open>What can we deduce from @{prop "(SKIP,s) \<Rightarrow> t"} ?
-That @{prop "s = t"}. This is how we can automatically prove it:\<close>
+text\<open>What can we deduce from \<^prop>\<open>(SKIP,s) \<Rightarrow> t\<close> ?
+That \<^prop>\<open>s = t\<close>. This is how we can automatically prove it:\<close>
inductive_cases SkipE[elim!]: "(SKIP,s) \<Rightarrow> t"
thm SkipE
@@ -188,10 +188,10 @@
case WhileTrue
from \<open>bval b s\<close> \<open>(?w, s) \<Rightarrow> t\<close> obtain s' where
"(c, s) \<Rightarrow> s'" and "(?w, s') \<Rightarrow> t" by auto
- \<comment> \<open>now we can build a derivation tree for the @{text IF}\<close>
+ \<comment> \<open>now we can build a derivation tree for the \<^text>\<open>IF\<close>\<close>
\<comment> \<open>first, the body of the True-branch:\<close>
hence "(c;; ?w, s) \<Rightarrow> t" by (rule Seq)
- \<comment> \<open>then the whole @{text IF}\<close>
+ \<comment> \<open>then the whole \<^text>\<open>IF\<close>\<close>
with \<open>bval b s\<close> show ?thesis by (rule IfTrue)
qed
qed
@@ -209,7 +209,7 @@
\<comment> \<open>and for this, only the Seq-rule is applicable:\<close>
from \<open>(c;; ?w, s) \<Rightarrow> t\<close> obtain s' where
"(c, s) \<Rightarrow> s'" and "(?w, s') \<Rightarrow> t" by auto
- \<comment> \<open>with this information, we can build a derivation tree for @{text WHILE}\<close>
+ \<comment> \<open>with this information, we can build a derivation tree for \<^text>\<open>WHILE\<close>\<close>
with \<open>bval b s\<close> show ?thesis by (rule WhileTrue)
qed
qed
@@ -267,11 +267,11 @@
theorem
"(c,s) \<Rightarrow> t \<Longrightarrow> (c,s) \<Rightarrow> t' \<Longrightarrow> t' = t"
proof (induction arbitrary: t' rule: big_step.induct)
- \<comment> \<open>the only interesting case, @{text WhileTrue}:\<close>
+ \<comment> \<open>the only interesting case, \<^text>\<open>WhileTrue\<close>:\<close>
fix b c s s\<^sub>1 t t'
\<comment> \<open>The assumptions of the rule:\<close>
assume "bval b s" and "(c,s) \<Rightarrow> s\<^sub>1" and "(WHILE b DO c,s\<^sub>1) \<Rightarrow> t"
- \<comment> \<open>Ind.Hyp; note the @{text"\<And>"} because of arbitrary:\<close>
+ \<comment> \<open>Ind.Hyp; note the \<^text>\<open>\<And>\<close> because of arbitrary:\<close>
assume IHc: "\<And>t'. (c,s) \<Rightarrow> t' \<Longrightarrow> t' = s\<^sub>1"
assume IHw: "\<And>t'. (WHILE b DO c,s\<^sub>1) \<Rightarrow> t' \<Longrightarrow> t' = t"
\<comment> \<open>Premise of implication:\<close>
--- a/src/HOL/IMP/C_like.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/C_like.thy Sat Jan 05 17:24:33 2019 +0100
@@ -59,7 +59,7 @@
DO ( N 2 ::= Plus (!(N 2)) (!(!(N 0)));
N 0 ::= Plus (!(N 0)) (N 1) )"
-text \<open>To show the first n variables in a @{typ "nat \<Rightarrow> nat"} state:\<close>
+text \<open>To show the first n variables in a \<^typ>\<open>nat \<Rightarrow> nat\<close> state:\<close>
definition
"list t n = map t [0 ..< n]"
--- a/src/HOL/IMP/Collecting1.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Collecting1.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,7 +5,7 @@
begin
text\<open>The idea: the state is propagated through the annotated command as an
-annotation @{term "{s}"}, all other annotations are @{term "{}"}. It is easy
+annotation \<^term>\<open>{s}\<close>, all other annotations are \<^term>\<open>{}\<close>. It is easy
to show that this semantics approximates the collecting semantics.\<close>
lemma step_preserves_le:
--- a/src/HOL/IMP/Compiler.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Compiler.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,8 +9,8 @@
text \<open>
In the following, we use the length of lists as integers
- instead of natural numbers. Instead of converting @{typ nat}
- to @{typ int} explicitly, we tell Isabelle to coerce @{typ nat}
+ instead of natural numbers. Instead of converting \<^typ>\<open>nat\<close>
+ to \<^typ>\<open>int\<close> explicitly, we tell Isabelle to coerce \<^typ>\<open>nat\<close>
automatically when necessary.
\<close>
declare [[coercion_enabled]]
@@ -18,7 +18,7 @@
text \<open>
Similarly, we will want to access the ith element of a list,
- where @{term i} is an @{typ int}.
+ where \<^term>\<open>i\<close> is an \<^typ>\<open>int\<close>.
\<close>
fun inth :: "'a list \<Rightarrow> int \<Rightarrow> 'a" (infixl "!!" 100) where
"(x # xs) !! i = (if i = 0 then x else xs !! (i - 1))"
@@ -32,7 +32,7 @@
(xs @ ys) !! i = (if i < size xs then xs !! i else ys !! (i - size xs))"
by (induction xs arbitrary: i) (auto simp: algebra_simps)
-text\<open>We hide coercion @{const int} applied to @{const length}:\<close>
+text\<open>We hide coercion \<^const>\<open>int\<close> applied to \<^const>\<open>length\<close>:\<close>
abbreviation (output)
"isize xs == int (length xs)"
@@ -123,7 +123,7 @@
by (induction rule: exec_induct) (blast intro: star.step exec1_appendL)+
text\<open>Now we specialise the above lemmas to enable automatic proofs of
-@{prop "P \<turnstile> c \<rightarrow>* c'"} where \<open>P\<close> is a mixture of concrete instructions and
+\<^prop>\<open>P \<turnstile> c \<rightarrow>* c'\<close> where \<open>P\<close> is a mixture of concrete instructions and
pieces of code that we already know how they execute (by induction), combined
by \<open>@\<close> and \<open>#\<close>. Backward jumps are not supported.
The details should be skipped on a first reading.
--- a/src/HOL/IMP/Compiler2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Compiler2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
subsection \<open>Definitions\<close>
-text \<open>Execution in @{term n} steps for simpler induction\<close>
+text \<open>Execution in \<^term>\<open>n\<close> steps for simpler induction\<close>
primrec
exec_n :: "instr list \<Rightarrow> config \<Rightarrow> nat \<Rightarrow> config \<Rightarrow> bool"
("_/ \<turnstile> (_ \<rightarrow>^_/ _)" [65,0,1000,55] 55)
@@ -21,7 +21,7 @@
"P \<turnstile> c \<rightarrow>^0 c' = (c'=c)" |
"P \<turnstile> c \<rightarrow>^(Suc n) c'' = (\<exists>c'. (P \<turnstile> c \<rightarrow> c') \<and> P \<turnstile> c' \<rightarrow>^n c'')"
-text \<open>The possible successor PCs of an instruction at position @{term n}\<close>
+text \<open>The possible successor PCs of an instruction at position \<^term>\<open>n\<close>\<close>
text_raw\<open>\snip{isuccsdef}{0}{1}{%\<close>
definition isuccs :: "instr \<Rightarrow> int \<Rightarrow> int set" where
"isuccs i n = (case i of
@@ -40,7 +40,7 @@
"exits P = succs P 0 - {0..< size P}"
-subsection \<open>Basic properties of @{term exec_n}\<close>
+subsection \<open>Basic properties of \<^term>\<open>exec_n\<close>\<close>
lemma exec_n_exec:
"P \<turnstile> c \<rightarrow>^n c' \<Longrightarrow> P \<turnstile> c \<rightarrow>* c'"
@@ -89,7 +89,7 @@
lemmas exec_n_simps = exec_n_step exec_n_end
-subsection \<open>Basic properties of @{term succs}\<close>
+subsection \<open>Basic properties of \<^term>\<open>succs\<close>\<close>
lemma succs_simps [simp]:
"succs [ADD] n = {n + 1}"
@@ -334,7 +334,7 @@
text \<open>
- Dropping the left context of a potentially incomplete execution of @{term c}.
+ Dropping the left context of a potentially incomplete execution of \<^term>\<open>c\<close>.
\<close>
lemma exec1_drop_left:
--- a/src/HOL/IMP/Def_Init_Big.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Def_Init_Big.thy Sat Jan 05 17:24:33 2019 +0100
@@ -33,7 +33,7 @@
subsection "Soundness wrt Big Steps"
text\<open>Note the special form of the induction because one of the arguments
-of the inductive predicate is not a variable but the term @{term"Some s"}:\<close>
+of the inductive predicate is not a variable but the term \<^term>\<open>Some s\<close>:\<close>
theorem Sound:
"\<lbrakk> (c,Some s) \<Rightarrow> s'; D A c A'; A \<subseteq> dom s \<rbrakk>
--- a/src/HOL/IMP/Hoare_Total.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Hoare_Total.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,7 +19,7 @@
correctness is written \<open>\<turnstile>\<^sub>t {P}c{Q}\<close> and defined
inductively. The rules for \<open>\<turnstile>\<^sub>t\<close> differ from those for
\<open>\<turnstile>\<close> only in the one place where nontermination can arise: the
-@{term While}-rule.\<close>
+\<^term>\<open>While\<close>-rule.\<close>
inductive
hoaret :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool" ("\<turnstile>\<^sub>t ({(1_)}/ (_)/ {(1_)})" 50)
@@ -42,7 +42,7 @@
conseq: "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s; \<turnstile>\<^sub>t {P}c{Q}; \<forall>s. Q s \<longrightarrow> Q' s \<rbrakk> \<Longrightarrow>
\<turnstile>\<^sub>t {P'}c{Q'}"
-text\<open>The @{term While}-rule is like the one for partial correctness but it
+text\<open>The \<^term>\<open>While\<close>-rule is like the one for partial correctness but it
requires additionally that with every execution of the loop body some measure
relation @{term[source]"T :: state \<Rightarrow> nat \<Rightarrow> bool"} decreases.
The following functional version is more intuitive:\<close>
@@ -133,7 +133,7 @@
done
-text\<open>Now we define the number of iterations @{term "WHILE b DO c"} needs to
+text\<open>Now we define the number of iterations \<^term>\<open>WHILE b DO c\<close> needs to
terminate when started in state \<open>s\<close>. Because this is a truly partial
function, we define it as an (inductive) relation first:\<close>
@@ -150,7 +150,7 @@
case Its_Suc thus ?case by(metis Its.cases big_step_determ)
qed
-text\<open>For all terminating loops, @{const Its} yields a result:\<close>
+text\<open>For all terminating loops, \<^const>\<open>Its\<close> yields a result:\<close>
lemma WHILE_Its: "(WHILE b DO c,s) \<Rightarrow> t \<Longrightarrow> \<exists>n. Its b c s n"
proof(induction "WHILE b DO c" s t rule: big_step_induct)
@@ -199,7 +199,7 @@
qed
-text\<open>\noindent In the @{term While}-case, @{const Its} provides the obvious
+text\<open>\noindent In the \<^term>\<open>While\<close>-case, \<^const>\<open>Its\<close> provides the obvious
termination argument.
The actual completeness theorem follows directly, in the same manner
--- a/src/HOL/IMP/Live_True.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Live_True.thy Sat Jan 05 17:24:33 2019 +0100
@@ -116,8 +116,7 @@
thus ?case by (simp add: L.simps(5))
qed auto
-text\<open>Make @{const L} executable by replacing @{const lfp} with the @{const
-while} combinator from theory @{theory "HOL-Library.While_Combinator"}. The @{const while}
+text\<open>Make \<^const>\<open>L\<close> executable by replacing \<^const>\<open>lfp\<close> with the \<^const>\<open>while\<close> combinator from theory \<^theory>\<open>HOL-Library.While_Combinator\<close>. The \<^const>\<open>while\<close>
combinator obeys the recursion equation
@{thm[display] While_Combinator.while_unfold[no_vars]}
and is thus executable.\<close>
@@ -167,7 +166,7 @@
"iter f 0 p d = d" |
"iter f (Suc n) p d = (if f p = p then p else iter f n (f p) d)"
-text\<open>A version of @{const L} with a bounded number of iterations (here: 2)
+text\<open>A version of \<^const>\<open>L\<close> with a bounded number of iterations (here: 2)
in the WHILE case:\<close>
fun Lb :: "com \<Rightarrow> vname set \<Rightarrow> vname set" where
@@ -177,7 +176,7 @@
"Lb (IF b THEN c\<^sub>1 ELSE c\<^sub>2) X = vars b \<union> Lb c\<^sub>1 X \<union> Lb c\<^sub>2 X" |
"Lb (WHILE b DO c) X = iter (\<lambda>A. vars b \<union> X \<union> Lb c A) 2 {} (vars b \<union> rvars c \<union> X)"
-text\<open>@{const Lb} (and @{const iter}) is not monotone!\<close>
+text\<open>\<^const>\<open>Lb\<close> (and \<^const>\<open>iter\<close>) is not monotone!\<close>
lemma "let w = WHILE Bc False DO (''x'' ::= V ''y'';; ''z'' ::= V ''x'')
in \<not> (Lb w {''z''} \<subseteq> Lb w {''y'',''z''})"
by eval
--- a/src/HOL/IMP/Sec_Typing.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Sec_Typing.thy Sat Jan 05 17:24:33 2019 +0100
@@ -178,7 +178,7 @@
subsubsection "The Standard Typing System"
-text\<open>The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
+text\<open>The predicate \<^prop>\<open>l \<turnstile> c\<close> is nicely intuitive and executable. The
standard formulation, however, is slightly different, replacing the maximum
computation by an antimonotonicity rule. We introduce the standard system now
and show the equivalence with our formulation.\<close>
--- a/src/HOL/IMP/Sec_TypingT.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Sec_TypingT.thy Sat Jan 05 17:24:33 2019 +0100
@@ -167,7 +167,7 @@
subsubsection "The Standard System"
-text\<open>The predicate @{prop"l \<turnstile> c"} is nicely intuitive and executable. The
+text\<open>The predicate \<^prop>\<open>l \<turnstile> c\<close> is nicely intuitive and executable. The
standard formulation, however, is slightly different, replacing the maximum
computation by an antimonotonicity rule. We introduce the standard system now
and show the equivalence with our formulation.\<close>
--- a/src/HOL/IMP/Types.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Types.thy Sat Jan 05 17:24:33 2019 +0100
@@ -2,7 +2,7 @@
theory Types imports Star Complex_Main begin
-text \<open>We build on @{theory Complex_Main} instead of @{theory Main} to access
+text \<open>We build on \<^theory>\<open>Complex_Main\<close> instead of \<^theory>\<open>Main\<close> to access
the real numbers.\<close>
subsection "Arithmetic Expressions"
--- a/src/HOL/IMP/Vars.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMP/Vars.thy Sat Jan 05 17:24:33 2019 +0100
@@ -16,7 +16,7 @@
text\<open>This defines a type class ``vars'' with a single
function of (coincidentally) the same name. Then we define two separated
-instances of the class, one for @{typ aexp} and one for @{typ bexp}:\<close>
+instances of the class, one for \<^typ>\<open>aexp\<close> and one for \<^typ>\<open>bexp\<close>:\<close>
instantiation aexp :: vars
begin
--- a/src/HOL/IMPP/Hoare.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMPP/Hoare.thy Sat Jan 05 17:24:33 2019 +0100
@@ -209,16 +209,16 @@
*)
lemma thin [rule_format]: "G'||-ts \<Longrightarrow> \<forall>G. G' <= G \<longrightarrow> G||-ts"
apply (erule hoare_derivs.induct)
-apply (tactic \<open>ALLGOALS (EVERY'[clarify_tac @{context}, REPEAT o smp_tac @{context} 1])\<close>)
+apply (tactic \<open>ALLGOALS (EVERY'[clarify_tac \<^context>, REPEAT o smp_tac \<^context> 1])\<close>)
apply (rule hoare_derivs.empty)
apply (erule (1) hoare_derivs.insert)
apply (fast intro: hoare_derivs.asm)
apply (fast intro: hoare_derivs.cut)
apply (fast intro: hoare_derivs.weaken)
-apply (rule hoare_derivs.conseq, intro strip, tactic "smp_tac @{context} 2 1", clarify, tactic "smp_tac @{context} 1 1",rule exI, rule exI, erule (1) conjI)
+apply (rule hoare_derivs.conseq, intro strip, tactic "smp_tac \<^context> 2 1", clarify, tactic "smp_tac \<^context> 1 1",rule exI, rule exI, erule (1) conjI)
prefer 7
apply (rule_tac hoare_derivs.Body, drule_tac spec, erule_tac mp, fast)
-apply (tactic \<open>ALLGOALS (resolve_tac @{context} ((funpow 5 tl) @{thms hoare_derivs.intros}) THEN_ALL_NEW (fast_tac @{context}))\<close>)
+apply (tactic \<open>ALLGOALS (resolve_tac \<^context> ((funpow 5 tl) @{thms hoare_derivs.intros}) THEN_ALL_NEW (fast_tac \<^context>))\<close>)
done
lemma weak_Body: "G|-{P}. the (body pn) .{Q} ==> G|-{P}. BODY pn .{Q}"
@@ -278,7 +278,7 @@
lemma hoare_sound: "G||-ts ==> G||=ts"
apply (erule hoare_derivs.induct)
-apply (tactic \<open>TRYALL (eresolve_tac @{context} [@{thm Loop_sound_lemma}, @{thm Body_sound_lemma}] THEN_ALL_NEW assume_tac @{context})\<close>)
+apply (tactic \<open>TRYALL (eresolve_tac \<^context> [@{thm Loop_sound_lemma}, @{thm Body_sound_lemma}] THEN_ALL_NEW assume_tac \<^context>)\<close>)
apply (unfold hoare_valids_def)
apply blast
apply blast
@@ -286,11 +286,11 @@
apply (blast) (* cut *)
apply (blast) (* weaken *)
apply (tactic \<open>ALLGOALS (EVERY'
- [REPEAT o Rule_Insts.thin_tac @{context} "hoare_derivs _ _" [],
- simp_tac @{context}, clarify_tac @{context}, REPEAT o smp_tac @{context} 1])\<close>)
+ [REPEAT o Rule_Insts.thin_tac \<^context> "hoare_derivs _ _" [],
+ simp_tac \<^context>, clarify_tac \<^context>, REPEAT o smp_tac \<^context> 1])\<close>)
apply (simp_all (no_asm_use) add: triple_valid_def2)
-apply (intro strip, tactic "smp_tac @{context} 2 1", blast) (* conseq *)
-apply (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>) (* Skip, Ass, Local *)
+apply (intro strip, tactic "smp_tac \<^context> 2 1", blast) (* conseq *)
+apply (tactic \<open>ALLGOALS (clarsimp_tac \<^context>)\<close>) (* Skip, Ass, Local *)
prefer 3 apply (force) (* Call *)
apply (erule_tac [2] evaln_elim_cases) (* If *)
apply blast+
@@ -335,24 +335,24 @@
lemma MGF_lemma1 [rule_format (no_asm)]: "state_not_singleton \<Longrightarrow>
\<forall>pn \<in> dom body. G|-{=}.BODY pn.{->} \<Longrightarrow> WT c --> G|-{=}.c.{->}"
apply (induct_tac c)
-apply (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>)
+apply (tactic \<open>ALLGOALS (clarsimp_tac \<^context>)\<close>)
prefer 7 apply (fast intro: domI)
apply (erule_tac [6] MGT_alternD)
apply (unfold MGT_def)
apply (drule_tac [7] bspec, erule_tac [7] domI)
-apply (rule_tac [7] escape, tactic \<open>clarsimp_tac @{context} 7\<close>,
+apply (rule_tac [7] escape, tactic \<open>clarsimp_tac \<^context> 7\<close>,
rename_tac [7] "fun" y Z,
rule_tac [7] P1 = "%Z' s. s= (setlocs Z newlocs) [Loc Arg ::= fun Z]" in hoare_derivs.Call [THEN conseq1], erule_tac [7] conseq12)
apply (erule_tac [!] thin_rl)
apply (rule hoare_derivs.Skip [THEN conseq2])
apply (rule_tac [2] hoare_derivs.Ass [THEN conseq1])
-apply (rule_tac [3] escape, tactic \<open>clarsimp_tac @{context} 3\<close>,
+apply (rule_tac [3] escape, tactic \<open>clarsimp_tac \<^context> 3\<close>,
rename_tac [3] loc "fun" y Z,
rule_tac [3] P1 = "%Z' s. s= (Z[Loc loc::=fun Z])" in hoare_derivs.Local [THEN conseq1],
erule_tac [3] conseq12)
apply (erule_tac [5] hoare_derivs.Comp, erule_tac [5] conseq12)
-apply (tactic \<open>(resolve_tac @{context} @{thms hoare_derivs.If} THEN_ALL_NEW
- eresolve_tac @{context} @{thms conseq12}) 6\<close>)
+apply (tactic \<open>(resolve_tac \<^context> @{thms hoare_derivs.If} THEN_ALL_NEW
+ eresolve_tac \<^context> @{thms conseq12}) 6\<close>)
apply (rule_tac [8] hoare_derivs.Loop [THEN conseq2], erule_tac [8] conseq12)
apply auto
done
@@ -367,7 +367,7 @@
shows "finite U ==> uG = mgt_call`U ==>
\<forall>G. G <= uG --> n <= card uG --> card G = card uG - n --> (\<forall>c. wt c --> P G {mgt c})"
apply (induct_tac n)
-apply (tactic \<open>ALLGOALS (clarsimp_tac @{context})\<close>)
+apply (tactic \<open>ALLGOALS (clarsimp_tac \<^context>)\<close>)
apply (subgoal_tac "G = mgt_call ` U")
prefer 2
apply (simp add: card_seteq)
@@ -436,7 +436,7 @@
apply (frule finite_subset)
apply (rule finite_dom_body [THEN finite_imageI])
apply (rotate_tac 2)
-apply (tactic "make_imp_tac @{context} 1")
+apply (tactic "make_imp_tac \<^context> 1")
apply (erule finite_induct)
apply (clarsimp intro!: hoare_derivs.empty)
apply (clarsimp intro!: hoare_derivs.insert simp del: range_composition)
--- a/src/HOL/IMPP/Misc.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMPP/Misc.thy Sat Jan 05 17:24:33 2019 +0100
@@ -87,12 +87,12 @@
apply (simp (no_asm_use) add: triple_valid_def2)
apply clarsimp
apply (drule_tac x = "s<Y>" in spec)
-apply (tactic "smp_tac @{context} 1 1")
+apply (tactic "smp_tac \<^context> 1 1")
apply (drule spec)
apply (drule_tac x = "s[Loc Y::=a s]" in spec)
apply (simp (no_asm_use))
apply (erule (1) notE impE)
-apply (tactic "smp_tac @{context} 1 1")
+apply (tactic "smp_tac \<^context> 1 1")
apply simp
done
--- a/src/HOL/IMPP/Natural.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IMPP/Natural.thy Sat Jan 05 17:24:33 2019 +0100
@@ -112,7 +112,7 @@
lemma evaln_evalc: "<c,s> -n-> t ==> <c,s> -c-> t"
apply (erule evaln.induct)
apply (tactic \<open>
- ALLGOALS (resolve_tac @{context} @{thms evalc.intros} THEN_ALL_NEW assume_tac @{context})
+ ALLGOALS (resolve_tac \<^context> @{thms evalc.intros} THEN_ALL_NEW assume_tac \<^context>)
\<close>)
done
@@ -139,12 +139,12 @@
lemma evalc_evaln: "<c,s> -c-> t \<Longrightarrow> \<exists>n. <c,s> -n-> t"
apply (erule evalc.induct)
-apply (tactic \<open>ALLGOALS (REPEAT o eresolve_tac @{context} [exE])\<close>)
-apply (tactic \<open>TRYALL (EVERY' [dresolve_tac @{context} @{thms evaln_max2}, assume_tac @{context},
- REPEAT o eresolve_tac @{context} [exE, conjE]])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o eresolve_tac \<^context> [exE])\<close>)
+apply (tactic \<open>TRYALL (EVERY' [dresolve_tac \<^context> @{thms evaln_max2}, assume_tac \<^context>,
+ REPEAT o eresolve_tac \<^context> [exE, conjE]])\<close>)
apply (tactic
- \<open>ALLGOALS (resolve_tac @{context} [exI] THEN'
- resolve_tac @{context} @{thms evaln.intros} THEN_ALL_NEW assume_tac @{context})\<close>)
+ \<open>ALLGOALS (resolve_tac \<^context> [exI] THEN'
+ resolve_tac \<^context> @{thms evaln.intros} THEN_ALL_NEW assume_tac \<^context>)\<close>)
done
lemma eval_eq: "<c,s> -c-> t = (\<exists>n. <c,s> -n-> t)"
--- a/src/HOL/IOA/Solve.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/IOA/Solve.thy Sat Jan 05 17:24:33 2019 +0100
@@ -145,8 +145,8 @@
apply force
apply (simp (no_asm) add: conj_disj_distribR cong add: conj_cong split: if_split)
apply (tactic \<open>
- REPEAT((resolve_tac @{context} [conjI, impI] 1 ORELSE eresolve_tac @{context} [conjE] 1) THEN
- asm_full_simp_tac(@{context} addsimps [@{thm comp1_reachable}, @{thm comp2_reachable}]) 1)\<close>)
+ REPEAT((resolve_tac \<^context> [conjI, impI] 1 ORELSE eresolve_tac \<^context> [conjE] 1) THEN
+ asm_full_simp_tac(\<^context> addsimps [@{thm comp1_reachable}, @{thm comp2_reachable}]) 1)\<close>)
done
--- a/src/HOL/Imperative_HOL/Heap.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Imperative_HOL/Heap.thy Sat Jan 05 17:24:33 2019 +0100
@@ -85,10 +85,10 @@
text \<open>Syntactic convenience\<close>
setup \<open>
- Sign.add_const_constraint (@{const_name Array}, SOME @{typ "nat \<Rightarrow> 'a::heap array"})
- #> Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a::heap ref"})
- #> Sign.add_const_constraint (@{const_name addr_of_array}, SOME @{typ "'a::heap array \<Rightarrow> nat"})
- #> Sign.add_const_constraint (@{const_name addr_of_ref}, SOME @{typ "'a::heap ref \<Rightarrow> nat"})
+ Sign.add_const_constraint (\<^const_name>\<open>Array\<close>, SOME \<^typ>\<open>nat \<Rightarrow> 'a::heap array\<close>)
+ #> Sign.add_const_constraint (\<^const_name>\<open>Ref\<close>, SOME \<^typ>\<open>nat \<Rightarrow> 'a::heap ref\<close>)
+ #> Sign.add_const_constraint (\<^const_name>\<open>addr_of_array\<close>, SOME \<^typ>\<open>'a::heap array \<Rightarrow> nat\<close>)
+ #> Sign.add_const_constraint (\<^const_name>\<open>addr_of_ref\<close>, SOME \<^typ>\<open>'a::heap ref \<Rightarrow> nat\<close>)
\<close>
hide_const (open) empty
--- a/src/HOL/Imperative_HOL/Heap_Monad.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Imperative_HOL/Heap_Monad.thy Sat Jan 05 17:24:33 2019 +0100
@@ -233,7 +233,7 @@
definition raise :: "String.literal \<Rightarrow> 'a Heap" \<comment> \<open>the literal is just decoration\<close>
where "raise s = Heap (\<lambda>_. None)"
-code_datatype raise \<comment> \<open>avoid @{const "Heap"} formally\<close>
+code_datatype raise \<comment> \<open>avoid \<^const>\<open>Heap\<close> formally\<close>
lemma execute_raise [execute_simps]:
"execute (raise s) = (\<lambda>_. None)"
@@ -455,8 +455,8 @@
unfolding effect_def execute.simps
by blast
-declaration \<open>Partial_Function.init "heap" @{term heap.fixp_fun}
- @{term heap.mono_body} @{thm heap.fixp_rule_uc} @{thm heap.fixp_induct_uc}
+declaration \<open>Partial_Function.init "heap" \<^term>\<open>heap.fixp_fun\<close>
+ \<^term>\<open>heap.mono_body\<close> @{thm heap.fixp_rule_uc} @{thm heap.fixp_induct_uc}
(SOME @{thm fixp_induct_heap})\<close>
@@ -633,14 +633,14 @@
val imp_program =
let
- val is_bind = curry (=) @{const_name bind};
- val is_return = curry (=) @{const_name return};
+ val is_bind = curry (=) \<^const_name>\<open>bind\<close>;
+ val is_return = curry (=) \<^const_name>\<open>return\<close>;
val dummy_name = "";
val dummy_case_term = IVar NONE;
(*assumption: dummy values are not relevant for serialization*)
- val unitT = @{type_name unit} `%% [];
+ val unitT = \<^type_name>\<open>unit\<close> `%% [];
val unitt =
- IConst { sym = Code_Symbol.Constant @{const_name Unity}, typargs = [], dicts = [], dom = [],
+ IConst { sym = Code_Symbol.Constant \<^const_name>\<open>Unity\<close>, typargs = [], dicts = [], dom = [],
annotation = NONE };
fun dest_abs ((v, ty) `|=> t, _) = ((v, ty), t)
| dest_abs (t, ty) =
--- a/src/HOL/Imperative_HOL/Overview.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Imperative_HOL/Overview.thy Sat Jan 05 17:24:33 2019 +0100
@@ -33,22 +33,20 @@
section \<open>A polymorphic heap inside a monad\<close>
text \<open>
- Heaps (@{type heap}) can be populated by values of class @{class
- heap}; HOL's default types are already instantiated to class @{class
- heap}. Class @{class heap} is a subclass of @{class countable}; see
- theory \<open>Countable\<close> for ways to instantiate types as @{class countable}.
+ Heaps (\<^type>\<open>heap\<close>) can be populated by values of class \<^class>\<open>heap\<close>; HOL's default types are already instantiated to class \<^class>\<open>heap\<close>. Class \<^class>\<open>heap\<close> is a subclass of \<^class>\<open>countable\<close>; see
+ theory \<open>Countable\<close> for ways to instantiate types as \<^class>\<open>countable\<close>.
- The heap is wrapped up in a monad @{typ "'a Heap"} by means of the
+ The heap is wrapped up in a monad \<^typ>\<open>'a Heap\<close> by means of the
following specification:
\begin{quote}
- @{datatype Heap}
+ \<^datatype>\<open>Heap\<close>
\end{quote}
Unwrapping of this monad type happens through
\begin{quote}
- @{term_type execute} \\
+ \<^term_type>\<open>execute\<close> \\
@{thm execute.simps [no_vars]}
\end{quote}
@@ -66,31 +64,30 @@
Monadic expression involve the usual combinators:
\begin{quote}
- @{term_type return} \\
- @{term_type bind} \\
- @{term_type raise}
+ \<^term_type>\<open>return\<close> \\
+ \<^term_type>\<open>bind\<close> \\
+ \<^term_type>\<open>raise\<close>
\end{quote}
- This is also associated with nice monad do-syntax. The @{typ
- string} argument to @{const raise} is just a codified comment.
+ This is also associated with nice monad do-syntax. The \<^typ>\<open>string\<close> argument to \<^const>\<open>raise\<close> is just a codified comment.
Among a couple of generic combinators the following is helpful for
establishing invariants:
\begin{quote}
- @{term_type assert} \\
+ \<^term_type>\<open>assert\<close> \\
@{thm assert_def [no_vars]}
\end{quote}
\<close>
-section \<open>Relational reasoning about @{type Heap} expressions\<close>
+section \<open>Relational reasoning about \<^type>\<open>Heap\<close> expressions\<close>
text \<open>
To establish correctness of imperative programs, predicate
\begin{quote}
- @{term_type effect}
+ \<^term_type>\<open>effect\<close>
\end{quote}
provides a simple relational calculus. Primitive rules are \<open>effectI\<close> and \<open>effectE\<close>, rules appropriate for reasoning about
@@ -101,13 +98,13 @@
on the heap at all; reasoning then can be easier using predicate
\begin{quote}
- @{term_type success}
+ \<^term_type>\<open>success\<close>
\end{quote}
- Introduction rules for @{const success} are available in the
+ Introduction rules for \<^const>\<open>success\<close> are available in the
\<open>success_intro\<close> fact collection.
- @{const execute}, @{const effect}, @{const success} and @{const bind}
+ \<^const>\<open>execute\<close>, \<^const>\<open>effect\<close>, \<^const>\<open>success\<close> and \<^const>\<open>bind\<close>
are related by rules \<open>execute_bind_success\<close>, \<open>success_bind_executeI\<close>, \<open>success_bind_effectI\<close>, \<open>effect_bindI\<close>, \<open>effect_bindE\<close> and \<open>execute_bind_eq_SomeI\<close>.
\<close>
@@ -141,27 +138,27 @@
Heap operations:
\begin{quote}
- @{term_type Array.alloc} \\
- @{term_type Array.present} \\
- @{term_type Array.get} \\
- @{term_type Array.set} \\
- @{term_type Array.length} \\
- @{term_type Array.update} \\
- @{term_type Array.noteq}
+ \<^term_type>\<open>Array.alloc\<close> \\
+ \<^term_type>\<open>Array.present\<close> \\
+ \<^term_type>\<open>Array.get\<close> \\
+ \<^term_type>\<open>Array.set\<close> \\
+ \<^term_type>\<open>Array.length\<close> \\
+ \<^term_type>\<open>Array.update\<close> \\
+ \<^term_type>\<open>Array.noteq\<close>
\end{quote}
Monad operations:
\begin{quote}
- @{term_type Array.new} \\
- @{term_type Array.of_list} \\
- @{term_type Array.make} \\
- @{term_type Array.len} \\
- @{term_type Array.nth} \\
- @{term_type Array.upd} \\
- @{term_type Array.map_entry} \\
- @{term_type Array.swap} \\
- @{term_type Array.freeze}
+ \<^term_type>\<open>Array.new\<close> \\
+ \<^term_type>\<open>Array.of_list\<close> \\
+ \<^term_type>\<open>Array.make\<close> \\
+ \<^term_type>\<open>Array.len\<close> \\
+ \<^term_type>\<open>Array.nth\<close> \\
+ \<^term_type>\<open>Array.upd\<close> \\
+ \<^term_type>\<open>Array.map_entry\<close> \\
+ \<^term_type>\<open>Array.swap\<close> \\
+ \<^term_type>\<open>Array.freeze\<close>
\end{quote}
\<close>
@@ -171,20 +168,20 @@
Heap operations:
\begin{quote}
- @{term_type Ref.alloc} \\
- @{term_type Ref.present} \\
- @{term_type Ref.get} \\
- @{term_type Ref.set} \\
- @{term_type Ref.noteq}
+ \<^term_type>\<open>Ref.alloc\<close> \\
+ \<^term_type>\<open>Ref.present\<close> \\
+ \<^term_type>\<open>Ref.get\<close> \\
+ \<^term_type>\<open>Ref.set\<close> \\
+ \<^term_type>\<open>Ref.noteq\<close>
\end{quote}
Monad operations:
\begin{quote}
- @{term_type Ref.ref} \\
- @{term_type Ref.lookup} \\
- @{term_type Ref.update} \\
- @{term_type Ref.change}
+ \<^term_type>\<open>Ref.ref\<close> \\
+ \<^term_type>\<open>Ref.lookup\<close> \\
+ \<^term_type>\<open>Ref.update\<close> \\
+ \<^term_type>\<open>Ref.change\<close>
\end{quote}
\<close>
--- a/src/HOL/Imperative_HOL/ex/Linked_Lists.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Imperative_HOL/ex/Linked_Lists.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
section \<open>Definition of Linked Lists\<close>
-setup \<open>Sign.add_const_constraint (@{const_name Ref}, SOME @{typ "nat \<Rightarrow> 'a::type ref"})\<close>
+setup \<open>Sign.add_const_constraint (\<^const_name>\<open>Ref\<close>, SOME \<^typ>\<open>nat \<Rightarrow> 'a::type ref\<close>)\<close>
datatype 'a node = Empty | Node 'a "'a node ref"
primrec
--- a/src/HOL/Import/import_data.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Import/import_data.ML Sat Jan 05 17:24:33 2019 +0100
@@ -102,39 +102,39 @@
end
val _ = Theory.setup
- (Attrib.setup @{binding import_const}
+ (Attrib.setup \<^binding>\<open>import_const\<close>
(Scan.lift Parse.name --
- Scan.option (Scan.lift @{keyword ":"} |-- Args.const {proper = true, strict = false}) >>
+ Scan.option (Scan.lift \<^keyword>\<open>:\<close> |-- Args.const {proper = true, strict = false}) >>
(fn (s1, s2) => Thm.declaration_attribute
(fn th => Context.mapping (add_const_def s1 th s2) I)))
"declare a theorem as an equality that maps the given constant")
val _ = Theory.setup
- (Attrib.setup @{binding import_type}
+ (Attrib.setup \<^binding>\<open>import_type\<close>
(Scan.lift (Parse.name -- Parse.name -- Parse.name) >>
(fn ((tyname, absname), repname) => Thm.declaration_attribute
(fn th => Context.mapping (add_typ_def tyname absname repname th) I)))
"declare a type_definition theorem as a map for an imported type with abs and rep")
val _ =
- Outer_Syntax.command @{command_keyword import_type_map}
+ Outer_Syntax.command \<^command_keyword>\<open>import_type_map\<close>
"map external type name to existing Isabelle/HOL type name"
- ((Parse.name --| @{keyword ":"}) -- Parse.type_const >>
+ ((Parse.name --| \<^keyword>\<open>:\<close>) -- Parse.type_const >>
(fn (ty_name1, ty_name2) => Toplevel.theory (add_typ_map_cmd ty_name1 ty_name2)))
val _ =
- Outer_Syntax.command @{command_keyword import_const_map}
+ Outer_Syntax.command \<^command_keyword>\<open>import_const_map\<close>
"map external const name to existing Isabelle/HOL const name"
- ((Parse.name --| @{keyword ":"}) -- Parse.const >>
+ ((Parse.name --| \<^keyword>\<open>:\<close>) -- Parse.const >>
(fn (cname1, cname2) => Toplevel.theory (add_const_map_cmd cname1 cname2)))
(* Initial type and constant maps, for types and constants that are not
defined, which means their definitions do not appear in the proof dump *)
val _ = Theory.setup
- (add_typ_map "bool" @{type_name bool} #>
- add_typ_map "fun" @{type_name fun} #>
- add_typ_map "ind" @{type_name ind} #>
- add_const_map "=" @{const_name HOL.eq} #>
- add_const_map "@" @{const_name "Eps"})
+ (add_typ_map "bool" \<^type_name>\<open>bool\<close> #>
+ add_typ_map "fun" \<^type_name>\<open>fun\<close> #>
+ add_typ_map "ind" \<^type_name>\<open>ind\<close> #>
+ add_const_map "=" \<^const_name>\<open>HOL.eq\<close> #>
+ add_const_map "@" \<^const_name>\<open>Eps\<close>)
end
--- a/src/HOL/Import/import_rule.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Import/import_rule.ML Sat Jan 05 17:24:33 2019 +0100
@@ -214,7 +214,7 @@
val th2 = meta_mp nonempty td_th
val c =
case Thm.concl_of th2 of
- _ $ (Const(@{const_name Ex},_) $ Abs(_,_,Const(@{const_name Set.member},_) $ _ $ c)) => c
+ _ $ (Const(\<^const_name>\<open>Ex\<close>,_) $ Abs(_,_,Const(\<^const_name>\<open>Set.member\<close>,_) $ _ $ c)) => c
| _ => error "type_introduction: bad type definition theorem"
val tfrees = Term.add_tfrees c []
val tnames = sort_strings (map fst tfrees)
@@ -357,9 +357,9 @@
| process tstate (#"1", [th]) = getth th tstate |>> conj1 |-> setth
| process tstate (#"2", [th]) = getth th tstate |>> conj2 |-> setth
| process tstate (#"H", [t]) =
- gettm t tstate |>> Thm.apply @{cterm Trueprop} |>> Thm.trivial |-> setth
+ gettm t tstate |>> Thm.apply \<^cterm>\<open>Trueprop\<close> |>> Thm.trivial |-> setth
| process tstate (#"A", [_, t]) =
- gettm t tstate |>> Thm.apply @{cterm Trueprop} |>> Skip_Proof.make_thm_cterm |-> setth
+ gettm t tstate |>> Thm.apply \<^cterm>\<open>Trueprop\<close> |>> Skip_Proof.make_thm_cterm |-> setth
| process tstate (#"C", [th1, th2]) =
getth th1 tstate ||>> getth th2 |>> (fn (t1, t2) => comb t1 t2) |-> setth
| process tstate (#"T", [th1, th2]) =
@@ -413,7 +413,7 @@
end
| process (thy, state) (#"Y", [name, _, _]) = setth (mtydef name thy) (thy, state)
| process (thy, state) (#"t", [n]) =
- setty (Thm.global_ctyp_of thy (TFree ("'" ^ (transl_qm n), @{sort type}))) (thy, state)
+ setty (Thm.global_ctyp_of thy (TFree ("'" ^ (transl_qm n), \<^sort>\<open>type\<close>))) (thy, state)
| process (thy, state) (#"a", n :: l) =
fold_map getty l (thy, state) |>>
(fn tys => Thm.global_ctyp_of thy (Type (gettyname n thy, map Thm.typ_of tys))) |-> setty
@@ -442,7 +442,7 @@
fun process_file path thy =
(thy, init_state) |> File.fold_lines process_line path |> fst
-val _ = Outer_Syntax.command @{command_keyword import_file}
+val _ = Outer_Syntax.command \<^command_keyword>\<open>import_file\<close>
"import a recorded proof file"
(Parse.path >> (fn name => Toplevel.theory (fn thy => process_file (Path.explode name) thy)))
--- a/src/HOL/Induct/Comb.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Induct/Comb.thy Sat Jan 05 17:24:33 2019 +0100
@@ -127,7 +127,7 @@
apply (blast intro: rtrancl_trans)+
done
-text \<open>Counterexample to the diamond property for @{term "x \<rightarrow>\<^sup>1 y"}\<close>
+text \<open>Counterexample to the diamond property for \<^term>\<open>x \<rightarrow>\<^sup>1 y\<close>\<close>
lemma not_diamond_contract: "~ diamond(contract)"
by (unfold diamond_def, metis S_contractE contract.K)
@@ -169,7 +169,7 @@
text \<open>
\<^medskip>
- Equivalence of @{prop "p \<rightarrow> q"} and @{prop "p \<Rrightarrow> q"}.
+ Equivalence of \<^prop>\<open>p \<rightarrow> q\<close> and \<^prop>\<open>p \<Rrightarrow> q\<close>.
\<close>
lemma contract_subset_parcontract: "contract \<subseteq> parcontract"
--- a/src/HOL/Induct/PropLog.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Induct/PropLog.thy Sat Jan 05 17:24:33 2019 +0100
@@ -185,16 +185,15 @@
subsection\<open>Completeness -- lemmas for reducing the set of assumptions\<close>
text \<open>
- For the case @{prop "hyps p t - insert #v Y |- p"} we also have @{prop
- "hyps p t - {#v} \<subseteq> hyps p (t-{v})"}.
+ For the case \<^prop>\<open>hyps p t - insert #v Y |- p\<close> we also have \<^prop>\<open>hyps p t - {#v} \<subseteq> hyps p (t-{v})\<close>.
\<close>
lemma hyps_Diff: "hyps p (t-{v}) <= insert (#v->false) ((hyps p t)-{#v})"
by (induct p) auto
text \<open>
- For the case @{prop "hyps p t - insert (#v -> Fls) Y |- p"} we also have
- @{prop "hyps p t-{#v->Fls} \<subseteq> hyps p (insert v t)"}.
+ For the case \<^prop>\<open>hyps p t - insert (#v -> Fls) Y |- p\<close> we also have
+ \<^prop>\<open>hyps p t-{#v->Fls} \<subseteq> hyps p (insert v t)\<close>.
\<close>
lemma hyps_insert: "hyps p (insert v t) <= insert (#v) (hyps p t-{#v->false})"
@@ -209,8 +208,8 @@
by fast
text \<open>
- The set @{term "hyps p t"} is finite, and elements have the form
- @{term "#v"} or @{term "#v->Fls"}.
+ The set \<^term>\<open>hyps p t\<close> is finite, and elements have the form
+ \<^term>\<open>#v\<close> or \<^term>\<open>#v->Fls\<close>.
\<close>
lemma hyps_finite: "finite(hyps p t)"
@@ -226,7 +225,7 @@
subsubsection \<open>Completeness theorem\<close>
text \<open>
- Induction on the finite set of assumptions @{term "hyps p t0"}. We
+ Induction on the finite set of assumptions \<^term>\<open>hyps p t0\<close>. We
may repeatedly subtract assumptions until none are left!
\<close>
--- a/src/HOL/Induct/QuoDataType.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Induct/QuoDataType.thy Sat Jan 05 17:24:33 2019 +0100
@@ -115,7 +115,7 @@
| "freediscrim (CRYPT K X) = freediscrim X + 2"
| "freediscrim (DECRYPT K X) = freediscrim X - 2"
-text\<open>This theorem helps us prove @{term "Nonce N \<noteq> MPair X Y"}\<close>
+text\<open>This theorem helps us prove \<^term>\<open>Nonce N \<noteq> MPair X Y\<close>\<close>
theorem msgrel_imp_eq_freediscrim:
"U \<sim> V \<Longrightarrow> freediscrim U = freediscrim V"
by (induct set: msgrel) auto
@@ -151,8 +151,8 @@
Abs_Msg (\<Union>U \<in> Rep_Msg X. msgrel``{DECRYPT K U})"
-text\<open>Reduces equality of equivalence classes to the @{term msgrel} relation:
- @{term "(msgrel `` {x} = msgrel `` {y}) = ((x,y) \<in> msgrel)"}\<close>
+text\<open>Reduces equality of equivalence classes to the \<^term>\<open>msgrel\<close> relation:
+ \<^term>\<open>(msgrel `` {x} = msgrel `` {y}) = ((x,y) \<in> msgrel)\<close>\<close>
lemmas equiv_msgrel_iff = eq_equiv_class_iff [OF equiv_msgrel UNIV_I UNIV_I]
declare equiv_msgrel_iff [simp]
@@ -227,7 +227,7 @@
by (auto simp add: congruent_def msgrel_imp_eq_freenonces)
-text\<open>Now prove the four equations for @{term nonces}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>nonces\<close>\<close>
lemma nonces_Nonce [simp]: "nonces (Nonce N) = {N}"
by (simp add: nonces_def Nonce_def
@@ -261,7 +261,7 @@
lemma left_congruent: "(\<lambda>U. msgrel `` {freeleft U}) respects msgrel"
by (auto simp add: congruent_def msgrel_imp_eqv_freeleft)
-text\<open>Now prove the four equations for @{term left}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>left\<close>\<close>
lemma left_Nonce [simp]: "left (Nonce N) = Nonce N"
by (simp add: left_def Nonce_def
@@ -295,7 +295,7 @@
lemma right_congruent: "(\<lambda>U. msgrel `` {freeright U}) respects msgrel"
by (auto simp add: congruent_def msgrel_imp_eqv_freeright)
-text\<open>Now prove the four equations for @{term right}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>right\<close>\<close>
lemma right_Nonce [simp]: "right (Nonce N) = Nonce N"
by (simp add: right_def Nonce_def
@@ -325,7 +325,7 @@
lemma NONCE_imp_eq: "NONCE m \<sim> NONCE n \<Longrightarrow> m = n"
by (drule msgrel_imp_eq_freenonces, simp)
-text\<open>Can also be proved using the function @{term nonces}\<close>
+text\<open>Can also be proved using the function \<^term>\<open>nonces\<close>\<close>
lemma Nonce_Nonce_eq [iff]: "(Nonce m = Nonce n) = (m = n)"
by (auto simp add: Nonce_def msgrel_refl dest: NONCE_imp_eq)
@@ -430,7 +430,7 @@
lemma discrim_congruent: "(\<lambda>U. {freediscrim U}) respects msgrel"
by (auto simp add: congruent_def msgrel_imp_eq_freediscrim)
-text\<open>Now prove the four equations for @{term discrim}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>discrim\<close>\<close>
lemma discrim_Nonce [simp]: "discrim (Nonce N) = 0"
by (simp add: discrim_def Nonce_def
--- a/src/HOL/Induct/QuoNestedDataType.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Induct/QuoNestedDataType.thy Sat Jan 05 17:24:33 2019 +0100
@@ -168,8 +168,8 @@
Abs_Exp (\<Union>Us \<in> listset (map Rep_Exp Xs). exprel `` {FNCALL F Us})"
-text\<open>Reduces equality of equivalence classes to the @{term exprel} relation:
- @{term "(exprel `` {x} = exprel `` {y}) = ((x,y) \<in> exprel)"}\<close>
+text\<open>Reduces equality of equivalence classes to the \<^term>\<open>exprel\<close> relation:
+ \<^term>\<open>(exprel `` {x} = exprel `` {y}) = ((x,y) \<in> exprel)\<close>\<close>
lemmas equiv_exprel_iff = eq_equiv_class_iff [OF equiv_exprel UNIV_I UNIV_I]
declare equiv_exprel_iff [simp]
@@ -237,8 +237,8 @@
qed
text\<open>It is not clear what to do with FnCall: it's argument is an abstraction
-of an @{typ "exp list"}. Is it just Nil or Cons? What seems to work best is to
-regard an @{typ "exp list"} as a @{term "listrel exprel"} equivalence class\<close>
+of an \<^typ>\<open>exp list\<close>. Is it just Nil or Cons? What seems to work best is to
+regard an \<^typ>\<open>exp list\<close> as a \<^term>\<open>listrel exprel\<close> equivalence class\<close>
text\<open>This theorem is easily proved but never used. There's no obvious way
even to state the analogous result, \<open>FnCall_Cons\<close>.\<close>
@@ -288,13 +288,13 @@
lemma vars_respects: "freevars respects exprel"
by (auto simp add: congruent_def exprel_imp_eq_freevars)
-text\<open>The extension of the function @{term vars} to lists\<close>
+text\<open>The extension of the function \<^term>\<open>vars\<close> to lists\<close>
primrec vars_list :: "exp list \<Rightarrow> nat set" where
"vars_list [] = {}"
| "vars_list(E#Es) = vars E \<union> vars_list Es"
-text\<open>Now prove the three equations for @{term vars}\<close>
+text\<open>Now prove the three equations for \<^term>\<open>vars\<close>\<close>
lemma vars_Variable [simp]: "vars (Var N) = {N}"
by (simp add: vars_def Var_def
@@ -325,7 +325,7 @@
lemma VAR_imp_eq: "VAR m \<sim> VAR n \<Longrightarrow> m = n"
by (drule exprel_imp_eq_freevars, simp)
-text\<open>Can also be proved using the function @{term vars}\<close>
+text\<open>Can also be proved using the function \<^term>\<open>vars\<close>\<close>
lemma Var_Var_eq [iff]: "(Var m = Var n) = (m = n)"
by (auto simp add: Var_def exprel_refl dest: VAR_imp_eq)
@@ -344,7 +344,7 @@
apply (drule exprel_imp_eq_freediscrim, simp)
done
-subsection\<open>Injectivity of @{term FnCall}\<close>
+subsection\<open>Injectivity of \<^term>\<open>FnCall\<close>\<close>
definition
"fun" :: "exp \<Rightarrow> nat" where
@@ -400,7 +400,7 @@
lemma discrim_respects: "(\<lambda>U. {freediscrim U}) respects exprel"
by (auto simp add: congruent_def exprel_imp_eq_freediscrim)
-text\<open>Now prove the four equations for @{term discrim}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>discrim\<close>\<close>
lemma discrim_Var [simp]: "discrim (Var N) = 0"
by (simp add: discrim_def Var_def
--- a/src/HOL/Isar_Examples/Cantor.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Isar_Examples/Cantor.thy Sat Jan 05 17:24:33 2019 +0100
@@ -80,7 +80,7 @@
text \<open>
The following treatment of Cantor's Theorem follows the classic example from
- the early 1990s, e.g.\ see the file @{verbatim "92/HOL/ex/set.ML"} in
+ the early 1990s, e.g.\ see the file \<^verbatim>\<open>92/HOL/ex/set.ML\<close> in
Isabelle92 or @{cite \<open>\S18.7\<close> "paulson-isa-book"}. The old tactic scripts
synthesize key information of the proof by refinement of schematic goal
states. In contrast, the Isar proof needs to say explicitly what is proven.
--- a/src/HOL/Isar_Examples/Hoare.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Isar_Examples/Hoare.thy Sat Jan 05 17:24:33 2019 +0100
@@ -187,8 +187,7 @@
While the first part is still a somewhat intelligible specification of the
concrete syntactic representation of our Hoare language, the actual ``ML
drivers'' is quite involved. Just note that the we re-use the basic
- quote/antiquote translations as already defined in Isabelle/Pure (see @{ML
- Syntax_Trans.quote_tr}, and @{ML Syntax_Trans.quote_tr'},).
+ quote/antiquote translations as already defined in Isabelle/Pure (see \<^ML>\<open>Syntax_Trans.quote_tr\<close>, and \<^ML>\<open>Syntax_Trans.quote_tr'\<close>,).
\<close>
syntax
@@ -213,9 +212,9 @@
parse_translation \<open>
let
- fun quote_tr [t] = Syntax_Trans.quote_tr @{syntax_const "_antiquote"} t
+ fun quote_tr [t] = Syntax_Trans.quote_tr \<^syntax_const>\<open>_antiquote\<close> t
| quote_tr ts = raise TERM ("quote_tr", ts);
- in [(@{syntax_const "_quote"}, K quote_tr)] end
+ in [(\<^syntax_const>\<open>_quote\<close>, K quote_tr)] end
\<close>
text \<open>
@@ -227,24 +226,24 @@
print_translation \<open>
let
fun quote_tr' f (t :: ts) =
- Term.list_comb (f $ Syntax_Trans.quote_tr' @{syntax_const "_antiquote"} t, ts)
+ Term.list_comb (f $ Syntax_Trans.quote_tr' \<^syntax_const>\<open>_antiquote\<close> t, ts)
| quote_tr' _ _ = raise Match;
- val assert_tr' = quote_tr' (Syntax.const @{syntax_const "_Assert"});
+ val assert_tr' = quote_tr' (Syntax.const \<^syntax_const>\<open>_Assert\<close>);
- fun bexp_tr' name ((Const (@{const_syntax Collect}, _) $ t) :: ts) =
+ fun bexp_tr' name ((Const (\<^const_syntax>\<open>Collect\<close>, _) $ t) :: ts) =
quote_tr' (Syntax.const name) (t :: ts)
| bexp_tr' _ _ = raise Match;
fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) =
- quote_tr' (Syntax.const @{syntax_const "_Assign"} $ Syntax_Trans.update_name_tr' f)
+ quote_tr' (Syntax.const \<^syntax_const>\<open>_Assign\<close> $ Syntax_Trans.update_name_tr' f)
(Abs (x, dummyT, Syntax_Trans.const_abs_tr' k) :: ts)
| assign_tr' _ = raise Match;
in
- [(@{const_syntax Collect}, K assert_tr'),
- (@{const_syntax Basic}, K assign_tr'),
- (@{const_syntax Cond}, K (bexp_tr' @{syntax_const "_Cond"})),
- (@{const_syntax While}, K (bexp_tr' @{syntax_const "_While_inv"}))]
+ [(\<^const_syntax>\<open>Collect\<close>, K assert_tr'),
+ (\<^const_syntax>\<open>Basic\<close>, K assign_tr'),
+ (\<^const_syntax>\<open>Cond\<close>, K (bexp_tr' \<^syntax_const>\<open>_Cond\<close>)),
+ (\<^const_syntax>\<open>While\<close>, K (bexp_tr' \<^syntax_const>\<open>_While_inv\<close>))]
end
\<close>
--- a/src/HOL/Isar_Examples/Structured_Statements.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Isar_Examples/Structured_Statements.thy Sat Jan 05 17:24:33 2019 +0100
@@ -152,9 +152,9 @@
have C
proof -
- show ?thesis when "A x" (is ?A) for x :: 'a \<comment> \<open>abstract @{term x}\<close>
+ show ?thesis when "A x" (is ?A) for x :: 'a \<comment> \<open>abstract \<^term>\<open>x\<close>\<close>
using that \<proof>
- show "?A a" \<comment> \<open>concrete @{term a}\<close>
+ show "?A a" \<comment> \<open>concrete \<^term>\<open>a\<close>\<close>
\<proof>
qed
end
--- a/src/HOL/Lattice/Lattice.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Lattice/Lattice.thy Sat Jan 05 17:24:33 2019 +0100
@@ -319,8 +319,8 @@
Given a structure with binary operations \<open>\<sqinter>\<close> and \<open>\<squnion>\<close>
such that (A), (C), and (AB) hold (cf.\
\S\ref{sec:lattice-algebra}). This structure represents a lattice,
- if the relation @{term "x \<sqsubseteq> y"} is defined as @{term "x \<sqinter> y = x"}
- (alternatively as @{term "x \<squnion> y = y"}). Furthermore, infimum and
+ if the relation \<^term>\<open>x \<sqsubseteq> y\<close> is defined as \<^term>\<open>x \<sqinter> y = x\<close>
+ (alternatively as \<^term>\<open>x \<squnion> y = y\<close>). Furthermore, infimum and
supremum with respect to this ordering coincide with the original
\<open>\<sqinter>\<close> and \<open>\<squnion>\<close> operations.
\<close>
@@ -331,7 +331,7 @@
subsubsection \<open>Linear orders\<close>
text \<open>
- Linear orders with @{term minimum} and @{term maximum} operations
+ Linear orders with \<^term>\<open>minimum\<close> and \<^term>\<open>maximum\<close> operations
are a (degenerate) example of lattice structures.
\<close>
@@ -368,8 +368,7 @@
qed
text \<open>
- The lattice operations on linear orders indeed coincide with @{term
- minimum} and @{term maximum}.
+ The lattice operations on linear orders indeed coincide with \<^term>\<open>minimum\<close> and \<^term>\<open>maximum\<close>.
\<close>
theorem meet_mimimum: "x \<sqinter> y = minimum x y"
@@ -578,8 +577,8 @@
text \<open>
\medskip A semi-morphisms is a function \<open>f\<close> that preserves the
- lattice operations in the following manner: @{term "f (x \<sqinter> y) \<sqsubseteq> f x
- \<sqinter> f y"} and @{term "f x \<squnion> f y \<sqsubseteq> f (x \<squnion> y)"}, respectively. Any of
+ lattice operations in the following manner: \<^term>\<open>f (x \<sqinter> y) \<sqsubseteq> f x
+ \<sqinter> f y\<close> and \<^term>\<open>f x \<squnion> f y \<sqsubseteq> f (x \<squnion> y)\<close>, respectively. Any of
these properties is equivalent with monotonicity.
\<close>
--- a/src/HOL/Lattice/Orders.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Lattice/Orders.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,8 +9,7 @@
subsection \<open>Ordered structures\<close>
text \<open>
- We define several classes of ordered structures over some type @{typ
- 'a} with relation \<open>\<sqsubseteq> :: 'a \<Rightarrow> 'a \<Rightarrow> bool\<close>. For a
+ We define several classes of ordered structures over some type \<^typ>\<open>'a\<close> with relation \<open>\<sqsubseteq> :: 'a \<Rightarrow> 'a \<Rightarrow> bool\<close>. For a
\emph{quasi-order} that relation is required to be reflexive and
transitive, for a \emph{partial order} it also has to be
anti-symmetric, while for a \emph{linear order} all elements are
@@ -65,7 +64,7 @@
by (simp add: leq_dual_def)
text \<open>
- \medskip Functions @{term dual} and @{term undual} are inverse to
+ \medskip Functions \<^term>\<open>dual\<close> and \<^term>\<open>undual\<close> are inverse to
each other; this entails the following fundamental properties.
\<close>
@@ -79,7 +78,7 @@
by (rule ext) simp
text \<open>
- \medskip Since @{term dual} (and @{term undual}) are both injective
+ \medskip Since \<^term>\<open>dual\<close> (and \<^term>\<open>undual\<close>) are both injective
and surjective, the basic logical connectives (equality,
quantification etc.) are transferred as follows.
\<close>
--- a/src/HOL/Library/Going_To_Filter.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Going_To_Filter.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,11 +12,11 @@
begin
definition going_to_within :: "('a \<Rightarrow> 'b) \<Rightarrow> 'b filter \<Rightarrow> 'a set \<Rightarrow> 'a filter"
- ("(_)/ going'_to (_)/ within (_)" [1000,60,60] 60) where
+ (\<open>(_)/ going'_to (_)/ within (_)\<close> [1000,60,60] 60) where
"f going_to F within A = inf (filtercomap f F) (principal A)"
abbreviation going_to :: "('a \<Rightarrow> 'b) \<Rightarrow> 'b filter \<Rightarrow> 'a filter"
- (infix "going'_to" 60)
+ (infix \<open>going'_to\<close> 60)
where "f going_to F \<equiv> f going_to F within UNIV"
text \<open>
--- a/src/HOL/Library/Landau_Symbols.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Landau_Symbols.thy Sat Jan 05 17:24:33 2019 +0100
@@ -26,38 +26,38 @@
\<close>
definition bigo :: "'a filter \<Rightarrow> ('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> ('a \<Rightarrow> 'b) set"
- ("(1O[_]'(_'))")
+ (\<open>(1O[_]'(_'))\<close>)
where "bigo F g = {f. (\<exists>c>0. eventually (\<lambda>x. norm (f x) \<le> c * norm (g x)) F)}"
definition smallo :: "'a filter \<Rightarrow> ('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> ('a \<Rightarrow> 'b) set"
- ("(1o[_]'(_'))")
+ (\<open>(1o[_]'(_'))\<close>)
where "smallo F g = {f. (\<forall>c>0. eventually (\<lambda>x. norm (f x) \<le> c * norm (g x)) F)}"
definition bigomega :: "'a filter \<Rightarrow> ('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> ('a \<Rightarrow> 'b) set"
- ("(1\<Omega>[_]'(_'))")
+ (\<open>(1\<Omega>[_]'(_'))\<close>)
where "bigomega F g = {f. (\<exists>c>0. eventually (\<lambda>x. norm (f x) \<ge> c * norm (g x)) F)}"
definition smallomega :: "'a filter \<Rightarrow> ('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> ('a \<Rightarrow> 'b) set"
- ("(1\<omega>[_]'(_'))")
+ (\<open>(1\<omega>[_]'(_'))\<close>)
where "smallomega F g = {f. (\<forall>c>0. eventually (\<lambda>x. norm (f x) \<ge> c * norm (g x)) F)}"
definition bigtheta :: "'a filter \<Rightarrow> ('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> ('a \<Rightarrow> 'b) set"
- ("(1\<Theta>[_]'(_'))")
+ (\<open>(1\<Theta>[_]'(_'))\<close>)
where "bigtheta F g = bigo F g \<inter> bigomega F g"
-abbreviation bigo_at_top ("(2O'(_'))") where
+abbreviation bigo_at_top (\<open>(2O'(_'))\<close>) where
"O(g) \<equiv> bigo at_top g"
-abbreviation smallo_at_top ("(2o'(_'))") where
+abbreviation smallo_at_top (\<open>(2o'(_'))\<close>) where
"o(g) \<equiv> smallo at_top g"
-abbreviation bigomega_at_top ("(2\<Omega>'(_'))") where
+abbreviation bigomega_at_top (\<open>(2\<Omega>'(_'))\<close>) where
"\<Omega>(g) \<equiv> bigomega at_top g"
-abbreviation smallomega_at_top ("(2\<omega>'(_'))") where
+abbreviation smallomega_at_top (\<open>(2\<omega>'(_'))\<close>) where
"\<omega>(g) \<equiv> smallomega at_top g"
-abbreviation bigtheta_at_top ("(2\<Theta>'(_'))") where
+abbreviation bigtheta_at_top (\<open>(2\<Theta>'(_'))\<close>) where
"\<Theta>(g) \<equiv> bigtheta at_top g"
@@ -1649,7 +1649,7 @@
named_theorems asymp_equiv_simps
definition asymp_equiv :: "('a \<Rightarrow> ('b :: real_normed_field)) \<Rightarrow> 'a filter \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> bool"
- ("_ \<sim>[_] _" [51, 10, 51] 50)
+ (\<open>_ \<sim>[_] _\<close> [51, 10, 51] 50)
where "f \<sim>[F] g \<longleftrightarrow> ((\<lambda>x. if f x = 0 \<and> g x = 0 then 1 else f x / g x) \<longlongrightarrow> 1) F"
abbreviation (input) asymp_equiv_at_top where
@@ -1657,7 +1657,7 @@
bundle asymp_equiv_notation
begin
-notation asymp_equiv_at_top (infix "\<sim>" 50)
+notation asymp_equiv_at_top (infix \<open>\<sim>\<close> 50)
end
lemma asymp_equivI: "((\<lambda>x. if f x = 0 \<and> g x = 0 then 1 else f x / g x) \<longlongrightarrow> 1) F \<Longrightarrow> f \<sim>[F] g"
--- a/src/HOL/Library/Lub_Glb.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Lub_Glb.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,10 +10,10 @@
text \<open>Thanks to suggestions by James Margetson\<close>
-definition setle :: "'a set \<Rightarrow> 'a::ord \<Rightarrow> bool" (infixl "*<=" 70)
+definition setle :: "'a set \<Rightarrow> 'a::ord \<Rightarrow> bool" (infixl \<open>*<=\<close> 70)
where "S *<= x = (\<forall>y\<in>S. y \<le> x)"
-definition setge :: "'a::ord \<Rightarrow> 'a set \<Rightarrow> bool" (infixl "<=*" 70)
+definition setge :: "'a::ord \<Rightarrow> 'a set \<Rightarrow> bool" (infixl \<open><=*\<close> 70)
where "x <=* S = (\<forall>y\<in>S. x \<le> y)"
--- a/src/HOL/Library/Omega_Words_Fun.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Omega_Words_Fun.thy Sat Jan 05 17:24:33 2019 +0100
@@ -44,11 +44,11 @@
\<close>
definition
- conc :: "['a list, 'a word] \<Rightarrow> 'a word" (infixr "\<frown>" 65)
+ conc :: "['a list, 'a word] \<Rightarrow> 'a word" (infixr \<open>\<frown>\<close> 65)
where "w \<frown> x == \<lambda>n. if n < length w then w!n else x (n - length w)"
definition
- iter :: "'a list \<Rightarrow> 'a word" ("(_\<^sup>\<omega>)" [1000])
+ iter :: "'a list \<Rightarrow> 'a word" (\<open>(_\<^sup>\<omega>)\<close> [1000])
where "iter w == if w = [] then undefined else (\<lambda>n. w!(n mod (length w)))"
lemma conc_empty[simp]: "[] \<frown> w = w"
@@ -116,7 +116,7 @@
definition suffix :: "[nat, 'a word] \<Rightarrow> 'a word"
where "suffix k x \<equiv> \<lambda>n. x (k+n)"
-definition subsequence :: "'a word \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a list" ("_ [_ \<rightarrow> _]" 900)
+definition subsequence :: "'a word \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a list" (\<open>_ [_ \<rightarrow> _]\<close> 900)
where "subsequence w i j \<equiv> map w [i..<j]"
abbreviation prefix :: "nat \<Rightarrow> 'a word \<Rightarrow> 'a list"
@@ -287,7 +287,7 @@
subsection \<open>Prepending\<close>
-primrec build :: "'a \<Rightarrow> 'a word \<Rightarrow> 'a word" (infixr "##" 65)
+primrec build :: "'a \<Rightarrow> 'a word \<Rightarrow> 'a word" (infixr \<open>##\<close> 65)
where "(a ## w) 0 = a" | "(a ## w) (Suc i) = w i"
lemma build_eq[iff]: "a\<^sub>1 ## w\<^sub>1 = a\<^sub>2 ## w\<^sub>2 \<longleftrightarrow> a\<^sub>1 = a\<^sub>2 \<and> w\<^sub>1 = w\<^sub>2"
--- a/src/HOL/Library/Pattern_Aliases.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Pattern_Aliases.thy Sat Jan 05 17:24:33 2019 +0100
@@ -97,7 +97,7 @@
fun check_pattern_syntax t =
case strip_all t of
- (vars, @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs)) =>
+ (vars, \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs)) =>
let
fun go (Const (\<^const_name>\<open>as\<close>, _) $ pat $ var, rhs) =
let
@@ -126,7 +126,7 @@
fun uncheck_pattern_syntax ctxt t =
case strip_all t of
- (vars, @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs)) =>
+ (vars, \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs)) =>
let
(* restricted to going down abstractions; ignores eta-contracted rhs *)
fun go lhs (rhs as Const (\<^const_name>\<open>Let\<close>, _) $ pat $ Abs (name, typ, t)) ctxt frees =
--- a/src/HOL/Library/Permutation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Permutation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports Multiset
begin
-inductive perm :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool" ("_ <~~> _" [50, 50] 50) (* FIXME proper infix, without ambiguity!? *)
+inductive perm :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool" (\<open>_ <~~> _\<close> [50, 50] 50) (* FIXME proper infix, without ambiguity!? *)
where
Nil [intro!]: "[] <~~> []"
| swap [intro!]: "y # x # l <~~> x # y # l"
--- a/src/HOL/Library/Stream.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Stream.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
begin
codatatype (sset: 'a) stream =
- SCons (shd: 'a) (stl: "'a stream") (infixr "##" 65)
+ SCons (shd: 'a) (stl: "'a stream") (infixr \<open>##\<close> 65)
for
map: smap
rel: stream_all2
@@ -44,7 +44,7 @@
subsection \<open>prepend list to stream\<close>
-primrec shift :: "'a list \<Rightarrow> 'a stream \<Rightarrow> 'a stream" (infixr "@-" 65) where
+primrec shift :: "'a list \<Rightarrow> 'a stream \<Rightarrow> 'a stream" (infixr \<open>@-\<close> 65) where
"shift [] s = s"
| "shift (x # xs) s = x ## shift xs s"
@@ -136,7 +136,7 @@
subsection \<open>nth, take, drop for streams\<close>
-primrec snth :: "'a stream \<Rightarrow> nat \<Rightarrow> 'a" (infixl "!!" 100) where
+primrec snth :: "'a stream \<Rightarrow> nat \<Rightarrow> 'a" (infixl \<open>!!\<close> 100) where
"s !! 0 = shd s"
| "s !! Suc n = stl s !! n"
--- a/src/HOL/Library/Type_Length.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/Type_Length.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
class len0 =
fixes len_of :: "'a itself \<Rightarrow> nat"
-syntax "_type_length" :: "type \<Rightarrow> nat" ("(1LENGTH/(1'(_')))")
+syntax "_type_length" :: "type \<Rightarrow> nat" (\<open>(1LENGTH/(1'(_')))\<close>)
translations "LENGTH('a)" \<rightharpoonup>
"CONST len_of (CONST Pure.type :: 'a itself)"
--- a/src/HOL/Library/code_lazy.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/code_lazy.ML Sat Jan 05 17:24:33 2019 +0100
@@ -378,7 +378,7 @@
||>> fold_map (mk_name ("case_" ^ short_type_name ^ "_lazy_") "") ctr_names
val mk_Lazy_delay_eq =
- (#const lazy_ctr_def $ mk_delay (Bound 0), Rep_lazy $ (Bound 0 $ @{const Unity}))
+ (#const lazy_ctr_def $ mk_delay (Bound 0), Rep_lazy $ (Bound 0 $ \<^const>\<open>Unity\<close>))
|> mk_eq |> all_abs [\<^typ>\<open>unit\<close> --> lazy_type]
val (Lazy_delay_thm, lthy8a) = mk_thm
((Lazy_delay_eq_name, mk_Lazy_delay_eq), [#thm lazy_ctr_def, @{thm force_delay}])
--- a/src/HOL/Library/code_test.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Library/code_test.ML Sat Jan 05 17:24:33 2019 +0100
@@ -195,7 +195,7 @@
val T = fastype_of tuple
in
\<^term>\<open>Some :: (unit \<Rightarrow> yxml_of_term) \<Rightarrow> (unit \<Rightarrow> yxml_of_term) option\<close> $
- (absdummy \<^typ>\<open>unit\<close> (@{const yxml_string_of_term} $
+ (absdummy \<^typ>\<open>unit\<close> (\<^const>\<open>yxml_string_of_term\<close> $
(Const (\<^const_name>\<open>Code_Evaluation.term_of\<close>, T --> \<^typ>\<open>term\<close>) $ tuple)))
end
--- a/src/HOL/Matrix_LP/Compute_Oracle/compute.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Matrix_LP/Compute_Oracle/compute.ML Sat Jan 05 17:24:33 2019 +0100
@@ -340,7 +340,7 @@
fun merge_shyps shyps1 shyps2 = Sorttab.keys (add_shyps shyps2 (add_shyps shyps1 Sorttab.empty))
val (_, export_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding compute}, fn (thy, hyps, shyps, prop) =>
+ (Thm.add_oracle (\<^binding>\<open>compute\<close>, fn (thy, hyps, shyps, prop) =>
let
val shyptab = add_shyps shyps Sorttab.empty
fun delete s shyptab = Sorttab.delete s shyptab handle Sorttab.UNDEF _ => shyptab
@@ -435,7 +435,7 @@
val (encoding, a) = remove_types encoding a
val (encoding, b) = remove_types encoding b
val (eq, encoding) =
- Encode.insert (Const (@{const_name Pure.eq}, ty --> ty --> @{typ "prop"})) encoding
+ Encode.insert (Const (\<^const_name>\<open>Pure.eq\<close>, ty --> ty --> \<^typ>\<open>prop\<close>)) encoding
in
(encoding, EqPrem (a, b, ty, eq))
end handle TERM _ => let val (encoding, t) = remove_types encoding t in (encoding, Prem t) end)
@@ -582,7 +582,7 @@
case match_aterms varsubst b' a' of
NONE =>
let
- fun mk s = Syntax.string_of_term_global @{theory Pure}
+ fun mk s = Syntax.string_of_term_global \<^theory>\<open>Pure\<close>
(infer_types (naming_of computer) (encoding_of computer) ty s)
val left = "computed left side: "^(mk a')
val right = "computed right side: "^(mk b')
@@ -631,7 +631,7 @@
val varsubst = varsubst_of_theorem th
val encoding = encoding_of computer
val naming = naming_of computer
- fun infer t = infer_types naming encoding @{typ "prop"} t
+ fun infer t = infer_types naming encoding \<^typ>\<open>prop\<close> t
fun run t = infer (runprog (prog_of computer) (apply_subst true varsubst t))
fun runprem p = run (prem2term p)
val prop = Logic.list_implies (map runprem (prems_of_theorem th), run (concl_of_theorem th))
--- a/src/HOL/Matrix_LP/float_arith.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Matrix_LP/float_arith.ML Sat Jan 05 17:24:33 2019 +0100
@@ -203,10 +203,10 @@
approx_dec_by_bin n (q,r)
end
-fun mk_float (a, b) = @{term "float"} $
+fun mk_float (a, b) = \<^term>\<open>float\<close> $
HOLogic.mk_prod (apply2 (HOLogic.mk_number HOLogic.intT) (a, b));
-fun dest_float (Const (@{const_name float}, _) $ (Const (@{const_name Pair}, _) $ a $ b)) =
+fun dest_float (Const (\<^const_name>\<open>float\<close>, _) $ (Const (\<^const_name>\<open>Pair\<close>, _) $ a $ b)) =
apply2 (snd o HOLogic.dest_number) (a, b)
| dest_float t = ((snd o HOLogic.dest_number) t, 0);
--- a/src/HOL/Matrix_LP/fspmlp.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Matrix_LP/fspmlp.ML Sat Jan 05 17:24:33 2019 +0100
@@ -182,10 +182,10 @@
exception Load of string;
-val empty_spvec = @{term "Nil :: real spvec"};
-fun cons_spvec x xs = @{term "Cons :: nat * real => real spvec => real spvec"} $ x $ xs;
-val empty_spmat = @{term "Nil :: real spmat"};
-fun cons_spmat x xs = @{term "Cons :: nat * real spvec => real spmat => real spmat"} $ x $ xs;
+val empty_spvec = \<^term>\<open>Nil :: real spvec\<close>;
+fun cons_spvec x xs = \<^term>\<open>Cons :: nat * real => real spvec => real spvec\<close> $ x $ xs;
+val empty_spmat = \<^term>\<open>Nil :: real spmat\<close>;
+fun cons_spmat x xs = \<^term>\<open>Cons :: nat * real spvec => real spmat => real spmat\<close> $ x $ xs;
fun calcr safe_propagation xlen names prec A b =
let
@@ -276,8 +276,8 @@
val b1 = Inttab.lookup r1 index
val b2 = Inttab.lookup r2 index
in
- (add_row_entry r12_1 index @{term "lbound :: real => real"} ((names index)^"l") b1,
- add_row_entry r12_2 index @{term "ubound :: real => real"} ((names index)^"u") b2)
+ (add_row_entry r12_1 index \<^term>\<open>lbound :: real => real\<close> ((names index)^"l") b1,
+ add_row_entry r12_2 index \<^term>\<open>ubound :: real => real\<close> ((names index)^"u") b2)
end
val (r1, r2) = abs_estimate xlen r1 r2
--- a/src/HOL/Matrix_LP/matrixlp.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Matrix_LP/matrixlp.ML Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
"SparseMatrix.sorted_sp_simps"
"ComputeNumeral.natnorm"}; (*"ComputeNumeral.number_norm"*)
-val computer = PCompute.make Compute.SML @{theory} compute_thms []
+val computer = PCompute.make Compute.SML \<^theory> compute_thms []
fun matrix_compute c = hd (PCompute.rewrite computer [c])
--- a/src/HOL/Metis_Examples/Message.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Metis_Examples/Message.thy Sat Jan 05 17:24:33 2019 +0100
@@ -217,7 +217,7 @@
text\<open>This allows \<open>blast\<close> to simplify occurrences of
- @{term "parts(G\<union>H)"} in the assumption.\<close>
+ \<^term>\<open>parts(G\<union>H)\<close> in the assumption.\<close>
lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE]
declare in_parts_UnE [elim!]
@@ -461,8 +461,8 @@
text\<open>Case analysis: either the message is secure, or it is not! Effective,
but can cause subgoals to blow up! Use with \<open>if_split\<close>; apparently
-\<open>split_tac\<close> does not cope with patterns such as @{term"analz (insert
-(Crypt K X) H)"}\<close>
+\<open>split_tac\<close> does not cope with patterns such as \<^term>\<open>analz (insert
+(Crypt K X) H)\<close>\<close>
lemma analz_Crypt_if [simp]:
"analz (insert (Crypt K X) H) =
(if (Key (invKey K) \<in> analz H)
@@ -576,7 +576,7 @@
by (auto, erule synth.induct, auto)
text\<open>NO \<open>Agent_synth\<close>, as any Agent name can be synthesized.
- The same holds for @{term Number}\<close>
+ The same holds for \<^term>\<open>Number\<close>\<close>
inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
inductive_cases Key_synth [elim!]: "Key K \<in> synth H"
inductive_cases Hash_synth [elim!]: "Hash X \<in> synth H"
--- a/src/HOL/MicroJava/BV/BVExample.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/BV/BVExample.thy Sat Jan 05 17:24:33 2019 +0100
@@ -227,8 +227,8 @@
subsection "Welltypings"
text \<open>
- We show welltypings of the methods @{term append_name} in class @{term list_name},
- and @{term makelist_name} in class @{term test_name}:
+ We show welltypings of the methods \<^term>\<open>append_name\<close> in class \<^term>\<open>list_name\<close>,
+ and \<^term>\<open>makelist_name\<close> in class \<^term>\<open>test_name\<close>:
\<close>
lemmas eff_simps [simp] = eff_def norm_eff_def xcpt_eff_def
declare appInvoke [simp del]
--- a/src/HOL/MicroJava/BV/BVSpec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/BV/BVSpec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -97,8 +97,8 @@
simp, simp, simp add: wf_mdecl_def wt_method_def)
text \<open>
- We could leave out the check @{term "pc' < max_pc"} in the
- definition of @{term wt_instr} in the context of @{term wt_method}.
+ We could leave out the check \<^term>\<open>pc' < max_pc\<close> in the
+ definition of \<^term>\<open>wt_instr\<close> in the context of \<^term>\<open>wt_method\<close>.
\<close>
lemma wt_instr_def2:
"\<lbrakk> wt_jvm_prog G Phi; is_class G C;
--- a/src/HOL/MicroJava/BV/Effect.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/BV/Effect.thy Sat Jan 05 17:24:33 2019 +0100
@@ -228,7 +228,7 @@
text \<open>
\medskip
-simp rules for @{term app}
+simp rules for \<^term>\<open>app\<close>
\<close>
lemma appNone[simp]: "app i G maxs rT pc et None = True" by simp
--- a/src/HOL/MicroJava/BV/Typing_Framework_JVM.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/BV/Typing_Framework_JVM.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
"opt_states G maxs maxr \<equiv> opt (\<Union>{list n (types G) |n. n \<le> maxs} \<times> list maxr (err (types G)))"
-subsection \<open>Executability of @{term check_bounded}\<close>
+subsection \<open>Executability of \<^term>\<open>check_bounded\<close>\<close>
primrec list_all'_rec :: "('a \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> bool"
where
--- a/src/HOL/MicroJava/DFA/Err.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/DFA/Err.thy Sat Jan 05 17:24:33 2019 +0100
@@ -302,8 +302,8 @@
done
text \<open>
- If @{term "AS = {}"} the thm collapses to
- @{prop "order r & closed {Err} f & Err +_f Err = Err"}
+ If \<^term>\<open>AS = {}\<close> the thm collapses to
+ \<^prop>\<open>order r & closed {Err} f & Err +_f Err = Err\<close>
which may not hold
\<close>
lemma err_semilat_UnionI:
--- a/src/HOL/MicroJava/DFA/Typing_Framework_err.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/DFA/Typing_Framework_err.thy Sat Jan 05 17:24:33 2019 +0100
@@ -161,8 +161,8 @@
text \<open>
There used to be a condition here that each instruction must have a
successor. This is not needed any more, because the definition of
- @{term error} trivially ensures that there is a successor for
- the critical case where @{term app} does not hold.
+ \<^term>\<open>error\<close> trivially ensures that there is a successor for
+ the critical case where \<^term>\<open>app\<close> does not hold.
\<close>
lemma wt_err_imp_wt_app_eff:
assumes wt: "wt_err_step r (err_step (size ts) app step) ts"
--- a/src/HOL/MicroJava/J/JTypeSafe.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/J/JTypeSafe.thy Sat Jan 05 17:24:33 2019 +0100
@@ -138,7 +138,7 @@
apply( drule (3) Call_lemma)
apply( clarsimp simp add: wf_java_mdecl_def)
apply( erule_tac V = "method sig x = y" for sig x y in thin_rl)
-apply( drule spec, erule impE, erule_tac [2] notE impE, tactic "assume_tac @{context} 2")
+apply( drule spec, erule impE, erule_tac [2] notE impE, tactic "assume_tac \<^context> 2")
apply( rule conformsI)
apply( erule conforms_heapD)
apply( rule lconf_ext)
@@ -156,10 +156,10 @@
apply( fast intro: hext_trans)
apply( rule conjI)
apply( rule_tac [2] impI)
-apply( erule_tac [2] notE impE, tactic "assume_tac @{context} 2")
+apply( erule_tac [2] notE impE, tactic "assume_tac \<^context> 2")
apply( frule_tac [2] conf_widen)
-apply( tactic "assume_tac @{context} 4")
-apply( tactic "assume_tac @{context} 2")
+apply( tactic "assume_tac \<^context> 4")
+apply( tactic "assume_tac \<^context> 2")
prefer 2
apply( fast elim!: widen_trans)
apply (rule conforms_xcpt_change)
@@ -200,10 +200,10 @@
\<comment> \<open>several simplifications, XcptE, XcptEs, XcptS, Skip, Nil??\<close>
apply( simp_all)
-apply( tactic "ALLGOALS (REPEAT o resolve_tac @{context} [impI, allI])")
-apply( tactic \<open>ALLGOALS (eresolve_tac @{context} [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
- THEN_ALL_NEW (full_simp_tac (put_simpset (simpset_of @{theory_context Conform}) @{context})))\<close>)
-apply(tactic "ALLGOALS (EVERY' [REPEAT o (eresolve_tac @{context} [conjE]), REPEAT o hyp_subst_tac @{context}])")
+apply( tactic "ALLGOALS (REPEAT o resolve_tac \<^context> [impI, allI])")
+apply( tactic \<open>ALLGOALS (eresolve_tac \<^context> [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
+ THEN_ALL_NEW (full_simp_tac (put_simpset (simpset_of \<^theory_context>\<open>Conform\<close>) \<^context>)))\<close>)
+apply(tactic "ALLGOALS (EVERY' [REPEAT o (eresolve_tac \<^context> [conjE]), REPEAT o hyp_subst_tac \<^context>])")
\<comment> \<open>Level 7\<close>
\<comment> \<open>15 NewC\<close>
@@ -228,8 +228,8 @@
apply( erule conf_litval)
\<comment> \<open>13 BinOp\<close>
-apply (tactic "forward_hyp_tac @{context}")
-apply (tactic "forward_hyp_tac @{context}")
+apply (tactic "forward_hyp_tac \<^context>")
+apply (tactic "forward_hyp_tac \<^context>")
apply( rule conjI, erule (1) hext_trans)
apply( erule conjI)
apply( clarsimp)
@@ -241,14 +241,14 @@
apply( fast elim: conforms_localD [THEN lconfD])
\<comment> \<open>for FAss\<close>
-apply( tactic \<open>EVERY'[eresolve_tac @{context} [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
- THEN_ALL_NEW (full_simp_tac @{context}), REPEAT o (eresolve_tac @{context} [conjE]), hyp_subst_tac @{context}] 3\<close>)
+apply( tactic \<open>EVERY'[eresolve_tac \<^context> [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
+ THEN_ALL_NEW (full_simp_tac \<^context>), REPEAT o (eresolve_tac \<^context> [conjE]), hyp_subst_tac \<^context>] 3\<close>)
\<comment> \<open>for if\<close>
-apply( tactic \<open>(Induct_Tacs.case_tac @{context} "the_Bool v" [] NONE THEN_ALL_NEW
- (asm_full_simp_tac @{context})) 7\<close>)
+apply( tactic \<open>(Induct_Tacs.case_tac \<^context> "the_Bool v" [] NONE THEN_ALL_NEW
+ (asm_full_simp_tac \<^context>)) 7\<close>)
-apply (tactic "forward_hyp_tac @{context}")
+apply (tactic "forward_hyp_tac \<^context>")
\<comment> \<open>11+1 if\<close>
prefer 7
@@ -277,8 +277,8 @@
\<comment> \<open>7 LAss\<close>
apply (fold fun_upd_def)
-apply( tactic \<open>(eresolve_tac @{context} [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
- THEN_ALL_NEW (full_simp_tac @{context})) 1\<close>)
+apply( tactic \<open>(eresolve_tac \<^context> [@{thm ty_expr.cases}, @{thm ty_exprs.cases}, @{thm wt_stmt.cases}]
+ THEN_ALL_NEW (full_simp_tac \<^context>)) 1\<close>)
apply (intro strip)
apply (case_tac E)
apply (simp)
@@ -296,7 +296,7 @@
apply(drule (1) ty_expr_ty_exprs_wt_stmt.Loop)
apply(force elim: hext_trans)
-apply (tactic "forward_hyp_tac @{context}")
+apply (tactic "forward_hyp_tac \<^context>")
\<comment> \<open>4 Cond\<close>
prefer 4
@@ -319,7 +319,7 @@
apply( case_tac "x2")
\<comment> \<open>x2 = None\<close>
apply (simp)
- apply (tactic "forward_hyp_tac @{context}", clarify)
+ apply (tactic "forward_hyp_tac \<^context>", clarify)
apply( drule eval_no_xcpt)
apply( erule FAss_type_sound, rule HOL.refl, assumption+)
\<comment> \<open>x2 = Some a\<close>
@@ -327,7 +327,7 @@
apply( fast intro: hext_trans)
-apply( tactic "prune_params_tac @{context}")
+apply( tactic "prune_params_tac \<^context>")
\<comment> \<open>Level 52\<close>
\<comment> \<open>1 Call\<close>
--- a/src/HOL/MicroJava/J/State.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/J/State.thy Sat Jan 05 17:24:33 2019 +0100
@@ -154,7 +154,7 @@
lemma c_hupd_fst [simp]: "fst (c_hupd h (x, s)) = x"
by (simp add: c_hupd_def split_beta)
-text \<open>Naive implementation for @{term "new_Addr"} by exhaustive search\<close>
+text \<open>Naive implementation for \<^term>\<open>new_Addr\<close> by exhaustive search\<close>
definition gen_new_Addr :: "aheap => nat \<Rightarrow> loc \<times> val option" where
"gen_new_Addr h n \<equiv>
--- a/src/HOL/MicroJava/J/WellForm.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/J/WellForm.thy Sat Jan 05 17:24:33 2019 +0100
@@ -315,7 +315,7 @@
apply( clarify)
apply( frule fields_rec, assumption)
apply( fastforce)
-apply( tactic "safe_tac (put_claset HOL_cs @{context})")
+apply( tactic "safe_tac (put_claset HOL_cs \<^context>)")
apply( subst fields_rec)
apply( assumption)
apply( assumption)
@@ -492,7 +492,7 @@
prefer 2
apply( rotate_tac -1, frule ssubst, erule_tac [2] asm_rl)
apply( tactic "asm_full_simp_tac
- (put_simpset HOL_ss @{context} addsimps [@{thm not_None_eq} RS sym]) 1")
+ (put_simpset HOL_ss \<^context> addsimps [@{thm not_None_eq} RS sym]) 1")
apply( simp_all (no_asm_simp) del: split_paired_Ex)
apply( frule (1) class_wf)
apply( simp (no_asm_simp) only: split_tupled_all)
--- a/src/HOL/MicroJava/JVM/JVMExceptions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/JVM/JVMExceptions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -48,7 +48,7 @@
text \<open>
Only program counters that are mentioned in the exception table
- can be returned by @{term match_exception_table}:
+ can be returned by \<^term>\<open>match_exception_table\<close>:
\<close>
lemma match_exception_table_in_et:
"match_exception_table G C pc et = Some pc' \<Longrightarrow> \<exists>e \<in> set et. pc' = fst (snd (snd e))"
--- a/src/HOL/MicroJava/JVM/JVMListExample.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/MicroJava/JVM/JVMListExample.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,7 +9,7 @@
begin
text \<open>
- Since the types @{typ cnam}, \<open>vnam\<close>, and \<open>mname\<close> are
+ Since the types \<^typ>\<open>cnam\<close>, \<open>vnam\<close>, and \<open>mname\<close> are
anonymous, we describe distinctness of names in the example by axioms:
\<close>
axiomatization list_nam test_nam :: cnam
--- a/src/HOL/Mirabelle/Tools/mirabelle.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Mirabelle/Tools/mirabelle.ML Sat Jan 05 17:24:33 2019 +0100
@@ -42,10 +42,10 @@
(* Mirabelle configuration *)
-val logfile = Attrib.setup_config_string @{binding mirabelle_logfile} (K "")
-val timeout = Attrib.setup_config_int @{binding mirabelle_timeout} (K 30)
-val start_line = Attrib.setup_config_int @{binding mirabelle_start_line} (K 0)
-val end_line = Attrib.setup_config_int @{binding mirabelle_end_line} (K ~1)
+val logfile = Attrib.setup_config_string \<^binding>\<open>mirabelle_logfile\<close> (K "")
+val timeout = Attrib.setup_config_int \<^binding>\<open>mirabelle_timeout\<close> (K 30)
+val start_line = Attrib.setup_config_int \<^binding>\<open>mirabelle_start_line\<close> (K 0)
+val end_line = Attrib.setup_config_int \<^binding>\<open>mirabelle_end_line\<close> (K ~1)
(* Mirabelle core *)
--- a/src/HOL/Mutabelle/MutabelleExtra.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Mutabelle/MutabelleExtra.thy Sat Jan 05 17:24:33 2019 +0100
@@ -41,7 +41,7 @@
MutabelleExtra.thms_of false thy
|> MutabelleExtra.take_random 200
|> (fn thms => MutabelleExtra.mutate_theorems_and_write_report
- @{theory} (1, 50) mtds thms (log_directory ^ "/" ^ name)))
+ \<^theory> (1, 50) mtds thms (log_directory ^ "/" ^ name)))
\<close>
--- a/src/HOL/Mutabelle/mutabelle.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Mutabelle/mutabelle.ML Sat Jan 05 17:24:33 2019 +0100
@@ -383,7 +383,7 @@
(*mutate origTerm iter times by only exchanging subterms*)
fun mutate_exc origTerm commutatives iter =
- mutate 0 origTerm @{theory Main} commutatives [] iter;
+ mutate 0 origTerm \<^theory>\<open>Main\<close> commutatives [] iter;
(*mutate origTerm iter times by only inserting signature functions*)
--- a/src/HOL/Mutabelle/mutabelle_extra.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Mutabelle/mutabelle_extra.ML Sat Jan 05 17:24:33 2019 +0100
@@ -124,7 +124,7 @@
| SOME _ => (GenuineCex, Quickcheck.timings_of result)
end) ()
handle Timeout.TIMEOUT _ =>
- (Timeout, [("timelimit", Real.floor (Options.default_real @{system_option auto_time_limit}))])
+ (Timeout, [("timelimit", Real.floor (Options.default_real \<^system_option>\<open>auto_time_limit\<close>))])
fun quickcheck_mtd change_options quickcheck_generator =
("quickcheck_" ^ quickcheck_generator, invoke_quickcheck change_options)
@@ -209,22 +209,22 @@
(* filtering forbidden theorems and mutants *)
-val comms = [@{const_name HOL.eq}, @{const_name HOL.disj}, @{const_name HOL.conj}]
+val comms = [\<^const_name>\<open>HOL.eq\<close>, \<^const_name>\<open>HOL.disj\<close>, \<^const_name>\<open>HOL.conj\<close>]
val forbidden =
[(* (@{const_name "power"}, "'a"), *)
(*(@{const_name induct_equal}, "'a"),
(@{const_name induct_implies}, "'a"),
(@{const_name induct_conj}, "'a"),*)
- (@{const_name "undefined"}, "'a"),
- (@{const_name "default"}, "'a"),
- (@{const_name "Pure.dummy_pattern"}, "'a::{}"),
- (@{const_name "HOL.simp_implies"}, "prop => prop => prop"),
- (@{const_name "bot_fun_inst.bot_fun"}, "'a"),
- (@{const_name "top_fun_inst.top_fun"}, "'a"),
- (@{const_name "Pure.term"}, "'a"),
- (@{const_name "top_class.top"}, "'a"),
- (@{const_name "Quotient.Quot_True"}, "'a")(*,
+ (\<^const_name>\<open>undefined\<close>, "'a"),
+ (\<^const_name>\<open>default\<close>, "'a"),
+ (\<^const_name>\<open>Pure.dummy_pattern\<close>, "'a::{}"),
+ (\<^const_name>\<open>HOL.simp_implies\<close>, "prop => prop => prop"),
+ (\<^const_name>\<open>bot_fun_inst.bot_fun\<close>, "'a"),
+ (\<^const_name>\<open>top_fun_inst.top_fun\<close>, "'a"),
+ (\<^const_name>\<open>Pure.term\<close>, "'a"),
+ (\<^const_name>\<open>top_class.top\<close>, "'a"),
+ (\<^const_name>\<open>Quotient.Quot_True\<close>, "'a")(*,
(@{const_name "uminus"}, "'a"),
(@{const_name "Nat.size"}, "'a"),
(@{const_name "Groups.abs"}, "'a") *)]
@@ -233,7 +233,7 @@
["finite_intvl_succ_class",
"nibble"]
-val forbidden_consts = [@{const_name Pure.type}]
+val forbidden_consts = [\<^const_name>\<open>Pure.type\<close>]
fun is_forbidden_theorem (s, th) =
let val consts = Term.add_const_names (Thm.prop_of th) [] in
@@ -247,54 +247,54 @@
end
val forbidden_mutant_constnames =
-[@{const_name HOL.induct_equal},
- @{const_name HOL.induct_implies},
- @{const_name HOL.induct_conj},
- @{const_name HOL.induct_forall},
- @{const_name undefined},
- @{const_name default},
- @{const_name Pure.dummy_pattern},
- @{const_name "HOL.simp_implies"},
- @{const_name "bot_fun_inst.bot_fun"},
- @{const_name "top_fun_inst.top_fun"},
- @{const_name "Pure.term"},
- @{const_name "top_class.top"},
+[\<^const_name>\<open>HOL.induct_equal\<close>,
+ \<^const_name>\<open>HOL.induct_implies\<close>,
+ \<^const_name>\<open>HOL.induct_conj\<close>,
+ \<^const_name>\<open>HOL.induct_forall\<close>,
+ \<^const_name>\<open>undefined\<close>,
+ \<^const_name>\<open>default\<close>,
+ \<^const_name>\<open>Pure.dummy_pattern\<close>,
+ \<^const_name>\<open>HOL.simp_implies\<close>,
+ \<^const_name>\<open>bot_fun_inst.bot_fun\<close>,
+ \<^const_name>\<open>top_fun_inst.top_fun\<close>,
+ \<^const_name>\<open>Pure.term\<close>,
+ \<^const_name>\<open>top_class.top\<close>,
(*@{const_name "HOL.equal"},*)
- @{const_name "Quotient.Quot_True"},
- @{const_name "equal_fun_inst.equal_fun"},
- @{const_name "equal_bool_inst.equal_bool"},
- @{const_name "ord_fun_inst.less_eq_fun"},
- @{const_name "ord_fun_inst.less_fun"},
- @{const_name Meson.skolem},
- @{const_name ATP.fequal},
- @{const_name ATP.fEx},
- @{const_name enum_prod_inst.enum_all_prod},
- @{const_name enum_prod_inst.enum_ex_prod},
- @{const_name Quickcheck_Random.catch_match},
- @{const_name Quickcheck_Exhaustive.unknown},
- @{const_name Num.Bit0}, @{const_name Num.Bit1}
+ \<^const_name>\<open>Quotient.Quot_True\<close>,
+ \<^const_name>\<open>equal_fun_inst.equal_fun\<close>,
+ \<^const_name>\<open>equal_bool_inst.equal_bool\<close>,
+ \<^const_name>\<open>ord_fun_inst.less_eq_fun\<close>,
+ \<^const_name>\<open>ord_fun_inst.less_fun\<close>,
+ \<^const_name>\<open>Meson.skolem\<close>,
+ \<^const_name>\<open>ATP.fequal\<close>,
+ \<^const_name>\<open>ATP.fEx\<close>,
+ \<^const_name>\<open>enum_prod_inst.enum_all_prod\<close>,
+ \<^const_name>\<open>enum_prod_inst.enum_ex_prod\<close>,
+ \<^const_name>\<open>Quickcheck_Random.catch_match\<close>,
+ \<^const_name>\<open>Quickcheck_Exhaustive.unknown\<close>,
+ \<^const_name>\<open>Num.Bit0\<close>, \<^const_name>\<open>Num.Bit1\<close>
(*@{const_name Pure.imp}, @{const_name Pure.eq}*)]
val forbidden_mutant_consts =
[
- (@{const_name "Groups.zero_class.zero"}, @{typ "prop => prop => prop"}),
- (@{const_name "Groups.one_class.one"}, @{typ "prop => prop => prop"}),
- (@{const_name "Groups.plus_class.plus"}, @{typ "prop => prop => prop"}),
- (@{const_name "Groups.minus_class.minus"}, @{typ "prop => prop => prop"}),
- (@{const_name "Groups.times_class.times"}, @{typ "prop => prop => prop"}),
- (@{const_name "Lattices.inf_class.inf"}, @{typ "prop => prop => prop"}),
- (@{const_name "Lattices.sup_class.sup"}, @{typ "prop => prop => prop"}),
- (@{const_name "Orderings.bot_class.bot"}, @{typ "prop => prop => prop"}),
- (@{const_name "Orderings.ord_class.min"}, @{typ "prop => prop => prop"}),
- (@{const_name "Orderings.ord_class.max"}, @{typ "prop => prop => prop"}),
- (@{const_name "Rings.modulo"}, @{typ "prop => prop => prop"}),
- (@{const_name Rings.divide}, @{typ "prop => prop => prop"}),
- (@{const_name "GCD.gcd_class.gcd"}, @{typ "prop => prop => prop"}),
- (@{const_name "GCD.gcd_class.lcm"}, @{typ "prop => prop => prop"}),
- (@{const_name "Orderings.bot_class.bot"}, @{typ "bool => prop"}),
- (@{const_name "Groups.one_class.one"}, @{typ "bool => prop"}),
- (@{const_name "Groups.zero_class.zero"},@{typ "bool => prop"}),
- (@{const_name "equal_class.equal"},@{typ "bool => bool => bool"})]
+ (\<^const_name>\<open>Groups.zero_class.zero\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Groups.one_class.one\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Groups.plus_class.plus\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Groups.minus_class.minus\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Groups.times_class.times\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Lattices.inf_class.inf\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Lattices.sup_class.sup\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Orderings.bot_class.bot\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Orderings.ord_class.min\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Orderings.ord_class.max\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Rings.modulo\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Rings.divide\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>GCD.gcd_class.gcd\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>GCD.gcd_class.lcm\<close>, \<^typ>\<open>prop => prop => prop\<close>),
+ (\<^const_name>\<open>Orderings.bot_class.bot\<close>, \<^typ>\<open>bool => prop\<close>),
+ (\<^const_name>\<open>Groups.one_class.one\<close>, \<^typ>\<open>bool => prop\<close>),
+ (\<^const_name>\<open>Groups.zero_class.zero\<close>,\<^typ>\<open>bool => prop\<close>),
+ (\<^const_name>\<open>equal_class.equal\<close>,\<^typ>\<open>bool => bool => bool\<close>)]
fun is_forbidden_mutant t =
let
--- a/src/HOL/NanoJava/AxSem.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/NanoJava/AxSem.thy Sat Jan 05 17:24:33 2019 +0100
@@ -152,7 +152,7 @@
"(A' |\<turnstile> C \<longrightarrow> (\<forall>A. A' \<subseteq> A \<longrightarrow> A |\<turnstile> C )) \<and>
(A' \<turnstile>\<^sub>e {P} e {Q} \<longrightarrow> (\<forall>A. A' \<subseteq> A \<longrightarrow> A \<turnstile>\<^sub>e {P} e {Q}))"
apply (rule hoare_ehoare.induct)
-apply (tactic "ALLGOALS(EVERY'[clarify_tac @{context}, REPEAT o smp_tac @{context} 1])")
+apply (tactic "ALLGOALS(EVERY'[clarify_tac \<^context>, REPEAT o smp_tac \<^context> 1])")
apply (blast intro: hoare_ehoare.Skip)
apply (blast intro: hoare_ehoare.Comp)
apply (blast intro: hoare_ehoare.Cond)
@@ -164,7 +164,7 @@
apply (blast intro: hoare_ehoare.NewC)
apply (blast intro: hoare_ehoare.Cast)
apply (erule hoare_ehoare.Call)
-apply (rule, drule spec, erule conjE, tactic "smp_tac @{context} 1 1", assumption)
+apply (rule, drule spec, erule conjE, tactic "smp_tac \<^context> 1 1", assumption)
apply blast
apply (blast intro!: hoare_ehoare.Meth)
apply (blast intro!: hoare_ehoare.Impl)
@@ -172,9 +172,9 @@
apply (blast intro: hoare_ehoare.ConjI)
apply (blast intro: hoare_ehoare.ConjE)
apply (rule hoare_ehoare.Conseq)
-apply (rule, drule spec, erule conjE, tactic "smp_tac @{context} 1 1", assumption+)
+apply (rule, drule spec, erule conjE, tactic "smp_tac \<^context> 1 1", assumption+)
apply (rule hoare_ehoare.eConseq)
-apply (rule, drule spec, erule conjE, tactic "smp_tac @{context} 1 1", assumption+)
+apply (rule, drule spec, erule conjE, tactic "smp_tac \<^context> 1 1", assumption+)
done
lemma cThin: "\<lbrakk>A' |\<turnstile> C; A' \<subseteq> A\<rbrakk> \<Longrightarrow> A |\<turnstile> C"
--- a/src/HOL/NanoJava/Equivalence.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/NanoJava/Equivalence.thy Sat Jan 05 17:24:33 2019 +0100
@@ -91,17 +91,17 @@
by(simp add: cnvalids_def nvalids_def nvalid_def2)
lemma hoare_sound_main:"\<And>t. (A |\<turnstile> C \<longrightarrow> A |\<Turnstile> C) \<and> (A |\<turnstile>\<^sub>e t \<longrightarrow> A |\<Turnstile>\<^sub>e t)"
-apply (tactic "split_all_tac @{context} 1", rename_tac P e Q)
+apply (tactic "split_all_tac \<^context> 1", rename_tac P e Q)
apply (rule hoare_ehoare.induct)
(*18*)
-apply (tactic \<open>ALLGOALS (REPEAT o dresolve_tac @{context} [@{thm all_conjunct2}, @{thm all3_conjunct2}])\<close>)
-apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "hoare _ _" [])\<close>)
-apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac @{context} "ehoare _ _" [])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o dresolve_tac \<^context> [@{thm all_conjunct2}, @{thm all3_conjunct2}])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac \<^context> "hoare _ _" [])\<close>)
+apply (tactic \<open>ALLGOALS (REPEAT o Rule_Insts.thin_tac \<^context> "ehoare _ _" [])\<close>)
apply (simp_all only: cnvalid1_eq cenvalid_def2)
apply fast
apply fast
apply fast
- apply (clarify,tactic "smp_tac @{context} 1 1",erule(2) Loop_sound_lemma,(rule HOL.refl)+)
+ apply (clarify,tactic "smp_tac \<^context> 1 1",erule(2) Loop_sound_lemma,(rule HOL.refl)+)
apply fast
apply fast
apply fast
--- a/src/HOL/NanoJava/Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/NanoJava/Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -131,7 +131,7 @@
apply auto
apply (case_tac "aa=a")
apply auto
-apply (tactic "smp_tac @{context} 1 1")
+apply (tactic "smp_tac \<^context> 1 1")
apply (case_tac "aa=a")
apply auto
done
--- a/src/HOL/NanoJava/State.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/NanoJava/State.thy Sat Jan 05 17:24:33 2019 +0100
@@ -51,8 +51,8 @@
"init_locs C m s \<equiv> s (| locals := locals s ++
init_vars (map_of (lcl (the (method C m)))) |)"
-text \<open>The first parameter of @{term set_locs} is of type @{typ state}
- rather than @{typ locals} in order to keep @{typ locals} private.\<close>
+text \<open>The first parameter of \<^term>\<open>set_locs\<close> is of type \<^typ>\<open>state\<close>
+ rather than \<^typ>\<open>locals\<close> in order to keep \<^typ>\<open>locals\<close> private.\<close>
definition set_locs :: "state => state => state" where
"set_locs s s' \<equiv> s' (| locals := locals s |)"
--- a/src/HOL/Nitpick_Examples/Manual_Nits.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nitpick_Examples/Manual_Nits.thy Sat Jan 05 17:24:33 2019 +0100
@@ -137,7 +137,7 @@
\<close>
declaration \<open>
-Nitpick_Model.register_term_postprocessor @{typ my_int} my_int_postproc
+Nitpick_Model.register_term_postprocessor \<^typ>\<open>my_int\<close> my_int_postproc
\<close>
lemma "add x y = add x x"
--- a/src/HOL/Nitpick_Examples/Mini_Nits.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nitpick_Examples/Mini_Nits.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,95 +17,95 @@
total_consts = smart]
ML \<open>
-val check = Minipick.minipick @{context}
-val expect = Minipick.minipick_expect @{context}
+val check = Minipick.minipick \<^context>
+val expect = Minipick.minipick_expect \<^context>
val none = expect "none"
val genuine = expect "genuine"
val unknown = expect "unknown"
\<close>
-ML \<open>genuine 1 @{prop "x = Not"}\<close>
-ML \<open>none 1 @{prop "\<exists>x. x = Not"}\<close>
-ML \<open>none 1 @{prop "\<not> False"}\<close>
-ML \<open>genuine 1 @{prop "\<not> True"}\<close>
-ML \<open>none 1 @{prop "\<not> \<not> b \<longleftrightarrow> b"}\<close>
-ML \<open>none 1 @{prop True}\<close>
-ML \<open>genuine 1 @{prop False}\<close>
-ML \<open>genuine 1 @{prop "True \<longleftrightarrow> False"}\<close>
-ML \<open>none 1 @{prop "True \<longleftrightarrow> \<not> False"}\<close>
-ML \<open>none 4 @{prop "\<forall>x. x = x"}\<close>
-ML \<open>none 4 @{prop "\<exists>x. x = x"}\<close>
-ML \<open>none 1 @{prop "\<forall>x. x = y"}\<close>
-ML \<open>genuine 2 @{prop "\<forall>x. x = y"}\<close>
-ML \<open>none 2 @{prop "\<exists>x. x = y"}\<close>
-ML \<open>none 2 @{prop "\<forall>x::'a \<times> 'a. x = x"}\<close>
-ML \<open>none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"}\<close>
-ML \<open>genuine 2 @{prop "\<forall>x::'a \<times> 'a. x = y"}\<close>
-ML \<open>none 2 @{prop "\<exists>x::'a \<times> 'a. x = y"}\<close>
-ML \<open>none 1 @{prop "All = Ex"}\<close>
-ML \<open>genuine 2 @{prop "All = Ex"}\<close>
-ML \<open>none 1 @{prop "All P = Ex P"}\<close>
-ML \<open>genuine 2 @{prop "All P = Ex P"}\<close>
-ML \<open>none 4 @{prop "x = y \<longrightarrow> P x = P y"}\<close>
-ML \<open>none 4 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x = P y"}\<close>
-ML \<open>none 2 @{prop "(x::'a \<times> 'a) = y \<longrightarrow> P x y = P y x"}\<close>
-ML \<open>none 4 @{prop "\<exists>x::'a \<times> 'a. x = y \<longrightarrow> P x = P y"}\<close>
-ML \<open>none 2 @{prop "(x::'a \<Rightarrow> 'a) = y \<longrightarrow> P x = P y"}\<close>
-ML \<open>none 2 @{prop "\<exists>x::'a \<Rightarrow> 'a. x = y \<longrightarrow> P x = P y"}\<close>
-ML \<open>genuine 1 @{prop "(=) X = Ex"}\<close>
-ML \<open>none 2 @{prop "\<forall>x::'a \<Rightarrow> 'a. x = x"}\<close>
-ML \<open>none 1 @{prop "x = y"}\<close>
-ML \<open>genuine 1 @{prop "x \<longleftrightarrow> y"}\<close>
-ML \<open>genuine 2 @{prop "x = y"}\<close>
-ML \<open>genuine 1 @{prop "X \<subseteq> Y"}\<close>
-ML \<open>none 1 @{prop "P \<and> Q \<longleftrightarrow> Q \<and> P"}\<close>
-ML \<open>none 1 @{prop "P \<and> Q \<longrightarrow> P"}\<close>
-ML \<open>none 1 @{prop "P \<or> Q \<longleftrightarrow> Q \<or> P"}\<close>
-ML \<open>genuine 1 @{prop "P \<or> Q \<longrightarrow> P"}\<close>
-ML \<open>none 1 @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> (\<not> P \<or> Q)"}\<close>
-ML \<open>none 4 @{prop "{a} = {a, a}"}\<close>
-ML \<open>genuine 2 @{prop "{a} = {a, b}"}\<close>
-ML \<open>genuine 1 @{prop "{a} \<noteq> {a, b}"}\<close>
-ML \<open>none 4 @{prop "{}\<^sup>+ = {}"}\<close>
-ML \<open>none 4 @{prop "UNIV\<^sup>+ = UNIV"}\<close>
-ML \<open>none 4 @{prop "(UNIV :: ('a \<times> 'b) set) - {} = UNIV"}\<close>
-ML \<open>none 4 @{prop "{} - (UNIV :: ('a \<times> 'b) set) = {}"}\<close>
-ML \<open>none 1 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
-ML \<open>genuine 2 @{prop "{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
-ML \<open>none 4 @{prop "a \<noteq> c \<Longrightarrow> {(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}"}\<close>
-ML \<open>none 4 @{prop "A \<union> B = {x. x \<in> A \<or> x \<in> B}"}\<close>
-ML \<open>none 4 @{prop "A \<inter> B = {x. x \<in> A \<and> x \<in> B}"}\<close>
-ML \<open>none 4 @{prop "A - B = (\<lambda>x. A x \<and> \<not> B x)"}\<close>
-ML \<open>none 4 @{prop "\<exists>a b. (a, b) = (b, a)"}\<close>
-ML \<open>genuine 2 @{prop "(a, b) = (b, a)"}\<close>
-ML \<open>genuine 2 @{prop "(a, b) \<noteq> (b, a)"}\<close>
-ML \<open>none 4 @{prop "\<exists>a b::'a \<times> 'a. (a, b) = (b, a)"}\<close>
-ML \<open>genuine 2 @{prop "(a::'a \<times> 'a, b) = (b, a)"}\<close>
-ML \<open>none 4 @{prop "\<exists>a b::'a \<times> 'a \<times> 'a. (a, b) = (b, a)"}\<close>
-ML \<open>genuine 2 @{prop "(a::'a \<times> 'a \<times> 'a, b) \<noteq> (b, a)"}\<close>
-ML \<open>none 4 @{prop "\<exists>a b::'a \<Rightarrow> 'a. (a, b) = (b, a)"}\<close>
-ML \<open>genuine 1 @{prop "(a::'a \<Rightarrow> 'a, b) \<noteq> (b, a)"}\<close>
-ML \<open>none 4 @{prop "fst (a, b) = a"}\<close>
-ML \<open>none 1 @{prop "fst (a, b) = b"}\<close>
-ML \<open>genuine 2 @{prop "fst (a, b) = b"}\<close>
-ML \<open>genuine 2 @{prop "fst (a, b) \<noteq> b"}\<close>
-ML \<open>genuine 2 @{prop "f ((x, z), y) = (x, z)"}\<close>
-ML \<open>none 2 @{prop "(\<forall>x. f x = fst x) \<longrightarrow> f ((x, z), y) = (x, z)"}\<close>
-ML \<open>none 4 @{prop "snd (a, b) = b"}\<close>
-ML \<open>none 1 @{prop "snd (a, b) = a"}\<close>
-ML \<open>genuine 2 @{prop "snd (a, b) = a"}\<close>
-ML \<open>genuine 2 @{prop "snd (a, b) \<noteq> a"}\<close>
-ML \<open>genuine 1 @{prop P}\<close>
-ML \<open>genuine 1 @{prop "(\<lambda>x. P) a"}\<close>
-ML \<open>genuine 1 @{prop "(\<lambda>x y z. P y x z) a b c"}\<close>
-ML \<open>none 4 @{prop "\<exists>f. f = (\<lambda>x. x) \<and> f y = y"}\<close>
-ML \<open>genuine 1 @{prop "\<exists>f. f p \<noteq> p \<and> (\<forall>a b. f (a, b) = (a, b))"}\<close>
-ML \<open>none 2 @{prop "\<exists>f. \<forall>a b. f (a, b) = (a, b)"}\<close>
-ML \<open>none 3 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (y, x)"}\<close>
-ML \<open>genuine 2 @{prop "f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (x, y)"}\<close>
-ML \<open>none 4 @{prop "f = (\<lambda>x. f x)"}\<close>
-ML \<open>none 4 @{prop "f = (\<lambda>x. f x::'a \<Rightarrow> bool)"}\<close>
-ML \<open>none 4 @{prop "f = (\<lambda>x y. f x y)"}\<close>
-ML \<open>none 4 @{prop "f = (\<lambda>x y. f x y::'a \<Rightarrow> bool)"}\<close>
+ML \<open>genuine 1 \<^prop>\<open>x = Not\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>\<exists>x. x = Not\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>\<not> False\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>\<not> True\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>\<not> \<not> b \<longleftrightarrow> b\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>True\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>False\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>True \<longleftrightarrow> False\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>True \<longleftrightarrow> \<not> False\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<forall>x. x = x\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>x. x = x\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>\<forall>x. x = y\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>\<forall>x. x = y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<exists>x. x = y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<forall>x::'a \<times> 'a. x = x\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<exists>x::'a \<times> 'a. x = y\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>\<forall>x::'a \<times> 'a. x = y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<exists>x::'a \<times> 'a. x = y\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>All = Ex\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>All = Ex\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>All P = Ex P\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>All P = Ex P\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>x = y \<longrightarrow> P x = P y\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>(x::'a \<times> 'a) = y \<longrightarrow> P x = P y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>(x::'a \<times> 'a) = y \<longrightarrow> P x y = P y x\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>x::'a \<times> 'a. x = y \<longrightarrow> P x = P y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>(x::'a \<Rightarrow> 'a) = y \<longrightarrow> P x = P y\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<exists>x::'a \<Rightarrow> 'a. x = y \<longrightarrow> P x = P y\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>(=) X = Ex\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<forall>x::'a \<Rightarrow> 'a. x = x\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>x = y\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>x \<longleftrightarrow> y\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>x = y\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>X \<subseteq> Y\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>P \<and> Q \<longleftrightarrow> Q \<and> P\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>P \<and> Q \<longrightarrow> P\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>P \<or> Q \<longleftrightarrow> Q \<or> P\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>P \<or> Q \<longrightarrow> P\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>(P \<longrightarrow> Q) \<longleftrightarrow> (\<not> P \<or> Q)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>{a} = {a, a}\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>{a} = {a, b}\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>{a} \<noteq> {a, b}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>{}\<^sup>+ = {}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>UNIV\<^sup>+ = UNIV\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>(UNIV :: ('a \<times> 'b) set) - {} = UNIV\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>{} - (UNIV :: ('a \<times> 'b) set) = {}\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>{(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>a \<noteq> c \<Longrightarrow> {(a, b), (b, c)}\<^sup>+ = {(a, b), (a, c), (b, c)}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>A \<union> B = {x. x \<in> A \<or> x \<in> B}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>A \<inter> B = {x. x \<in> A \<and> x \<in> B}\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>A - B = (\<lambda>x. A x \<and> \<not> B x)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>a b. (a, b) = (b, a)\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>(a, b) = (b, a)\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>(a, b) \<noteq> (b, a)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>a b::'a \<times> 'a. (a, b) = (b, a)\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>(a::'a \<times> 'a, b) = (b, a)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>a b::'a \<times> 'a \<times> 'a. (a, b) = (b, a)\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>(a::'a \<times> 'a \<times> 'a, b) \<noteq> (b, a)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>a b::'a \<Rightarrow> 'a. (a, b) = (b, a)\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>(a::'a \<Rightarrow> 'a, b) \<noteq> (b, a)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>fst (a, b) = a\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>fst (a, b) = b\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>fst (a, b) = b\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>fst (a, b) \<noteq> b\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>f ((x, z), y) = (x, z)\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>(\<forall>x. f x = fst x) \<longrightarrow> f ((x, z), y) = (x, z)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>snd (a, b) = b\<close>\<close>
+ML \<open>none 1 \<^prop>\<open>snd (a, b) = a\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>snd (a, b) = a\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>snd (a, b) \<noteq> a\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>P\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>(\<lambda>x. P) a\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>(\<lambda>x y z. P y x z) a b c\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>\<exists>f. f = (\<lambda>x. x) \<and> f y = y\<close>\<close>
+ML \<open>genuine 1 \<^prop>\<open>\<exists>f. f p \<noteq> p \<and> (\<forall>a b. f (a, b) = (a, b))\<close>\<close>
+ML \<open>none 2 \<^prop>\<open>\<exists>f. \<forall>a b. f (a, b) = (a, b)\<close>\<close>
+ML \<open>none 3 \<^prop>\<open>f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (y, x)\<close>\<close>
+ML \<open>genuine 2 \<^prop>\<open>f = (\<lambda>a b. (b, a)) \<longrightarrow> f x y = (x, y)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>f = (\<lambda>x. f x)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>f = (\<lambda>x. f x::'a \<Rightarrow> bool)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>f = (\<lambda>x y. f x y)\<close>\<close>
+ML \<open>none 4 \<^prop>\<open>f = (\<lambda>x y. f x y::'a \<Rightarrow> bool)\<close>\<close>
end
--- a/src/HOL/Nitpick_Examples/Mono_Nits.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nitpick_Examples/Mono_Nits.thy Sat Jan 05 17:24:33 2019 +0100
@@ -20,8 +20,8 @@
exception BUG
-val thy = @{theory}
-val ctxt = @{context}
+val thy = \<^theory>
+val ctxt = \<^context>
val subst = []
val tac_timeout = seconds 1.0
val case_names = case_const_names ctxt
@@ -50,11 +50,11 @@
val binarize = false
fun is_mono t =
- Nitpick_Mono.formulas_monotonic hol_ctxt binarize @{typ 'a} ([t], [])
+ Nitpick_Mono.formulas_monotonic hol_ctxt binarize \<^typ>\<open>'a\<close> ([t], [])
fun is_const t =
let val T = fastype_of t in
- Logic.mk_implies (Logic.mk_equals (Free ("dummyP", T), t), @{const False})
+ Logic.mk_implies (Logic.mk_equals (Free ("dummyP", T), t), \<^const>\<open>False\<close>)
|> is_mono
end
@@ -66,78 +66,78 @@
ML \<open>Nitpick_Mono.trace := false\<close>
-ML_val \<open>const @{term "A::('a\<Rightarrow>'b)"}\<close>
-ML_val \<open>const @{term "(A::'a set) = A"}\<close>
-ML_val \<open>const @{term "(A::'a set set) = A"}\<close>
-ML_val \<open>const @{term "(\<lambda>x::'a set. a \<in> x)"}\<close>
-ML_val \<open>const @{term "{{a::'a}} = C"}\<close>
-ML_val \<open>const @{term "{f::'a\<Rightarrow>nat} = {g::'a\<Rightarrow>nat}"}\<close>
-ML_val \<open>const @{term "A \<union> (B::'a set)"}\<close>
-ML_val \<open>const @{term "\<lambda>A B x::'a. A x \<or> B x"}\<close>
-ML_val \<open>const @{term "P (a::'a)"}\<close>
-ML_val \<open>const @{term "\<lambda>a::'a. b (c (d::'a)) (e::'a) (f::'a)"}\<close>
-ML_val \<open>const @{term "\<forall>A::'a set. a \<in> A"}\<close>
-ML_val \<open>const @{term "\<forall>A::'a set. P A"}\<close>
-ML_val \<open>const @{term "P \<or> Q"}\<close>
-ML_val \<open>const @{term "A \<union> B = (C::'a set)"}\<close>
-ML_val \<open>const @{term "(\<lambda>A B x::'a. A x \<or> B x) A B = C"}\<close>
-ML_val \<open>const @{term "(if P then (A::'a set) else B) = C"}\<close>
-ML_val \<open>const @{term "let A = (C::'a set) in A \<union> B"}\<close>
-ML_val \<open>const @{term "THE x::'b. P x"}\<close>
-ML_val \<open>const @{term "(\<lambda>x::'a. False)"}\<close>
-ML_val \<open>const @{term "(\<lambda>x::'a. True)"}\<close>
-ML_val \<open>const @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. False)"}\<close>
-ML_val \<open>const @{term "(\<lambda>x::'a. True) = (\<lambda>x::'a. True)"}\<close>
-ML_val \<open>const @{term "Let (a::'a) A"}\<close>
-ML_val \<open>const @{term "A (a::'a)"}\<close>
-ML_val \<open>const @{term "insert (a::'a) A = B"}\<close>
-ML_val \<open>const @{term "- (A::'a set)"}\<close>
-ML_val \<open>const @{term "finite (A::'a set)"}\<close>
-ML_val \<open>const @{term "\<not> finite (A::'a set)"}\<close>
-ML_val \<open>const @{term "finite (A::'a set set)"}\<close>
-ML_val \<open>const @{term "\<lambda>a::'a. A a \<and> \<not> B a"}\<close>
-ML_val \<open>const @{term "A < (B::'a set)"}\<close>
-ML_val \<open>const @{term "A \<le> (B::'a set)"}\<close>
-ML_val \<open>const @{term "[a::'a]"}\<close>
-ML_val \<open>const @{term "[a::'a set]"}\<close>
-ML_val \<open>const @{term "[A \<union> (B::'a set)]"}\<close>
-ML_val \<open>const @{term "[A \<union> (B::'a set)] = [C]"}\<close>
-ML_val \<open>const @{term "{(\<lambda>x::'a. x = a)} = C"}\<close>
-ML_val \<open>const @{term "(\<lambda>a::'a. \<not> A a) = B"}\<close>
-ML_val \<open>const @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> f a \<and> g a \<longrightarrow> \<not> f a"}\<close>
-ML_val \<open>const @{term "\<lambda>A B x::'a. A x \<and> B x \<and> A = B"}\<close>
-ML_val \<open>const @{term "p = (\<lambda>(x::'a) (y::'a). P x \<or> \<not> Q y)"}\<close>
-ML_val \<open>const @{term "p = (\<lambda>(x::'a) (y::'a). p x y :: bool)"}\<close>
-ML_val \<open>const @{term "p = (\<lambda>A B x. A x \<and> \<not> B x) (\<lambda>x. True) (\<lambda>y. x \<noteq> y)"}\<close>
-ML_val \<open>const @{term "p = (\<lambda>y. x \<noteq> y)"}\<close>
-ML_val \<open>const @{term "(\<lambda>x. (p::'a\<Rightarrow>bool\<Rightarrow>bool) x False)"}\<close>
-ML_val \<open>const @{term "(\<lambda>x y. (p::'a\<Rightarrow>'a\<Rightarrow>bool\<Rightarrow>bool) x y False)"}\<close>
-ML_val \<open>const @{term "f = (\<lambda>x::'a. P x \<longrightarrow> Q x)"}\<close>
-ML_val \<open>const @{term "\<forall>a::'a. P a"}\<close>
+ML_val \<open>const \<^term>\<open>A::('a\<Rightarrow>'b)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(A::'a set) = A\<close>\<close>
+ML_val \<open>const \<^term>\<open>(A::'a set set) = A\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x::'a set. a \<in> x)\<close>\<close>
+ML_val \<open>const \<^term>\<open>{{a::'a}} = C\<close>\<close>
+ML_val \<open>const \<^term>\<open>{f::'a\<Rightarrow>nat} = {g::'a\<Rightarrow>nat}\<close>\<close>
+ML_val \<open>const \<^term>\<open>A \<union> (B::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<lambda>A B x::'a. A x \<or> B x\<close>\<close>
+ML_val \<open>const \<^term>\<open>P (a::'a)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<lambda>a::'a. b (c (d::'a)) (e::'a) (f::'a)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<forall>A::'a set. a \<in> A\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<forall>A::'a set. P A\<close>\<close>
+ML_val \<open>const \<^term>\<open>P \<or> Q\<close>\<close>
+ML_val \<open>const \<^term>\<open>A \<union> B = (C::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>A B x::'a. A x \<or> B x) A B = C\<close>\<close>
+ML_val \<open>const \<^term>\<open>(if P then (A::'a set) else B) = C\<close>\<close>
+ML_val \<open>const \<^term>\<open>let A = (C::'a set) in A \<union> B\<close>\<close>
+ML_val \<open>const \<^term>\<open>THE x::'b. P x\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x::'a. False)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x::'a. True)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x::'a. False) = (\<lambda>x::'a. False)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x::'a. True) = (\<lambda>x::'a. True)\<close>\<close>
+ML_val \<open>const \<^term>\<open>Let (a::'a) A\<close>\<close>
+ML_val \<open>const \<^term>\<open>A (a::'a)\<close>\<close>
+ML_val \<open>const \<^term>\<open>insert (a::'a) A = B\<close>\<close>
+ML_val \<open>const \<^term>\<open>- (A::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>finite (A::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<not> finite (A::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>finite (A::'a set set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<lambda>a::'a. A a \<and> \<not> B a\<close>\<close>
+ML_val \<open>const \<^term>\<open>A < (B::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>A \<le> (B::'a set)\<close>\<close>
+ML_val \<open>const \<^term>\<open>[a::'a]\<close>\<close>
+ML_val \<open>const \<^term>\<open>[a::'a set]\<close>\<close>
+ML_val \<open>const \<^term>\<open>[A \<union> (B::'a set)]\<close>\<close>
+ML_val \<open>const \<^term>\<open>[A \<union> (B::'a set)] = [C]\<close>\<close>
+ML_val \<open>const \<^term>\<open>{(\<lambda>x::'a. x = a)} = C\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>a::'a. \<not> A a) = B\<close>\<close>
+ML_val \<open>const \<^prop>\<open>\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> f a \<and> g a \<longrightarrow> \<not> f a\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<lambda>A B x::'a. A x \<and> B x \<and> A = B\<close>\<close>
+ML_val \<open>const \<^term>\<open>p = (\<lambda>(x::'a) (y::'a). P x \<or> \<not> Q y)\<close>\<close>
+ML_val \<open>const \<^term>\<open>p = (\<lambda>(x::'a) (y::'a). p x y :: bool)\<close>\<close>
+ML_val \<open>const \<^term>\<open>p = (\<lambda>A B x. A x \<and> \<not> B x) (\<lambda>x. True) (\<lambda>y. x \<noteq> y)\<close>\<close>
+ML_val \<open>const \<^term>\<open>p = (\<lambda>y. x \<noteq> y)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x. (p::'a\<Rightarrow>bool\<Rightarrow>bool) x False)\<close>\<close>
+ML_val \<open>const \<^term>\<open>(\<lambda>x y. (p::'a\<Rightarrow>'a\<Rightarrow>bool\<Rightarrow>bool) x y False)\<close>\<close>
+ML_val \<open>const \<^term>\<open>f = (\<lambda>x::'a. P x \<longrightarrow> Q x)\<close>\<close>
+ML_val \<open>const \<^term>\<open>\<forall>a::'a. P a\<close>\<close>
-ML_val \<open>nonconst @{term "\<forall>P (a::'a). P a"}\<close>
-ML_val \<open>nonconst @{term "THE x::'a. P x"}\<close>
-ML_val \<open>nonconst @{term "SOME x::'a. P x"}\<close>
-ML_val \<open>nonconst @{term "(\<lambda>A B x::'a. A x \<or> B x) = myunion"}\<close>
-ML_val \<open>nonconst @{term "(\<lambda>x::'a. False) = (\<lambda>x::'a. True)"}\<close>
-ML_val \<open>nonconst @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"}\<close>
+ML_val \<open>nonconst \<^term>\<open>\<forall>P (a::'a). P a\<close>\<close>
+ML_val \<open>nonconst \<^term>\<open>THE x::'a. P x\<close>\<close>
+ML_val \<open>nonconst \<^term>\<open>SOME x::'a. P x\<close>\<close>
+ML_val \<open>nonconst \<^term>\<open>(\<lambda>A B x::'a. A x \<or> B x) = myunion\<close>\<close>
+ML_val \<open>nonconst \<^term>\<open>(\<lambda>x::'a. False) = (\<lambda>x::'a. True)\<close>\<close>
+ML_val \<open>nonconst \<^prop>\<open>\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h\<close>\<close>
-ML_val \<open>mono @{prop "Q (\<forall>x::'a set. P x)"}\<close>
-ML_val \<open>mono @{prop "P (a::'a)"}\<close>
-ML_val \<open>mono @{prop "{a} = {b::'a}"}\<close>
-ML_val \<open>mono @{prop "(\<lambda>x. x = a) = (\<lambda>y. y = (b::'a))"}\<close>
-ML_val \<open>mono @{prop "(a::'a) \<in> P \<and> P \<union> P = P"}\<close>
-ML_val \<open>mono @{prop "\<forall>F::'a set set. P"}\<close>
-ML_val \<open>mono @{prop "\<not> (\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h)"}\<close>
-ML_val \<open>mono @{prop "\<not> Q (\<forall>x::'a set. P x)"}\<close>
-ML_val \<open>mono @{prop "\<not> (\<forall>x::'a. P x)"}\<close>
-ML_val \<open>mono @{prop "myall P = (P = (\<lambda>x::'a. True))"}\<close>
-ML_val \<open>mono @{prop "myall P = (P = (\<lambda>x::'a. False))"}\<close>
-ML_val \<open>mono @{prop "\<forall>x::'a. P x"}\<close>
-ML_val \<open>mono @{term "(\<lambda>A B x::'a. A x \<or> B x) \<noteq> myunion"}\<close>
+ML_val \<open>mono \<^prop>\<open>Q (\<forall>x::'a set. P x)\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>P (a::'a)\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>{a} = {b::'a}\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>(\<lambda>x. x = a) = (\<lambda>y. y = (b::'a))\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>(a::'a) \<in> P \<and> P \<union> P = P\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>\<forall>F::'a set set. P\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>\<not> (\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h)\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>\<not> Q (\<forall>x::'a set. P x)\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>\<not> (\<forall>x::'a. P x)\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>myall P = (P = (\<lambda>x::'a. True))\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>myall P = (P = (\<lambda>x::'a. False))\<close>\<close>
+ML_val \<open>mono \<^prop>\<open>\<forall>x::'a. P x\<close>\<close>
+ML_val \<open>mono \<^term>\<open>(\<lambda>A B x::'a. A x \<or> B x) \<noteq> myunion\<close>\<close>
-ML_val \<open>nonmono @{prop "A = (\<lambda>x::'a. True) \<and> A = (\<lambda>x. False)"}\<close>
-ML_val \<open>nonmono @{prop "\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h"}\<close>
+ML_val \<open>nonmono \<^prop>\<open>A = (\<lambda>x::'a. True) \<and> A = (\<lambda>x. False)\<close>\<close>
+ML_val \<open>nonmono \<^prop>\<open>\<forall>F f g (h::'a set). F f \<and> F g \<and> \<not> a \<in> f \<and> a \<in> g \<longrightarrow> F h\<close>\<close>
ML \<open>
val preproc_timeout = seconds 5.0
@@ -180,7 +180,7 @@
fun check_theorem (name, th) =
let
val t = th |> Thm.prop_of |> Type.legacy_freeze |> close_form
- val neg_t = Logic.mk_implies (t, @{prop False})
+ val neg_t = Logic.mk_implies (t, \<^prop>\<open>False\<close>)
val (nondef_ts, def_ts, _, _, _, _) =
Timeout.apply preproc_timeout (preprocess_formulas hol_ctxt [])
neg_t
--- a/src/HOL/Nitpick_Examples/Refute_Nits.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nitpick_Examples/Refute_Nits.thy Sat Jan 05 17:24:33 2019 +0100
@@ -408,7 +408,7 @@
nitpick [expect = genuine]
oops
-subsubsection \<open>@{const undefined}\<close>
+subsubsection \<open>\<^const>\<open>undefined\<close>\<close>
lemma "undefined"
nitpick [expect = genuine]
@@ -426,7 +426,7 @@
nitpick [expect = genuine]
oops
-subsubsection \<open>@{const The}\<close>
+subsubsection \<open>\<^const>\<open>The\<close>\<close>
lemma "The P"
nitpick [expect = genuine]
@@ -448,7 +448,7 @@
nitpick [expect = genuine]
oops
-subsubsection \<open>@{const Eps}\<close>
+subsubsection \<open>\<^const>\<open>Eps\<close>\<close>
lemma "Eps P"
nitpick [expect = genuine]
@@ -525,7 +525,7 @@
subsubsection \<open>Subtypes (typedef), typedecl\<close>
-text \<open>A completely unspecified non-empty subset of @{typ "'a"}:\<close>
+text \<open>A completely unspecified non-empty subset of \<^typ>\<open>'a\<close>:\<close>
definition "myTdef = insert (undefined::'a) (undefined::'a set)"
--- a/src/HOL/Nitpick_Examples/minipick.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nitpick_Examples/minipick.ML Sat Jan 05 17:24:33 2019 +0100
@@ -24,27 +24,27 @@
S_Rep |
R_Rep of bool
-fun check_type ctxt raw_infinite (Type (@{type_name fun}, Ts)) =
+fun check_type ctxt raw_infinite (Type (\<^type_name>\<open>fun\<close>, Ts)) =
List.app (check_type ctxt raw_infinite) Ts
- | check_type ctxt raw_infinite (Type (@{type_name prod}, Ts)) =
+ | check_type ctxt raw_infinite (Type (\<^type_name>\<open>prod\<close>, Ts)) =
List.app (check_type ctxt raw_infinite) Ts
- | check_type _ _ @{typ bool} = ()
- | check_type _ _ (TFree (_, @{sort "{}"})) = ()
- | check_type _ _ (TFree (_, @{sort HOL.type})) = ()
+ | check_type _ _ \<^typ>\<open>bool\<close> = ()
+ | check_type _ _ (TFree (_, \<^sort>\<open>{}\<close>)) = ()
+ | check_type _ _ (TFree (_, \<^sort>\<open>HOL.type\<close>)) = ()
| check_type ctxt raw_infinite T =
if raw_infinite T then
()
else
error ("Not supported: Type " ^ quote (Syntax.string_of_typ ctxt T) ^ ".")
-fun atom_schema_of S_Rep card (Type (@{type_name fun}, [T1, T2])) =
+fun atom_schema_of S_Rep card (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) =
replicate_list (card T1) (atom_schema_of S_Rep card T2)
| atom_schema_of (R_Rep true) card
- (Type (@{type_name fun}, [T1, @{typ bool}])) =
+ (Type (\<^type_name>\<open>fun\<close>, [T1, \<^typ>\<open>bool\<close>])) =
atom_schema_of S_Rep card T1
- | atom_schema_of (rep as R_Rep _) card (Type (@{type_name fun}, [T1, T2])) =
+ | atom_schema_of (rep as R_Rep _) card (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) =
atom_schema_of S_Rep card T1 @ atom_schema_of rep card T2
- | atom_schema_of _ card (Type (@{type_name prod}, Ts)) =
+ | atom_schema_of _ card (Type (\<^type_name>\<open>prod\<close>, Ts)) =
maps (atom_schema_of S_Rep card) Ts
| atom_schema_of _ card T = [card T]
val arity_of = length ooo atom_schema_of
@@ -79,7 +79,7 @@
fun S_rep_from_F NONE f = RelIf (f, true_atom, false_atom)
| S_rep_from_F (SOME true) f = RelIf (f, true_atom, None)
| S_rep_from_F (SOME false) f = RelIf (Not f, false_atom, None)
- fun R_rep_from_S_rep (Type (@{type_name fun}, [T1, T2])) r =
+ fun R_rep_from_S_rep (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) r =
if total andalso T2 = bool_T then
let
val jss = atom_schema_of S_Rep card T1 |> map (rpair 0)
@@ -109,12 +109,12 @@
|> foldl1 Union
end
| R_rep_from_S_rep _ r = r
- fun S_rep_from_R_rep Ts (T as Type (@{type_name fun}, _)) r =
+ fun S_rep_from_R_rep Ts (T as Type (\<^type_name>\<open>fun\<close>, _)) r =
Comprehension (decls_for S_Rep card Ts T,
RelEq (R_rep_from_S_rep T
(rel_expr_for_bound_var card S_Rep (T :: Ts) 0), r))
| S_rep_from_R_rep _ _ r = r
- fun partial_eq pos Ts (Type (@{type_name fun}, [T1, T2])) t1 t2 =
+ fun partial_eq pos Ts (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) t1 t2 =
HOLogic.mk_all ("x", T1,
HOLogic.eq_const T2 $ (incr_boundvars 1 t1 $ Bound 0)
$ (incr_boundvars 1 t2 $ Bound 0))
@@ -127,26 +127,26 @@
|> (if pos then Some o Intersect else Lone o Union)
and to_F pos Ts t =
(case t of
- @{const Not} $ t1 => Not (to_F (Option.map not pos) Ts t1)
- | @{const False} => False
- | @{const True} => True
- | Const (@{const_name All}, _) $ Abs (_, T, t') =>
+ \<^const>\<open>Not\<close> $ t1 => Not (to_F (Option.map not pos) Ts t1)
+ | \<^const>\<open>False\<close> => False
+ | \<^const>\<open>True\<close> => True
+ | Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, T, t') =>
if pos = SOME true andalso not (complete T) then False
else All (decls_for S_Rep card Ts T, to_F pos (T :: Ts) t')
- | (t0 as Const (@{const_name All}, _)) $ t1 =>
+ | (t0 as Const (\<^const_name>\<open>All\<close>, _)) $ t1 =>
to_F pos Ts (t0 $ eta_expand Ts t1 1)
- | Const (@{const_name Ex}, _) $ Abs (_, T, t') =>
+ | Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, T, t') =>
if pos = SOME false andalso not (complete T) then True
else Exist (decls_for S_Rep card Ts T, to_F pos (T :: Ts) t')
- | (t0 as Const (@{const_name Ex}, _)) $ t1 =>
+ | (t0 as Const (\<^const_name>\<open>Ex\<close>, _)) $ t1 =>
to_F pos Ts (t0 $ eta_expand Ts t1 1)
- | Const (@{const_name HOL.eq}, Type (_, [T, _])) $ t1 $ t2 =>
+ | Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [T, _])) $ t1 $ t2 =>
(case pos of
NONE => RelEq (to_R_rep Ts t1, to_R_rep Ts t2)
| SOME pos => partial_eq pos Ts T t1 t2)
- | Const (@{const_name ord_class.less_eq},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [T', @{typ bool}]), _]))
+ | Const (\<^const_name>\<open>ord_class.less_eq\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [T', \<^typ>\<open>bool\<close>]), _]))
$ t1 $ t2 =>
(case pos of
NONE => Subset (to_R_rep Ts t1, to_R_rep Ts t2)
@@ -158,11 +158,11 @@
Subset (Join (to_R_rep Ts t1, true_atom),
Difference (atom_seq_product_of S_Rep card T',
Join (to_R_rep Ts t2, false_atom))))
- | @{const HOL.conj} $ t1 $ t2 => And (to_F pos Ts t1, to_F pos Ts t2)
- | @{const HOL.disj} $ t1 $ t2 => Or (to_F pos Ts t1, to_F pos Ts t2)
- | @{const HOL.implies} $ t1 $ t2 =>
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 => And (to_F pos Ts t1, to_F pos Ts t2)
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 => Or (to_F pos Ts t1, to_F pos Ts t2)
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 =>
Implies (to_F (Option.map not pos) Ts t1, to_F pos Ts t2)
- | Const (@{const_name Set.member}, _) $ t1 $ t2 => to_F pos Ts (t2 $ t1)
+ | Const (\<^const_name>\<open>Set.member\<close>, _) $ t1 $ t2 => to_F pos Ts (t2 $ t1)
| t1 $ t2 =>
(case pos of
NONE => Subset (to_S_rep Ts t2, to_R_rep Ts t1)
@@ -181,22 +181,22 @@
handle SAME () => F_from_S_rep pos (to_R_rep Ts t)
and to_S_rep Ts t =
case t of
- Const (@{const_name Pair}, _) $ t1 $ t2 =>
+ Const (\<^const_name>\<open>Pair\<close>, _) $ t1 $ t2 =>
Product (to_S_rep Ts t1, to_S_rep Ts t2)
- | Const (@{const_name Pair}, _) $ _ => to_S_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name Pair}, _) => to_S_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name fst}, _) $ t1 =>
+ | Const (\<^const_name>\<open>Pair\<close>, _) $ _ => to_S_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>Pair\<close>, _) => to_S_rep Ts (eta_expand Ts t 2)
+ | Const (\<^const_name>\<open>fst\<close>, _) $ t1 =>
let val fst_arity = arity_of S_Rep card (fastype_of1 (Ts, t)) in
Project (to_S_rep Ts t1, num_seq 0 fst_arity)
end
- | Const (@{const_name fst}, _) => to_S_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name snd}, _) $ t1 =>
+ | Const (\<^const_name>\<open>fst\<close>, _) => to_S_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>snd\<close>, _) $ t1 =>
let
val pair_arity = arity_of S_Rep card (fastype_of1 (Ts, t1))
val snd_arity = arity_of S_Rep card (fastype_of1 (Ts, t))
val fst_arity = pair_arity - snd_arity
in Project (to_S_rep Ts t1, num_seq fst_arity snd_arity) end
- | Const (@{const_name snd}, _) => to_S_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>snd\<close>, _) => to_S_rep Ts (eta_expand Ts t 1)
| Bound j => rel_expr_for_bound_var card S_Rep Ts j
| _ => S_rep_from_R_rep Ts (fastype_of1 (Ts, t)) (to_R_rep Ts t)
and partial_set_op swap1 swap2 op1 op2 Ts t1 t2 =
@@ -211,37 +211,37 @@
end
and to_R_rep Ts t =
(case t of
- @{const Not} => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name All}, _) => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name Ex}, _) => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name HOL.eq}, _) $ _ => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name HOL.eq}, _) => to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name ord_class.less_eq},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _])) $ _ =>
+ \<^const>\<open>Not\<close> => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>All\<close>, _) => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>Ex\<close>, _) => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>HOL.eq\<close>, _) => to_R_rep Ts (eta_expand Ts t 2)
+ | Const (\<^const_name>\<open>ord_class.less_eq\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _])) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name ord_class.less_eq}, _) =>
+ | Const (\<^const_name>\<open>ord_class.less_eq\<close>, _) =>
to_R_rep Ts (eta_expand Ts t 2)
- | @{const HOL.conj} $ _ => to_R_rep Ts (eta_expand Ts t 1)
- | @{const HOL.conj} => to_R_rep Ts (eta_expand Ts t 2)
- | @{const HOL.disj} $ _ => to_R_rep Ts (eta_expand Ts t 1)
- | @{const HOL.disj} => to_R_rep Ts (eta_expand Ts t 2)
- | @{const HOL.implies} $ _ => to_R_rep Ts (eta_expand Ts t 1)
- | @{const HOL.implies} => to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name Set.member}, _) $ _ =>
+ | \<^const>\<open>HOL.conj\<close> $ _ => to_R_rep Ts (eta_expand Ts t 1)
+ | \<^const>\<open>HOL.conj\<close> => to_R_rep Ts (eta_expand Ts t 2)
+ | \<^const>\<open>HOL.disj\<close> $ _ => to_R_rep Ts (eta_expand Ts t 1)
+ | \<^const>\<open>HOL.disj\<close> => to_R_rep Ts (eta_expand Ts t 2)
+ | \<^const>\<open>HOL.implies\<close> $ _ => to_R_rep Ts (eta_expand Ts t 1)
+ | \<^const>\<open>HOL.implies\<close> => to_R_rep Ts (eta_expand Ts t 2)
+ | Const (\<^const_name>\<open>Set.member\<close>, _) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name Set.member}, _) => to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name Collect}, _) $ t' => to_R_rep Ts t'
- | Const (@{const_name Collect}, _) => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name bot_class.bot},
- T as Type (@{type_name fun}, [T', @{typ bool}])) =>
+ | Const (\<^const_name>\<open>Set.member\<close>, _) => to_R_rep Ts (eta_expand Ts t 2)
+ | Const (\<^const_name>\<open>Collect\<close>, _) $ t' => to_R_rep Ts t'
+ | Const (\<^const_name>\<open>Collect\<close>, _) => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>bot_class.bot\<close>,
+ T as Type (\<^type_name>\<open>fun\<close>, [T', \<^typ>\<open>bool\<close>])) =>
if total then empty_n_ary_rel (arity_of (R_Rep total) card T)
else Product (atom_seq_product_of (R_Rep total) card T', false_atom)
- | Const (@{const_name top_class.top},
- T as Type (@{type_name fun}, [T', @{typ bool}])) =>
+ | Const (\<^const_name>\<open>top_class.top\<close>,
+ T as Type (\<^type_name>\<open>fun\<close>, [T', \<^typ>\<open>bool\<close>])) =>
if total then atom_seq_product_of (R_Rep total) card T
else Product (atom_seq_product_of (R_Rep total) card T', true_atom)
- | Const (@{const_name insert}, Type (_, [T, _])) $ t1 $ t2 =>
+ | Const (\<^const_name>\<open>insert\<close>, Type (_, [T, _])) $ t1 $ t2 =>
if total then
Union (to_S_rep Ts t1, to_R_rep Ts t2)
else
@@ -258,9 +258,9 @@
Difference (kt2, Product (atom_seq_product_of S_Rep card T,
false_atom)))
end
- | Const (@{const_name insert}, _) $ _ => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name insert}, _) => to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name trancl},
+ | Const (\<^const_name>\<open>insert\<close>, _) $ _ => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>insert\<close>, _) => to_R_rep Ts (eta_expand Ts t 2)
+ | Const (\<^const_name>\<open>trancl\<close>,
Type (_, [Type (_, [Type (_, [T', _]), _]), _])) $ t1 =>
if arity_of S_Rep card T' = 1 then
if total then
@@ -281,57 +281,57 @@
end
else
error "Not supported: Transitive closure for function or pair type."
- | Const (@{const_name trancl}, _) => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name inf_class.inf},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _]))
+ | Const (\<^const_name>\<open>trancl\<close>, _) => to_R_rep Ts (eta_expand Ts t 1)
+ | Const (\<^const_name>\<open>inf_class.inf\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _]))
$ t1 $ t2 =>
if total then Intersect (to_R_rep Ts t1, to_R_rep Ts t2)
else partial_set_op true true Intersect Union Ts t1 t2
- | Const (@{const_name inf_class.inf}, _) $ _ =>
+ | Const (\<^const_name>\<open>inf_class.inf\<close>, _) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name inf_class.inf}, _) =>
+ | Const (\<^const_name>\<open>inf_class.inf\<close>, _) =>
to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name sup_class.sup},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _]))
+ | Const (\<^const_name>\<open>sup_class.sup\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _]))
$ t1 $ t2 =>
if total then Union (to_R_rep Ts t1, to_R_rep Ts t2)
else partial_set_op true true Union Intersect Ts t1 t2
- | Const (@{const_name sup_class.sup}, _) $ _ =>
+ | Const (\<^const_name>\<open>sup_class.sup\<close>, _) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name sup_class.sup}, _) =>
+ | Const (\<^const_name>\<open>sup_class.sup\<close>, _) =>
to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name minus_class.minus},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _]))
+ | Const (\<^const_name>\<open>minus_class.minus\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _]))
$ t1 $ t2 =>
if total then Difference (to_R_rep Ts t1, to_R_rep Ts t2)
else partial_set_op true false Intersect Union Ts t1 t2
- | Const (@{const_name minus_class.minus},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _])) $ _ =>
+ | Const (\<^const_name>\<open>minus_class.minus\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _])) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name minus_class.minus},
- Type (@{type_name fun},
- [Type (@{type_name fun}, [_, @{typ bool}]), _])) =>
+ | Const (\<^const_name>\<open>minus_class.minus\<close>,
+ Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>fun\<close>, [_, \<^typ>\<open>bool\<close>]), _])) =>
to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name Pair}, _) $ _ $ _ => to_S_rep Ts t
- | Const (@{const_name Pair}, _) $ _ => to_S_rep Ts t
- | Const (@{const_name Pair}, _) => to_S_rep Ts t
- | Const (@{const_name fst}, _) $ _ => raise SAME ()
- | Const (@{const_name fst}, _) => raise SAME ()
- | Const (@{const_name snd}, _) $ _ => raise SAME ()
- | Const (@{const_name snd}, _) => raise SAME ()
- | @{const False} => false_atom
- | @{const True} => true_atom
+ | Const (\<^const_name>\<open>Pair\<close>, _) $ _ $ _ => to_S_rep Ts t
+ | Const (\<^const_name>\<open>Pair\<close>, _) $ _ => to_S_rep Ts t
+ | Const (\<^const_name>\<open>Pair\<close>, _) => to_S_rep Ts t
+ | Const (\<^const_name>\<open>fst\<close>, _) $ _ => raise SAME ()
+ | Const (\<^const_name>\<open>fst\<close>, _) => raise SAME ()
+ | Const (\<^const_name>\<open>snd\<close>, _) $ _ => raise SAME ()
+ | Const (\<^const_name>\<open>snd\<close>, _) => raise SAME ()
+ | \<^const>\<open>False\<close> => false_atom
+ | \<^const>\<open>True\<close> => true_atom
| Free (x as (_, T)) =>
Rel (arity_of (R_Rep total) card T, find_index (curry (op =) x) frees)
| Term.Var _ => error "Not supported: Schematic variables."
| Bound _ => raise SAME ()
| Abs (_, T, t') =>
(case (total, fastype_of1 (T :: Ts, t')) of
- (true, @{typ bool}) =>
+ (true, \<^typ>\<open>bool\<close>) =>
Comprehension (decls_for S_Rep card Ts T, to_F NONE (T :: Ts) t')
| (_, T') =>
Comprehension (decls_for S_Rep card Ts T @
@@ -341,7 +341,7 @@
to_R_rep (T :: Ts) t')))
| t1 $ t2 =>
(case fastype_of1 (Ts, t) of
- @{typ bool} =>
+ \<^typ>\<open>bool\<close> =>
if total then
S_rep_from_F NONE (to_F NONE Ts t)
else
@@ -374,7 +374,7 @@
end
fun declarative_axiom_for_rel_expr total card Ts
- (Type (@{type_name fun}, [T1, T2])) r =
+ (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) r =
if total andalso body_type T2 = bool_T then
True
else
@@ -388,28 +388,28 @@
(Rel (arity_of (R_Rep total) card T, i))
(* Hack to make the old code work as is with sets. *)
-fun unsetify_type (Type (@{type_name set}, [T])) = unsetify_type T --> bool_T
+fun unsetify_type (Type (\<^type_name>\<open>set\<close>, [T])) = unsetify_type T --> bool_T
| unsetify_type (Type (s, Ts)) = Type (s, map unsetify_type Ts)
| unsetify_type T = T
fun kodkod_problem_from_term ctxt total raw_card raw_infinite t =
let
val thy = Proof_Context.theory_of ctxt
- fun card (Type (@{type_name fun}, [T1, T2])) =
+ fun card (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) =
reasonable_power (card T2) (card T1)
- | card (Type (@{type_name prod}, [T1, T2])) = card T1 * card T2
- | card @{typ bool} = 2
+ | card (Type (\<^type_name>\<open>prod\<close>, [T1, T2])) = card T1 * card T2
+ | card \<^typ>\<open>bool\<close> = 2
| card T = Int.max (1, raw_card T)
- fun complete (Type (@{type_name fun}, [T1, T2])) =
+ fun complete (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) =
concrete T1 andalso complete T2
- | complete (Type (@{type_name prod}, Ts)) = forall complete Ts
+ | complete (Type (\<^type_name>\<open>prod\<close>, Ts)) = forall complete Ts
| complete T = not (raw_infinite T)
- and concrete (Type (@{type_name fun}, [T1, T2])) =
+ and concrete (Type (\<^type_name>\<open>fun\<close>, [T1, T2])) =
complete T1 andalso concrete T2
- | concrete (Type (@{type_name prod}, Ts)) = forall concrete Ts
+ | concrete (Type (\<^type_name>\<open>prod\<close>, Ts)) = forall concrete Ts
| concrete _ = true
val neg_t =
- @{const Not} $ Object_Logic.atomize_term ctxt t
+ \<^const>\<open>Not\<close> $ Object_Logic.atomize_term ctxt t
|> map_types unsetify_type
val _ = fold_types (K o check_type ctxt raw_infinite) neg_t ()
val frees = Term.add_frees neg_t []
@@ -445,7 +445,7 @@
| Error (s, _) => error ("Kodkod error: " ^ s)
end
-val default_raw_infinite = member (op =) [@{typ nat}, @{typ int}]
+val default_raw_infinite = member (op =) [\<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close>]
fun minipick ctxt n t =
let
--- a/src/HOL/Nominal/Examples/Crary.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/Examples/Crary.thy Sat Jan 05 17:24:33 2019 +0100
@@ -957,7 +957,7 @@
text \<open>We leave soundness as an exercise - just like Crary in the ATS book :-) \\
@{prop[mode=IfThen] "\<lbrakk>\<Gamma> \<turnstile> s \<Leftrightarrow> t : T; \<Gamma> \<turnstile> t : T; \<Gamma> \<turnstile> s : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s \<equiv> t : T"} \\
- @{prop "\<lbrakk>\<Gamma> \<turnstile> s \<leftrightarrow> t : T; \<Gamma> \<turnstile> t : T; \<Gamma> \<turnstile> s : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s \<equiv> t : T"}
+ \<^prop>\<open>\<lbrakk>\<Gamma> \<turnstile> s \<leftrightarrow> t : T; \<Gamma> \<turnstile> t : T; \<Gamma> \<turnstile> s : T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s \<equiv> t : T\<close>
\<close>
end
--- a/src/HOL/Nominal/Examples/Fsub.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/Examples/Fsub.thy Sat Jan 05 17:24:33 2019 +0100
@@ -49,7 +49,7 @@
text \<open>To be polite to the eye, some more familiar notation is introduced.
Because of the change in the order of arguments, one needs to use
translation rules, instead of syntax annotations at the term-constructors
- as given above for @{term "Arrow"}.\<close>
+ as given above for \<^term>\<open>Arrow\<close>.\<close>
abbreviation
Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty" ("(3\<forall>_<:_./ _)" [0, 0, 10] 10)
@@ -66,12 +66,12 @@
where
"\<lambda>X<:T. t \<equiv> trm.TAbs X t T"
-text \<open>Again there are numerous facts that are proved automatically for @{typ "ty"}
- and @{typ "trm"}: for example that the set of free variables, i.e.~the \<open>support\<close>,
+text \<open>Again there are numerous facts that are proved automatically for \<^typ>\<open>ty\<close>
+ and \<^typ>\<open>trm\<close>: for example that the set of free variables, i.e.~the \<open>support\<close>,
is finite. However note that nominal-datatype declarations do \emph{not} define
``classical" constructor-based datatypes, but rather define $\alpha$-equivalence
- classes---we can for example show that $\alpha$-equivalent @{typ "ty"}s
- and @{typ "trm"}s are equal:\<close>
+ classes---we can for example show that $\alpha$-equivalent \<^typ>\<open>ty\<close>s
+ and \<^typ>\<open>trm\<close>s are equal:\<close>
lemma alpha_illustration:
shows "(\<forall>X<:T. Tvar X) = (\<forall>Y<:T. Tvar Y)"
@@ -218,11 +218,11 @@
apply(auto simp add: fresh_prod fresh_list_cons tyvrs_fresh)
done
-text \<open>Not all lists of type @{typ "env"} are well-formed. One condition
- requires that in @{term "TVarB X S#\<Gamma>"} all free variables of @{term "S"} must be
- in the @{term "ty_dom"} of @{term "\<Gamma>"}, that is @{term "S"} must be \<open>closed\<close>
- in @{term "\<Gamma>"}. The set of free variables of @{term "S"} is the
- \<open>support\<close> of @{term "S"}.\<close>
+text \<open>Not all lists of type \<^typ>\<open>env\<close> are well-formed. One condition
+ requires that in \<^term>\<open>TVarB X S#\<Gamma>\<close> all free variables of \<^term>\<open>S\<close> must be
+ in the \<^term>\<open>ty_dom\<close> of \<^term>\<open>\<Gamma>\<close>, that is \<^term>\<open>S\<close> must be \<open>closed\<close>
+ in \<^term>\<open>\<Gamma>\<close>. The set of free variables of \<^term>\<open>S\<close> is the
+ \<open>support\<close> of \<^term>\<open>S\<close>.\<close>
definition "closed_in" :: "ty \<Rightarrow> env \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100) where
"S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(ty_dom \<Gamma>)"
@@ -594,7 +594,7 @@
text \<open>The definition for the subtyping-relation follows quite closely what is written
in the POPLmark-paper, except for the premises dealing with well-formed contexts and
- the freshness constraint @{term "X\<sharp>\<Gamma>"} in the \<open>S_Forall\<close>-rule. (The freshness
+ the freshness constraint \<^term>\<open>X\<sharp>\<Gamma>\<close> in the \<open>S_Forall\<close>-rule. (The freshness
constraint is specific to the \emph{nominal approach}. Note, however, that the constraint
does \emph{not} make the subtyping-relation ``partial"\ldots because we work over
$\alpha$-equivalence classes.)\<close>
@@ -846,28 +846,28 @@
\begin{quote}
\begin{lemma}[Transitivity and Narrowing] \
\begin{enumerate}
-\item If @{term "\<Gamma> \<turnstile> S<:Q"} and @{term "\<Gamma> \<turnstile> Q<:T"}, then @{term "\<Gamma> \<turnstile> S<:T"}.
-\item If \<open>\<Gamma>,X<:Q,\<Delta> \<turnstile> M<:N\<close> and @{term "\<Gamma> \<turnstile> P<:Q"} then \<open>\<Gamma>,X<:P,\<Delta> \<turnstile> M<:N\<close>.
+\item If \<^term>\<open>\<Gamma> \<turnstile> S<:Q\<close> and \<^term>\<open>\<Gamma> \<turnstile> Q<:T\<close>, then \<^term>\<open>\<Gamma> \<turnstile> S<:T\<close>.
+\item If \<open>\<Gamma>,X<:Q,\<Delta> \<turnstile> M<:N\<close> and \<^term>\<open>\<Gamma> \<turnstile> P<:Q\<close> then \<open>\<Gamma>,X<:P,\<Delta> \<turnstile> M<:N\<close>.
\end{enumerate}
\end{lemma}
The two parts are proved simultaneously, by induction on the size
-of @{term "Q"}. The argument for part (2) assumes that part (1) has
-been established already for the @{term "Q"} in question; part (1) uses
-part (2) only for strictly smaller @{term "Q"}.
+of \<^term>\<open>Q\<close>. The argument for part (2) assumes that part (1) has
+been established already for the \<^term>\<open>Q\<close> in question; part (1) uses
+part (2) only for strictly smaller \<^term>\<open>Q\<close>.
\end{quote}
-For the induction on the size of @{term "Q"}, we use the induction-rule
+For the induction on the size of \<^term>\<open>Q\<close>, we use the induction-rule
\<open>measure_induct_rule\<close>:
\begin{center}
@{thm measure_induct_rule[of "size_ty",no_vars]}
\end{center}
-That means in order to show a property @{term "P a"} for all @{term "a"},
-the induct-rule requires to prove that for all @{term x} @{term "P x"} holds using the
-assumption that for all @{term y} whose size is strictly smaller than
-that of @{term x} the property @{term "P y"} holds.\<close>
+That means in order to show a property \<^term>\<open>P a\<close> for all \<^term>\<open>a\<close>,
+the induct-rule requires to prove that for all \<^term>\<open>x\<close> \<^term>\<open>P x\<close> holds using the
+assumption that for all \<^term>\<open>y\<close> whose size is strictly smaller than
+that of \<^term>\<open>x\<close> the property \<^term>\<open>P y\<close> holds.\<close>
lemma
shows subtype_transitivity: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T"
--- a/src/HOL/Nominal/Examples/Standardization.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/Examples/Standardization.thy Sat Jan 05 17:24:33 2019 +0100
@@ -682,7 +682,7 @@
qed
text \<open>
-@{term NF} characterizes exactly the terms that are in normal form.
+\<^term>\<open>NF\<close> characterizes exactly the terms that are in normal form.
\<close>
lemma NF_eq: "NF t = (\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t')"
--- a/src/HOL/Nominal/Nominal.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/Nominal.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
(* polymorphic constants for permutation and swapping *)
consts
- perm :: "'x prm \<Rightarrow> 'a \<Rightarrow> 'a" (infixr "\<bullet>" 80)
+ perm :: "'x prm \<Rightarrow> 'a \<Rightarrow> 'a" (infixr \<open>\<bullet>\<close> 80)
swap :: "('x \<times> 'x) \<Rightarrow> 'x \<Rightarrow> 'x"
(* a "private" copy of the option type used in the abstraction function *)
@@ -187,7 +187,7 @@
section \<open>permutation equality\<close>
(*==============================*)
-definition prm_eq :: "'x prm \<Rightarrow> 'x prm \<Rightarrow> bool" (" _ \<triangleq> _ " [80,80] 80) where
+definition prm_eq :: "'x prm \<Rightarrow> 'x prm \<Rightarrow> bool" (\<open> _ \<triangleq> _ \<close> [80,80] 80) where
"pi1 \<triangleq> pi2 \<longleftrightarrow> (\<forall>a::'x. pi1\<bullet>a = pi2\<bullet>a)"
section \<open>Support, Freshness and Supports\<close>
@@ -195,10 +195,10 @@
definition supp :: "'a \<Rightarrow> ('x set)" where
"supp x = {a . (infinite {b . [(a,b)]\<bullet>x \<noteq> x})}"
-definition fresh :: "'x \<Rightarrow> 'a \<Rightarrow> bool" ("_ \<sharp> _" [80,80] 80) where
+definition fresh :: "'x \<Rightarrow> 'a \<Rightarrow> bool" (\<open>_ \<sharp> _\<close> [80,80] 80) where
"a \<sharp> x \<longleftrightarrow> a \<notin> supp x"
-definition supports :: "'x set \<Rightarrow> 'a \<Rightarrow> bool" (infixl "supports" 80) where
+definition supports :: "'x set \<Rightarrow> 'a \<Rightarrow> bool" (infixl \<open>supports\<close> 80) where
"S supports x \<longleftrightarrow> (\<forall>a b. (a\<notin>S \<and> b\<notin>S \<longrightarrow> [(a,b)]\<bullet>x=x))"
(* lemmas about supp *)
@@ -386,7 +386,7 @@
by (simp_all add: fresh_prod)
ML \<open>
- val mksimps_pairs = (@{const_name Nominal.fresh}, @{thms fresh_prodD}) :: mksimps_pairs;
+ val mksimps_pairs = (\<^const_name>\<open>Nominal.fresh\<close>, @{thms fresh_prodD}) :: mksimps_pairs;
\<close>
declaration \<open>fn _ =>
Simplifier.map_ss (Simplifier.set_mksimps (mksimps mksimps_pairs))
@@ -2414,7 +2414,7 @@
(*================================================================*)
consts
- fresh_star :: "'b \<Rightarrow> 'a \<Rightarrow> bool" ("_ \<sharp>* _" [100,100] 100)
+ fresh_star :: "'b \<Rightarrow> 'a \<Rightarrow> bool" (\<open>_ \<sharp>* _\<close> [100,100] 100)
overloading fresh_star_set \<equiv> "fresh_star :: 'b set \<Rightarrow> 'a \<Rightarrow> bool"
begin
@@ -2951,7 +2951,7 @@
shows "pt TYPE('x\<Rightarrow>('a noption)) TYPE('x)"
by (rule pt_fun_inst[OF at_pt_inst[OF at],OF pt_noption_inst[OF pt],OF at])
-definition abs_fun :: "'x\<Rightarrow>'a\<Rightarrow>('x\<Rightarrow>('a noption))" ("[_]._" [100,100] 100) where
+definition abs_fun :: "'x\<Rightarrow>'a\<Rightarrow>('x\<Rightarrow>('a noption))" (\<open>[_]._\<close> [100,100] 100) where
"[a].x \<equiv> (\<lambda>b. (if b=a then nSome(x) else (if b\<sharp>x then nSome([(a,b)]\<bullet>x) else nNone)))"
(* FIXME: should be called perm_if and placed close to the definition of permutations on bools *)
@@ -3385,7 +3385,7 @@
definition "ABS = ABS_set"
-typedef ('x, 'a) ABS ("\<guillemotleft>_\<guillemotright>_" [1000,1000] 1000) =
+typedef ('x, 'a) ABS (\<open>\<guillemotleft>_\<guillemotright>_\<close> [1000,1000] 1000) =
"ABS::('x\<Rightarrow>('a noption)) set"
morphisms Rep_ABS Abs_ABS
unfolding ABS_def
--- a/src/HOL/Nominal/nominal_atoms.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_atoms.ML Sat Jan 05 17:24:33 2019 +0100
@@ -86,7 +86,7 @@
fun mk_Cons x xs =
let val T = fastype_of x
- in Const (@{const_name Cons}, T --> HOLogic.listT T --> HOLogic.listT T) $ x $ xs end;
+ in Const (\<^const_name>\<open>Cons\<close>, T --> HOLogic.listT T --> HOLogic.listT T) $ x $ xs end;
fun add_thms_string args = Global_Theory.add_thms ((map o apfst o apfst) Binding.name args);
fun add_thmss_string args = Global_Theory.add_thmss ((map o apfst o apfst) Binding.name args);
@@ -99,20 +99,20 @@
val (_,thy1) =
fold_map (fn ak => fn thy =>
- let val dt = ((Binding.name ak, [], NoSyn), [(Binding.name ak, [@{typ nat}], NoSyn)])
+ let val dt = ((Binding.name ak, [], NoSyn), [(Binding.name ak, [\<^typ>\<open>nat\<close>], NoSyn)])
val (dt_names, thy1) = BNF_LFP_Compat.add_datatype [BNF_LFP_Compat.Kill_Type_Args] [dt] thy;
val injects = maps (#inject o BNF_LFP_Compat.the_info thy1 []) dt_names;
val ak_type = Type (Sign.intern_type thy1 ak,[])
val ak_sign = Sign.intern_const thy1 ak
- val inj_type = @{typ nat} --> ak_type
- val inj_on_type = inj_type --> @{typ "nat set"} --> @{typ bool}
+ val inj_type = \<^typ>\<open>nat\<close> --> ak_type
+ val inj_on_type = inj_type --> \<^typ>\<open>nat set\<close> --> \<^typ>\<open>bool\<close>
(* first statement *)
val stmnt1 = HOLogic.mk_Trueprop
- (Const (@{const_name "inj_on"},inj_on_type) $
- Const (ak_sign,inj_type) $ HOLogic.mk_UNIV @{typ nat})
+ (Const (\<^const_name>\<open>inj_on\<close>,inj_on_type) $
+ Const (ak_sign,inj_type) $ HOLogic.mk_UNIV \<^typ>\<open>nat\<close>)
val simp1 = @{thm inj_on_def} :: injects;
@@ -128,7 +128,7 @@
(* second statement *)
val y = Free ("y",ak_type)
val stmnt2 = HOLogic.mk_Trueprop
- (HOLogic.mk_exists ("x",@{typ nat},HOLogic.mk_eq (y,Const (ak_sign,inj_type) $ Bound 0)))
+ (HOLogic.mk_exists ("x",\<^typ>\<open>nat\<close>,HOLogic.mk_eq (y,Const (ak_sign,inj_type) $ Bound 0)))
val proof2 = fn {prems, context = ctxt} =>
Induct_Tacs.case_tac ctxt "y" [] NONE 1 THEN
@@ -142,7 +142,7 @@
val stmnt3 = HOLogic.mk_Trueprop
(HOLogic.mk_not
- (Const (@{const_name finite}, HOLogic.mk_setT ak_type --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT ak_type --> HOLogic.boolT) $
HOLogic.mk_UNIV ak_type))
val simp2 = [@{thm image_def},@{thm bex_UNIV}]@inject_thm
@@ -179,9 +179,9 @@
val b = Free ("b", T);
val c = Free ("c", T);
val ab = Free ("ab", HOLogic.mk_prodT (T, T))
- val cif = Const (@{const_name If}, HOLogic.boolT --> T --> T --> T);
+ val cif = Const (\<^const_name>\<open>If\<close>, HOLogic.boolT --> T --> T --> T);
val cswap_akname = Const (full_swap_name, swapT);
- val cswap = Const (@{const_name Nominal.swap}, swapT)
+ val cswap = Const (\<^const_name>\<open>Nominal.swap\<close>, swapT)
val name = swap_name ^ "_def";
val def1 = HOLogic.mk_Trueprop (HOLogic.mk_eq
@@ -215,7 +215,7 @@
val xs = Free ("xs", mk_permT T);
val a = Free ("a", T) ;
- val cnil = Const (@{const_name Nil}, mk_permT T);
+ val cnil = Const (\<^const_name>\<open>Nil\<close>, mk_permT T);
val def1 = HOLogic.mk_Trueprop (HOLogic.mk_eq (prm $ cnil $ a, a));
@@ -245,7 +245,7 @@
val perm_def_name = ak_name ^ "_prm_" ^ ak_name';
val pi = Free ("pi", mk_permT T);
val a = Free ("a", T');
- val cperm = Const (@{const_name Nominal.perm}, mk_permT T --> T' --> T');
+ val cperm = Const (\<^const_name>\<open>Nominal.perm\<close>, mk_permT T --> T' --> T');
val thy'' = Sign.add_path "rec" thy'
val cperm_def = Const (Sign.full_bname thy'' perm_def_name, mk_permT T --> T' --> T');
val thy''' = Sign.parent_path thy'';
@@ -265,7 +265,7 @@
let
val ak_name_qu = Sign.full_bname thy5 (ak_name);
val i_type = Type(ak_name_qu,[]);
- val cat = Const (@{const_name Nominal.at}, Term.itselfT i_type --> HOLogic.boolT);
+ val cat = Const (\<^const_name>\<open>Nominal.at\<close>, Term.itselfT i_type --> HOLogic.boolT);
val at_type = Logic.mk_type i_type;
fun proof ctxt =
simp_tac (put_simpset HOL_ss ctxt
@@ -290,14 +290,14 @@
val (pt_ax_classes,thy7) = fold_map (fn (ak_name, T) => fn thy =>
let
val cl_name = "pt_"^ak_name;
- val ty = TFree("'a", @{sort type});
+ val ty = TFree("'a", \<^sort>\<open>type\<close>);
val x = Free ("x", ty);
val pi1 = Free ("pi1", mk_permT T);
val pi2 = Free ("pi2", mk_permT T);
- val cperm = Const (@{const_name Nominal.perm}, mk_permT T --> ty --> ty);
- val cnil = Const (@{const_name Nil}, mk_permT T);
- val cappend = Const (@{const_name append}, mk_permT T --> mk_permT T --> mk_permT T);
- val cprm_eq = Const (@{const_name Nominal.prm_eq}, mk_permT T --> mk_permT T --> HOLogic.boolT);
+ val cperm = Const (\<^const_name>\<open>Nominal.perm\<close>, mk_permT T --> ty --> ty);
+ val cnil = Const (\<^const_name>\<open>Nil\<close>, mk_permT T);
+ val cappend = Const (\<^const_name>\<open>append\<close>, mk_permT T --> mk_permT T --> mk_permT T);
+ val cprm_eq = Const (\<^const_name>\<open>Nominal.prm_eq\<close>, mk_permT T --> mk_permT T --> HOLogic.boolT);
(* nil axiom *)
val axiom1 = HOLogic.mk_Trueprop (HOLogic.mk_eq
(cperm $ cnil $ x, x));
@@ -309,7 +309,7 @@
(HOLogic.mk_Trueprop (cprm_eq $ pi1 $ pi2),
HOLogic.mk_Trueprop (HOLogic.mk_eq (cperm $ pi1 $ x, cperm $ pi2 $ x)));
in
- Axclass.define_class (Binding.name cl_name, @{sort type}) []
+ Axclass.define_class (Binding.name cl_name, \<^sort>\<open>type\<close>) []
[((Binding.name (cl_name ^ "1"), [Simplifier.simp_add]), [axiom1]),
((Binding.name (cl_name ^ "2"), []), [axiom2]),
((Binding.name (cl_name ^ "3"), []), [axiom3])] thy
@@ -327,7 +327,7 @@
val i_type1 = TFree("'x",[pt_name_qu]);
val i_type2 = Type(ak_name_qu,[]);
val cpt =
- Const (@{const_name Nominal.pt}, (Term.itselfT i_type1)-->(Term.itselfT i_type2)-->HOLogic.boolT);
+ Const (\<^const_name>\<open>Nominal.pt\<close>, (Term.itselfT i_type1)-->(Term.itselfT i_type2)-->HOLogic.boolT);
val pt_type = Logic.mk_type i_type1;
val at_type = Logic.mk_type i_type2;
fun proof ctxt =
@@ -350,10 +350,10 @@
let
val cl_name = "fs_"^ak_name;
val pt_name = Sign.full_bname thy ("pt_"^ak_name);
- val ty = TFree("'a",@{sort type});
+ val ty = TFree("'a",\<^sort>\<open>type\<close>);
val x = Free ("x", ty);
- val csupp = Const (@{const_name Nominal.supp}, ty --> HOLogic.mk_setT T);
- val cfinite = Const (@{const_name finite}, HOLogic.mk_setT T --> HOLogic.boolT)
+ val csupp = Const (\<^const_name>\<open>Nominal.supp\<close>, ty --> HOLogic.mk_setT T);
+ val cfinite = Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT T --> HOLogic.boolT)
val axiom1 = HOLogic.mk_Trueprop (cfinite $ (csupp $ x));
@@ -373,7 +373,7 @@
val fs_name_qu = Sign.full_bname thy11 ("fs_"^ak_name);
val i_type1 = TFree("'x",[fs_name_qu]);
val i_type2 = Type(ak_name_qu,[]);
- val cfs = Const (@{const_name Nominal.fs},
+ val cfs = Const (\<^const_name>\<open>Nominal.fs\<close>,
(Term.itselfT i_type1)-->(Term.itselfT i_type2)-->HOLogic.boolT);
val fs_type = Logic.mk_type i_type1;
val at_type = Logic.mk_type i_type2;
@@ -395,19 +395,19 @@
fold_map (fn (ak_name', T') => fn thy' =>
let
val cl_name = "cp_"^ak_name^"_"^ak_name';
- val ty = TFree("'a",@{sort type});
+ val ty = TFree("'a",\<^sort>\<open>type\<close>);
val x = Free ("x", ty);
val pi1 = Free ("pi1", mk_permT T);
val pi2 = Free ("pi2", mk_permT T');
- val cperm1 = Const (@{const_name Nominal.perm}, mk_permT T --> ty --> ty);
- val cperm2 = Const (@{const_name Nominal.perm}, mk_permT T' --> ty --> ty);
- val cperm3 = Const (@{const_name Nominal.perm}, mk_permT T --> mk_permT T' --> mk_permT T');
+ val cperm1 = Const (\<^const_name>\<open>Nominal.perm\<close>, mk_permT T --> ty --> ty);
+ val cperm2 = Const (\<^const_name>\<open>Nominal.perm\<close>, mk_permT T' --> ty --> ty);
+ val cperm3 = Const (\<^const_name>\<open>Nominal.perm\<close>, mk_permT T --> mk_permT T' --> mk_permT T');
val ax1 = HOLogic.mk_Trueprop
(HOLogic.mk_eq (cperm1 $ pi1 $ (cperm2 $ pi2 $ x),
cperm2 $ (cperm3 $ pi1 $ pi2) $ (cperm1 $ pi1 $ x)));
in
- Axclass.define_class (Binding.name cl_name, @{sort type}) []
+ Axclass.define_class (Binding.name cl_name, \<^sort>\<open>type\<close>) []
[((Binding.name (cl_name ^ "1"), []), [ax1])] thy'
end) ak_names_types thy) ak_names_types thy12;
@@ -423,7 +423,7 @@
val i_type0 = TFree("'a",[cp_name_qu]);
val i_type1 = Type(ak_name_qu,[]);
val i_type2 = Type(ak_name_qu',[]);
- val ccp = Const (@{const_name Nominal.cp},
+ val ccp = Const (\<^const_name>\<open>Nominal.cp\<close>,
(Term.itselfT i_type0)-->(Term.itselfT i_type1)-->
(Term.itselfT i_type2)-->HOLogic.boolT);
val at_type = Logic.mk_type i_type1;
@@ -460,7 +460,7 @@
val ak_name_qu' = Sign.full_bname thy' ak_name';
val i_type1 = Type(ak_name_qu,[]);
val i_type2 = Type(ak_name_qu',[]);
- val cdj = Const (@{const_name Nominal.disjoint},
+ val cdj = Const (\<^const_name>\<open>Nominal.disjoint\<close>,
(Term.itselfT i_type1)-->(Term.itselfT i_type2)-->HOLogic.boolT);
val at_type = Logic.mk_type i_type1;
val at_type' = Logic.mk_type i_type2;
@@ -555,14 +555,14 @@
val pt_thm_unit = pt_unit_inst;
in
thy
- |> Axclass.prove_arity (@{type_name fun},[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_fun)
- |> Axclass.prove_arity (@{type_name set},[[cls_name]],[cls_name]) (pt_proof pt_thm_set)
- |> Axclass.prove_arity (@{type_name noption},[[cls_name]],[cls_name]) (pt_proof pt_thm_noptn)
- |> Axclass.prove_arity (@{type_name option},[[cls_name]],[cls_name]) (pt_proof pt_thm_optn)
- |> Axclass.prove_arity (@{type_name list},[[cls_name]],[cls_name]) (pt_proof pt_thm_list)
- |> Axclass.prove_arity (@{type_name prod},[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_prod)
- |> Axclass.prove_arity (@{type_name nprod},[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_nprod)
- |> Axclass.prove_arity (@{type_name unit},[],[cls_name]) (pt_proof pt_thm_unit)
+ |> Axclass.prove_arity (\<^type_name>\<open>fun\<close>,[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_fun)
+ |> Axclass.prove_arity (\<^type_name>\<open>set\<close>,[[cls_name]],[cls_name]) (pt_proof pt_thm_set)
+ |> Axclass.prove_arity (\<^type_name>\<open>noption\<close>,[[cls_name]],[cls_name]) (pt_proof pt_thm_noptn)
+ |> Axclass.prove_arity (\<^type_name>\<open>option\<close>,[[cls_name]],[cls_name]) (pt_proof pt_thm_optn)
+ |> Axclass.prove_arity (\<^type_name>\<open>list\<close>,[[cls_name]],[cls_name]) (pt_proof pt_thm_list)
+ |> Axclass.prove_arity (\<^type_name>\<open>prod\<close>,[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_prod)
+ |> Axclass.prove_arity (\<^type_name>\<open>nprod\<close>,[[cls_name],[cls_name]],[cls_name]) (pt_proof pt_thm_nprod)
+ |> Axclass.prove_arity (\<^type_name>\<open>unit\<close>,[],[cls_name]) (pt_proof pt_thm_unit)
end) ak_names thy13;
(******** fs_<ak> class instances ********)
@@ -622,11 +622,11 @@
val fs_thm_optn = fs_inst RS fs_option_inst;
in
thy
- |> Axclass.prove_arity (@{type_name unit},[],[cls_name]) (fs_proof fs_thm_unit)
- |> Axclass.prove_arity (@{type_name prod},[[cls_name],[cls_name]],[cls_name]) (fs_proof fs_thm_prod)
- |> Axclass.prove_arity (@{type_name nprod},[[cls_name],[cls_name]],[cls_name]) (fs_proof fs_thm_nprod)
- |> Axclass.prove_arity (@{type_name list},[[cls_name]],[cls_name]) (fs_proof fs_thm_list)
- |> Axclass.prove_arity (@{type_name option},[[cls_name]],[cls_name]) (fs_proof fs_thm_optn)
+ |> Axclass.prove_arity (\<^type_name>\<open>unit\<close>,[],[cls_name]) (fs_proof fs_thm_unit)
+ |> Axclass.prove_arity (\<^type_name>\<open>prod\<close>,[[cls_name],[cls_name]],[cls_name]) (fs_proof fs_thm_prod)
+ |> Axclass.prove_arity (\<^type_name>\<open>nprod\<close>,[[cls_name],[cls_name]],[cls_name]) (fs_proof fs_thm_nprod)
+ |> Axclass.prove_arity (\<^type_name>\<open>list\<close>,[[cls_name]],[cls_name]) (fs_proof fs_thm_list)
+ |> Axclass.prove_arity (\<^type_name>\<open>option\<close>,[[cls_name]],[cls_name]) (fs_proof fs_thm_optn)
end) ak_names thy20;
(******** cp_<ak>_<ai> class instances ********)
@@ -706,13 +706,13 @@
val cp_thm_set = cp_inst RS cp_set_inst;
in
thy'
- |> Axclass.prove_arity (@{type_name unit},[],[cls_name]) (cp_proof cp_thm_unit)
- |> Axclass.prove_arity (@{type_name Product_Type.prod}, [[cls_name],[cls_name]],[cls_name]) (cp_proof cp_thm_prod)
- |> Axclass.prove_arity (@{type_name list},[[cls_name]],[cls_name]) (cp_proof cp_thm_list)
- |> Axclass.prove_arity (@{type_name fun},[[cls_name],[cls_name]],[cls_name]) (cp_proof cp_thm_fun)
- |> Axclass.prove_arity (@{type_name option},[[cls_name]],[cls_name]) (cp_proof cp_thm_optn)
- |> Axclass.prove_arity (@{type_name noption},[[cls_name]],[cls_name]) (cp_proof cp_thm_noptn)
- |> Axclass.prove_arity (@{type_name set},[[cls_name]],[cls_name]) (cp_proof cp_thm_set)
+ |> Axclass.prove_arity (\<^type_name>\<open>unit\<close>,[],[cls_name]) (cp_proof cp_thm_unit)
+ |> Axclass.prove_arity (\<^type_name>\<open>Product_Type.prod\<close>, [[cls_name],[cls_name]],[cls_name]) (cp_proof cp_thm_prod)
+ |> Axclass.prove_arity (\<^type_name>\<open>list\<close>,[[cls_name]],[cls_name]) (cp_proof cp_thm_list)
+ |> Axclass.prove_arity (\<^type_name>\<open>fun\<close>,[[cls_name],[cls_name]],[cls_name]) (cp_proof cp_thm_fun)
+ |> Axclass.prove_arity (\<^type_name>\<open>option\<close>,[[cls_name]],[cls_name]) (cp_proof cp_thm_optn)
+ |> Axclass.prove_arity (\<^type_name>\<open>noption\<close>,[[cls_name]],[cls_name]) (cp_proof cp_thm_noptn)
+ |> Axclass.prove_arity (\<^type_name>\<open>set\<close>,[[cls_name]],[cls_name]) (cp_proof cp_thm_set)
end) ak_names thy) ak_names thy25;
(* show that discrete nominal types are permutation types, finitely *)
@@ -759,18 +759,18 @@
in
thy26
- |> discrete_pt_inst @{type_name nat} @{thm perm_nat_def}
- |> discrete_fs_inst @{type_name nat} @{thm perm_nat_def}
- |> discrete_cp_inst @{type_name nat} @{thm perm_nat_def}
- |> discrete_pt_inst @{type_name bool} @{thm perm_bool_def}
- |> discrete_fs_inst @{type_name bool} @{thm perm_bool_def}
- |> discrete_cp_inst @{type_name bool} @{thm perm_bool_def}
- |> discrete_pt_inst @{type_name int} @{thm perm_int_def}
- |> discrete_fs_inst @{type_name int} @{thm perm_int_def}
- |> discrete_cp_inst @{type_name int} @{thm perm_int_def}
- |> discrete_pt_inst @{type_name char} @{thm perm_char_def}
- |> discrete_fs_inst @{type_name char} @{thm perm_char_def}
- |> discrete_cp_inst @{type_name char} @{thm perm_char_def}
+ |> discrete_pt_inst \<^type_name>\<open>nat\<close> @{thm perm_nat_def}
+ |> discrete_fs_inst \<^type_name>\<open>nat\<close> @{thm perm_nat_def}
+ |> discrete_cp_inst \<^type_name>\<open>nat\<close> @{thm perm_nat_def}
+ |> discrete_pt_inst \<^type_name>\<open>bool\<close> @{thm perm_bool_def}
+ |> discrete_fs_inst \<^type_name>\<open>bool\<close> @{thm perm_bool_def}
+ |> discrete_cp_inst \<^type_name>\<open>bool\<close> @{thm perm_bool_def}
+ |> discrete_pt_inst \<^type_name>\<open>int\<close> @{thm perm_int_def}
+ |> discrete_fs_inst \<^type_name>\<open>int\<close> @{thm perm_int_def}
+ |> discrete_cp_inst \<^type_name>\<open>int\<close> @{thm perm_int_def}
+ |> discrete_pt_inst \<^type_name>\<open>char\<close> @{thm perm_char_def}
+ |> discrete_fs_inst \<^type_name>\<open>char\<close> @{thm perm_char_def}
+ |> discrete_cp_inst \<^type_name>\<open>char\<close> @{thm perm_char_def}
end;
@@ -1027,7 +1027,7 @@
(* syntax und parsing *)
val _ =
- Outer_Syntax.command @{command_keyword atom_decl} "declare new kinds of atoms"
+ Outer_Syntax.command \<^command_keyword>\<open>atom_decl\<close> "declare new kinds of atoms"
(Scan.repeat1 Parse.name >> (Toplevel.theory o create_nom_typedecls));
end;
--- a/src/HOL/Nominal/nominal_datatype.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_datatype.ML Sat Jan 05 17:24:33 2019 +0100
@@ -87,16 +87,16 @@
val dj_cp = @{thm dj_cp};
-fun dest_permT (Type (@{type_name fun},
- [Type (@{type_name list}, [Type (@{type_name Product_Type.prod}, [T, _])]),
- Type (@{type_name fun}, [_, U])])) = (T, U);
+fun dest_permT (Type (\<^type_name>\<open>fun\<close>,
+ [Type (\<^type_name>\<open>list\<close>, [Type (\<^type_name>\<open>Product_Type.prod\<close>, [T, _])]),
+ Type (\<^type_name>\<open>fun\<close>, [_, U])])) = (T, U);
-fun permTs_of (Const (@{const_name Nominal.perm}, T) $ t $ u) = fst (dest_permT T) :: permTs_of u
+fun permTs_of (Const (\<^const_name>\<open>Nominal.perm\<close>, T) $ t $ u) = fst (dest_permT T) :: permTs_of u
| permTs_of _ = [];
fun perm_simproc' ctxt ct =
(case Thm.term_of ct of
- Const (@{const_name Nominal.perm}, T) $ t $ (u as Const (@{const_name Nominal.perm}, U) $ r $ s) =>
+ Const (\<^const_name>\<open>Nominal.perm\<close>, T) $ t $ (u as Const (\<^const_name>\<open>Nominal.perm\<close>, U) $ r $ s) =>
let
val thy = Proof_Context.theory_of ctxt;
val (aT as Type (a, []), S) = dest_permT T;
@@ -117,8 +117,8 @@
| _ => NONE);
val perm_simproc =
- Simplifier.make_simproc @{context} "perm_simp"
- {lhss = [@{term "pi1 \<bullet> (pi2 \<bullet> x)"}], proc = K perm_simproc'};
+ Simplifier.make_simproc \<^context> "perm_simp"
+ {lhss = [\<^term>\<open>pi1 \<bullet> (pi2 \<bullet> x)\<close>], proc = K perm_simproc'};
fun projections ctxt rule =
Project_Rule.projections ctxt rule
@@ -142,25 +142,25 @@
let
val T = fastype_of1 (Ts, t);
val U = fastype_of1 (Ts, u)
- in Const (@{const_name Nominal.perm}, T --> U --> U) $ t $ u end;
+ in Const (\<^const_name>\<open>Nominal.perm\<close>, T --> U --> U) $ t $ u end;
fun perm_of_pair (x, y) =
let
val T = fastype_of x;
val pT = mk_permT T
- in Const (@{const_name Cons}, HOLogic.mk_prodT (T, T) --> pT --> pT) $
- HOLogic.mk_prod (x, y) $ Const (@{const_name Nil}, pT)
+ in Const (\<^const_name>\<open>Cons\<close>, HOLogic.mk_prodT (T, T) --> pT --> pT) $
+ HOLogic.mk_prod (x, y) $ Const (\<^const_name>\<open>Nil\<close>, pT)
end;
fun mk_not_sym ths = maps (fn th =>
(case Thm.prop_of th of
- _ $ (Const (@{const_name Not}, _) $ (Const (@{const_name HOL.eq}, _) $ _ $ _)) =>
+ _ $ (Const (\<^const_name>\<open>Not\<close>, _) $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ _)) =>
[th, th RS not_sym]
| _ => [th])) ths;
-fun fresh_const T U = Const (@{const_name Nominal.fresh}, T --> U --> HOLogic.boolT);
+fun fresh_const T U = Const (\<^const_name>\<open>Nominal.fresh\<close>, T --> U --> HOLogic.boolT);
fun fresh_star_const T U =
- Const (@{const_name Nominal.fresh_star}, HOLogic.mk_setT T --> U --> HOLogic.boolT);
+ Const (\<^const_name>\<open>Nominal.fresh_star\<close>, HOLogic.mk_setT T --> U --> HOLogic.boolT);
fun gen_nominal_datatype prep_specs (config: Old_Datatype_Aux.config) dts thy =
let
@@ -189,8 +189,8 @@
(Sign.full_name thy n, Sign.full_name thy (Binding.suffix_name "_Rep" n))) dts;
val rps = map Library.swap ps;
- fun replace_types (Type (@{type_name ABS}, [T, U])) =
- Type (@{type_name fun}, [T, Type (@{type_name noption}, [replace_types U])])
+ fun replace_types (Type (\<^type_name>\<open>ABS\<close>, [T, U])) =
+ Type (\<^type_name>\<open>fun\<close>, [T, Type (\<^type_name>\<open>noption\<close>, [replace_types U])])
| replace_types (Type (s, Ts)) =
Type (the_default s (AList.lookup op = ps s), map replace_types Ts)
| replace_types T = T;
@@ -212,14 +212,14 @@
(**** define permutation functions ****)
- val permT = mk_permT (TFree ("'x", @{sort type}));
+ val permT = mk_permT (TFree ("'x", \<^sort>\<open>type\<close>));
val pi = Free ("pi", permT);
val perm_types = map (fn (i, _) =>
let val T = nth_dtyp i
in permT --> T --> T end) descr;
val perm_names' = Old_Datatype_Prop.indexify_names (map (fn (i, _) =>
"perm_" ^ Old_Datatype_Aux.name_of_typ (nth_dtyp i)) descr);
- val perm_names = replicate (length new_type_names) @{const_name Nominal.perm} @
+ val perm_names = replicate (length new_type_names) \<^const_name>\<open>Nominal.perm\<close> @
map (Sign.full_bname thy1) (List.drop (perm_names', length new_type_names));
val perm_names_types = perm_names ~~ perm_types;
val perm_names_types' = perm_names' ~~ perm_types;
@@ -240,16 +240,16 @@
fold_rev (Term.abs o pair "x") Us
(Free (nth perm_names_types' (Old_Datatype_Aux.body_index dt)) $ pi $
list_comb (x, map (fn (i, U) =>
- Const (@{const_name Nominal.perm}, permT --> U --> U) $
- (Const (@{const_name rev}, permT --> permT) $ pi) $
+ Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> U --> U) $
+ (Const (\<^const_name>\<open>rev\<close>, permT --> permT) $ pi) $
Bound i) ((length Us - 1 downto 0) ~~ Us)))
end
- else Const (@{const_name Nominal.perm}, permT --> T --> T) $ pi $ x
+ else Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> T --> T) $ pi $ x
end;
in
((Binding.empty_atts, HOLogic.mk_Trueprop (HOLogic.mk_eq
(Free (nth perm_names_types' i) $
- Free ("pi", mk_permT (TFree ("'x", @{sort type}))) $
+ Free ("pi", mk_permT (TFree ("'x", \<^sort>\<open>type\<close>))) $
list_comb (c, args),
list_comb (c, map perm_arg (dts ~~ args))))), [], [])
end) constrs
@@ -278,7 +278,7 @@
(map (fn (c as (s, T), x) =>
let val [T1, T2] = binder_types T
in HOLogic.mk_eq (Const c $ pi $ Free (x, T2),
- Const (@{const_name Nominal.perm}, T) $ pi $ Free (x, T2))
+ Const (\<^const_name>\<open>Nominal.perm\<close>, T) $ pi $ Free (x, T2))
end)
(perm_names_types ~~ perm_indnames))))
(fn {context = ctxt, ...} =>
@@ -298,7 +298,7 @@
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj
(map (fn ((s, T), x) => HOLogic.mk_eq
(Const (s, permT --> T --> T) $
- Const (@{const_name Nil}, permT) $ Free (x, T),
+ Const (\<^const_name>\<open>Nil\<close>, permT) $ Free (x, T),
Free (x, T)))
(perm_names ~~
map body_type perm_types ~~ perm_indnames)))))
@@ -332,7 +332,7 @@
(map (fn ((s, T), x) =>
let val perm = Const (s, permT --> T --> T)
in HOLogic.mk_eq
- (perm $ (Const (@{const_name append}, permT --> permT --> permT) $
+ (perm $ (Const (\<^const_name>\<open>append\<close>, permT --> permT --> permT) $
pi1 $ pi2) $ Free (x, T),
perm $ pi1 $ (perm $ pi2 $ Free (x, T)))
end)
@@ -364,7 +364,7 @@
in List.take (map Drule.export_without_context (Old_Datatype_Aux.split_conj_thm
(Goal.prove_global_future thy2 [] []
(augment_sort thy2 [pt_class_of thy2 a] (Logic.mk_implies
- (HOLogic.mk_Trueprop (Const (@{const_name Nominal.prm_eq},
+ (HOLogic.mk_Trueprop (Const (\<^const_name>\<open>Nominal.prm_eq\<close>,
permT --> permT --> HOLogic.boolT) $ pi1 $ pi2),
HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj
(map (fn ((s, T), x) =>
@@ -422,7 +422,7 @@
val pi2 = Free ("pi2", permT2);
val perm1 = Const (s, permT1 --> T --> T);
val perm2 = Const (s, permT2 --> T --> T);
- val perm3 = Const (@{const_name Nominal.perm}, permT1 --> permT2 --> permT2)
+ val perm3 = Const (\<^const_name>\<open>Nominal.perm\<close>, permT1 --> permT2 --> permT2)
in HOLogic.mk_eq
(perm1 $ pi1 $ (perm2 $ pi2 $ Free (x, T)),
perm2 $ (perm3 $ pi1 $ pi2) $ (perm1 $ pi1 $ Free (x, T)))
@@ -472,17 +472,17 @@
(map (fn (i, _) => Old_Datatype_Aux.name_of_typ (nth_dtyp i) ^ "_set") descr);
val big_rep_name =
space_implode "_" (Old_Datatype_Prop.indexify_names (map_filter
- (fn (i, (@{type_name noption}, _, _)) => NONE
+ (fn (i, (\<^type_name>\<open>noption\<close>, _, _)) => NONE
| (i, _) => SOME (Old_Datatype_Aux.name_of_typ (nth_dtyp i))) descr)) ^ "_set";
val _ = warning ("big_rep_name: " ^ big_rep_name);
fun strip_option (dtf as Old_Datatype_Aux.DtType ("fun", [dt, Old_Datatype_Aux.DtRec i])) =
(case AList.lookup op = descr i of
- SOME (@{type_name noption}, _, [(_, [dt']), _]) =>
+ SOME (\<^type_name>\<open>noption\<close>, _, [(_, [dt']), _]) =>
apfst (cons dt) (strip_option dt')
| _ => ([], dtf))
| strip_option (Old_Datatype_Aux.DtType ("fun",
- [dt, Old_Datatype_Aux.DtType (@{type_name noption}, [dt'])])) =
+ [dt, Old_Datatype_Aux.DtType (\<^type_name>\<open>noption\<close>, [dt'])])) =
apfst (cons dt) (strip_option dt')
| strip_option dt = ([], dt);
@@ -503,8 +503,8 @@
val free' = Old_Datatype_Aux.app_bnds free (length Us);
fun mk_abs_fun T (i, t) =
let val U = fastype_of t
- in (i + 1, Const (@{const_name Nominal.abs_fun}, [T, U, T] --->
- Type (@{type_name noption}, [U])) $ Old_Datatype_Aux.mk_Free "y" T i $ t)
+ in (i + 1, Const (\<^const_name>\<open>Nominal.abs_fun\<close>, [T, U, T] --->
+ Type (\<^type_name>\<open>noption\<close>, [U])) $ Old_Datatype_Aux.mk_Free "y" T i $ t)
end
in (j + 1, j' + length Ts,
case dt'' of
@@ -523,7 +523,7 @@
val (intr_ts, (rep_set_names', recTs')) =
apfst flat (apsnd ListPair.unzip (ListPair.unzip (map_filter
- (fn ((_, (@{type_name noption}, _, _)), _) => NONE
+ (fn ((_, (\<^type_name>\<open>noption\<close>, _, _)), _) => NONE
| ((i, (_, _, constrs)), rep_set_name) =>
let val T = nth_dtyp i
in SOME (map (make_intr rep_set_name T) constrs,
@@ -550,7 +550,7 @@
val abs_perm = Global_Theory.get_thms thy4 "abs_perm";
val perm_indnames' = map_filter
- (fn (x, (_, (@{type_name noption}, _, _))) => NONE | (x, _) => SOME x)
+ (fn (x, (_, (\<^type_name>\<open>noption\<close>, _, _))) => NONE | (x, _) => SOME x)
(perm_indnames ~~ descr);
fun mk_perm_closed name = map (fn th => Drule.export_without_context (th RS mp))
@@ -563,7 +563,7 @@
val S = Const (s, T --> HOLogic.boolT);
val permT = mk_permT (Type (name, []))
in HOLogic.mk_imp (S $ Free (x, T),
- S $ (Const (@{const_name Nominal.perm}, permT --> T --> T) $
+ S $ (Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> T --> T) $
Free ("pi", permT) $ Free (x, T)))
end) (rep_set_names'' ~~ recTs' ~~ perm_indnames')))))
(fn {context = ctxt, ...} => EVERY
@@ -584,7 +584,7 @@
|> fold_map (fn (((name, mx), tvs), (cname, U)) => fn thy =>
Typedef.add_typedef_global {overloaded = false}
(name, map (fn (v, _) => (v, dummyS)) tvs, mx) (* FIXME keep constraints!? *)
- (Const (@{const_name Collect}, (U --> HOLogic.boolT) --> HOLogic.mk_setT U) $
+ (Const (\<^const_name>\<open>Collect\<close>, (U --> HOLogic.boolT) --> HOLogic.mk_setT U) $
Const (cname, U --> HOLogic.boolT)) NONE
(fn ctxt =>
resolve_tac ctxt [exI] 1 THEN
@@ -593,15 +593,15 @@
(resolve_tac ctxt rep_intrs 1)) thy |> (fn ((_, r), thy) =>
let
val permT = mk_permT
- (TFree (singleton (Name.variant_list (map fst tvs)) "'a", @{sort type}));
+ (TFree (singleton (Name.variant_list (map fst tvs)) "'a", \<^sort>\<open>type\<close>));
val pi = Free ("pi", permT);
val T = Type (Sign.full_name thy name, map TFree tvs);
in apfst (pair r o hd)
(Global_Theory.add_defs_unchecked true
[((Binding.map_name (fn n => "prm_" ^ n ^ "_def") name, Logic.mk_equals
- (Const (@{const_name Nominal.perm}, permT --> T --> T) $ pi $ Free ("x", T),
+ (Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> T --> T) $ pi $ Free ("x", T),
Const (Sign.intern_const thy ("Abs_" ^ Binding.name_of name), U --> T) $
- (Const (@{const_name Nominal.perm}, permT --> U --> U) $ pi $
+ (Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> U --> U) $ pi $
(Const (Sign.intern_const thy ("Rep_" ^ Binding.name_of name), T --> U) $
Free ("x", T))))), [])] thy)
end))
@@ -683,12 +683,12 @@
val T = fastype_of x;
val U = fastype_of t
in
- Const (@{const_name Nominal.abs_fun}, T --> U --> T -->
- Type (@{type_name noption}, [U])) $ x $ t
+ Const (\<^const_name>\<open>Nominal.abs_fun\<close>, T --> U --> T -->
+ Type (\<^type_name>\<open>noption\<close>, [U])) $ x $ t
end;
val (ty_idxs, _) = List.foldl
- (fn ((i, (@{type_name noption}, _, _)), p) => p
+ (fn ((i, (\<^type_name>\<open>noption\<close>, _, _)), p) => p
| ((i, _), (ty_idxs, j)) => (ty_idxs @ [(i, j)], j + 1)) ([], 0) descr;
fun reindex (Old_Datatype_Aux.DtType (s, dts)) = Old_Datatype_Aux.DtType (s, map reindex dts)
@@ -704,7 +704,7 @@
in Long_Name.implode (Library.nth_map (length xs - i) (strip_suffix 4) xs) end;
val (descr'', ndescr) = ListPair.unzip (map_filter
- (fn (i, (@{type_name noption}, _, _)) => NONE
+ (fn (i, (\<^type_name>\<open>noption\<close>, _, _)) => NONE
| (i, (s, dts, constrs)) =>
let
val SOME index = AList.lookup op = ty_idxs i;
@@ -834,8 +834,8 @@
(augment_sort thy8
(pt_class_of thy8 atom :: map (cp_class_of thy8 atom) (remove (op =) atom dt_atoms))
(HOLogic.mk_Trueprop (HOLogic.mk_eq
- (Const (@{const_name Nominal.perm}, permT --> U --> U) $ pi $ (Rep $ x),
- Rep $ (Const (@{const_name Nominal.perm}, permT --> T --> T) $ pi $ x)))))
+ (Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> U --> U) $ pi $ (Rep $ x),
+ Rep $ (Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> T --> T) $ pi $ x)))))
(fn {context = ctxt, ...} =>
simp_tac (put_simpset HOL_basic_ss ctxt addsimps (perm_defs @ Abs_inverse_thms @
perm_closed_thms @ Rep_thms)) 1)
@@ -876,7 +876,7 @@
fun perm t =
let val T = fastype_of t
- in Const (@{const_name Nominal.perm}, permT --> T --> T) $ pi $ t end;
+ in Const (\<^const_name>\<open>Nominal.perm\<close>, permT --> T --> T) $ pi $ t end;
fun constr_arg (dts, dt) (j, l_args, r_args) =
let
@@ -997,14 +997,14 @@
val Ts = map fastype_of args1;
val c = list_comb (Const (cname, Ts ---> T), args1);
fun supp t =
- Const (@{const_name Nominal.supp}, fastype_of t --> HOLogic.mk_setT atomT) $ t;
+ Const (\<^const_name>\<open>Nominal.supp\<close>, fastype_of t --> HOLogic.mk_setT atomT) $ t;
fun fresh t = fresh_const atomT (fastype_of t) $ Free ("a", atomT) $ t;
val supp_thm = Goal.prove_global_future thy8 [] []
(augment_sort thy8 pt_cp_sort
(HOLogic.mk_Trueprop (HOLogic.mk_eq
(supp c,
if null dts then HOLogic.mk_set atomT []
- else foldr1 (HOLogic.mk_binop @{const_abbrev union}) (map supp args2)))))
+ else foldr1 (HOLogic.mk_binop \<^const_abbrev>\<open>union\<close>) (map supp args2)))))
(fn {context = ctxt, ...} =>
simp_tac (put_simpset HOL_basic_ss ctxt addsimps (supp_def ::
Un_assoc :: @{thm de_Morgan_conj} :: Collect_disj_eq :: finite_Un ::
@@ -1015,7 +1015,7 @@
Goal.prove_global_future thy8 [] [] (augment_sort thy8 pt_cp_sort
(HOLogic.mk_Trueprop (HOLogic.mk_eq
(fresh c,
- if null dts then @{term True}
+ if null dts then \<^term>\<open>True\<close>
else foldr1 HOLogic.mk_conj (map fresh args2)))))
(fn {context = ctxt, ...} =>
simp_tac (put_simpset HOL_ss ctxt addsimps [Un_iff, empty_iff, fresh_def, supp_thm]) 1))
@@ -1097,8 +1097,8 @@
(augment_sort thy8 (fs_class_of thy8 atom :: pt_cp_sort)
(HOLogic.mk_Trueprop
(foldr1 HOLogic.mk_conj (map (fn (s, T) =>
- Const (@{const_name finite}, HOLogic.mk_setT atomT --> HOLogic.boolT) $
- (Const (@{const_name Nominal.supp}, T --> HOLogic.mk_setT atomT) $ Free (s, T)))
+ Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT atomT --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, T --> HOLogic.mk_setT atomT) $ Free (s, T)))
(indnames ~~ recTs)))))
(fn {context = ctxt, ...} =>
Old_Datatype_Aux.ind_tac ctxt dt_induct indnames 1 THEN
@@ -1142,10 +1142,10 @@
val pnames = if length descr'' = 1 then ["P"]
else map (fn i => "P" ^ string_of_int i) (1 upto length descr'');
- val ind_sort = if null dt_atomTs then @{sort type}
+ val ind_sort = if null dt_atomTs then \<^sort>\<open>type\<close>
else Sign.minimize_sort thy9 (Sign.certify_sort thy9 (map (fs_class_of thy9) dt_atoms));
val fsT = TFree ("'n", ind_sort);
- val fsT' = TFree ("'n", @{sort type});
+ val fsT' = TFree ("'n", \<^sort>\<open>type\<close>);
val fresh_fs = map (fn (s, T) => (T, Free (s, fsT' --> HOLogic.mk_setT T)))
(Old_Datatype_Prop.indexify_names (replicate (length dt_atomTs) "f") ~~ dt_atomTs);
@@ -1205,7 +1205,7 @@
(constrs ~~ idxss)) (descr'' ~~ ndescr ~~ recTs);
val tnames = Old_Datatype_Prop.make_tnames recTs;
val zs = Name.variant_list tnames (replicate (length descr'') "z");
- val ind_concl = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop @{const_name HOL.conj})
+ val ind_concl = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop \<^const_name>\<open>HOL.conj\<close>)
(map (fn ((((i, _), T), tname), z) =>
make_pred fsT i T $ Free (z, fsT) $ Free (tname, T))
(descr'' ~~ recTs ~~ tnames ~~ zs)));
@@ -1213,14 +1213,14 @@
val ind_prems' =
map (fn (_, f as Free (_, T)) => Logic.all (Free ("x", fsT'))
- (HOLogic.mk_Trueprop (Const (@{const_name finite},
+ (HOLogic.mk_Trueprop (Const (\<^const_name>\<open>finite\<close>,
Term.range_type T -->
HOLogic.boolT) $ (f $ Free ("x", fsT'))))) fresh_fs @
maps (fn (((i, (_, _, constrs)), (_, idxss)), T) =>
map (make_ind_prem fsT' (fn T => fn t => fn u => HOLogic.Not $
HOLogic.mk_mem (t, the (AList.lookup op = fresh_fs T) $ u)) i T)
(constrs ~~ idxss)) (descr'' ~~ ndescr ~~ recTs);
- val ind_concl' = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop @{const_name HOL.conj})
+ val ind_concl' = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop \<^const_name>\<open>HOL.conj\<close>)
(map (fn ((((i, _), T), tname), z) =>
make_pred fsT' i T $ Free (z, fsT') $ Free (tname, T))
(descr'' ~~ recTs ~~ tnames ~~ zs)));
@@ -1230,7 +1230,7 @@
(Old_Datatype_Prop.indexify_names (replicate (length dt_atomTs) "pi") ~~
map mk_permT dt_atomTs) @ [("z", fsT')];
val aux_ind_Ts = rev (map snd aux_ind_vars);
- val aux_ind_concl = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop @{const_name HOL.conj})
+ val aux_ind_concl = HOLogic.mk_Trueprop (foldr1 (HOLogic.mk_binop \<^const_name>\<open>HOL.conj\<close>)
(map (fn (((i, _), T), tname) =>
HOLogic.list_all (aux_ind_vars, make_pred fsT' i T $ Bound 0 $
fold_rev (mk_perm aux_ind_Ts) (map Bound (length dt_atomTs downto 1))
@@ -1377,9 +1377,9 @@
cut_facts_tac iprems 1,
(resolve_tac context2 prems THEN_ALL_NEW
SUBGOAL (fn (t, i) => case Logic.strip_assums_concl t of
- _ $ (Const (@{const_name Nominal.fresh}, _) $ _ $ _) =>
+ _ $ (Const (\<^const_name>\<open>Nominal.fresh\<close>, _) $ _ $ _) =>
simp_tac ind_ss1' i
- | _ $ (Const (@{const_name Not}, _) $ _) =>
+ | _ $ (Const (\<^const_name>\<open>Not\<close>, _) $ _) =>
resolve_tac context2 freshs2' i
| _ => asm_simp_tac (put_simpset HOL_basic_ss context3 addsimps
pt2_atoms addsimprocs [perm_simproc]) i)) 1])
@@ -1403,7 +1403,7 @@
map (fn (_, f) =>
let val f' = Logic.varify_global f
in (dest_Var f',
- Thm.global_cterm_of thy9 (Const (@{const_name Nominal.supp}, fastype_of f')))
+ Thm.global_cterm_of thy9 (Const (\<^const_name>\<open>Nominal.supp\<close>, fastype_of f')))
end) fresh_fs) induct_aux;
val induct = Goal.prove_global_future thy9 []
@@ -1432,7 +1432,7 @@
val (rec_result_Ts', rec_fn_Ts') = Old_Datatype_Prop.make_primrec_Ts descr' used;
- val rec_sort = if null dt_atomTs then @{sort type} else
+ val rec_sort = if null dt_atomTs then \<^sort>\<open>type\<close> else
Sign.minimize_sort thy10 (Sign.certify_sort thy10 pt_cp_sort);
val rec_result_Ts = map (fn TFree (s, _) => TFree (s, rec_sort)) rec_result_Ts';
@@ -1493,8 +1493,8 @@
HOLogic.mk_Trueprop (nth rec_preds i $ Free y)) (recs ~~ frees'');
val prems5 = mk_fresh3 (recs ~~ frees'') frees';
val prems6 = maps (fn aT => map (fn y as (_, T) => HOLogic.mk_Trueprop
- (Const (@{const_name finite}, HOLogic.mk_setT aT --> HOLogic.boolT) $
- (Const (@{const_name Nominal.supp}, T --> HOLogic.mk_setT aT) $ Free y)))
+ (Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT aT --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, T --> HOLogic.mk_setT aT) $ Free y)))
frees'') atomTs;
val prems7 = map (fn x as (_, T) => HOLogic.mk_Trueprop
(fresh_const T fsT' $ Free x $ rec_ctxt)) binders;
@@ -1571,7 +1571,7 @@
(fn {context = ctxt, ...} =>
dresolve_tac ctxt [Thm.instantiate ([],
[((("pi", 0), permT),
- Thm.global_cterm_of thy11 (Const (@{const_name rev}, permT --> permT) $ pi))]) th] 1 THEN
+ Thm.global_cterm_of thy11 (Const (\<^const_name>\<open>rev\<close>, permT --> permT) $ pi))]) th] 1 THEN
NominalPermeq.perm_simp_tac (put_simpset HOL_ss ctxt) 1)) (ps ~~ ths)
in (ths, ths') end) dt_atomTs);
@@ -1582,9 +1582,9 @@
val name = Long_Name.base_name (fst (dest_Type aT));
val fs_name = Global_Theory.get_thm thy11 ("fs_" ^ name ^ "1");
val aset = HOLogic.mk_setT aT;
- val finite = Const (@{const_name finite}, aset --> HOLogic.boolT);
+ val finite = Const (\<^const_name>\<open>finite\<close>, aset --> HOLogic.boolT);
val fins = map (fn (f, T) => HOLogic.mk_Trueprop
- (finite $ (Const (@{const_name Nominal.supp}, T --> aset) $ f)))
+ (finite $ (Const (\<^const_name>\<open>Nominal.supp\<close>, T --> aset) $ f)))
(rec_fns ~~ rec_fn_Ts)
in
map (fn th => Drule.export_without_context (th RS mp)) (Old_Datatype_Aux.split_conj_thm
@@ -1598,7 +1598,7 @@
val y = Free ("y" ^ string_of_int i, U)
in
HOLogic.mk_imp (R $ x $ y,
- finite $ (Const (@{const_name Nominal.supp}, U --> aset) $ y))
+ finite $ (Const (\<^const_name>\<open>Nominal.supp\<close>, U --> aset) $ y))
end) (recTs ~~ rec_result_Ts ~~ rec_sets ~~
(1 upto length recTs))))))
(fn {prems = fins, context = ctxt} =>
@@ -1610,8 +1610,8 @@
val finite_premss = map (fn aT =>
map (fn (f, T) => HOLogic.mk_Trueprop
- (Const (@{const_name finite}, HOLogic.mk_setT aT --> HOLogic.boolT) $
- (Const (@{const_name Nominal.supp}, T --> HOLogic.mk_setT aT) $ f)))
+ (Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT aT --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, T --> HOLogic.mk_setT aT) $ f)))
(rec_fns ~~ rec_fn_Ts)) dt_atomTs;
val rec_fns' = map (augment_sort thy11 fs_cp_sort) rec_fns;
@@ -1650,7 +1650,7 @@
in EVERY
[resolve_tac ctxt [infer_instantiate ctxt
[(#1 (dest_Var S),
- Thm.cterm_of ctxt (Const (@{const_name Nominal.supp},
+ Thm.cterm_of ctxt (Const (\<^const_name>\<open>Nominal.supp\<close>,
fastype_of tuple --> HOLogic.mk_setT aT) $ tuple))]
supports_fresh] 1,
simp_tac (put_simpset HOL_basic_ss ctxt addsimps
@@ -1685,7 +1685,7 @@
val rec_unique_frees' =
Old_Datatype_Prop.indexify_names (replicate (length recTs) "y") ~~ rec_result_Ts;
val rec_unique_concls = map (fn ((x, U), R) =>
- Const (@{const_name Ex1}, (U --> HOLogic.boolT) --> HOLogic.boolT) $
+ Const (\<^const_name>\<open>Ex1\<close>, (U --> HOLogic.boolT) --> HOLogic.boolT) $
Abs ("y", U, R $ Free x $ Bound 0))
(rec_unique_frees ~~ rec_result_Ts ~~ rec_sets);
@@ -1693,7 +1693,7 @@
infer_instantiate (Proof_Context.init_global thy11)
(map (apsnd (Thm.global_cterm_of thy11 o augment_sort thy11 fs_cp_sort))
(map (fn (aT, f) => (#1 (dest_Var (Logic.varify_global f)), Abs ("z", HOLogic.unitT,
- Const (@{const_name Nominal.supp}, fun_tupleT --> HOLogic.mk_setT aT) $ fun_tuple)))
+ Const (\<^const_name>\<open>Nominal.supp\<close>, fun_tupleT --> HOLogic.mk_setT aT) $ fun_tuple)))
fresh_fs @
map (fn (((P, T), (x, U)), Q) =>
((P, 0),
@@ -1723,8 +1723,8 @@
val finite_ctxt_prems = map (fn aT =>
HOLogic.mk_Trueprop
- (Const (@{const_name finite}, HOLogic.mk_setT aT --> HOLogic.boolT) $
- (Const (@{const_name Nominal.supp}, fsT' --> HOLogic.mk_setT aT) $ rec_ctxt))) dt_atomTs;
+ (Const (\<^const_name>\<open>finite\<close>, HOLogic.mk_setT aT --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, fsT' --> HOLogic.mk_setT aT) $ rec_ctxt))) dt_atomTs;
val rec_unique_thms = Old_Datatype_Aux.split_conj_thm (Goal.prove
(Proof_Context.init_global thy11) (map fst rec_unique_frees)
@@ -1794,8 +1794,8 @@
| _ => false)) prems';
val fresh_prems = filter (fn th =>
(case Thm.prop_of th of
- _ $ (Const (@{const_name Nominal.fresh}, _) $ _ $ _) => true
- | _ $ (Const (@{const_name Not}, _) $ _) => true
+ _ $ (Const (\<^const_name>\<open>Nominal.fresh\<close>, _) $ _ $ _) => true
+ | _ $ (Const (\<^const_name>\<open>Not\<close>, _) $ _) => true
| _ => false)) prems';
val Ts = map fastype_of boundsl;
@@ -1897,7 +1897,7 @@
val ihs = filter (fn th =>
(case Thm.prop_of th of
- _ $ (Const (@{const_name All}, _) $ _) => true
+ _ $ (Const (\<^const_name>\<open>All\<close>, _) $ _) => true
| _ => false)) prems';
(** pi1 o rs = pi2 o vs , rs = pi1^-1 o pi2 o vs **)
@@ -2043,7 +2043,7 @@
(reccomb_names ~~ recTs ~~ rec_result_Ts))
|> (Global_Theory.add_defs false o map Thm.no_attributes) (map (fn ((((name, comb), set), T), T') =>
(Binding.name (Long_Name.base_name name ^ "_def"), Logic.mk_equals (comb, absfree ("x", T)
- (Const (@{const_name The}, (T' --> HOLogic.boolT) --> T') $ absfree ("y", T')
+ (Const (\<^const_name>\<open>The\<close>, (T' --> HOLogic.boolT) --> T') $ absfree ("y", T')
(set $ Free ("x", T) $ Free ("y", T'))))))
(reccomb_names ~~ reccombs ~~ rec_sets ~~ recTs ~~ rec_result_Ts));
@@ -2096,11 +2096,11 @@
val spec_cmd =
Parse.type_args_constrained -- Parse.binding -- Parse.opt_mixfix --
- (@{keyword "="} |-- Parse.enum1 "|" (Parse.binding -- Scan.repeat Parse.typ -- Parse.opt_mixfix))
+ (\<^keyword>\<open>=\<close> |-- Parse.enum1 "|" (Parse.binding -- Scan.repeat Parse.typ -- Parse.opt_mixfix))
>> (fn (((vs, t), mx), cons) => ((t, vs, mx), map Scan.triple1 cons));
val _ =
- Outer_Syntax.command @{command_keyword nominal_datatype} "define nominal datatypes"
+ Outer_Syntax.command \<^command_keyword>\<open>nominal_datatype\<close> "define nominal datatypes"
(Parse.and_list1 spec_cmd >>
(Toplevel.theory o nominal_datatype_cmd Old_Datatype_Aux.default_config));
--- a/src/HOL/Nominal/nominal_fresh_fun.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_fresh_fun.ML Sat Jan 05 17:24:33 2019 +0100
@@ -90,8 +90,8 @@
| get_inner_fresh_fun (v as Var _) = NONE
| get_inner_fresh_fun (Const _) = NONE
| get_inner_fresh_fun (Abs (_, _, t)) = get_inner_fresh_fun t
- | get_inner_fresh_fun (Const (@{const_name Nominal.fresh_fun},
- Type(@{type_name fun},[Type (@{type_name fun},[Type (T,_),_]),_])) $ u) = SOME T
+ | get_inner_fresh_fun (Const (\<^const_name>\<open>Nominal.fresh_fun\<close>,
+ Type(\<^type_name>\<open>fun\<close>,[Type (\<^type_name>\<open>fun\<close>,[Type (T,_),_]),_])) $ u) = SOME T
| get_inner_fresh_fun (t $ u) =
let val a = get_inner_fresh_fun u in
if a = NONE then get_inner_fresh_fun t else a
--- a/src/HOL/Nominal/nominal_inductive.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_inductive.ML Sat Jan 05 17:24:33 2019 +0100
@@ -40,11 +40,11 @@
th RS infer_instantiate ctxt [(#1 (dest_Var (Thm.term_of perm_boolI_pi)), pi)] perm_boolI;
fun mk_perm_bool_simproc names =
- Simplifier.make_simproc @{context} "perm_bool"
- {lhss = [@{term "perm pi x"}],
+ Simplifier.make_simproc \<^context> "perm_bool"
+ {lhss = [\<^term>\<open>perm pi x\<close>],
proc = fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name Nominal.perm}, _) $ _ $ t =>
+ Const (\<^const_name>\<open>Nominal.perm\<close>, _) $ _ $ t =>
if member (op =) names (the_default "" (try (head_of #> dest_Const #> fst) t))
then SOME perm_bool else NONE
| _ => NONE)};
@@ -73,14 +73,14 @@
| add_binders thy i (Abs (_, _, t)) bs = add_binders thy (i + 1) t bs
| add_binders thy i _ bs = bs;
-fun split_conj f names (Const (@{const_name HOL.conj}, _) $ p $ q) _ = (case head_of p of
+fun split_conj f names (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ p $ q) _ = (case head_of p of
Const (name, _) =>
if member (op =) names name then SOME (f p q) else NONE
| _ => NONE)
| split_conj _ _ _ _ = NONE;
fun strip_all [] t = t
- | strip_all (_ :: xs) (Const (@{const_name All}, _) $ Abs (s, T, t)) = strip_all xs t;
+ | strip_all (_ :: xs) (Const (\<^const_name>\<open>All\<close>, _) $ Abs (s, T, t)) = strip_all xs t;
(*********************************************************************)
(* maps R ... & (ALL pi_1 ... pi_n z. P z (pi_1 o ... o pi_n o t)) *)
@@ -91,17 +91,17 @@
(* where "id" protects the subformula from simplification *)
(*********************************************************************)
-fun inst_conj_all names ps pis (Const (@{const_name HOL.conj}, _) $ p $ q) _ =
+fun inst_conj_all names ps pis (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ p $ q) _ =
(case head_of p of
Const (name, _) =>
if member (op =) names name then SOME (HOLogic.mk_conj (p,
- Const (@{const_name Fun.id}, HOLogic.boolT --> HOLogic.boolT) $
+ Const (\<^const_name>\<open>Fun.id\<close>, HOLogic.boolT --> HOLogic.boolT) $
(subst_bounds (pis, strip_all pis q))))
else NONE
| _ => NONE)
| inst_conj_all names ps pis t u =
if member (op aconv) ps (head_of u) then
- SOME (Const (@{const_name Fun.id}, HOLogic.boolT --> HOLogic.boolT) $
+ SOME (Const (\<^const_name>\<open>Fun.id\<close>, HOLogic.boolT --> HOLogic.boolT) $
(subst_bounds (pis, strip_all pis t)))
else NONE
| inst_conj_all _ _ _ _ _ = NONE;
@@ -199,7 +199,7 @@
end) (Logic.strip_imp_prems raw_induct' ~~ avoids');
val atomTs = distinct op = (maps (map snd o #2) prems);
- val ind_sort = if null atomTs then @{sort type}
+ val ind_sort = if null atomTs then \<^sort>\<open>type\<close>
else Sign.minimize_sort thy (Sign.certify_sort thy (map (fn T => Sign.intern_class thy
("fs_" ^ Long_Name.base_name (fst (dest_Type T)))) atomTs));
val (fs_ctxt_tyname, _) = Name.variant "'n" (Variable.names_of ctxt');
@@ -276,7 +276,7 @@
("pt_" ^ Long_Name.base_name (fst (dest_Type aT)) ^ "2")) atomTs;
val eqvt_ss = simpset_of (put_simpset HOL_basic_ss (Proof_Context.init_global thy)
addsimps (eqvt_thms @ perm_pi_simp @ pt2_atoms)
- addsimprocs [mk_perm_bool_simproc [@{const_name Fun.id}],
+ addsimprocs [mk_perm_bool_simproc [\<^const_name>\<open>Fun.id\<close>],
NominalPermeq.perm_simproc_app, NominalPermeq.perm_simproc_fun]);
val fresh_bij = Global_Theory.get_thms thy "fresh_bij";
val perm_bij = Global_Theory.get_thms thy "perm_bij";
@@ -292,7 +292,7 @@
(** protect terms to avoid that fresh_prod interferes with **)
(** pairs used in introduction rules of inductive predicate **)
fun protect t =
- let val T = fastype_of t in Const (@{const_name Fun.id}, T --> T) $ t end;
+ let val T = fastype_of t in Const (\<^const_name>\<open>Fun.id\<close>, T --> T) $ t end;
val p = foldr1 HOLogic.mk_prod (map protect ts @ freshs1);
val ex = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop
(HOLogic.exists_const T $ Abs ("x", T,
@@ -336,7 +336,7 @@
fun concat_perm pi1 pi2 =
let val T = fastype_of pi1
in if T = fastype_of pi2 then
- Const (@{const_name append}, T --> T --> T) $ pi1 $ pi2
+ Const (\<^const_name>\<open>append\<close>, T --> T --> T) $ pi1 $ pi2
else pi2
end;
val pis'' = fold (concat_perm #> map) pis' pis;
@@ -678,16 +678,16 @@
(* outer syntax *)
val _ =
- Outer_Syntax.local_theory_to_proof @{command_keyword nominal_inductive}
+ Outer_Syntax.local_theory_to_proof \<^command_keyword>\<open>nominal_inductive\<close>
"prove equivariance and strong induction theorem for inductive predicate involving nominal datatypes"
- (Parse.name -- Scan.optional (@{keyword "avoids"} |-- Parse.and_list1 (Parse.name --
- (@{keyword ":"} |-- Scan.repeat1 Parse.name))) [] >> (fn (name, avoids) =>
+ (Parse.name -- Scan.optional (\<^keyword>\<open>avoids\<close> |-- Parse.and_list1 (Parse.name --
+ (\<^keyword>\<open>:\<close> |-- Scan.repeat1 Parse.name))) [] >> (fn (name, avoids) =>
prove_strong_ind name avoids));
val _ =
- Outer_Syntax.local_theory @{command_keyword equivariance}
+ Outer_Syntax.local_theory \<^command_keyword>\<open>equivariance\<close>
"prove equivariance for inductive predicate involving nominal datatypes"
- (Parse.name -- Scan.optional (@{keyword "["} |-- Parse.list1 Parse.name --| @{keyword "]"}) [] >>
+ (Parse.name -- Scan.optional (\<^keyword>\<open>[\<close> |-- Parse.list1 Parse.name --| \<^keyword>\<open>]\<close>) [] >>
(fn (name, atoms) => prove_eqvt name atoms));
end
--- a/src/HOL/Nominal/nominal_inductive2.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_inductive2.ML Sat Jan 05 17:24:33 2019 +0100
@@ -44,11 +44,11 @@
th RS infer_instantiate ctxt [(#1 (dest_Var (Thm.term_of perm_boolI_pi)), pi)] perm_boolI;
fun mk_perm_bool_simproc names =
- Simplifier.make_simproc @{context} "perm_bool"
- {lhss = [@{term "perm pi x"}],
+ Simplifier.make_simproc \<^context> "perm_bool"
+ {lhss = [\<^term>\<open>perm pi x\<close>],
proc = fn _ => fn _ => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name Nominal.perm}, _) $ _ $ t =>
+ Const (\<^const_name>\<open>Nominal.perm\<close>, _) $ _ $ t =>
if member (op =) names (the_default "" (try (head_of #> dest_Const #> fst) t))
then SOME perm_bool else NONE
| _ => NONE)};
@@ -78,14 +78,14 @@
| add_binders thy i (Abs (_, _, t)) bs = add_binders thy (i + 1) t bs
| add_binders thy i _ bs = bs;
-fun split_conj f names (Const (@{const_name HOL.conj}, _) $ p $ q) _ = (case head_of p of
+fun split_conj f names (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ p $ q) _ = (case head_of p of
Const (name, _) =>
if member (op =) names name then SOME (f p q) else NONE
| _ => NONE)
| split_conj _ _ _ _ = NONE;
fun strip_all [] t = t
- | strip_all (_ :: xs) (Const (@{const_name All}, _) $ Abs (s, T, t)) = strip_all xs t;
+ | strip_all (_ :: xs) (Const (\<^const_name>\<open>All\<close>, _) $ Abs (s, T, t)) = strip_all xs t;
(*********************************************************************)
(* maps R ... & (ALL pi_1 ... pi_n z. P z (pi_1 o ... o pi_n o t)) *)
@@ -96,17 +96,17 @@
(* where "id" protects the subformula from simplification *)
(*********************************************************************)
-fun inst_conj_all names ps pis (Const (@{const_name HOL.conj}, _) $ p $ q) _ =
+fun inst_conj_all names ps pis (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ p $ q) _ =
(case head_of p of
Const (name, _) =>
if member (op =) names name then SOME (HOLogic.mk_conj (p,
- Const (@{const_name Fun.id}, HOLogic.boolT --> HOLogic.boolT) $
+ Const (\<^const_name>\<open>Fun.id\<close>, HOLogic.boolT --> HOLogic.boolT) $
(subst_bounds (pis, strip_all pis q))))
else NONE
| _ => NONE)
| inst_conj_all names ps pis t u =
if member (op aconv) ps (head_of u) then
- SOME (Const (@{const_name Fun.id}, HOLogic.boolT --> HOLogic.boolT) $
+ SOME (Const (\<^const_name>\<open>Fun.id\<close>, HOLogic.boolT --> HOLogic.boolT) $
(subst_bounds (pis, strip_all pis t)))
else NONE
| inst_conj_all _ _ _ _ _ = NONE;
@@ -197,7 +197,7 @@
| add_set (t, T) ((u, U) :: ps) =
if T = U then
let val S = HOLogic.mk_setT T
- in (Const (@{const_name sup}, S --> S --> S) $ u $ t, T) :: ps
+ in (Const (\<^const_name>\<open>sup\<close>, S --> S --> S) $ u $ t, T) :: ps
end
else (u, U) :: add_set (t, T) ps
in
@@ -223,7 +223,7 @@
val atomTs = distinct op = (maps (map snd o #2) prems);
val atoms = map (fst o dest_Type) atomTs;
- val ind_sort = if null atomTs then @{sort type}
+ val ind_sort = if null atomTs then \<^sort>\<open>type\<close>
else Sign.minimize_sort thy (Sign.certify_sort thy (map (fn a => Sign.intern_class thy
("fs_" ^ Long_Name.base_name a)) atoms));
val (fs_ctxt_tyname, _) = Name.variant "'n" (Variable.names_of ctxt');
@@ -284,7 +284,7 @@
(maps (fn (t, T) => map (fn (u, U) => HOLogic.mk_Trueprop
(NominalDatatype.fresh_star_const U T $ u $ t)) sets)
(ts ~~ binder_types (fastype_of p)) @
- map (fn (u, U) => HOLogic.mk_Trueprop (Const (@{const_name finite},
+ map (fn (u, U) => HOLogic.mk_Trueprop (Const (\<^const_name>\<open>finite\<close>,
HOLogic.mk_setT U --> HOLogic.boolT) $ u)) sets) |>
split_list) prems |> split_list;
@@ -293,7 +293,7 @@
("pt_" ^ Long_Name.base_name a ^ "2")) atoms;
val eqvt_ss = simpset_of (put_simpset HOL_basic_ss (Proof_Context.init_global thy)
addsimps (eqvt_thms @ perm_pi_simp @ pt2_atoms)
- addsimprocs [mk_perm_bool_simproc [@{const_name Fun.id}],
+ addsimprocs [mk_perm_bool_simproc [\<^const_name>\<open>Fun.id\<close>],
NominalPermeq.perm_simproc_app, NominalPermeq.perm_simproc_fun]);
val fresh_star_bij = Global_Theory.get_thms thy "fresh_star_bij";
val pt_insts = map (NominalAtoms.pt_inst_of thy) atoms;
@@ -313,7 +313,7 @@
(** protect terms to avoid that fresh_star_prod_set interferes with **)
(** pairs used in introduction rules of inductive predicate **)
fun protect t =
- let val T = fastype_of t in Const (@{const_name Fun.id}, T --> T) $ t end;
+ let val T = fastype_of t in Const (\<^const_name>\<open>Fun.id\<close>, T --> T) $ t end;
val p = foldr1 HOLogic.mk_prod (map protect ts);
val atom = fst (dest_Type T);
val {at_inst, ...} = NominalAtoms.the_atom_info thy atom;
@@ -394,7 +394,7 @@
fun concat_perm pi1 pi2 =
let val T = fastype_of pi1
in if T = fastype_of pi2 then
- Const (@{const_name append}, T --> T --> T) $ pi1 $ pi2
+ Const (\<^const_name>\<open>append\<close>, T --> T --> T) $ pi1 $ pi2
else pi2
end;
val pis'' = fold_rev (concat_perm #> map) pis' pis;
@@ -486,12 +486,12 @@
(* outer syntax *)
val _ =
- Outer_Syntax.local_theory_to_proof @{command_keyword nominal_inductive2}
+ Outer_Syntax.local_theory_to_proof \<^command_keyword>\<open>nominal_inductive2\<close>
"prove strong induction theorem for inductive predicate involving nominal datatypes"
(Parse.name --
- Scan.option (@{keyword "("} |-- Parse.!!! (Parse.name --| @{keyword ")"})) --
- (Scan.optional (@{keyword "avoids"} |-- Parse.enum1 "|" (Parse.name --
- (@{keyword ":"} |-- Parse.and_list1 Parse.term))) []) >> (fn ((name, rule_name), avoids) =>
+ Scan.option (\<^keyword>\<open>(\<close> |-- Parse.!!! (Parse.name --| \<^keyword>\<open>)\<close>)) --
+ (Scan.optional (\<^keyword>\<open>avoids\<close> |-- Parse.enum1 "|" (Parse.name --
+ (\<^keyword>\<open>:\<close> |-- Parse.and_list1 Parse.term))) []) >> (fn ((name, rule_name), avoids) =>
prove_strong_ind name rule_name avoids));
end
--- a/src/HOL/Nominal/nominal_permeq.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_permeq.ML Sat Jan 05 17:24:33 2019 +0100
@@ -93,15 +93,15 @@
(* constant or when (f x) is a permuation with two or more arguments *)
fun applicable_app t =
(case (strip_comb t) of
- (Const (@{const_name Nominal.perm},_),ts) => (length ts) >= 2
+ (Const (\<^const_name>\<open>Nominal.perm\<close>,_),ts) => (length ts) >= 2
| (Const _,_) => false
| _ => true)
in
case redex of
(* case pi o (f x) == (pi o f) (pi o x) *)
- (Const(@{const_name Nominal.perm},
- Type(@{type_name fun},
- [Type(@{type_name list}, [Type(@{type_name prod},[Type(n,_),_])]),_])) $ pi $ (f $ x)) =>
+ (Const(\<^const_name>\<open>Nominal.perm\<close>,
+ Type(\<^type_name>\<open>fun\<close>,
+ [Type(\<^type_name>\<open>list\<close>, [Type(\<^type_name>\<open>prod\<close>,[Type(n,_),_])]),_])) $ pi $ (f $ x)) =>
(if (applicable_app f) then
let
val name = Long_Name.base_name n
@@ -113,8 +113,8 @@
end
val perm_simproc_app =
- Simplifier.make_simproc @{context} "perm_simproc_app"
- {lhss = [@{term "Nominal.perm pi x"}], proc = K perm_simproc_app'}
+ Simplifier.make_simproc \<^context> "perm_simproc_app"
+ {lhss = [\<^term>\<open>Nominal.perm pi x\<close>], proc = K perm_simproc_app'}
(* a simproc that deals with permutation instances in front of functions *)
fun perm_simproc_fun' ctxt ct =
@@ -123,20 +123,20 @@
fun applicable_fun t =
(case (strip_comb t) of
(Abs _ ,[]) => true
- | (Const (@{const_name Nominal.perm},_),_) => false
+ | (Const (\<^const_name>\<open>Nominal.perm\<close>,_),_) => false
| (Const _, _) => true
| _ => false)
in
case redex of
(* case pi o f == (%x. pi o (f ((rev pi)o x))) *)
- (Const(@{const_name Nominal.perm},_) $ pi $ f) =>
+ (Const(\<^const_name>\<open>Nominal.perm\<close>,_) $ pi $ f) =>
(if applicable_fun f then SOME perm_fun_def else NONE)
| _ => NONE
end
val perm_simproc_fun =
- Simplifier.make_simproc @{context} "perm_simproc_fun"
- {lhss = [@{term "Nominal.perm pi x"}], proc = K perm_simproc_fun'}
+ Simplifier.make_simproc \<^context> "perm_simproc_fun"
+ {lhss = [\<^term>\<open>Nominal.perm pi x\<close>], proc = K perm_simproc_fun'}
(* function for simplyfying permutations *)
(* stac contains the simplifiation tactic that is *)
@@ -190,9 +190,9 @@
fun perm_compose_simproc' ctxt ct =
(case Thm.term_of ct of
- (Const (@{const_name Nominal.perm}, Type (@{type_name fun}, [Type (@{type_name list},
- [Type (@{type_name Product_Type.prod}, [T as Type (tname,_),_])]),_])) $ pi1 $ (Const (@{const_name Nominal.perm},
- Type (@{type_name fun}, [Type (@{type_name list}, [Type (@{type_name Product_Type.prod}, [U as Type (uname,_),_])]),_])) $
+ (Const (\<^const_name>\<open>Nominal.perm\<close>, Type (\<^type_name>\<open>fun\<close>, [Type (\<^type_name>\<open>list\<close>,
+ [Type (\<^type_name>\<open>Product_Type.prod\<close>, [T as Type (tname,_),_])]),_])) $ pi1 $ (Const (\<^const_name>\<open>Nominal.perm\<close>,
+ Type (\<^type_name>\<open>fun\<close>, [Type (\<^type_name>\<open>list\<close>, [Type (\<^type_name>\<open>Product_Type.prod\<close>, [U as Type (uname,_),_])]),_])) $
pi2 $ t)) =>
let
val thy = Proof_Context.theory_of ctxt
@@ -217,8 +217,8 @@
| _ => NONE);
val perm_compose_simproc =
- Simplifier.make_simproc @{context} "perm_compose"
- {lhss = [@{term "Nominal.perm pi1 (Nominal.perm pi2 t)"}],
+ Simplifier.make_simproc \<^context> "perm_compose"
+ {lhss = [\<^term>\<open>Nominal.perm pi1 (Nominal.perm pi2 t)\<close>],
proc = K perm_compose_simproc'}
fun perm_compose_tac ctxt i =
@@ -297,7 +297,7 @@
let val goal = nth (cprems_of st) (i - 1)
in
case Envir.eta_contract (Logic.strip_assums_concl (Thm.term_of goal)) of
- _ $ (Const (@{const_name finite}, _) $ (Const (@{const_name Nominal.supp}, T) $ x)) =>
+ _ $ (Const (\<^const_name>\<open>finite\<close>, _) $ (Const (\<^const_name>\<open>Nominal.supp\<close>, T) $ x)) =>
let
val ps = Logic.strip_params (Thm.term_of goal);
val Ts = rev (map snd ps);
@@ -306,7 +306,7 @@
HOLogic.pair_const (fastype_of1 (Ts, v)) (fastype_of1 (Ts, s)) $ v $ s)
vs HOLogic.unit;
val s' = fold_rev Term.abs ps
- (Const (@{const_name Nominal.supp}, fastype_of1 (Ts, s) -->
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, fastype_of1 (Ts, s) -->
Term.range_type T) $ s);
val supports_rule' = Thm.lift_rule goal supports_rule;
val _ $ (_ $ S $ _) =
@@ -341,7 +341,7 @@
val ctxt2 = ctxt addsimps [supp_prod,supp_unit,finite_Un,finite_emptyI,conj_absorb]@fin_supp
in
case Logic.strip_assums_concl (Thm.term_of goal) of
- _ $ (Const (@{const_name Nominal.fresh}, Type ("fun", [T, _])) $ _ $ t) =>
+ _ $ (Const (\<^const_name>\<open>Nominal.fresh\<close>, Type ("fun", [T, _])) $ _ $ t) =>
let
val ps = Logic.strip_params (Thm.term_of goal);
val Ts = rev (map snd ps);
@@ -351,7 +351,7 @@
vs HOLogic.unit;
val s' =
fold_rev Term.abs ps
- (Const (@{const_name Nominal.supp}, fastype_of1 (Ts, s) --> HOLogic.mk_setT T) $ s);
+ (Const (\<^const_name>\<open>Nominal.supp\<close>, fastype_of1 (Ts, s) --> HOLogic.mk_setT T) $ s);
val supports_fresh_rule' = Thm.lift_rule goal supports_fresh_rule;
val _ $ (_ $ S $ _) =
Logic.strip_assums_concl (hd (Thm.prems_of supports_fresh_rule'));
--- a/src/HOL/Nominal/nominal_primrec.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_primrec.ML Sat Jan 05 17:24:33 2019 +0100
@@ -32,8 +32,8 @@
fun unquantify t =
let
- val (vs, Ts) = split_list (strip_qnt_vars @{const_name Pure.all} t);
- val body = strip_qnt_body @{const_name Pure.all} t;
+ val (vs, Ts) = split_list (strip_qnt_vars \<^const_name>\<open>Pure.all\<close> t);
+ val body = strip_qnt_body \<^const_name>\<open>Pure.all\<close> t;
val (vs', _) = fold_map Name.variant vs (Name.make_context (fold_aterms
(fn Free (v, _) => insert (op =) v | _ => I) body []))
in (curry subst_bounds (map2 (curry Free) vs' Ts |> rev) body) end;
@@ -151,7 +151,7 @@
(case AList.lookup (op =) eqns cname of
NONE => (warning ("No equation for constructor " ^ quote cname ^
"\nin definition of function " ^ quote fname);
- (fnames', fnss', (Const (@{const_name undefined}, dummyT))::fns))
+ (fnames', fnss', (Const (\<^const_name>\<open>undefined\<close>, dummyT))::fns))
| SOME (ls, cargs', rs, rhs, eq) =>
let
val recs = filter (Old_Datatype_Aux.is_rec_type o snd) (cargs' ~~ cargs);
@@ -189,7 +189,7 @@
case AList.lookup (op =) fns i of
NONE =>
let
- val dummy_fns = map (fn (_, cargs) => Const (@{const_name undefined},
+ val dummy_fns = map (fn (_, cargs) => Const (\<^const_name>\<open>undefined\<close>,
replicate (length cargs + length (filter Old_Datatype_Aux.is_rec_type cargs))
dummyT ---> HOLogic.unitT)) constrs;
val _ = warning ("No function definition for datatype " ^ quote tname)
@@ -307,7 +307,7 @@
curry (List.take o swap) (length fvars) |> map (Thm.cterm_of lthy');
val invs' = (case invs of
NONE => map (fn (i, _) =>
- Abs ("x", fastype_of (snd (nth defs' i)), @{term True})) descr
+ Abs ("x", fastype_of (snd (nth defs' i)), \<^term>\<open>True\<close>)) descr
| SOME invs' => map (prep_term lthy') invs');
val inst = (map (#1 o dest_Var) fvars ~~ cfs) @
(map #1 pvars ~~ map (Thm.cterm_of lthy') invs') @
@@ -391,17 +391,17 @@
val freshness_context = Parse.reserved "freshness_context";
val invariant = Parse.reserved "invariant";
-fun unless_flag scan = Scan.unless ((freshness_context || invariant) -- @{keyword ":"}) scan;
+fun unless_flag scan = Scan.unless ((freshness_context || invariant) -- \<^keyword>\<open>:\<close>) scan;
-val parser1 = (freshness_context -- @{keyword ":"}) |-- unless_flag Parse.term >> SOME;
-val parser2 = (invariant -- @{keyword ":"}) |--
+val parser1 = (freshness_context -- \<^keyword>\<open>:\<close>) |-- unless_flag Parse.term >> SOME;
+val parser2 = (invariant -- \<^keyword>\<open>:\<close>) |--
(Scan.repeat1 (unless_flag Parse.term) >> SOME) -- Scan.optional parser1 NONE ||
(parser1 >> pair NONE);
val options =
- Scan.optional (@{keyword "("} |-- Parse.!!! (parser2 --| @{keyword ")"})) (NONE, NONE);
+ Scan.optional (\<^keyword>\<open>(\<close> |-- Parse.!!! (parser2 --| \<^keyword>\<open>)\<close>)) (NONE, NONE);
val _ =
- Outer_Syntax.local_theory_to_proof @{command_keyword nominal_primrec}
+ Outer_Syntax.local_theory_to_proof \<^command_keyword>\<open>nominal_primrec\<close>
"define primitive recursive functions on nominal datatypes"
(options -- Parse.vars -- Parse.for_fixes -- Parse_Spec.where_multi_specs
>> (fn ((((invs, fctxt), vars), params), specs) =>
--- a/src/HOL/Nominal/nominal_thmdecls.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nominal/nominal_thmdecls.ML Sat Jan 05 17:24:33 2019 +0100
@@ -43,7 +43,7 @@
(* equality-lemma can be derived. *)
exception EQVT_FORM of string
-val nominal_eqvt_debug = Attrib.setup_config_bool @{binding nominal_eqvt_debug} (K false);
+val nominal_eqvt_debug = Attrib.setup_config_bool \<^binding>\<open>nominal_eqvt_debug\<close> (K false);
fun tactic ctxt (msg, tac) =
if Config.get ctxt nominal_eqvt_debug
@@ -54,7 +54,7 @@
let
val thy = Proof_Context.theory_of ctxt
val T = fastype_of pi'
- val mypifree = Thm.cterm_of ctxt (Const (@{const_name "rev"}, T --> T) $ pi')
+ val mypifree = Thm.cterm_of ctxt (Const (\<^const_name>\<open>rev\<close>, T --> T) $ pi')
val perm_pi_simp = Global_Theory.get_thms thy "perm_pi_simp"
in
EVERY1 [tactic ctxt ("iffI applied", resolve_tac ctxt @{thms iffI}),
@@ -70,7 +70,7 @@
fun get_derived_thm ctxt hyp concl orig_thm pi typi =
let
val pi' = Var (pi, typi);
- val lhs = Const (@{const_name "perm"}, typi --> HOLogic.boolT --> HOLogic.boolT) $ pi' $ hyp;
+ val lhs = Const (\<^const_name>\<open>perm\<close>, typi --> HOLogic.boolT --> HOLogic.boolT) $ pi' $ hyp;
val ([goal_term, pi''], ctxt') = Variable.import_terms false
[HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs, concl)), pi'] ctxt
val _ = writeln (Syntax.string_of_term ctxt' goal_term);
@@ -85,7 +85,7 @@
let
fun replace n ty =
let
- val c = Const (@{const_name "perm"}, typi --> ty --> ty)
+ val c = Const (\<^const_name>\<open>perm\<close>, typi --> ty --> ty)
val v1 = Var (pi, typi)
val v2 = Var (n, ty)
in
@@ -100,8 +100,8 @@
fun get_pi t thy =
let fun get_pi_aux s =
(case s of
- (Const (@{const_name "perm"} ,typrm) $
- (Var (pi,typi as Type(@{type_name "list"}, [Type (@{type_name Product_Type.prod}, [Type (tyatm,[]),_])]))) $
+ (Const (\<^const_name>\<open>perm\<close> ,typrm) $
+ (Var (pi,typi as Type(\<^type_name>\<open>list\<close>, [Type (\<^type_name>\<open>Product_Type.prod\<close>, [Type (tyatm,[]),_])]))) $
(Var (n,ty))) =>
let
(* FIXME: this should be an operation the library *)
@@ -134,7 +134,7 @@
val thms_to_be_added =
(case Thm.prop_of orig_thm of
(* case: eqvt-lemma is of the implicational form *)
- (Const(@{const_name Pure.imp}, _) $ (Const (@{const_name Trueprop},_) $ hyp) $ (Const (@{const_name Trueprop},_) $ concl)) =>
+ (Const(\<^const_name>\<open>Pure.imp\<close>, _) $ (Const (\<^const_name>\<open>Trueprop\<close>,_) $ hyp) $ (Const (\<^const_name>\<open>Trueprop\<close>,_) $ concl)) =>
let
val (pi,typi) = get_pi concl thy
in
@@ -146,8 +146,8 @@
else raise EQVT_FORM "Type Implication"
end
(* case: eqvt-lemma is of the equational form *)
- | (Const (@{const_name Trueprop}, _) $ (Const (@{const_name HOL.eq}, _) $
- (Const (@{const_name "perm"},typrm) $ Var (pi,typi) $ lhs) $ rhs)) =>
+ | (Const (\<^const_name>\<open>Trueprop\<close>, _) $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $
+ (Const (\<^const_name>\<open>perm\<close>,typrm) $ Var (pi,typi) $ lhs) $ rhs)) =>
(if (apply_pi lhs (pi,typi)) = rhs
then [orig_thm]
else raise EQVT_FORM "Type Equality")
@@ -169,11 +169,11 @@
val get_eqvt_thms = Context.Proof #> Data.get;
val setup =
- Attrib.setup @{binding eqvt} (Attrib.add_del eqvt_add eqvt_del)
+ Attrib.setup \<^binding>\<open>eqvt\<close> (Attrib.add_del eqvt_add eqvt_del)
"equivariance theorem declaration" #>
- Attrib.setup @{binding eqvt_force} (Attrib.add_del eqvt_force_add eqvt_force_del)
+ Attrib.setup \<^binding>\<open>eqvt_force\<close> (Attrib.add_del eqvt_force_add eqvt_force_del)
"equivariance theorem declaration (without checking the form of the lemma)" #>
- Global_Theory.add_thms_dynamic (@{binding eqvts}, Data.get);
+ Global_Theory.add_thms_dynamic (\<^binding>\<open>eqvts\<close>, Data.get);
end;
--- a/src/HOL/Nonstandard_Analysis/CLim.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/CLim.thy Sat Jan 05 17:24:33 2019 +0100
@@ -135,7 +135,7 @@
lemma NSCDERIV_pow: "NSDERIV (\<lambda>x. x ^ n) x :> complex_of_real (real n) * (x ^ (n - 1))"
by (metis CDERIV_pow NSDERIV_DERIV_iff One_nat_def)
-text \<open>Can't relax the premise @{term "x \<noteq> 0"}: it isn't continuous at zero.\<close>
+text \<open>Can't relax the premise \<^term>\<open>x \<noteq> 0\<close>: it isn't continuous at zero.\<close>
lemma NSCDERIV_inverse: "x \<noteq> 0 \<Longrightarrow> NSDERIV (\<lambda>x. inverse x) x :> - (inverse x)\<^sup>2"
for x :: complex
unfolding numeral_2_eq_2 by (rule NSDERIV_inverse)
@@ -145,7 +145,7 @@
unfolding numeral_2_eq_2 by (rule DERIV_inverse)
-subsection \<open>Derivative of Reciprocals (Function @{term inverse})\<close>
+subsection \<open>Derivative of Reciprocals (Function \<^term>\<open>inverse\<close>)\<close>
lemma CDERIV_inverse_fun:
"DERIV f x :> d \<Longrightarrow> f x \<noteq> 0 \<Longrightarrow> DERIV (\<lambda>x. inverse (f x)) x :> - (d * inverse ((f x)\<^sup>2))"
--- a/src/HOL/Nonstandard_Analysis/Free_Ultrafilter.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/Free_Ultrafilter.thy Sat Jan 05 17:24:33 2019 +0100
@@ -49,7 +49,7 @@
text \<open>
A filter \<open>F\<close> is an ultrafilter iff it is a maximal filter,
- i.e. whenever \<open>G\<close> is a filter and @{prop "F \<subseteq> G"} then @{prop "F = G"}
+ i.e. whenever \<open>G\<close> is a filter and \<^prop>\<open>F \<subseteq> G\<close> then \<^prop>\<open>F = G\<close>
\<close>
text \<open>
--- a/src/HOL/Nonstandard_Analysis/HDeriv.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HDeriv.thy Sat Jan 05 17:24:33 2019 +0100
@@ -228,7 +228,7 @@
lemma NSDERIV_inverse:
fixes x :: "'a::real_normed_field"
- assumes "x \<noteq> 0" \<comment> \<open>can't get rid of @{term "x \<noteq> 0"} because it isn't continuous at zero\<close>
+ assumes "x \<noteq> 0" \<comment> \<open>can't get rid of \<^term>\<open>x \<noteq> 0\<close> because it isn't continuous at zero\<close>
shows "NSDERIV (\<lambda>x. inverse x) x :> - (inverse x ^ Suc (Suc 0))"
proof -
{
--- a/src/HOL/Nonstandard_Analysis/HLim.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HLim.thy Sat Jan 05 17:24:33 2019 +0100
@@ -108,7 +108,7 @@
by (simp add: NSLIM_def)
-subsubsection \<open>Equivalence of @{term filterlim} and @{term NSLIM}\<close>
+subsubsection \<open>Equivalence of \<^term>\<open>filterlim\<close> and \<^term>\<open>NSLIM\<close>\<close>
lemma LIM_NSLIM:
assumes f: "f \<midarrow>a\<rightarrow> L"
--- a/src/HOL/Nonstandard_Analysis/HSEQ.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HSEQ.thy Sat Jan 05 17:24:33 2019 +0100
@@ -150,7 +150,7 @@
by (blast intro: NSLIMSEQ_imp_Suc NSLIMSEQ_Suc)
-subsubsection \<open>Equivalence of @{term LIMSEQ} and @{term NSLIMSEQ}\<close>
+subsubsection \<open>Equivalence of \<^term>\<open>LIMSEQ\<close> and \<^term>\<open>NSLIMSEQ\<close>\<close>
lemma LIMSEQ_NSLIMSEQ:
assumes X: "X \<longlonglongrightarrow> L"
@@ -199,7 +199,7 @@
by (blast intro: LIMSEQ_NSLIMSEQ NSLIMSEQ_LIMSEQ)
-subsubsection \<open>Derived theorems about @{term NSLIMSEQ}\<close>
+subsubsection \<open>Derived theorems about \<^term>\<open>NSLIMSEQ\<close>\<close>
text \<open>We prove the NS version from the standard one, since the NS proof
seems more complicated than the standard one above!\<close>
@@ -477,8 +477,7 @@
subsection \<open>Power Sequences\<close>
-text \<open>The sequence @{term "x^n"} tends to 0 if @{term "0\<le>x"} and @{term
- "x<1"}. Proof will use (NS) Cauchy equivalence for convergence and
+text \<open>The sequence \<^term>\<open>x^n\<close> tends to 0 if \<^term>\<open>0\<le>x\<close> and \<^term>\<open>x<1\<close>. Proof will use (NS) Cauchy equivalence for convergence and
also fact that bounded and monotonic sequence converges.\<close>
text \<open>We now use NS criterion to bring proof of theorem through.\<close>
--- a/src/HOL/Nonstandard_Analysis/HSeries.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HSeries.thy Sat Jan 05 17:24:33 2019 +0100
@@ -26,11 +26,11 @@
lemma sumhr_app: "sumhr (M, N, f) = ( *f2* (\<lambda>m n. sum f {m..<n})) M N"
by (simp add: sumhr_def)
-text \<open>Base case in definition of @{term sumr}.\<close>
+text \<open>Base case in definition of \<^term>\<open>sumr\<close>.\<close>
lemma sumhr_zero [simp]: "\<And>m. sumhr (m, 0, f) = 0"
unfolding sumhr_app by transfer simp
-text \<open>Recursive case in definition of @{term sumr}.\<close>
+text \<open>Recursive case in definition of \<^term>\<open>sumr\<close>.\<close>
lemma sumhr_if:
"\<And>m n. sumhr (m, n + 1, f) = (if n + 1 \<le> m then 0 else sumhr (m, n, f) + ( *f* f) n)"
unfolding sumhr_app by transfer simp
@@ -87,7 +87,7 @@
subsection \<open>Nonstandard Sums\<close>
text \<open>Infinite sums are obtained by summing to some infinite hypernatural
- (such as @{term whn}).\<close>
+ (such as \<^term>\<open>whn\<close>).\<close>
lemma sumhr_hypreal_of_hypnat_omega: "sumhr (0, whn, \<lambda>i. 1) = hypreal_of_hypnat whn"
by (simp add: sumhr_const)
--- a/src/HOL/Nonstandard_Analysis/HTranscendental.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HTranscendental.thy Sat Jan 05 17:24:33 2019 +0100
@@ -592,7 +592,7 @@
by (insert NSLIMSEQ_mult [OF NSLIMSEQ_sin_pi NSLIMSEQ_cos_one], simp)
-text\<open>A familiar approximation to @{term "cos x"} when @{term x} is small\<close>
+text\<open>A familiar approximation to \<^term>\<open>cos x\<close> when \<^term>\<open>x\<close> is small\<close>
lemma STAR_cos_Infinitesimal_approx:
fixes x :: "'a::{real_normed_field,banach} star"
--- a/src/HOL/Nonstandard_Analysis/HyperDef.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HyperDef.thy Sat Jan 05 17:24:33 2019 +0100
@@ -90,7 +90,7 @@
by (simp add: Reals_def Standard_def)
-subsection \<open>Injection from @{typ hypreal}\<close>
+subsection \<open>Injection from \<^typ>\<open>hypreal\<close>\<close>
definition of_hypreal :: "hypreal \<Rightarrow> 'a::real_algebra_1 star"
where [transfer_unfold]: "of_hypreal = *f* of_real"
@@ -133,7 +133,7 @@
by transfer (rule of_real_eq_0_iff)
-subsection \<open>Properties of @{term starrel}\<close>
+subsection \<open>Properties of \<^term>\<open>starrel\<close>\<close>
lemma lemma_starrel_refl [simp]: "x \<in> starrel `` {x}"
by (simp add: starrel_def)
@@ -145,7 +145,7 @@
declare equiv_starrel [THEN eq_equiv_class_iff, simp]
-subsection \<open>@{term hypreal_of_real}: the Injection from @{typ real} to @{typ hypreal}\<close>
+subsection \<open>\<^term>\<open>hypreal_of_real\<close>: the Injection from \<^typ>\<open>real\<close> to \<^typ>\<open>hypreal\<close>\<close>
lemma inj_star_of: "inj star_of"
by (rule inj_onI) simp
@@ -160,7 +160,7 @@
by simp
-subsection \<open>Properties of @{term star_n}\<close>
+subsection \<open>Properties of \<^term>\<open>star_n\<close>\<close>
lemma star_n_add: "star_n X + star_n Y = star_n (\<lambda>n. X n + Y n)"
by (simp only: star_add_def starfun2_star_n)
@@ -199,7 +199,7 @@
subsection \<open>Existence of Infinite Hyperreal Number\<close>
text \<open>Existence of infinite number not corresponding to any real number.
- Use assumption that member @{term \<U>} is not finite.\<close>
+ Use assumption that member \<^term>\<open>\<U>\<close> is not finite.\<close>
text \<open>A few lemmas first.\<close>
@@ -286,7 +286,7 @@
#> Lin_Arith.add_simps [@{thm star_of_zero}, @{thm star_of_one},
@{thm star_of_numeral}, @{thm star_of_add},
@{thm star_of_minus}, @{thm star_of_diff}, @{thm star_of_mult}]
- #> Lin_Arith.add_inj_const (@{const_name "StarDef.star_of"}, @{typ "real \<Rightarrow> hypreal"}))
+ #> Lin_Arith.add_inj_const (\<^const_name>\<open>StarDef.star_of\<close>, \<^typ>\<open>real \<Rightarrow> hypreal\<close>))
\<close>
simproc_setup fast_arith_hypreal ("(m::hypreal) < n" | "(m::hypreal) \<le> n" | "(m::hypreal) = n") =
@@ -432,7 +432,7 @@
lemma hyperpow_two_hrabs [simp]: "\<bar>x::'a::linordered_idom star\<bar> pow 2 = x pow 2"
by (simp add: hyperpow_hrabs)
-text \<open>The precondition could be weakened to @{term "0\<le>x"}.\<close>
+text \<open>The precondition could be weakened to \<^term>\<open>0\<le>x\<close>.\<close>
lemma hypreal_mult_less_mono: "u < v \<Longrightarrow> x < y \<Longrightarrow> 0 < v \<Longrightarrow> 0 < x \<Longrightarrow> u * x < v * y"
for u v x y :: hypreal
by (simp add: mult_strict_mono order_less_imp_le)
--- a/src/HOL/Nonstandard_Analysis/HyperNat.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/HyperNat.thy Sat Jan 05 17:24:33 2019 +0100
@@ -165,7 +165,7 @@
by (simp add: Nats_eq_Standard)
-subsection \<open>Infinite Hypernatural Numbers -- @{term HNatInfinite}\<close>
+subsection \<open>Infinite Hypernatural Numbers -- \<^term>\<open>HNatInfinite\<close>\<close>
text \<open>The set of infinite hypernatural numbers.\<close>
definition HNatInfinite :: "hypnat set"
@@ -306,7 +306,7 @@
subsubsection \<open>Alternative characterization of the set of infinite hypernaturals\<close>
-text \<open>@{term "HNatInfinite = {N. \<forall>n \<in> Nats. n < N}"}\<close>
+text \<open>\<^term>\<open>HNatInfinite = {N. \<forall>n \<in> Nats. n < N}\<close>\<close>
(*??delete? similar reasoning in hypnat_omega_gt_SHNat above*)
lemma HNatInfinite_FreeUltrafilterNat_lemma:
@@ -326,7 +326,7 @@
done
-subsubsection \<open>Alternative Characterization of @{term HNatInfinite} using Free Ultrafilter\<close>
+subsubsection \<open>Alternative Characterization of \<^term>\<open>HNatInfinite\<close> using Free Ultrafilter\<close>
lemma HNatInfinite_FreeUltrafilterNat:
"star_n X \<in> HNatInfinite \<Longrightarrow> \<forall>u. eventually (\<lambda>n. u < X n) \<U>"
--- a/src/HOL/Nonstandard_Analysis/NSA.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/NSA.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1373,7 +1373,7 @@
by (simp add: monad_def)
-subsection \<open>Proof that @{term "x \<approx> y"} implies @{term"\<bar>x\<bar> \<approx> \<bar>y\<bar>"}\<close>
+subsection \<open>Proof that \<^term>\<open>x \<approx> y\<close> implies \<^term>\<open>\<bar>x\<bar> \<approx> \<bar>y\<bar>\<close>\<close>
lemma approx_subset_monad: "x \<approx> y \<Longrightarrow> {x, y} \<le> monad x"
by (simp (no_asm)) (simp add: approx_monad_iff)
@@ -1468,7 +1468,7 @@
done
-subsection \<open>More @{term HFinite} and @{term Infinitesimal} Theorems\<close>
+subsection \<open>More \<^term>\<open>HFinite\<close> and \<^term>\<open>Infinitesimal\<close> Theorems\<close>
text \<open>
Interesting slightly counterintuitive theorem: necessary
@@ -1754,7 +1754,7 @@
subsection \<open>Alternative Definitions using Free Ultrafilter\<close>
-subsubsection \<open>@{term HFinite}\<close>
+subsubsection \<open>\<^term>\<open>HFinite\<close>\<close>
lemma HFinite_FreeUltrafilterNat:
"star_n X \<in> HFinite \<Longrightarrow> \<exists>u. eventually (\<lambda>n. norm (X n) < u) \<U>"
@@ -1778,7 +1778,7 @@
by (blast intro!: HFinite_FreeUltrafilterNat FreeUltrafilterNat_HFinite)
-subsubsection \<open>@{term HInfinite}\<close>
+subsubsection \<open>\<^term>\<open>HInfinite\<close>\<close>
lemma lemma_Compl_eq: "- {n. u < norm (f n)} = {n. norm (f n) \<le> u}"
by auto
@@ -1835,7 +1835,7 @@
by (blast intro!: HInfinite_FreeUltrafilterNat FreeUltrafilterNat_HInfinite)
-subsubsection \<open>@{term Infinitesimal}\<close>
+subsubsection \<open>\<^term>\<open>Infinitesimal\<close>\<close>
lemma ball_SReal_eq: "(\<forall>x::hypreal \<in> Reals. P x) \<longleftrightarrow> (\<forall>x::real. P (star_of x))"
by (auto simp: SReal_def)
@@ -1930,7 +1930,7 @@
lemma Compl_real_le_eq: "- {n::nat. real n \<le> u} = {n. u < real n}"
by (auto dest!: order_le_less_trans simp add: linorder_not_le)
-text \<open>@{term \<omega>} is a member of @{term HInfinite}.\<close>
+text \<open>\<^term>\<open>\<omega>\<close> is a member of \<^term>\<open>HInfinite\<close>.\<close>
theorem HInfinite_omega [simp]: "\<omega> \<in> HInfinite"
apply (simp add: omega_def)
apply (rule FreeUltrafilterNat_HInfinite)
--- a/src/HOL/Nonstandard_Analysis/NSCA.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/NSCA.thy Sat Jan 05 17:24:33 2019 +0100
@@ -229,7 +229,7 @@
"[| r \<in> SComplex; s \<in> SComplex; r \<approx> x; s \<approx> x|] ==> r = s"
by (blast intro: SComplex_approx_iff [THEN iffD1] approx_trans2)
-subsection \<open>Properties of @{term hRe}, @{term hIm} and @{term HComplex}\<close>
+subsection \<open>Properties of \<^term>\<open>hRe\<close>, \<^term>\<open>hIm\<close> and \<^term>\<open>HComplex\<close>\<close>
lemma abs_hRe_le_hcmod: "\<And>x. \<bar>hRe x\<bar> \<le> hcmod x"
--- a/src/HOL/Nonstandard_Analysis/NSComplex.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/NSComplex.thy Sat Jan 05 17:24:33 2019 +0100
@@ -189,7 +189,7 @@
by (rule diff_eq_eq)
-subsection \<open>Embedding Properties for @{term hcomplex_of_hypreal} Map\<close>
+subsection \<open>Embedding Properties for \<^term>\<open>hcomplex_of_hypreal\<close> Map\<close>
lemma hRe_hcomplex_of_hypreal [simp]: "\<And>z. hRe (hcomplex_of_hypreal z) = z"
by transfer (rule Re_complex_of_real)
@@ -315,7 +315,7 @@
by transfer (rule complex_mult_cnj)
-subsection \<open>More Theorems about the Function @{term hcmod}\<close>
+subsection \<open>More Theorems about the Function \<^term>\<open>hcmod\<close>\<close>
lemma hcmod_hcomplex_of_hypreal_of_nat [simp]:
"hcmod (hcomplex_of_hypreal (hypreal_of_nat n)) = hypreal_of_nat n"
@@ -376,7 +376,7 @@
by (blast intro: ccontr dest: hcpow_not_zero)
-subsection \<open>The Function @{term hsgn}\<close>
+subsection \<open>The Function \<^term>\<open>hsgn\<close>\<close>
lemma hsgn_zero [simp]: "hsgn 0 = 0"
by transfer (rule sgn_zero)
@@ -556,7 +556,7 @@
by transfer (rule exp_add)
-subsection \<open>@{term hcomplex_of_complex}: the Injection from type @{typ complex} to to @{typ hcomplex}\<close>
+subsection \<open>\<^term>\<open>hcomplex_of_complex\<close>: the Injection from type \<^typ>\<open>complex\<close> to \<^typ>\<open>hcomplex\<close>\<close>
lemma hcomplex_of_complex_i: "iii = hcomplex_of_complex \<i>"
by (rule iii_def)
--- a/src/HOL/Nonstandard_Analysis/NatStar.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/NatStar.thy Sat Jan 05 17:24:33 2019 +0100
@@ -97,8 +97,8 @@
lemma starfun_pow3: "\<And>R. ( *f* (\<lambda>r. r ^ n)) R = R pow hypnat_of_nat n"
by transfer (rule refl)
-text \<open>The @{term hypreal_of_hypnat} function as a nonstandard extension of
- @{term real_of_nat}.\<close>
+text \<open>The \<^term>\<open>hypreal_of_hypnat\<close> function as a nonstandard extension of
+ \<^term>\<open>real_of_nat\<close>.\<close>
lemma starfunNat_real_of_nat: "( *f* real) = hypreal_of_hypnat"
by transfer (simp add: fun_eq_iff)
--- a/src/HOL/Nonstandard_Analysis/StarDef.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/StarDef.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
subsection \<open>A Free Ultrafilter over the Naturals\<close>
-definition FreeUltrafilterNat :: "nat filter" ("\<U>")
+definition FreeUltrafilterNat :: "nat filter" (\<open>\<U>\<close>)
where "\<U> = (SOME U. freeultrafilter U)"
lemma freeultrafilter_FreeUltrafilterNat: "freeultrafilter \<U>"
@@ -53,7 +53,7 @@
apply auto
done
-text \<open>Proving that @{term starrel} is an equivalence relation.\<close>
+text \<open>Proving that \<^term>\<open>starrel\<close> is an equivalence relation.\<close>
lemma starrel_iff [iff]: "(X, Y) \<in> starrel \<longleftrightarrow> eventually (\<lambda>n. X n = Y n) \<U>"
by (simp add: starrel_def)
@@ -81,7 +81,7 @@
by (simp add: FreeUltrafilterNat.proper)
text \<open>Standard principles that play a central role in the transfer tactic.\<close>
-definition Ifun :: "('a \<Rightarrow> 'b) star \<Rightarrow> 'a star \<Rightarrow> 'b star" ("(_ \<star>/ _)" [300, 301] 300)
+definition Ifun :: "('a \<Rightarrow> 'b) star \<Rightarrow> 'a star \<Rightarrow> 'b star" (\<open>(_ \<star>/ _)\<close> [300, 301] 300)
where "Ifun f \<equiv>
\<lambda>x. Abs_star (\<Union>F\<in>Rep_star f. \<Union>X\<in>Rep_star x. starrel``{\<lambda>n. F n (X n)})"
@@ -168,8 +168,8 @@
definition Standard :: "'a star set"
where "Standard = range star_of"
-text \<open>Transfer tactic should remove occurrences of @{term star_of}.\<close>
-setup \<open>Transfer_Principle.add_const @{const_name star_of}\<close>
+text \<open>Transfer tactic should remove occurrences of \<^term>\<open>star_of\<close>.\<close>
+setup \<open>Transfer_Principle.add_const \<^const_name>\<open>star_of\<close>\<close>
lemma star_of_inject: "star_of x = star_of y \<longleftrightarrow> x = y"
by transfer (rule refl)
@@ -180,8 +180,8 @@
subsection \<open>Internal functions\<close>
-text \<open>Transfer tactic should remove occurrences of @{term Ifun}.\<close>
-setup \<open>Transfer_Principle.add_const @{const_name Ifun}\<close>
+text \<open>Transfer tactic should remove occurrences of \<^term>\<open>Ifun\<close>.\<close>
+setup \<open>Transfer_Principle.add_const \<^const_name>\<open>Ifun\<close>\<close>
lemma Ifun_star_of [simp]: "star_of f \<star> star_of x = star_of (f x)"
by transfer (rule refl)
@@ -192,10 +192,10 @@
text \<open>Nonstandard extensions of functions.\<close>
-definition starfun :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a star \<Rightarrow> 'b star" ("*f* _" [80] 80)
+definition starfun :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a star \<Rightarrow> 'b star" (\<open>*f* _\<close> [80] 80)
where "starfun f \<equiv> \<lambda>x. star_of f \<star> x"
-definition starfun2 :: "('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> 'a star \<Rightarrow> 'b star \<Rightarrow> 'c star" ("*f2* _" [80] 80)
+definition starfun2 :: "('a \<Rightarrow> 'b \<Rightarrow> 'c) \<Rightarrow> 'a star \<Rightarrow> 'b star \<Rightarrow> 'c star" (\<open>*f2* _\<close> [80] 80)
where "starfun2 f \<equiv> \<lambda>x y. star_of f \<star> x \<star> y"
declare starfun_def [transfer_unfold]
@@ -273,16 +273,16 @@
lemma unstar_star_of [simp]: "unstar (star_of p) = p"
by (simp add: unstar_def star_of_inject)
-text \<open>Transfer tactic should remove occurrences of @{term unstar}.\<close>
-setup \<open>Transfer_Principle.add_const @{const_name unstar}\<close>
+text \<open>Transfer tactic should remove occurrences of \<^term>\<open>unstar\<close>.\<close>
+setup \<open>Transfer_Principle.add_const \<^const_name>\<open>unstar\<close>\<close>
lemma transfer_unstar [transfer_intro]: "p \<equiv> star_n P \<Longrightarrow> unstar p \<equiv> eventually P \<U>"
by (simp only: unstar_star_n)
-definition starP :: "('a \<Rightarrow> bool) \<Rightarrow> 'a star \<Rightarrow> bool" ("*p* _" [80] 80)
+definition starP :: "('a \<Rightarrow> bool) \<Rightarrow> 'a star \<Rightarrow> bool" (\<open>*p* _\<close> [80] 80)
where "*p* P = (\<lambda>x. unstar (star_of P \<star> x))"
-definition starP2 :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a star \<Rightarrow> 'b star \<Rightarrow> bool" ("*p2* _" [80] 80)
+definition starP2 :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a star \<Rightarrow> 'b star \<Rightarrow> bool" (\<open>*p2* _\<close> [80] 80)
where "*p2* P = (\<lambda>x y. unstar (star_of P \<star> x \<star> y))"
declare starP_def [transfer_unfold]
@@ -309,8 +309,8 @@
lemma Iset_star_n: "(star_n X \<in> Iset (star_n A)) = (eventually (\<lambda>n. X n \<in> A n) \<U>)"
by (simp add: Iset_def starP2_star_n)
-text \<open>Transfer tactic should remove occurrences of @{term Iset}.\<close>
-setup \<open>Transfer_Principle.add_const @{const_name Iset}\<close>
+text \<open>Transfer tactic should remove occurrences of \<^term>\<open>Iset\<close>.\<close>
+setup \<open>Transfer_Principle.add_const \<^const_name>\<open>Iset\<close>\<close>
lemma transfer_mem [transfer_intro]:
"x \<equiv> star_n X \<Longrightarrow> a \<equiv> Iset (star_n A) \<Longrightarrow> x \<in> a \<equiv> eventually (\<lambda>n. X n \<in> A n) \<U>"
@@ -341,7 +341,7 @@
text \<open>Nonstandard extensions of sets.\<close>
-definition starset :: "'a set \<Rightarrow> 'a star set" ("*s* _" [80] 80)
+definition starset :: "'a set \<Rightarrow> 'a star set" (\<open>*s* _\<close> [80] 80)
where "starset A = Iset (star_of A)"
declare starset_def [transfer_unfold]
@@ -515,7 +515,7 @@
Standard_abs Standard_mod
-text \<open>@{term star_of} preserves class operations.\<close>
+text \<open>\<^term>\<open>star_of\<close> preserves class operations.\<close>
lemma star_of_add: "star_of (x + y) = star_of x + star_of y"
by transfer (rule refl)
@@ -542,7 +542,7 @@
by transfer (rule refl)
-text \<open>@{term star_of} preserves numerals.\<close>
+text \<open>\<^term>\<open>star_of\<close> preserves numerals.\<close>
lemma star_of_zero: "star_of 0 = 0"
by transfer (rule refl)
@@ -551,7 +551,7 @@
by transfer (rule refl)
-text \<open>@{term star_of} preserves orderings.\<close>
+text \<open>\<^term>\<open>star_of\<close> preserves orderings.\<close>
lemma star_of_less: "(star_of x < star_of y) = (x < y)"
by transfer (rule refl)
--- a/src/HOL/Nonstandard_Analysis/transfer_principle.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Nonstandard_Analysis/transfer_principle.ML Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
consts = Library.merge (op =) (consts1, consts2)};
);
-fun unstar_typ (Type (@{type_name star}, [t])) = unstar_typ t
+fun unstar_typ (Type (\<^type_name>\<open>star\<close>, [t])) = unstar_typ t
| unstar_typ (Type (a, Ts)) = Type (a, map unstar_typ Ts)
| unstar_typ T = T
@@ -54,12 +54,12 @@
in
fun transfer_star_tac ctxt =
let
- fun thm_of (Const (@{const_name Ifun}, _) $ t $ u) = @{thm transfer_Ifun} OF [thm_of t, thm_of u]
- | thm_of (Const (@{const_name star_of}, _) $ _) = @{thm star_of_def}
- | thm_of (Const (@{const_name star_n}, _) $ _) = @{thm Pure.reflexive}
+ fun thm_of (Const (\<^const_name>\<open>Ifun\<close>, _) $ t $ u) = @{thm transfer_Ifun} OF [thm_of t, thm_of u]
+ | thm_of (Const (\<^const_name>\<open>star_of\<close>, _) $ _) = @{thm star_of_def}
+ | thm_of (Const (\<^const_name>\<open>star_n\<close>, _) $ _) = @{thm Pure.reflexive}
| thm_of _ = raise MATCH;
- fun thm_of_goal (Const (@{const_name Pure.eq}, _) $ t $ (Const (@{const_name star_n}, _) $ _)) =
+ fun thm_of_goal (Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t $ (Const (\<^const_name>\<open>star_n\<close>, _) $ _)) =
thm_of t
| thm_of_goal _ = raise MATCH;
in
@@ -125,11 +125,11 @@
val _ =
Theory.setup
- (Attrib.setup @{binding transfer_intro} (Attrib.add_del intro_add intro_del)
+ (Attrib.setup \<^binding>\<open>transfer_intro\<close> (Attrib.add_del intro_add intro_del)
"declaration of transfer introduction rule" #>
- Attrib.setup @{binding transfer_unfold} (Attrib.add_del unfold_add unfold_del)
+ Attrib.setup \<^binding>\<open>transfer_unfold\<close> (Attrib.add_del unfold_add unfold_del)
"declaration of transfer unfolding rule" #>
- Attrib.setup @{binding transfer_refold} (Attrib.add_del refold_add refold_del)
+ Attrib.setup \<^binding>\<open>transfer_refold\<close> (Attrib.add_del refold_add refold_del)
"declaration of transfer refolding rule")
end;
--- a/src/HOL/Number_Theory/Cong.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Number_Theory/Cong.thy Sat Jan 05 17:24:33 2019 +0100
@@ -37,10 +37,10 @@
context unique_euclidean_semiring
begin
-definition cong :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" ("(1[_ = _] '(()mod _'))")
+definition cong :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" (\<open>(1[_ = _] '(()mod _'))\<close>)
where "cong b c a \<longleftrightarrow> b mod a = c mod a"
-abbreviation notcong :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" ("(1[_ \<noteq> _] '(()mod _'))")
+abbreviation notcong :: "'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" (\<open>(1[_ \<noteq> _] '(()mod _'))\<close>)
where "notcong b c a \<equiv> \<not> cong b c a"
lemma cong_refl [simp]:
@@ -254,7 +254,7 @@
(auto intro!: coprime_cong_mult prod_coprime_right)
-subsection \<open>Congruences on @{typ nat} and @{typ int}\<close>
+subsection \<open>Congruences on \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close>\<close>
lemma cong_int_iff:
"[int m = int q] (mod int n) \<longleftrightarrow> [m = q] (mod n)"
--- a/src/HOL/Number_Theory/Eratosthenes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Number_Theory/Eratosthenes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -23,7 +23,7 @@
subsection \<open>Main corpus\<close>
-text \<open>The sieve is modelled as a list of booleans, where @{const False} means \emph{marked out}.\<close>
+text \<open>The sieve is modelled as a list of booleans, where \<^const>\<open>False\<close> means \emph{marked out}.\<close>
type_synonym marks = "bool list"
@@ -175,8 +175,8 @@
\begin{itemize}
- \item @{const sieve} can abort as soon as @{term n} is too big to let
- @{const mark_out} have any effect.
+ \item \<^const>\<open>sieve\<close> can abort as soon as \<^term>\<open>n\<close> is too big to let
+ \<^const>\<open>mark_out\<close> have any effect.
\item Search for further primes can be given up as soon as the search
position exceeds the square root of the maximum candidate.
--- a/src/HOL/Number_Theory/Fib.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Number_Theory/Fib.thy Sat Jan 05 17:24:33 2019 +0100
@@ -42,7 +42,7 @@
text \<open>
The naive approach is very inefficient since the branching recursion leads to many
- values of @{term fib} being computed multiple times. We can avoid this by ``remembering''
+ values of \<^term>\<open>fib\<close> being computed multiple times. We can avoid this by ``remembering''
the last two values in the sequence, yielding a tail-recursive version.
This is far from optimal (it takes roughly $O(n\cdot M(n))$ time where $M(n)$ is the
time required to multiply two $n$-bit integers), but much better than the naive version,
--- a/src/HOL/Orderings.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Orderings.thy Sat Jan 05 17:24:33 2019 +0100
@@ -512,7 +512,7 @@
fun struct_tac ((s, ops), thms) ctxt facts =
let
val [eq, le, less] = ops;
- fun decomp thy (@{const Trueprop} $ t) =
+ fun decomp thy (\<^const>\<open>Trueprop\<close> $ t) =
let
fun excluded t =
(* exclude numeric types: linear arithmetic subsumes transitivity *)
--- a/src/HOL/Predicate_Compile_Examples/Lambda_Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Predicate_Compile_Examples/Lambda_Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -87,7 +87,7 @@
setup \<open>Code_Prolog.map_code_options (K
{ ensure_groundness = true,
limit_globally = NONE,
- limited_types = [(@{typ nat}, 1), (@{typ "type"}, 1), (@{typ dB}, 1), (@{typ "type list"}, 1)],
+ limited_types = [(\<^typ>\<open>nat\<close>, 1), (\<^typ>\<open>type\<close>, 1), (\<^typ>\<open>dB\<close>, 1), (\<^typ>\<open>type list\<close>, 1)],
limited_predicates = [(["typing"], 2), (["nthel1"], 2)],
replacing = [(("typing", "limited_typing"), "quickcheck"),
(("nthel1", "limited_nthel1"), "lim_typing")],
--- a/src/HOL/Predicate_Compile_Examples/List_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Predicate_Compile_Examples/List_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
setup \<open>Code_Prolog.map_code_options (K
{ensure_groundness = true,
limit_globally = NONE,
- limited_types = [(@{typ nat}, 2), (@{typ "nat list"}, 4)],
+ limited_types = [(\<^typ>\<open>nat\<close>, 2), (\<^typ>\<open>nat list\<close>, 4)],
limited_predicates = [(["appendP"], 4), (["revP"], 4)],
replacing =
[(("appendP", "limited_appendP"), "quickcheck"),
--- a/src/HOL/Predicate_Compile_Examples/Predicate_Compile_Quickcheck_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Predicate_Compile_Examples/Predicate_Compile_Quickcheck_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -295,8 +295,7 @@
definition mv :: "('a :: semiring_0) list list \<Rightarrow> 'a list \<Rightarrow> 'a list"
where [simp]: "mv M v = map (scalar_product v) M"
text \<open>
- This defines the matrix vector multiplication. To work properly @{term
-"matrix M m n \<and> length v = n"} must hold.
+ This defines the matrix vector multiplication. To work properly \<^term>\<open>matrix M m n \<and> length v = n\<close> must hold.
\<close>
subsection "Compressed matrix"
--- a/src/HOL/Predicate_Compile_Examples/Reg_Exp_Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Predicate_Compile_Examples/Reg_Exp_Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -106,7 +106,7 @@
setup \<open>Code_Prolog.map_code_options (K
{ensure_groundness = true,
limit_globally = NONE,
- limited_types = [(@{typ Sym}, 0), (@{typ "Sym list"}, 2), (@{typ RE}, 6)],
+ limited_types = [(\<^typ>\<open>Sym\<close>, 0), (\<^typ>\<open>Sym list\<close>, 2), (\<^typ>\<open>RE\<close>, 6)],
limited_predicates = [(["repIntPa"], 2), (["repP"], 2), (["subP"], 0),
(["accepts", "acceptsaux", "seqSplit", "seqSplita", "seqSplitaux", "seqSplitb"], 25)],
replacing =
--- a/src/HOL/Predicate_Compile_Examples/Specialisation_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Predicate_Compile_Examples/Specialisation_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,7 +15,7 @@
"greater_than_index xs = (\<forall>i x. nth_el' xs i = Some x --> x > i)"
code_pred (expected_modes: i => bool) [inductify, skip_proof, specialise] greater_than_index .
-ML_val \<open>Core_Data.intros_of @{context} @{const_name specialised_nth_el'P}\<close>
+ML_val \<open>Core_Data.intros_of \<^context> \<^const_name>\<open>specialised_nth_el'P\<close>\<close>
thm greater_than_index.equation
@@ -44,7 +44,7 @@
thm max_of_my_SucP.equation
-ML_val \<open>Core_Data.intros_of @{context} @{const_name specialised_max_natP}\<close>
+ML_val \<open>Core_Data.intros_of \<^context> \<^const_name>\<open>specialised_max_natP\<close>\<close>
values "{x. max_of_my_SucP x 6}"
--- a/src/HOL/Probability/Fin_Map.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/Fin_Map.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,10 +8,10 @@
imports "HOL-Analysis.Finite_Product_Measure" "HOL-Library.Finite_Map"
begin
-text \<open>The @{type fmap} type can be instantiated to @{class polish_space}, needed for the proof of
- projective limit. @{const extensional} functions are used for the representation in order to
- stay close to the developments of (finite) products @{const Pi\<^sub>E} and their sigma-algebra
- @{const Pi\<^sub>M}.\<close>
+text \<open>The \<^type>\<open>fmap\<close> type can be instantiated to \<^class>\<open>polish_space\<close>, needed for the proof of
+ projective limit. \<^const>\<open>extensional\<close> functions are used for the representation in order to
+ stay close to the developments of (finite) products \<^const>\<open>Pi\<^sub>E\<close> and their sigma-algebra
+ \<^const>\<open>Pi\<^sub>M\<close>.\<close>
type_notation fmap ("(_ \<Rightarrow>\<^sub>F /_)" [22, 21] 21)
@@ -83,7 +83,7 @@
subsection \<open>Product set of Finite Maps\<close>
-text \<open>This is @{term Pi} for Finite Maps, most of this is copied\<close>
+text \<open>This is \<^term>\<open>Pi\<close> for Finite Maps, most of this is copied\<close>
definition Pi' :: "'i set \<Rightarrow> ('i \<Rightarrow> 'a set) \<Rightarrow> ('i \<Rightarrow>\<^sub>F 'a) set" where
"Pi' I A = { P. domain P = I \<and> (\<forall>i. i \<in> I \<longrightarrow> (P)\<^sub>F i \<in> A i) } "
@@ -93,7 +93,7 @@
translations
"\<Pi>' x\<in>A. B" == "CONST Pi' A (\<lambda>x. B)"
-subsubsection\<open>Basic Properties of @{term Pi'}\<close>
+subsubsection\<open>Basic Properties of \<^term>\<open>Pi'\<close>\<close>
lemma Pi'_I[intro!]: "domain f = A \<Longrightarrow> (\<And>x. x \<in> A \<Longrightarrow> f x \<in> B x) \<Longrightarrow> f \<in> Pi' A B"
by (simp add: Pi'_def)
--- a/src/HOL/Probability/Information.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/Information.thy Sat Jan 05 17:24:33 2019 +0100
@@ -32,7 +32,7 @@
context information_space
begin
-text \<open>Introduce some simplification rules for logarithm of base @{term b}.\<close>
+text \<open>Introduce some simplification rules for logarithm of base \<^term>\<open>b\<close>.\<close>
lemma log_neg_const:
assumes "x \<le> 0"
--- a/src/HOL/Probability/Probability_Mass_Function.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/Probability_Mass_Function.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1568,7 +1568,7 @@
(auto simp add: pmf.rel_map intro: pmf.rel_mono[THEN le_funD, THEN le_funD, THEN le_boolD, THEN mp, OF _ pq] fg)
text \<open>
- Proof that @{const rel_pmf} preserves orders.
+ Proof that \<^const>\<open>rel_pmf\<close> preserves orders.
Antisymmetry proof follows Thm. 1 in N. Saheb-Djahromi, Cpo's of measures for nondeterminism,
Theoretical Computer Science 12(1):19--37, 1980,
\<^url>\<open>https://doi.org/10.1016/0304-3975(80)90003-1\<close>
--- a/src/HOL/Probability/Probability_Measure.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/Probability_Measure.thy Sat Jan 05 17:24:33 2019 +0100
@@ -239,16 +239,16 @@
subsection \<open>Introduce binder for probability\<close>
syntax
- "_prob" :: "pttrn \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic" ("('\<P>'((/_ in _./ _)'))")
+ "_prob" :: "pttrn \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic" (\<open>('\<P>'((/_ in _./ _)'))\<close>)
translations
"\<P>(x in M. P)" => "CONST measure M {x \<in> CONST space M. P}"
print_translation \<open>
let
- fun to_pattern (Const (@{const_syntax Pair}, _) $ l $ r) =
- Syntax.const @{const_syntax Pair} :: to_pattern l @ to_pattern r
- | to_pattern (t as (Const (@{syntax_const "_bound"}, _)) $ _) = [t]
+ fun to_pattern (Const (\<^const_syntax>\<open>Pair\<close>, _) $ l $ r) =
+ Syntax.const \<^const_syntax>\<open>Pair\<close> :: to_pattern l @ to_pattern r
+ | to_pattern (t as (Const (\<^syntax_const>\<open>_bound\<close>, _)) $ _) = [t]
fun mk_pattern ((t, n) :: xs) = mk_patterns n xs |>> curry list_comb t
and mk_patterns 0 xs = ([], xs)
@@ -261,32 +261,32 @@
end
fun unnest_tuples
- (Const (@{syntax_const "_pattern"}, _) $
+ (Const (\<^syntax_const>\<open>_pattern\<close>, _) $
t1 $
- (t as (Const (@{syntax_const "_pattern"}, _) $ _ $ _)))
+ (t as (Const (\<^syntax_const>\<open>_pattern\<close>, _) $ _ $ _)))
= let
val (_ $ t2 $ t3) = unnest_tuples t
in
- Syntax.const @{syntax_const "_pattern"} $
+ Syntax.const \<^syntax_const>\<open>_pattern\<close> $
unnest_tuples t1 $
- (Syntax.const @{syntax_const "_patterns"} $ t2 $ t3)
+ (Syntax.const \<^syntax_const>\<open>_patterns\<close> $ t2 $ t3)
end
| unnest_tuples pat = pat
- fun tr' [sig_alg, Const (@{const_syntax Collect}, _) $ t] =
+ fun tr' [sig_alg, Const (\<^const_syntax>\<open>Collect\<close>, _) $ t] =
let
- val bound_dummyT = Const (@{syntax_const "_bound"}, dummyT)
+ val bound_dummyT = Const (\<^syntax_const>\<open>_bound\<close>, dummyT)
fun go pattern elem
- (Const (@{const_syntax "conj"}, _) $
- (Const (@{const_syntax Set.member}, _) $ elem' $ (Const (@{const_syntax space}, _) $ sig_alg')) $
+ (Const (\<^const_syntax>\<open>conj\<close>, _) $
+ (Const (\<^const_syntax>\<open>Set.member\<close>, _) $ elem' $ (Const (\<^const_syntax>\<open>space\<close>, _) $ sig_alg')) $
u)
= let
val _ = if sig_alg aconv sig_alg' andalso to_pattern elem' = rev elem then () else raise Match;
val (pat, rest) = mk_pattern (rev pattern);
val _ = case rest of [] => () | _ => raise Match
in
- Syntax.const @{syntax_const "_prob"} $ unnest_tuples pat $ sig_alg $ u
+ Syntax.const \<^syntax_const>\<open>_prob\<close> $ unnest_tuples pat $ sig_alg $ u
end
| go pattern elem (Abs abs) =
let
@@ -294,16 +294,16 @@
in
go ((x, 0) :: pattern) (bound_dummyT $ tx :: elem) t
end
- | go pattern elem (Const (@{const_syntax case_prod}, _) $ t) =
+ | go pattern elem (Const (\<^const_syntax>\<open>case_prod\<close>, _) $ t) =
go
- ((Syntax.const @{syntax_const "_pattern"}, 2) :: pattern)
- (Syntax.const @{const_syntax Pair} :: elem)
+ ((Syntax.const \<^syntax_const>\<open>_pattern\<close>, 2) :: pattern)
+ (Syntax.const \<^const_syntax>\<open>Pair\<close> :: elem)
t
in
go [] [] t
end
in
- [(@{const_syntax Sigma_Algebra.measure}, K tr')]
+ [(\<^const_syntax>\<open>Sigma_Algebra.measure\<close>, K tr')]
end
\<close>
@@ -311,7 +311,7 @@
"cond_prob M P Q = \<P>(\<omega> in M. P \<omega> \<and> Q \<omega>) / \<P>(\<omega> in M. Q \<omega>)"
syntax
- "_conditional_prob" :: "pttrn \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic" ("('\<P>'(_ in _. _ \<bar>/ _'))")
+ "_conditional_prob" :: "pttrn \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic" (\<open>('\<P>'(_ in _. _ \<bar>/ _'))\<close>)
translations
"\<P>(x in M. P \<bar> Q)" => "CONST cond_prob M (\<lambda>x. P) (\<lambda>x. Q)"
--- a/src/HOL/Probability/SPMF.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/SPMF.thy Sat Jan 05 17:24:33 2019 +0100
@@ -50,7 +50,7 @@
lemma ennreal_lt_0: "x < 0 \<Longrightarrow> ennreal x = 0"
by(simp add: ennreal_eq_0_iff)
-subsubsection \<open>More about @{typ "'a option"}\<close>
+subsubsection \<open>More about \<^typ>\<open>'a option\<close>\<close>
lemma None_in_map_option_image [simp]: "None \<in> map_option f ` A \<longleftrightarrow> None \<in> A"
by auto
@@ -764,7 +764,7 @@
context includes lifting_syntax
begin
-text \<open>We do not yet have a relator for @{typ "'a measure"}, so we combine @{const measure} and @{const measure_pmf}\<close>
+text \<open>We do not yet have a relator for \<^typ>\<open>'a measure\<close>, so we combine \<^const>\<open>measure\<close> and \<^const>\<open>measure_pmf\<close>\<close>
lemma measure_pmf_parametric:
"(rel_pmf A ===> rel_pred A ===> (=)) (\<lambda>p. measure (measure_pmf p)) (\<lambda>q. measure (measure_pmf q))"
proof(rule rel_funI)+
@@ -786,7 +786,7 @@
end
-subsection \<open>From @{typ "'a pmf"} to @{typ "'a spmf"}\<close>
+subsection \<open>From \<^typ>\<open>'a pmf\<close> to \<^typ>\<open>'a spmf\<close>\<close>
definition spmf_of_pmf :: "'a pmf \<Rightarrow> 'a spmf"
where "spmf_of_pmf = map_pmf Some"
@@ -984,7 +984,7 @@
subsection \<open>Ordering on spmfs\<close>
text \<open>
- @{const rel_pmf} does not preserve a ccpo structure. Counterexample by Saheb-Djahromi:
+ \<^const>\<open>rel_pmf\<close> does not preserve a ccpo structure. Counterexample by Saheb-Djahromi:
Take prefix order over \<open>bool llist\<close> and
the set \<open>range (\<lambda>n :: nat. uniform (llist_n n))\<close> where \<open>llist_n\<close> is the set
of all \<open>llist\<close>s of length \<open>n\<close> and \<open>uniform\<close> returns a uniform distribution over
@@ -1223,13 +1223,13 @@
lemma ord_spmf_eqD_measure_spmf: "ord_spmf (=) p q \<Longrightarrow> measure_spmf p \<le> measure_spmf q"
by (subst le_measure) (auto simp: ord_spmf_eqD_emeasure)
-subsection \<open>CCPO structure for the flat ccpo @{term "ord_option (=)"}\<close>
+subsection \<open>CCPO structure for the flat ccpo \<^term>\<open>ord_option (=)\<close>\<close>
context fixes Y :: "'a spmf set" begin
definition lub_spmf :: "'a spmf"
where "lub_spmf = embed_spmf (\<lambda>x. enn2real (SUP p \<in> Y. ennreal (spmf p x)))"
- \<comment> \<open>We go through @{typ ennreal} to have a sensible definition even if @{term Y} is empty.\<close>
+ \<comment> \<open>We go through \<^typ>\<open>ennreal\<close> to have a sensible definition even if \<^term>\<open>Y\<close> is empty.\<close>
lemma lub_spmf_empty [simp]: "SPMF.lub_spmf {} = return_pmf None"
by(simp add: SPMF.lub_spmf_def bot_ereal_def)
@@ -1285,7 +1285,7 @@
done
text \<open>
- Chains on @{typ "'a spmf"} maintain countable support.
+ Chains on \<^typ>\<open>'a spmf\<close> maintain countable support.
Thanks to Johannes Hölzl for the proof idea.
\<close>
lemma spmf_chain_countable: "countable (\<Union>p\<in>Y. set_spmf p)"
@@ -1528,8 +1528,8 @@
rewrites "lub_spmf {} \<equiv> return_pmf None"
by(rule partial_function_definitions_spmf) simp
-declaration \<open>Partial_Function.init "spmf" @{term spmf.fixp_fun}
- @{term spmf.mono_body} @{thm spmf.fixp_rule_uc} @{thm spmf.fixp_induct_uc}
+declaration \<open>Partial_Function.init "spmf" \<^term>\<open>spmf.fixp_fun\<close>
+ \<^term>\<open>spmf.mono_body\<close> @{thm spmf.fixp_rule_uc} @{thm spmf.fixp_induct_uc}
NONE\<close>
declare spmf.leq_refl[simp]
@@ -1698,7 +1698,7 @@
lemma nn_integral_map_spmf [simp]: "nn_integral (measure_spmf (map_spmf f p)) g = nn_integral (measure_spmf p) (g \<circ> f)"
by(auto 4 3 simp add: measure_spmf_def nn_integral_distr nn_integral_restrict_space intro: nn_integral_cong split: split_indicator)
-subsubsection \<open>Admissibility of @{term rel_spmf}\<close>
+subsubsection \<open>Admissibility of \<^term>\<open>rel_spmf\<close>\<close>
lemma rel_spmf_measureD:
assumes "rel_spmf R p q"
@@ -1973,7 +1973,7 @@
lemma integral_spmf_of_set: "integral\<^sup>L (measure_spmf (spmf_of_set A)) f = sum f A / card A"
by(clarsimp simp add: spmf_of_set_def integral_pmf_of_set card_gt_0_iff simp del: spmf_of_pmf_pmf_of_set)
-notepad begin \<comment> \<open>@{const pmf_of_set} is not fully parametric.\<close>
+notepad begin \<comment> \<open>\<^const>\<open>pmf_of_set\<close> is not fully parametric.\<close>
define R :: "nat \<Rightarrow> nat \<Rightarrow> bool" where "R x y \<longleftrightarrow> (x \<noteq> 0 \<longrightarrow> y = 0)" for x y
define A :: "nat set" where "A = {0, 1}"
define B :: "nat set" where "B = {0, 1, 2}"
--- a/src/HOL/Probability/ex/Dining_Cryptographers.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/ex/Dining_Cryptographers.thy Sat Jan 05 17:24:33 2019 +0100
@@ -152,8 +152,8 @@
note inj_inv = this
txt \<open>
- We now construct the possible inversions for @{term xs} when the payer is
- @{term i}.
+ We now construct the possible inversions for \<^term>\<open>xs\<close> when the payer is
+ \<^term>\<open>i\<close>.
\<close>
define zs where "zs = map (\<lambda>p. if p \<in> {min i j<..max i j} then \<not> ys ! p else ys ! p) [0..<n]"
--- a/src/HOL/Probability/ex/Measure_Not_CCC.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Probability/ex/Measure_Not_CCC.thy Sat Jan 05 17:24:33 2019 +0100
@@ -16,8 +16,8 @@
of all countable and co-countable real sets. We also define $\mathbb{R}$ to be the discrete
measurable space on the reals.
- Now, the diagonal predicate @{term "\<lambda>x y. x = y"} is $\mathbb{R}$-$\mathbb{B}^\mathbb{C}$-measurable,
- but @{term "\<lambda>(x, y). x = y"} is not $(\mathbb{R} \times \mathbb{C})$-$\mathbb{B}$-measurable.
+ Now, the diagonal predicate \<^term>\<open>\<lambda>x y. x = y\<close> is $\mathbb{R}$-$\mathbb{B}^\mathbb{C}$-measurable,
+ but \<^term>\<open>\<lambda>(x, y). x = y\<close> is not $(\mathbb{R} \times \mathbb{C})$-$\mathbb{B}$-measurable.
\<close>
definition COCOUNT :: "real measure" where
--- a/src/HOL/Prolog/Test.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Prolog/Test.thy Sat Jan 05 17:24:33 2019 +0100
@@ -233,9 +233,9 @@
(* disjunction in atom: *)
lemma "(\<forall>P. g P :- (P => b \<or> a)) => g(a \<or> b)"
- apply (tactic "step_tac (put_claset HOL_cs @{context}) 1")
- apply (tactic "step_tac (put_claset HOL_cs @{context}) 1")
- apply (tactic "step_tac (put_claset HOL_cs @{context}) 1")
+ apply (tactic "step_tac (put_claset HOL_cs \<^context>) 1")
+ apply (tactic "step_tac (put_claset HOL_cs \<^context>) 1")
+ apply (tactic "step_tac (put_claset HOL_cs \<^context>) 1")
prefer 2
apply fast
apply fast
--- a/src/HOL/Prolog/prolog.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Prolog/prolog.ML Sat Jan 05 17:24:33 2019 +0100
@@ -2,7 +2,7 @@
Author: David von Oheimb (based on a lecture on Lambda Prolog by Nadathur)
*)
-Options.default_put_bool @{system_option show_main_goal} true;
+Options.default_put_bool \<^system_option>\<open>show_main_goal\<close> true;
structure Prolog =
struct
@@ -10,17 +10,17 @@
exception not_HOHH;
fun isD t = case t of
- Const(@{const_name Trueprop},_)$t => isD t
- | Const(@{const_name HOL.conj} ,_)$l$r => isD l andalso isD r
- | Const(@{const_name HOL.implies},_)$l$r => isG l andalso isD r
- | Const(@{const_name Pure.imp},_)$l$r => isG l andalso isD r
- | Const(@{const_name All},_)$Abs(s,_,t) => isD t
- | Const(@{const_name Pure.all},_)$Abs(s,_,t) => isD t
- | Const(@{const_name HOL.disj},_)$_$_ => false
- | Const(@{const_name Ex} ,_)$_ => false
- | Const(@{const_name Not},_)$_ => false
- | Const(@{const_name True},_) => false
- | Const(@{const_name False},_) => false
+ Const(\<^const_name>\<open>Trueprop\<close>,_)$t => isD t
+ | Const(\<^const_name>\<open>HOL.conj\<close> ,_)$l$r => isD l andalso isD r
+ | Const(\<^const_name>\<open>HOL.implies\<close>,_)$l$r => isG l andalso isD r
+ | Const(\<^const_name>\<open>Pure.imp\<close>,_)$l$r => isG l andalso isD r
+ | Const(\<^const_name>\<open>All\<close>,_)$Abs(s,_,t) => isD t
+ | Const(\<^const_name>\<open>Pure.all\<close>,_)$Abs(s,_,t) => isD t
+ | Const(\<^const_name>\<open>HOL.disj\<close>,_)$_$_ => false
+ | Const(\<^const_name>\<open>Ex\<close> ,_)$_ => false
+ | Const(\<^const_name>\<open>Not\<close>,_)$_ => false
+ | Const(\<^const_name>\<open>True\<close>,_) => false
+ | Const(\<^const_name>\<open>False\<close>,_) => false
| l $ r => isD l
| Const _ (* rigid atom *) => true
| Bound _ (* rigid atom *) => true
@@ -29,17 +29,17 @@
anything else *) => false
and
isG t = case t of
- Const(@{const_name Trueprop},_)$t => isG t
- | Const(@{const_name HOL.conj} ,_)$l$r => isG l andalso isG r
- | Const(@{const_name HOL.disj} ,_)$l$r => isG l andalso isG r
- | Const(@{const_name HOL.implies},_)$l$r => isD l andalso isG r
- | Const(@{const_name Pure.imp},_)$l$r => isD l andalso isG r
- | Const(@{const_name All},_)$Abs(_,_,t) => isG t
- | Const(@{const_name Pure.all},_)$Abs(_,_,t) => isG t
- | Const(@{const_name Ex} ,_)$Abs(_,_,t) => isG t
- | Const(@{const_name True},_) => true
- | Const(@{const_name Not},_)$_ => false
- | Const(@{const_name False},_) => false
+ Const(\<^const_name>\<open>Trueprop\<close>,_)$t => isG t
+ | Const(\<^const_name>\<open>HOL.conj\<close> ,_)$l$r => isG l andalso isG r
+ | Const(\<^const_name>\<open>HOL.disj\<close> ,_)$l$r => isG l andalso isG r
+ | Const(\<^const_name>\<open>HOL.implies\<close>,_)$l$r => isD l andalso isG r
+ | Const(\<^const_name>\<open>Pure.imp\<close>,_)$l$r => isD l andalso isG r
+ | Const(\<^const_name>\<open>All\<close>,_)$Abs(_,_,t) => isG t
+ | Const(\<^const_name>\<open>Pure.all\<close>,_)$Abs(_,_,t) => isG t
+ | Const(\<^const_name>\<open>Ex\<close> ,_)$Abs(_,_,t) => isG t
+ | Const(\<^const_name>\<open>True\<close>,_) => true
+ | Const(\<^const_name>\<open>Not\<close>,_)$_ => false
+ | Const(\<^const_name>\<open>False\<close>,_) => false
| _ (* atom *) => true;
val check_HOHH_tac1 = PRIMITIVE (fn thm =>
@@ -52,17 +52,17 @@
fun atomizeD ctxt thm =
let
fun at thm = case Thm.concl_of thm of
- _$(Const(@{const_name All} ,_)$Abs(s,_,_))=>
+ _$(Const(\<^const_name>\<open>All\<close> ,_)$Abs(s,_,_))=>
let val s' = if s="P" then "PP" else s in
at(thm RS (Rule_Insts.read_instantiate ctxt [((("x", 0), Position.none), s')] [s'] spec))
end
- | _$(Const(@{const_name HOL.conj},_)$_$_) => at(thm RS conjunct1)@at(thm RS conjunct2)
- | _$(Const(@{const_name HOL.implies},_)$_$_) => at(thm RS mp)
+ | _$(Const(\<^const_name>\<open>HOL.conj\<close>,_)$_$_) => at(thm RS conjunct1)@at(thm RS conjunct2)
+ | _$(Const(\<^const_name>\<open>HOL.implies\<close>,_)$_$_) => at(thm RS mp)
| _ => [thm]
in map zero_var_indexes (at thm) end;
val atomize_ss =
- (empty_simpset @{context} |> Simplifier.set_mksimps (mksimps mksimps_pairs))
+ (empty_simpset \<^context> |> Simplifier.set_mksimps (mksimps mksimps_pairs))
addsimps [
@{thm all_conj_distrib}, (* "(! x. P x & Q x) = ((! x. P x) & (! x. Q x))" *)
@{thm imp_conjL} RS sym, (* "(D :- G1 :- G2) = (D :- G1 & G2)" *)
@@ -76,8 +76,8 @@
-- is nice, but cannot instantiate unknowns in the assumptions *)
fun hyp_resolve_tac ctxt = SUBGOAL (fn (subgoal, i) =>
let
- fun ap (Const(@{const_name All},_)$Abs(_,_,t))=(case ap t of (k,a,t) => (k+1,a ,t))
- | ap (Const(@{const_name HOL.implies},_)$_$t) =(case ap t of (k,_,t) => (k,true ,t))
+ fun ap (Const(\<^const_name>\<open>All\<close>,_)$Abs(_,_,t))=(case ap t of (k,a,t) => (k+1,a ,t))
+ | ap (Const(\<^const_name>\<open>HOL.implies\<close>,_)$_$t) =(case ap t of (k,_,t) => (k,true ,t))
| ap t = (0,false,t);
(*
fun rep_goal (Const (@{const_name Pure.all},_)$Abs (_,_,t)) = rep_goal t
--- a/src/HOL/Proofs/Lambda/Commutation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/Lambda/Commutation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -129,9 +129,9 @@
lemma Church_Rosser_confluent: "Church_Rosser R = confluent R"
apply (unfold square_def commute_def diamond_def Church_Rosser_def)
- apply (tactic \<open>safe_tac (put_claset HOL_cs @{context})\<close>)
+ apply (tactic \<open>safe_tac (put_claset HOL_cs \<^context>)\<close>)
apply (tactic \<open>
- blast_tac (put_claset HOL_cs @{context} addIs
+ blast_tac (put_claset HOL_cs \<^context> addIs
[@{thm sup_ge2} RS @{thm rtranclp_mono} RS @{thm predicate2D} RS @{thm rtranclp_trans},
@{thm rtranclp_converseI}, @{thm conversepI},
@{thm sup_ge1} RS @{thm rtranclp_mono} RS @{thm predicate2D}]) 1\<close>)
--- a/src/HOL/Proofs/Lambda/Eta.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/Lambda/Eta.thy Sat Jan 05 17:24:33 2019 +0100
@@ -169,7 +169,7 @@
subsection \<open>Implicit definition of \<open>eta\<close>\<close>
-text \<open>@{term "Abs (lift s 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s"}\<close>
+text \<open>\<^term>\<open>Abs (lift s 0 \<degree> Var 0) \<rightarrow>\<^sub>\<eta> s\<close>\<close>
lemma not_free_iff_lifted:
"(\<not> free s i) = (\<exists>t. s = lift t i)"
--- a/src/HOL/Proofs/Lambda/Lambda.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/Lambda/Lambda.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,7 +36,7 @@
declare subst_Var [simp del]
-text \<open>Optimized versions of @{term subst} and @{term lift}.\<close>
+text \<open>Optimized versions of \<^term>\<open>subst\<close> and \<^term>\<open>lift\<close>.\<close>
primrec
liftn :: "[nat, dB, nat] => dB"
--- a/src/HOL/Proofs/Lambda/NormalForm.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/Lambda/NormalForm.thy Sat Jan 05 17:24:33 2019 +0100
@@ -68,7 +68,7 @@
by (unfold listall_def) simp
text \<open>
-@{term "listsp"} is equivalent to @{term "listall"}, but cannot be
+\<^term>\<open>listsp\<close> is equivalent to \<^term>\<open>listall\<close>, but cannot be
used for program extraction.
\<close>
@@ -186,7 +186,7 @@
done
text \<open>
-@{term NF} characterizes exactly the terms that are in normal form.
+\<^term>\<open>NF\<close> characterizes exactly the terms that are in normal form.
\<close>
lemma NF_eq: "NF t = (\<forall>t'. \<not> t \<rightarrow>\<^sub>\<beta> t')"
--- a/src/HOL/Proofs/Lambda/StrongNorm.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/Lambda/StrongNorm.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,7 +36,7 @@
lemma subst_Var_IT: "IT r \<Longrightarrow> IT (r[Var i/j])"
apply (induct arbitrary: i j set: IT)
- txt \<open>Case @{term Var}:\<close>
+ txt \<open>Case \<^term>\<open>Var\<close>:\<close>
apply (simp (no_asm) add: subst_Var)
apply
((rule conjI impI)+,
@@ -47,12 +47,12 @@
rule listsp.Cons,
fast,
assumption)+
- txt \<open>Case @{term Lambda}:\<close>
+ txt \<open>Case \<^term>\<open>Lambda\<close>:\<close>
apply atomize
apply simp
apply (rule IT.Lambda)
apply fast
- txt \<open>Case @{term Beta}:\<close>
+ txt \<open>Case \<^term>\<open>Beta\<close>:\<close>
apply atomize
apply (simp (no_asm_use) add: subst_subst [symmetric])
apply (rule IT.Beta)
--- a/src/HOL/Proofs/ex/Proof_Terms.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/ex/Proof_Terms.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,8 +29,8 @@
val prf = Proofterm.proof_of body;
(*clean output*)
- Pretty.writeln (Proof_Syntax.pretty_clean_proof_of @{context} false thm);
- Pretty.writeln (Proof_Syntax.pretty_clean_proof_of @{context} true thm);
+ Pretty.writeln (Proof_Syntax.pretty_clean_proof_of \<^context> false thm);
+ Pretty.writeln (Proof_Syntax.pretty_clean_proof_of \<^context> true thm);
(*all theorems used in the graph of nested proofs*)
val all_thms =
@@ -41,7 +41,7 @@
text \<open>
The result refers to various basic facts of Isabelle/HOL: @{thm [source]
HOL.impI}, @{thm [source] HOL.conjE}, @{thm [source] HOL.conjI} etc. The
- combinator @{ML Proofterm.fold_body_thms} recursively explores the graph of
+ combinator \<^ML>\<open>Proofterm.fold_body_thms\<close> recursively explores the graph of
the proofs of all theorems being used here.
\<^medskip>
@@ -50,8 +50,8 @@
\<close>
ML_val \<open>
- val thy = @{theory};
- val ctxt = @{context};
+ val thy = \<^theory>;
+ val ctxt = \<^context>;
val prf =
Proof_Syntax.read_proof thy true false
"impI \<cdot> _ \<cdot> _ \<bullet> \
@@ -60,7 +60,7 @@
\ (\<^bold>\<lambda>(H: _) Ha: _. conjI \<cdot> _ \<cdot> _ \<bullet> Ha \<bullet> H))";
val thm =
prf
- |> Reconstruct.reconstruct_proof ctxt @{prop "A \<and> B \<longrightarrow> B \<and> A"}
+ |> Reconstruct.reconstruct_proof ctxt \<^prop>\<open>A \<and> B \<longrightarrow> B \<and> A\<close>
|> Proof_Checker.thm_of_proof thy
|> Drule.export_without_context;
\<close>
--- a/src/HOL/Proofs/ex/XML_Data.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Proofs/ex/XML_Data.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,31 +25,31 @@
subsection \<open>Examples\<close>
-ML \<open>val thy1 = @{theory}\<close>
+ML \<open>val thy1 = \<^theory>\<close>
lemma ex: "A \<longrightarrow> A" ..
ML_val \<open>
- val xml = export_proof @{context} @{thm ex};
+ val xml = export_proof \<^context> @{thm ex};
val thm = import_proof thy1 xml;
\<close>
ML_val \<open>
- val xml = export_proof @{context} @{thm de_Morgan};
+ val xml = export_proof \<^context> @{thm de_Morgan};
val thm = import_proof thy1 xml;
\<close>
ML_val \<open>
- val xml = export_proof @{context} @{thm Drinker's_Principle};
+ val xml = export_proof \<^context> @{thm Drinker's_Principle};
val thm = import_proof thy1 xml;
\<close>
text \<open>Some fairly large proof:\<close>
ML_val \<open>
- val xml = export_proof @{context} @{thm abs_less_iff};
+ val xml = export_proof \<^context> @{thm abs_less_iff};
val thm = import_proof thy1 xml;
- @{assert} (size (YXML.string_of_body xml) > 1000000);
+ \<^assert> (size (YXML.string_of_body xml) > 1000000);
\<close>
end
--- a/src/HOL/Quickcheck_Examples/Hotel_Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quickcheck_Examples/Hotel_Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -91,9 +91,9 @@
lemmas issued_simps[code] = issued_nil issued.simps(2)
-setup \<open>Predicate_Compile_Data.ignore_consts [@{const_name Set.member},
- @{const_name "issued"}, @{const_name "cards"}, @{const_name "isin"},
- @{const_name Collect}, @{const_name insert}]\<close>
+setup \<open>Predicate_Compile_Data.ignore_consts [\<^const_name>\<open>Set.member\<close>,
+ \<^const_name>\<open>issued\<close>, \<^const_name>\<open>cards\<close>, \<^const_name>\<open>isin\<close>,
+ \<^const_name>\<open>Collect\<close>, \<^const_name>\<open>insert\<close>]\<close>
ML_val \<open>Core_Data.force_modes_and_compilations\<close>
fun find_first :: "('a => 'b option) => 'a list => 'b option"
@@ -135,14 +135,14 @@
fun of_set compfuns (Type ("fun", [T, _])) =
case body_type (Predicate_Compile_Aux.mk_monadT compfuns T) of
Type ("Quickcheck_Exhaustive.three_valued", _) =>
- Const(@{const_name neg_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
- | _ => Const(@{const_name pos_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ Const(\<^const_name>\<open>neg_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ | _ => Const(\<^const_name>\<open>pos_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
fun member compfuns (U as Type ("fun", [T, _])) =
(absdummy T (absdummy (HOLogic.mk_setT T) (Predicate_Compile_Aux.mk_if compfuns
- (Const (@{const_name "Set.member"}, T --> HOLogic.mk_setT T --> @{typ bool}) $ Bound 1 $ Bound 0))))
+ (Const (\<^const_name>\<open>Set.member\<close>, T --> HOLogic.mk_setT T --> \<^typ>\<open>bool\<close>) $ Bound 1 $ Bound 0))))
in
- Core_Data.force_modes_and_compilations @{const_name Set.member}
+ Core_Data.force_modes_and_compilations \<^const_name>\<open>Set.member\<close>
[(oi, (of_set, false)), (ii, (member, false))]
end
\<close>
--- a/src/HOL/Quickcheck_Examples/Quickcheck_Interfaces.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quickcheck_Examples/Quickcheck_Interfaces.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,8 +19,8 @@
\<close>
ML \<open>
-val SOME testers = Quickcheck.mk_batch_validator @{context}
- [@{term "x = (1 :: nat)"}, @{term "x = (0 :: nat)"}, @{term "x <= (5 :: nat)"}, @{term "0 \<le> (x :: nat)"}]
+val SOME testers = Quickcheck.mk_batch_validator \<^context>
+ [\<^term>\<open>x = (1 :: nat)\<close>, \<^term>\<open>x = (0 :: nat)\<close>, \<^term>\<open>x <= (5 :: nat)\<close>, \<^term>\<open>0 \<le> (x :: nat)\<close>]
\<close>
text \<open>
--- a/src/HOL/Quickcheck_Examples/Quickcheck_Lattice_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quickcheck_Examples/Quickcheck_Lattice_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
declare [[quickcheck_finite_type_size=5]]
text \<open>We show how other default types help to find counterexamples to propositions if
- the standard default type @{typ int} is insufficient.\<close>
+ the standard default type \<^typ>\<open>int\<close> is insufficient.\<close>
notation
less_eq (infix "\<sqsubseteq>" 50) and
--- a/src/HOL/Quickcheck_Examples/Quickcheck_Narrowing_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quickcheck_Examples/Quickcheck_Narrowing_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -221,7 +221,7 @@
declare is_ord.simps(1)[code] is_ord_mkt[code]
-subsubsection \<open>Invalid Lemma due to typo in @{const l_bal}\<close>
+subsubsection \<open>Invalid Lemma due to typo in \<^const>\<open>l_bal\<close>\<close>
lemma is_ord_l_bal:
"\<lbrakk> is_ord(MKT (x :: nat) l r h); height l = height r + 2 \<rbrakk> \<Longrightarrow> is_ord(l_bal(x,l,r))"
--- a/src/HOL/Quickcheck_Examples/Quickcheck_Nesting.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quickcheck_Examples/Quickcheck_Nesting.thy Sat Jan 05 17:24:33 2019 +0100
@@ -7,7 +7,7 @@
open BNF_FP_Def_Sugar
open BNF_LFP_Compat
- val compat_plugin = Plugin_Name.declare_setup @{binding compat};
+ val compat_plugin = Plugin_Name.declare_setup \<^binding>\<open>compat\<close>;
fun compat fp_sugars =
perhaps (try (datatype_compat (map (fst o dest_Type o #T) fp_sugars)));
--- a/src/HOL/Quotient_Examples/Quotient_FSet.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quotient_Examples/Quotient_FSet.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1152,7 +1152,7 @@
by (lifting list_eq2.induct[simplified list_eq2_equiv[symmetric]])
ML \<open>
-fun dest_fsetT (Type (@{type_name fset}, [T])) = T
+fun dest_fsetT (Type (\<^type_name>\<open>fset\<close>, [T])) = T
| dest_fsetT T = raise TYPE ("dest_fsetT: fset type expected", [T], []);
\<close>
--- a/src/HOL/Quotient_Examples/Quotient_Int.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quotient_Examples/Quotient_Int.thy Sat Jan 05 17:24:33 2019 +0100
@@ -276,7 +276,7 @@
by (descending) (auto intro: int_induct2)
-text \<open>Magnitide of an Integer, as a Natural Number: @{term nat}\<close>
-text \<open>Magnitude of an Integer, as a Natural Number: \<^term>\<open>nat\<close>\<close>
definition
"int_to_nat_raw \<equiv> \<lambda>(x, y).x - (y::nat)"
--- a/src/HOL/Quotient_Examples/Quotient_Message.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Quotient_Examples/Quotient_Message.thy Sat Jan 05 17:24:33 2019 +0100
@@ -119,7 +119,7 @@
| "freediscrim (CRYPT K X) = freediscrim X + 2"
| "freediscrim (DECRYPT K X) = freediscrim X - 2"
-text\<open>This theorem helps us prove @{term "Nonce N \<noteq> MPair X Y"}\<close>
+text\<open>This theorem helps us prove \<^term>\<open>Nonce N \<noteq> MPair X Y\<close>\<close>
theorem msgrel_imp_eq_freediscrim:
assumes a: "U \<sim> V"
shows "freediscrim U = freediscrim V"
@@ -173,7 +173,7 @@
"freenonces"
by (rule msgrel_imp_eq_freenonces)
-text\<open>Now prove the four equations for @{term nonces}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>nonces\<close>\<close>
lemma nonces_Nonce [simp]:
shows "nonces (Nonce N) = {N}"
@@ -223,7 +223,7 @@
"freeright"
by (rule msgrel_imp_eqv_freeright)
-text\<open>Now prove the four equations for @{term right}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>right\<close>\<close>
lemma right_Nonce [simp]:
shows "right (Nonce N) = Nonce N"
@@ -243,7 +243,7 @@
subsection\<open>Injectivity Properties of Some Constructors\<close>
-text\<open>Can also be proved using the function @{term nonces}\<close>
+text\<open>Can also be proved using the function \<^term>\<open>nonces\<close>\<close>
lemma Nonce_Nonce_eq [iff]:
shows "(Nonce m = Nonce n) = (m = n)"
proof
@@ -328,7 +328,7 @@
"freediscrim"
by (rule msgrel_imp_eq_freediscrim)
-text\<open>Now prove the four equations for @{term discrim}\<close>
+text\<open>Now prove the four equations for \<^term>\<open>discrim\<close>\<close>
lemma discrim_Nonce [simp]:
shows "discrim (Nonce N) = 0"
--- a/src/HOL/Real_Asymp/Eventuallize.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Eventuallize.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,9 +5,9 @@
text \<open>
The following attribute and ML function lift a given theorem of the form
- @{prop "\<forall>x. A x \<longrightarrow> B x \<longrightarrow> C x"}
+ \<^prop>\<open>\<forall>x. A x \<longrightarrow> B x \<longrightarrow> C x\<close>
to
- @{prop "eventually A F \<Longrightarrow> eventually B F \<Longrightarrow> eventually C F"} .
+ \<^prop>\<open>eventually A F \<Longrightarrow> eventually B F \<Longrightarrow> eventually C F\<close> .
\<close>
ML \<open>
@@ -19,12 +19,12 @@
structure Eventuallize : EVENTUALLIZE =
struct
-fun dest_All (Const (@{const_name "HOL.All"}, _) $ Abs (x, T, t)) = (x, T, t)
- | dest_All (Const (@{const_name "HOL.All"}, T) $ f) =
+fun dest_All (Const (\<^const_name>\<open>HOL.All\<close>, _) $ Abs (x, T, t)) = (x, T, t)
+ | dest_All (Const (\<^const_name>\<open>HOL.All\<close>, T) $ f) =
("x", T |> dest_funT |> fst |> dest_funT |> fst, f $ Bound 0)
| dest_All t = raise TERM ("dest_All", [t])
-fun strip_imp (@{term "(\<longrightarrow>)"} $ a $ b) = apfst (cons a) (strip_imp b)
+fun strip_imp (\<^term>\<open>(\<longrightarrow>)\<close> $ a $ b) = apfst (cons a) (strip_imp b)
| strip_imp t = ([], t)
fun eventuallize ctxt thm n =
--- a/src/HOL/Real_Asymp/Inst_Existentials.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Inst_Existentials.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,7 +5,7 @@
text \<open>
Coinduction proofs in Isabelle often lead to proof obligations with nested conjunctions and
- existential quantifiers, e.g. @{prop "\<exists>x y. P x y \<and> (\<exists>z. Q x y z)"} .
+ existential quantifiers, e.g. \<^prop>\<open>\<exists>x y. P x y \<and> (\<exists>z. Q x y z)\<close> .
The following tactic allows instantiating these existentials with a given list of witnesses.
\<close>
--- a/src/HOL/Real_Asymp/Manual/Real_Asymp_Doc.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Manual/Real_Asymp_Doc.thy Sat Jan 05 17:24:33 2019 +0100
@@ -33,25 +33,25 @@
\<^item> Powers with constant natural exponent
- \<^item> @{term exp}, @{term ln}, @{term log}, @{term sqrt}, @{term "root k"} (for a constant k)
+ \<^item> \<^term>\<open>exp\<close>, \<^term>\<open>ln\<close>, \<^term>\<open>log\<close>, \<^term>\<open>sqrt\<close>, \<^term>\<open>root k\<close> (for a constant k)
- \<^item> @{term sin}, @{term cos}, @{term tan} at finite points
+ \<^item> \<^term>\<open>sin\<close>, \<^term>\<open>cos\<close>, \<^term>\<open>tan\<close> at finite points
- \<^item> @{term sinh}, @{term cosh}, @{term tanh}
+ \<^item> \<^term>\<open>sinh\<close>, \<^term>\<open>cosh\<close>, \<^term>\<open>tanh\<close>
- \<^item> @{term min}, @{term max}, @{term abs}
+ \<^item> \<^term>\<open>min\<close>, \<^term>\<open>max\<close>, \<^term>\<open>abs\<close>
Additionally, the following operations are supported in a `best effort' fashion using asymptotic
upper/lower bounds:
\<^item> Powers with variable natural exponent
- \<^item> @{term sin} and @{term cos} at \<open>\<plusminus>\<infinity>\<close>
+ \<^item> \<^term>\<open>sin\<close> and \<^term>\<open>cos\<close> at \<open>\<plusminus>\<infinity>\<close>
- \<^item> @{term floor}, @{term ceiling}, @{term frac}, and \<open>mod\<close>
+ \<^item> \<^term>\<open>floor\<close>, \<^term>\<open>ceiling\<close>, \<^term>\<open>frac\<close>, and \<open>mod\<close>
- Additionally, the @{term arctan} function is partially supported. The method may fail when
- the argument to @{term arctan} contains functions of different order of growth.
+ Additionally, the \<^term>\<open>arctan\<close> function is partially supported. The method may fail when
+ the argument to \<^term>\<open>arctan\<close> contains functions of different order of growth.
\<close>
@@ -62,20 +62,20 @@
@{method_def (HOL) real_asymp} : \<open>method\<close>
\]
- @{rail \<open>
+ \<^rail>\<open>
@@{method (HOL) real_asymp} opt? (@{syntax simpmod} * )
;
opt: '(' ('verbose' | 'fallback' ) ')'
;
@{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | '!' | 'del') |
'cong' (() | 'add' | 'del')) ':' @{syntax thms}
- \<close>}
+ \<close>
\<close>
text \<open>
The @{method real_asymp} method is a semi-automatic proof method for proving certain statements
related to the asymptotic behaviour of real-valued functions. In the following, let \<open>f\<close> and \<open>g\<close>
- be functions of type @{typ "real \<Rightarrow> real"} and \<open>F\<close> and \<open>G\<close> real filters.
+ be functions of type \<^typ>\<open>real \<Rightarrow> real\<close> and \<open>F\<close> and \<open>G\<close> real filters.
The functions \<open>f\<close> and \<open>g\<close> can be built from the operations mentioned before and may contain free
variables. The filters \<open>F\<close> and \<open>G\<close> can be either \<open>\<plusminus>\<infinity>\<close> or a finite point in \<open>\<real>\<close>, possibly
@@ -83,13 +83,13 @@
The class of statements that is supported by @{method real_asymp} then consists of:
- \<^item> Limits, i.\,e.\ @{prop "filterlim f F G"}
+ \<^item> Limits, i.\,e.\ \<^prop>\<open>filterlim f F G\<close>
- \<^item> Landau symbols, i.\,e.\ @{prop "f \<in> O[F](g)"} and analogously for \<^emph>\<open>o\<close>, \<open>\<Omega>\<close>, \<open>\<omega>\<close>, \<open>\<Theta>\<close>
+ \<^item> Landau symbols, i.\,e.\ \<^prop>\<open>f \<in> O[F](g)\<close> and analogously for \<^emph>\<open>o\<close>, \<open>\<Omega>\<close>, \<open>\<omega>\<close>, \<open>\<Theta>\<close>
- \<^item> Asymptotic equivalence, i.\,e.\ @{prop "f \<sim>[F] g"}
+ \<^item> Asymptotic equivalence, i.\,e.\ \<^prop>\<open>f \<sim>[F] g\<close>
- \<^item> Asymptotic inequalities, i.\,e.\ @{prop "eventually (\<lambda>x. f x \<le> g x) F"}
+ \<^item> Asymptotic inequalities, i.\,e.\ \<^prop>\<open>eventually (\<lambda>x. f x \<le> g x) F\<close>
For typical problems arising in practice that do not contain free variables, @{method real_asymp}
tends to succeed fully automatically within fractions of seconds, e.\,g.:
@@ -117,7 +117,7 @@
text \<open>
Here, @{method real_asymp} outputs an error message stating that it could not determine the
- sign of the free variable @{term "a :: real"}. In this case, the user must add the assumption
+ sign of the free variable \<^term>\<open>a :: real\<close>. In this case, the user must add the assumption
\<open>a > 0\<close> and give it to @{method real_asymp}.
\<close>
lemma
@@ -133,7 +133,7 @@
The same situation can also occur without free variables if the constant in question is a
complicated expression that the simplifier does not know enough ebout,
- e.\,g.\ @{term "pi - exp 1"}.
+ e.\,g.\ \<^term>\<open>pi - exp 1\<close>.
In order to trace problems with sign determination, the \<open>(verbose)\<close> option can be passed to
@{method real_asymp}. It will then print a detailed error message whenever it encounters
@@ -157,7 +157,7 @@
@{command_def (HOL) real_expansion} & : & \<open>context \<rightarrow>\<close>
\end{array}\]
- @{rail \<open>
+ \<^rail>\<open>
@@{command (HOL) real_limit} (limitopt*)
;
@@{command (HOL) real_expansion} (expansionopt*)
@@ -168,23 +168,23 @@
('limit' ':' @{syntax term}) |
('terms' ':' @{syntax nat} ('(' 'strict' ')') ?) |
('facts' ':' @{syntax thms})
- \<close>}
+ \<close>
\<^descr>@{command real_limit} computes the limit of the given function \<open>f(x)\<close> for as \<open>x\<close> tends
to the specified limit point. Additional facts can be provided with the \<open>facts\<close> option,
similarly to the @{command using} command with @{method real_asymp}. The limit point given
- by the \<open>limit\<close> option must be one of the filters @{term "at_top"}, @{term "at_bot"},
- @{term "at_left"}, or @{term "at_right"}. If no limit point is given, @{term "at_top"} is used
+ by the \<open>limit\<close> option must be one of the filters \<^term>\<open>at_top\<close>, \<^term>\<open>at_bot\<close>,
+ \<^term>\<open>at_left\<close>, or \<^term>\<open>at_right\<close>. If no limit point is given, \<^term>\<open>at_top\<close> is used
by default.
\<^medskip>
The output of @{command real_limit} can be \<open>\<infinity>\<close>, \<open>-\<infinity>\<close>, \<open>\<plusminus>\<infinity>\<close>, \<open>c\<close> (for some real constant \<open>c\<close>),
\<open>0\<^sup>+\<close>, or \<open>0\<^sup>-\<close>. The \<open>+\<close> and \<open>-\<close> in the last case indicate whether the approach is from above
- or from below (corresponding to @{term "at_right (0::real)"} and @{term "at_left (0::real)"});
+ or from below (corresponding to \<^term>\<open>at_right (0::real)\<close> and \<^term>\<open>at_left (0::real)\<close>);
for technical reasons, this information is currently not displayed if the limit is not 0.
\<^medskip>
- If the given function does not tend to a definite limit (e.\,g.\ @{term "sin x"} for \<open>x \<rightarrow> \<infinity>\<close>),
+ If the given function does not tend to a definite limit (e.\,g.\ \<^term>\<open>sin x\<close> for \<open>x \<rightarrow> \<infinity>\<close>),
the command might nevertheless succeed to compute an asymptotic upper and/or lower bound and
display the limits of these bounds instead.
@@ -204,7 +204,7 @@
of their implementation -- finding a multiseries expansion and reading off the limit -- are the
same as in the @{method real_asymp} method and therefore trustworthy, there is a small amount
of unverified code involved in pre-processing and printing (e.\,g.\ for reducing all the
- different options for the \<open>limit\<close> option to the @{term at_top} case).
+ different options for the \<open>limit\<close> option to the \<^term>\<open>at_top\<close> case).
\<close>
@@ -219,7 +219,7 @@
\<^descr>@{thm [source] real_asymp_reify_simps} specifies a list of (unconditional) equations that are
unfolded as a first step of @{method real_asymp} and the related commands. This can be used to
add support for operations that can be represented easily by other operations that are already
- supported, such as @{term sinh}, which is equal to @{term "\<lambda>x. (exp x - exp (-x)) / 2"}.
+ supported, such as \<^term>\<open>sinh\<close>, which is equal to \<^term>\<open>\<lambda>x. (exp x - exp (-x)) / 2\<close>.
\<^descr>@{thm [source] real_asymp_nat_reify} and @{thm [source] real_asymp_int_reify} is used to
convert operations on natural numbers or integers to operations on real numbers. This enables
@@ -236,13 +236,13 @@
The first step is to write an ML function that takes as arguments
\<^item> the expansion context
\<^item> the term \<open>t\<close> to expand (which will be of the form \<open>f(g\<^sub>1(x), \<dots>, g\<^sub>n(x))\<close>)
- \<^item> a list of \<open>n\<close> theorems of the form @{prop "(g\<^sub>i expands_to G\<^sub>i) bs"}
+ \<^item> a list of \<open>n\<close> theorems of the form \<^prop>\<open>(g\<^sub>i expands_to G\<^sub>i) bs\<close>
\<^item> the current basis \<open>bs\<close>
- and returns a theorem of the form @{prop "(t expands_to F) bs'"} and a new basis \<open>bs'\<close> which
+ and returns a theorem of the form \<^prop>\<open>(t expands_to F) bs'\<close> and a new basis \<open>bs'\<close> which
must be a superset of the original basis.
This function must then be registered as a handler for the operation by proving a vacuous lemma
- of the form @{prop "REAL_ASYMP_CUSTOM f"} (which is only used for tagging) and passing that
+ of the form \<^prop>\<open>REAL_ASYMP_CUSTOM f\<close> (which is only used for tagging) and passing that
lemma and the expansion function to @{ML [source] Exp_Log_Expression.register_custom_from_thm}
in a @{command local_setup} invocation.
@@ -253,7 +253,7 @@
involving \<open>constructors\<close>.
New constructors for this pattern matching can be registered by adding a theorem of the form
- @{prop "REAL_ASYMP_EVAL_CONSTRUCTOR c"} to the fact collection
+ \<^prop>\<open>REAL_ASYMP_EVAL_CONSTRUCTOR c\<close> to the fact collection
@{thm [source] exp_log_eval_constructor}, but this should be quite rare in practice.
\<^medskip>
--- a/src/HOL/Real_Asymp/Multiseries_Expansion.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Multiseries_Expansion.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4773,8 +4773,8 @@
text \<open>
The following is used by the automation in order to avoid writing terms like $x^2$ or $x^{-2}$
- as @{term "\<lambda>x::real. x powr 2"} etc.\ but as the more agreeable @{term "\<lambda>x::real. x ^ 2"} or
- @{term "\<lambda>x::real. inverse (x ^ 2)"}.
+ as \<^term>\<open>\<lambda>x::real. x powr 2\<close> etc.\ but as the more agreeable \<^term>\<open>\<lambda>x::real. x ^ 2\<close> or
+ \<^term>\<open>\<lambda>x::real. inverse (x ^ 2)\<close>.
\<close>
lemma intyness_0: "0 \<equiv> real 0"
@@ -4838,7 +4838,7 @@
text \<open>
- This is needed in order to handle things like @{term "\<lambda>n. f n ^ n"}.
+ This is needed in order to handle things like \<^term>\<open>\<lambda>n. f n ^ n\<close>.
\<close>
definition powr_nat :: "real \<Rightarrow> real \<Rightarrow> real" where
"powr_nat x y =
--- a/src/HOL/Real_Asymp/Real_Asymp_Approx.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Real_Asymp_Approx.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
begin
text \<open>
- For large enough constants (such as @{term "exp (exp 10000 :: real)"}), the
+ For large enough constants (such as \<^term>\<open>exp (exp 10000 :: real)\<close>), the
@{method approximation} method can require a huge amount of time and memory, effectively not
terminating and causing the entire prover environment to crash.
@@ -32,20 +32,20 @@
struct
val real_asymp_approx =
- Attrib.setup_config_bool @{binding real_asymp_approx} (K true)
+ Attrib.setup_config_bool \<^binding>\<open>real_asymp_approx\<close> (K true)
val nan = Real.fromInt 0 / Real.fromInt 0
fun clamp n = if n < 0 then 0 else n
-fun eval_nat (@{term "(+) :: nat => _"} $ a $ b) = bin (op +) (a, b)
- | eval_nat (@{term "(-) :: nat => _"} $ a $ b) = bin (clamp o op -) (a, b)
- | eval_nat (@{term "(*) :: nat => _"} $ a $ b) = bin (op *) (a, b)
- | eval_nat (@{term "(div) :: nat => _"} $ a $ b) = bin Int.div (a, b)
- | eval_nat (@{term "(^) :: nat => _"} $ a $ b) = bin (fn (a,b) => Integer.pow a b) (a, b)
- | eval_nat (t as (@{term "numeral :: _ => nat"} $ _)) = snd (HOLogic.dest_number t)
- | eval_nat (@{term "0 :: nat"}) = 0
- | eval_nat (@{term "1 :: nat"}) = 1
- | eval_nat (@{term "Nat.Suc"} $ a) = un (fn n => n + 1) a
+fun eval_nat (\<^term>\<open>(+) :: nat => _\<close> $ a $ b) = bin (op +) (a, b)
+ | eval_nat (\<^term>\<open>(-) :: nat => _\<close> $ a $ b) = bin (clamp o op -) (a, b)
+ | eval_nat (\<^term>\<open>(*) :: nat => _\<close> $ a $ b) = bin (op *) (a, b)
+ | eval_nat (\<^term>\<open>(div) :: nat => _\<close> $ a $ b) = bin Int.div (a, b)
+ | eval_nat (\<^term>\<open>(^) :: nat => _\<close> $ a $ b) = bin (fn (a,b) => Integer.pow a b) (a, b)
+ | eval_nat (t as (\<^term>\<open>numeral :: _ => nat\<close> $ _)) = snd (HOLogic.dest_number t)
+ | eval_nat (\<^term>\<open>0 :: nat\<close>) = 0
+ | eval_nat (\<^term>\<open>1 :: nat\<close>) = 1
+ | eval_nat (\<^term>\<open>Nat.Suc\<close> $ a) = un (fn n => n + 1) a
| eval_nat _ = ~1
and un f a =
let
@@ -63,19 +63,19 @@
fun sgn n =
if n < Real.fromInt 0 then Real.fromInt (~1) else Real.fromInt 1
-fun eval (@{term "(+) :: real => _"} $ a $ b) = eval a + eval b
- | eval (@{term "(-) :: real => _"} $ a $ b) = eval a - eval b
- | eval (@{term "(*) :: real => _"} $ a $ b) = eval a * eval b
- | eval (@{term "(/) :: real => _"} $ a $ b) =
+fun eval (\<^term>\<open>(+) :: real => _\<close> $ a $ b) = eval a + eval b
+ | eval (\<^term>\<open>(-) :: real => _\<close> $ a $ b) = eval a - eval b
+ | eval (\<^term>\<open>(*) :: real => _\<close> $ a $ b) = eval a * eval b
+ | eval (\<^term>\<open>(/) :: real => _\<close> $ a $ b) =
let val a = eval a; val b = eval b in
if Real.==(b, Real.fromInt 0) then nan else a / b
end
- | eval (@{term "inverse :: real => _"} $ a) = Real.fromInt 1 / eval a
- | eval (@{term "uminus :: real => _"} $ a) = Real.~ (eval a)
- | eval (@{term "exp :: real => _"} $ a) = Math.exp (eval a)
- | eval (@{term "ln :: real => _"} $ a) =
+ | eval (\<^term>\<open>inverse :: real => _\<close> $ a) = Real.fromInt 1 / eval a
+ | eval (\<^term>\<open>uminus :: real => _\<close> $ a) = Real.~ (eval a)
+ | eval (\<^term>\<open>exp :: real => _\<close> $ a) = Math.exp (eval a)
+ | eval (\<^term>\<open>ln :: real => _\<close> $ a) =
let val a = eval a in if a > Real.fromInt 0 then Math.ln a else nan end
- | eval (@{term "(powr) :: real => _"} $ a $ b) =
+ | eval (\<^term>\<open>(powr) :: real => _\<close> $ a $ b) =
let
val a = eval a; val b = eval b
in
@@ -86,7 +86,7 @@
else
Math.pow (a, b)
end
- | eval (@{term "(^) :: real => _"} $ a $ b) =
+ | eval (\<^term>\<open>(^) :: real => _\<close> $ a $ b) =
let
fun powr x y =
if not (Real.isFinite x) orelse y < 0 then
@@ -98,13 +98,13 @@
in
powr (eval a) (eval_nat b)
end
- | eval (@{term "root :: nat => real => _"} $ n $ a) =
+ | eval (\<^term>\<open>root :: nat => real => _\<close> $ n $ a) =
let val a = eval a; val n = eval_nat n in
if n = 0 then Real.fromInt 0
else sgn a * Math.pow (Real.abs a, Real.fromInt 1 / Real.fromInt n) end
- | eval (@{term "sqrt :: real => _"} $ a) =
+ | eval (\<^term>\<open>sqrt :: real => _\<close> $ a) =
let val a = eval a in sgn a * Math.sqrt (abs a) end
- | eval (@{term "log :: real => _"} $ a $ b) =
+ | eval (\<^term>\<open>log :: real => _\<close> $ a $ b) =
let
val (a, b) = apply2 eval (a, b)
in
@@ -113,10 +113,10 @@
else
nan
end
- | eval (t as (@{term "numeral :: _ => real"} $ _)) =
+ | eval (t as (\<^term>\<open>numeral :: _ => real\<close> $ _)) =
Real.fromInt (snd (HOLogic.dest_number t))
- | eval (@{term "0 :: real"}) = Real.fromInt 0
- | eval (@{term "1 :: real"}) = Real.fromInt 1
+ | eval (\<^term>\<open>0 :: real\<close>) = Real.fromInt 0
+ | eval (\<^term>\<open>1 :: real\<close>) = Real.fromInt 1
| eval _ = nan
fun sign_oracle_tac ctxt i =
@@ -125,13 +125,13 @@
let
val b =
case HOLogic.dest_Trueprop (Thm.term_of concl) of
- @{term "(<) :: real \<Rightarrow> _"} $ lhs $ rhs =>
+ \<^term>\<open>(<) :: real \<Rightarrow> _\<close> $ lhs $ rhs =>
let
val (x, y) = apply2 eval (lhs, rhs)
in
Real.isFinite x andalso Real.isFinite y andalso x < y
end
- | @{term "HOL.Not"} $ (@{term "(=) :: real \<Rightarrow> _"} $ lhs $ rhs) =>
+ | \<^term>\<open>HOL.Not\<close> $ (\<^term>\<open>(=) :: real \<Rightarrow> _\<close> $ lhs $ rhs) =>
let
val (x, y) = apply2 eval (lhs, rhs)
in
@@ -154,7 +154,7 @@
setup \<open>
Context.theory_map (
Multiseries_Expansion.register_sign_oracle
- (@{binding approximation_tac}, Real_Asymp_Approx.sign_oracle_tac))
+ (\<^binding>\<open>approximation_tac\<close>, Real_Asymp_Approx.sign_oracle_tac))
\<close>
lemma "filterlim (\<lambda>n. (1 + (2 / 3 :: real) ^ (n + 1)) ^ 2 ^ n / 2 powr (4 / 3) ^ (n - 1))
--- a/src/HOL/Real_Asymp/Real_Asymp_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/Real_Asymp_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -231,12 +231,12 @@
qed
-text \<open>@{url "http://math.stackexchange.com/questions/625574"}\<close>
+text \<open>\<^url>\<open>http://math.stackexchange.com/questions/625574\<close>\<close>
lemma "(\<lambda>x::real. (1 - 1/2 * x^2 - cos (x / (1 - x^2))) / x^4) \<midarrow>0\<rightarrow> 23/24"
by real_asymp
-text \<open>@{url "http://math.stackexchange.com/questions/122967"}\<close>
+text \<open>\<^url>\<open>http://math.stackexchange.com/questions/122967\<close>\<close>
real_limit "\<lambda>x. (x + 1) powr (1 + 1 / x) - x powr (1 + 1 / (x + a))"
@@ -270,11 +270,11 @@
end
-text \<open>@{url "http://math.stackexchange.com/questions/547538"}\<close>
+text \<open>\<^url>\<open>http://math.stackexchange.com/questions/547538\<close>\<close>
lemma "(\<lambda>x::real. ((x+4) powr (3/2) + exp x - 9) / x) \<midarrow>0\<rightarrow> 4"
by real_asymp
-text \<open>@{url "https://www.freemathhelp.com/forum/threads/93513"}\<close>
+text \<open>\<^url>\<open>https://www.freemathhelp.com/forum/threads/93513\<close>\<close>
lemma "((\<lambda>x::real. ((3 powr x + 4 powr x) / 4) powr (1/x)) \<longlongrightarrow> 4) at_top"
by real_asymp
@@ -282,7 +282,7 @@
by real_asymp
-text \<open>@{url "https://www.math.ucdavis.edu/~kouba/CalcOneDIRECTORY/limcondirectory/LimitConstant.html"}\<close>
+text \<open>\<^url>\<open>https://www.math.ucdavis.edu/~kouba/CalcOneDIRECTORY/limcondirectory/LimitConstant.html\<close>\<close>
lemma "(\<lambda>x::real. (cos (2*x) - 1) / (cos x - 1)) \<midarrow>0\<rightarrow> 4"
by real_asymp
@@ -294,7 +294,7 @@
lemma "filterlim (\<lambda>x::real. (3 powr x + 3 powr (2*x)) powr (1/x)) (nhds 9) at_top"
using powr_def[of 3 "2::real"] by real_asymp
-text \<open>@{url "https://www.math.ucdavis.edu/~kouba/CalcOneDIRECTORY/lhopitaldirectory/LHopital.html"}\<close>
+text \<open>\<^url>\<open>https://www.math.ucdavis.edu/~kouba/CalcOneDIRECTORY/lhopitaldirectory/LHopital.html\<close>\<close>
lemma "filterlim (\<lambda>x::real. (x^2 - 1) / (x^2 + 3*x - 4)) (nhds (2/5)) (at 1)"
by real_asymp
@@ -375,7 +375,7 @@
by (real_asymp (verbose))
-text \<open>@{url "http://calculus.nipissingu.ca/problems/limit_problems.html"}\<close>
+text \<open>\<^url>\<open>http://calculus.nipissingu.ca/problems/limit_problems.html\<close>\<close>
lemma "((\<lambda>x::real. (x^2 - 1) / \<bar>x - 1\<bar>) \<longlongrightarrow> -2) (at_left 1)"
"((\<lambda>x::real. (x^2 - 1) / \<bar>x - 1\<bar>) \<longlongrightarrow> 2) (at_right 1)"
by real_asymp+
@@ -385,27 +385,27 @@
by real_asymp+
-text \<open>@{url "https://math.stackexchange.com/questions/547538"}\<close>
+text \<open>\<^url>\<open>https://math.stackexchange.com/questions/547538\<close>\<close>
lemma "(\<lambda>x::real. ((x + 4) powr (3/2) + exp x - 9) / x) \<midarrow>0\<rightarrow> 4"
by real_asymp
-text \<open>@{url "https://math.stackexchange.com/questions/625574"}\<close>
+text \<open>\<^url>\<open>https://math.stackexchange.com/questions/625574\<close>\<close>
lemma "(\<lambda>x::real. (1 - x^2 / 2 - cos (x / (1 - x^2))) / x^4) \<midarrow>0\<rightarrow> 23/24"
by real_asymp
-text \<open>@{url "https://www.mapleprimes.com/questions/151308-A-Hard-Limit-Question"}\<close>
+text \<open>\<^url>\<open>https://www.mapleprimes.com/questions/151308-A-Hard-Limit-Question\<close>\<close>
lemma "(\<lambda>x::real. x / (x - 1) - 1 / ln x) \<midarrow>1\<rightarrow> 1 / 2"
by real_asymp
-text \<open>@{url "https://www.freemathhelp.com/forum/threads/93513-two-extremely-difficult-limit-problems"}\<close>
+text \<open>\<^url>\<open>https://www.freemathhelp.com/forum/threads/93513-two-extremely-difficult-limit-problems\<close>\<close>
lemma "((\<lambda>x::real. ((3 powr x + 4 powr x) / 4) powr (1/x)) \<longlongrightarrow> 4) at_top"
by real_asymp
lemma "((\<lambda>x::real. x powr 1.5 * (sqrt (x + 1) + sqrt (x - 1) - 2 * sqrt x)) \<longlongrightarrow> -1/4) at_top"
by real_asymp
-text \<open>@{url "https://math.stackexchange.com/questions/1390833"}\<close>
+text \<open>\<^url>\<open>https://math.stackexchange.com/questions/1390833\<close>\<close>
context
fixes a b :: real
assumes ab: "a + b > 0" "a + b = 1"
--- a/src/HOL/Real_Asymp/asymptotic_basis.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/asymptotic_basis.ML Sat Jan 05 17:24:33 2019 +0100
@@ -108,15 +108,15 @@
fun get_basis_list SEmpty = []
| get_basis_list (SNE basis) = get_basis_list' basis
-val get_basis_term = HOLogic.mk_list @{typ "real => real"} o get_basis_list
+val get_basis_term = HOLogic.mk_list \<^typ>\<open>real => real\<close> o get_basis_list
fun extract_basis_list thm =
let
val basis =
case HOLogic.dest_Trueprop (Thm.concl_of thm) of
- Const (@{const_name "is_expansion"}, _) $ _ $ basis => basis
- | Const (@{const_name "expands_to"}, _) $ _ $ _ $ basis => basis
- | Const (@{const_name "basis_wf"}, _) $ basis => basis
+ Const (\<^const_name>\<open>is_expansion\<close>, _) $ _ $ basis => basis
+ | Const (\<^const_name>\<open>expands_to\<close>, _) $ _ $ _ $ basis => basis
+ | Const (\<^const_name>\<open>basis_wf\<close>, _) $ basis => basis
| _ => raise THM ("get_basis", 1, [thm])
in
HOLogic.dest_list basis |> map Envir.eta_contract
@@ -154,7 +154,7 @@
raise BASIS ("Head mismatch", SNE basis')
val _ = if eq_list abconv (HOLogic.dest_list ln_basis, get_basis_list' basis)
then () else raise BASIS ("Incorrect basis in ln_thm", SNE basis')
- val _ = if abconv (ln_fun, @{term "\<lambda>(f::real\<Rightarrow>real) x. ln (f x)"} $ head) then () else
+ val _ = if abconv (ln_fun, \<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. ln (f x)\<close> $ head) then () else
raise BASIS ("Wrong function in ln_expansion", SNE basis')
val _ = if abconv (ln_exp, trimmed_thm |> concl_of' |> dest_arg) then () else
raise BASIS ("Wrong expansion in trimmed_thm", SNE basis')
@@ -191,7 +191,7 @@
combine_lifts (mk_lifting_aux (b :: bs) basis')
(get_basis_wf_thm basis RS @{thm is_lifting_lift}))
val bs' = get_basis_list basis
- fun err () = raise TERM ("mk_lifting", map (HOLogic.mk_list @{typ "real => real"}) [bs, bs'])
+ fun err () = raise TERM ("mk_lifting", map (HOLogic.mk_list \<^typ>\<open>real => real\<close>) [bs, bs'])
in
if subset (op aconv) (bs, bs') then
mk_lifting_aux bs basis handle Match => err ()
@@ -223,7 +223,7 @@
fun insert_ln' (SSng {wf_thm, head}) =
let
val head' = Envir.eta_contract
- (Abs ("x", @{typ real}, @{term "ln :: real \<Rightarrow> real"} $ (betapply (head, Bound 0))))
+ (Abs ("x", \<^typ>\<open>real\<close>, \<^term>\<open>ln :: real \<Rightarrow> real\<close> $ (betapply (head, Bound 0))))
val info1 = {wf_thm = wf_thm RS @{thm basis_wf_insert_ln(2)}, head = head}
val info2 = {wf_thm = wf_thm RS @{thm basis_wf_insert_ln(1)}, head = head'}
val ln_thm = wf_thm RS @{thm expands_to_insert_ln}
@@ -237,6 +237,6 @@
| insert_ln (SNE basis) = check_basis (SNE (insert_ln' basis))
val default_basis =
- SNE (SSng {head = @{term "\<lambda>x::real. x"}, wf_thm = @{thm default_basis_wf}})
+ SNE (SSng {head = \<^term>\<open>\<lambda>x::real. x\<close>, wf_thm = @{thm default_basis_wf}})
end
\ No newline at end of file
--- a/src/HOL/Real_Asymp/exp_log_expression.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/exp_log_expression.ML Sat Jan 05 17:24:33 2019 +0100
@@ -205,7 +205,7 @@
fun preproc_term_conv ctxt =
let
- val thms = Named_Theorems.get ctxt @{named_theorems "real_asymp_reify_simps"}
+ val thms = Named_Theorems.get ctxt \<^named_theorems>\<open>real_asymp_reify_simps\<close>
val thms = map (fn thm => thm RS @{thm HOL.eq_reflection}) thms
in
rewrite ctxt thms
@@ -215,7 +215,7 @@
let
val n = pat |> fastype_of |> strip_type |> fst |> length
val maxidx = Term.maxidx_of_term pat
- val vars = map (fn i => Var ((Name.uu_, maxidx + i), @{typ real})) (1 upto n)
+ val vars = map (fn i => Var ((Name.uu_, maxidx + i), \<^typ>\<open>real\<close>)) (1 upto n)
val net_pat = Library.foldl betapply (pat, vars)
val {name_table = tbl, net = net} = Custom_Funs.get context
val entry' = {pat = pat, net_pat = net_pat, expand = expand}
@@ -255,79 +255,79 @@
fun expr_to_term' (ConstExpr c) = c
| expr_to_term' X = Bound 0
| expr_to_term' (Add (a, b)) =
- @{term "(+) :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>(+) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Mult (a, b)) =
- @{term "(*) :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>(*) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Minus (a, b)) =
- @{term "(-) :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>(-) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Div (a, b)) =
- @{term "(/) :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>(/) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Uminus a) =
- @{term "uminus :: real => _"} $ expr_to_term' a
+ \<^term>\<open>uminus :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Inverse a) =
- @{term "inverse :: real => _"} $ expr_to_term' a
+ \<^term>\<open>inverse :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Ln a) =
- @{term "ln :: real => _"} $ expr_to_term' a
+ \<^term>\<open>ln :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Exp a) =
- @{term "exp :: real => _"} $ expr_to_term' a
+ \<^term>\<open>exp :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Powr (a,b)) =
- @{term "(powr) :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>(powr) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Powr_Nat (a,b)) =
- @{term "powr_nat :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>powr_nat :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (LnPowr (a,b)) =
- @{term "ln :: real => _"} $
- (@{term "(powr) :: real => _"} $ expr_to_term' a $ expr_to_term' b)
+ \<^term>\<open>ln :: real => _\<close> $
+ (\<^term>\<open>(powr) :: real => _\<close> $ expr_to_term' a $ expr_to_term' b)
| expr_to_term' (ExpLn a) =
- @{term "exp :: real => _"} $ (@{term "ln :: real => _"} $ expr_to_term' a)
+ \<^term>\<open>exp :: real => _\<close> $ (\<^term>\<open>ln :: real => _\<close> $ expr_to_term' a)
| expr_to_term' (Powr' (a,b)) =
- @{term "(powr) :: real => _"} $ expr_to_term' a $ b
+ \<^term>\<open>(powr) :: real => _\<close> $ expr_to_term' a $ b
| expr_to_term' (Power (a,b)) =
- @{term "(^) :: real => _"} $ expr_to_term' a $ b
+ \<^term>\<open>(^) :: real => _\<close> $ expr_to_term' a $ b
| expr_to_term' (Floor a) =
- @{term Multiseries_Expansion.rfloor} $ expr_to_term' a
+ \<^term>\<open>Multiseries_Expansion.rfloor\<close> $ expr_to_term' a
| expr_to_term' (Ceiling a) =
- @{term Multiseries_Expansion.rceil} $ expr_to_term' a
+ \<^term>\<open>Multiseries_Expansion.rceil\<close> $ expr_to_term' a
| expr_to_term' (Frac a) =
- @{term "Archimedean_Field.frac :: real \<Rightarrow> real"} $ expr_to_term' a
+ \<^term>\<open>Archimedean_Field.frac :: real \<Rightarrow> real\<close> $ expr_to_term' a
| expr_to_term' (NatMod (a,b)) =
- @{term "Multiseries_Expansion.rnatmod"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>Multiseries_Expansion.rnatmod\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Root (a,b)) =
- @{term "root :: nat \<Rightarrow> real \<Rightarrow> _"} $ b $ expr_to_term' a
+ \<^term>\<open>root :: nat \<Rightarrow> real \<Rightarrow> _\<close> $ b $ expr_to_term' a
| expr_to_term' (Sin a) =
- @{term "sin :: real => _"} $ expr_to_term' a
+ \<^term>\<open>sin :: real => _\<close> $ expr_to_term' a
| expr_to_term' (ArcTan a) =
- @{term "arctan :: real => _"} $ expr_to_term' a
+ \<^term>\<open>arctan :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Cos a) =
- @{term "cos :: real => _"} $ expr_to_term' a
+ \<^term>\<open>cos :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Absolute a) =
- @{term "abs :: real => _"} $ expr_to_term' a
+ \<^term>\<open>abs :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Sgn a) =
- @{term "sgn :: real => _"} $ expr_to_term' a
+ \<^term>\<open>sgn :: real => _\<close> $ expr_to_term' a
| expr_to_term' (Min (a,b)) =
- @{term "min :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>min :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Max (a,b)) =
- @{term "max :: real => _"} $ expr_to_term' a $ expr_to_term' b
+ \<^term>\<open>max :: real => _\<close> $ expr_to_term' a $ expr_to_term' b
| expr_to_term' (Custom (_, t, args)) = Envir.beta_eta_contract (
fold (fn e => fn t => betapply (t, expr_to_term' e )) args t)
in
- Abs ("x", @{typ "real"}, expr_to_term' e)
+ Abs ("x", \<^typ>\<open>real\<close>, expr_to_term' e)
end
fun reify_custom ctxt t =
let
val thy = Proof_Context.theory_of ctxt
val t = Envir.beta_eta_contract t
- val t' = Envir.beta_eta_contract (Term.abs ("x", @{typ real}) t)
+ val t' = Envir.beta_eta_contract (Term.abs ("x", \<^typ>\<open>real\<close>) t)
val {net, ...} = Custom_Funs.get (Context.Proof ctxt)
- val entries = Item_Net.retrieve_matching net (Term.subst_bound (Free ("x", @{typ real}), t))
+ val entries = Item_Net.retrieve_matching net (Term.subst_bound (Free ("x", \<^typ>\<open>real\<close>), t))
fun go {pat, name, ...} =
let
val n = pat |> fastype_of |> strip_type |> fst |> length
val maxidx = Term.maxidx_of_term t'
val vs = map (fn i => (Name.uu_, maxidx + i)) (1 upto n)
- val args = map (fn v => Var (v, @{typ "real => real"}) $ Bound 0) vs
+ val args = map (fn v => Var (v, \<^typ>\<open>real => real\<close>) $ Bound 0) vs
val pat' =
- Envir.beta_eta_contract (Term.abs ("x", @{typ "real"})
+ Envir.beta_eta_contract (Term.abs ("x", \<^typ>\<open>real\<close>)
(Library.foldl betapply (pat, args)))
val (T_insts, insts) = Pattern.match thy (pat', t') (Vartab.empty, Vartab.empty)
fun map_option _ [] acc = SOME (rev acc)
@@ -347,58 +347,58 @@
fun reify_aux ctxt t' t =
let
fun is_const t =
- fastype_of (Abs ("x", @{typ real}, t)) = @{typ "real \<Rightarrow> real"}
+ fastype_of (Abs ("x", \<^typ>\<open>real\<close>, t)) = \<^typ>\<open>real \<Rightarrow> real\<close>
andalso not (exists_subterm (fn t => t = Bound 0) t)
fun is_const' t = not (exists_subterm (fn t => t = Bound 0) t)
- fun reify'' (@{term "(+) :: real => _"} $ s $ t) =
+ fun reify'' (\<^term>\<open>(+) :: real => _\<close> $ s $ t) =
Add (reify' s, reify' t)
- | reify'' (@{term "(-) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(-) :: real => _\<close> $ s $ t) =
Minus (reify' s, reify' t)
- | reify'' (@{term "(*) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(*) :: real => _\<close> $ s $ t) =
Mult (reify' s, reify' t)
- | reify'' (@{term "(/) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(/) :: real => _\<close> $ s $ t) =
Div (reify' s, reify' t)
- | reify'' (@{term "uminus :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>uminus :: real => _\<close> $ s) =
Uminus (reify' s)
- | reify'' (@{term "inverse :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>inverse :: real => _\<close> $ s) =
Inverse (reify' s)
- | reify'' (@{term "ln :: real => _"} $ (@{term "(powr) :: real => _"} $ s $ t)) =
+ | reify'' (\<^term>\<open>ln :: real => _\<close> $ (\<^term>\<open>(powr) :: real => _\<close> $ s $ t)) =
LnPowr (reify' s, reify' t)
- | reify'' (@{term "exp :: real => _"} $ (@{term "ln :: real => _"} $ s)) =
+ | reify'' (\<^term>\<open>exp :: real => _\<close> $ (\<^term>\<open>ln :: real => _\<close> $ s)) =
ExpLn (reify' s)
- | reify'' (@{term "ln :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>ln :: real => _\<close> $ s) =
Ln (reify' s)
- | reify'' (@{term "exp :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>exp :: real => _\<close> $ s) =
Exp (reify' s)
- | reify'' (@{term "(powr) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(powr) :: real => _\<close> $ s $ t) =
(if is_const t then Powr' (reify' s, t) else Powr (reify' s, reify' t))
- | reify'' (@{term "powr_nat :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>powr_nat :: real => _\<close> $ s $ t) =
Powr_Nat (reify' s, reify' t)
- | reify'' (@{term "(^) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(^) :: real => _\<close> $ s $ t) =
(if is_const' t then Power (reify' s, t) else raise TERM ("reify", [t']))
- | reify'' (@{term "root"} $ s $ t) =
+ | reify'' (\<^term>\<open>root\<close> $ s $ t) =
(if is_const' s then Root (reify' t, s) else raise TERM ("reify", [t']))
- | reify'' (@{term "abs :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>abs :: real => _\<close> $ s) =
Absolute (reify' s)
- | reify'' (@{term "sgn :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>sgn :: real => _\<close> $ s) =
Sgn (reify' s)
- | reify'' (@{term "min :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>min :: real => _\<close> $ s $ t) =
Min (reify' s, reify' t)
- | reify'' (@{term "max :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>max :: real => _\<close> $ s $ t) =
Max (reify' s, reify' t)
- | reify'' (@{term "Multiseries_Expansion.rfloor"} $ s) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rfloor\<close> $ s) =
Floor (reify' s)
- | reify'' (@{term "Multiseries_Expansion.rceil"} $ s) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rceil\<close> $ s) =
Ceiling (reify' s)
- | reify'' (@{term "Archimedean_Field.frac :: real \<Rightarrow> real"} $ s) =
+ | reify'' (\<^term>\<open>Archimedean_Field.frac :: real \<Rightarrow> real\<close> $ s) =
Frac (reify' s)
- | reify'' (@{term "Multiseries_Expansion.rnatmod"} $ s $ t) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rnatmod\<close> $ s $ t) =
NatMod (reify' s, reify' t)
- | reify'' (@{term "sin :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>sin :: real => _\<close> $ s) =
Sin (reify' s)
- | reify'' (@{term "arctan :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>arctan :: real => _\<close> $ s) =
ArcTan (reify' s)
- | reify'' (@{term "cos :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>cos :: real => _\<close> $ s) =
Cos (reify' s)
| reify'' (Bound 0) = X
| reify'' t =
@@ -413,7 +413,7 @@
and reify' t = if is_const t then ConstExpr t else reify'' t
in
case Envir.eta_long [] t of
- Abs (_, @{typ real}, t'') => reify' t''
+ Abs (_, \<^typ>\<open>real\<close>, t'') => reify' t''
| _ => raise TERM ("reify", [t])
end
@@ -428,52 +428,52 @@
fun reify_simple_aux ctxt t' t =
let
fun is_const t =
- fastype_of (Abs ("x", @{typ real}, t)) = @{typ "real \<Rightarrow> real"}
+ fastype_of (Abs ("x", \<^typ>\<open>real\<close>, t)) = \<^typ>\<open>real \<Rightarrow> real\<close>
andalso not (exists_subterm (fn t => t = Bound 0) t)
fun is_const' t = not (exists_subterm (fn t => t = Bound 0) t)
- fun reify'' (@{term "(+) :: real => _"} $ s $ t) =
+ fun reify'' (\<^term>\<open>(+) :: real => _\<close> $ s $ t) =
Add (reify'' s, reify'' t)
- | reify'' (@{term "(-) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(-) :: real => _\<close> $ s $ t) =
Minus (reify'' s, reify'' t)
- | reify'' (@{term "(*) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(*) :: real => _\<close> $ s $ t) =
Mult (reify'' s, reify'' t)
- | reify'' (@{term "(/) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(/) :: real => _\<close> $ s $ t) =
Div (reify'' s, reify'' t)
- | reify'' (@{term "uminus :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>uminus :: real => _\<close> $ s) =
Uminus (reify'' s)
- | reify'' (@{term "inverse :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>inverse :: real => _\<close> $ s) =
Inverse (reify'' s)
- | reify'' (@{term "ln :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>ln :: real => _\<close> $ s) =
Ln (reify'' s)
- | reify'' (@{term "exp :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>exp :: real => _\<close> $ s) =
Exp (reify'' s)
- | reify'' (@{term "(powr) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(powr) :: real => _\<close> $ s $ t) =
Powr (reify'' s, reify'' t)
- | reify'' (@{term "powr_nat :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>powr_nat :: real => _\<close> $ s $ t) =
Powr_Nat (reify'' s, reify'' t)
- | reify'' (@{term "(^) :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>(^) :: real => _\<close> $ s $ t) =
(if is_const' t then Power (reify'' s, t) else raise TERM ("reify", [t']))
- | reify'' (@{term "root"} $ s $ t) =
+ | reify'' (\<^term>\<open>root\<close> $ s $ t) =
(if is_const' s then Root (reify'' t, s) else raise TERM ("reify", [t']))
- | reify'' (@{term "abs :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>abs :: real => _\<close> $ s) =
Absolute (reify'' s)
- | reify'' (@{term "sgn :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>sgn :: real => _\<close> $ s) =
Sgn (reify'' s)
- | reify'' (@{term "min :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>min :: real => _\<close> $ s $ t) =
Min (reify'' s, reify'' t)
- | reify'' (@{term "max :: real => _"} $ s $ t) =
+ | reify'' (\<^term>\<open>max :: real => _\<close> $ s $ t) =
Max (reify'' s, reify'' t)
- | reify'' (@{term "Multiseries_Expansion.rfloor"} $ s) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rfloor\<close> $ s) =
Floor (reify'' s)
- | reify'' (@{term "Multiseries_Expansion.rceil"} $ s) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rceil\<close> $ s) =
Ceiling (reify'' s)
- | reify'' (@{term "Archimedean_Field.frac :: real \<Rightarrow> real"} $ s) =
+ | reify'' (\<^term>\<open>Archimedean_Field.frac :: real \<Rightarrow> real\<close> $ s) =
Frac (reify'' s)
- | reify'' (@{term "Multiseries_Expansion.rnatmod"} $ s $ t) =
+ | reify'' (\<^term>\<open>Multiseries_Expansion.rnatmod\<close> $ s $ t) =
NatMod (reify'' s, reify'' t)
- | reify'' (@{term "sin :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>sin :: real => _\<close> $ s) =
Sin (reify'' s)
- | reify'' (@{term "cos :: real => _"} $ s) =
+ | reify'' (\<^term>\<open>cos :: real => _\<close> $ s) =
Cos (reify'' s)
| reify'' (Bound 0) = X
| reify'' t =
@@ -490,7 +490,7 @@
| NONE => raise TERM ("reify", [t'])
in
case Envir.eta_long [] t of
- Abs (_, @{typ real}, t'') => reify'' t''
+ Abs (_, \<^typ>\<open>real\<close>, t'') => reify'' t''
| _ => raise TERM ("reify", [t])
end
@@ -503,17 +503,17 @@
end
fun simple_print_const (Free (x, _)) = x
- | simple_print_const (@{term "uminus :: real => real"} $ a) =
+ | simple_print_const (\<^term>\<open>uminus :: real => real\<close> $ a) =
"(-" ^ simple_print_const a ^ ")"
- | simple_print_const (@{term "(+) :: real => _"} $ a $ b) =
+ | simple_print_const (\<^term>\<open>(+) :: real => _\<close> $ a $ b) =
"(" ^ simple_print_const a ^ "+" ^ simple_print_const b ^ ")"
- | simple_print_const (@{term "(-) :: real => _"} $ a $ b) =
+ | simple_print_const (\<^term>\<open>(-) :: real => _\<close> $ a $ b) =
"(" ^ simple_print_const a ^ "-" ^ simple_print_const b ^ ")"
- | simple_print_const (@{term "(*) :: real => _"} $ a $ b) =
+ | simple_print_const (\<^term>\<open>(*) :: real => _\<close> $ a $ b) =
"(" ^ simple_print_const a ^ "*" ^ simple_print_const b ^ ")"
- | simple_print_const (@{term "inverse :: real => _"} $ a) =
+ | simple_print_const (\<^term>\<open>inverse :: real => _\<close> $ a) =
"(1 / " ^ simple_print_const a ^ ")"
- | simple_print_const (@{term "(/) :: real => _"} $ a $ b) =
+ | simple_print_const (\<^term>\<open>(/) :: real => _\<close> $ a $ b) =
"(" ^ simple_print_const a ^ "/" ^ simple_print_const b ^ ")"
| simple_print_const t = Int.toString (snd (HOLogic.dest_number t))
@@ -529,7 +529,7 @@
| to_mathematica (ExpLn a) = "Exp[Ln[" ^ to_mathematica a ^ "]]"
| to_mathematica (Power (a, b)) = "(" ^ to_mathematica a ^ " ^ " ^
to_mathematica (ConstExpr b) ^ ")"
- | to_mathematica (Root (a, @{term "2::real"})) = "Sqrt[" ^ to_mathematica a ^ "]"
+ | to_mathematica (Root (a, \<^term>\<open>2::real\<close>)) = "Sqrt[" ^ to_mathematica a ^ "]"
| to_mathematica (Root (a, b)) = "Surd[" ^ to_mathematica a ^ ", " ^
to_mathematica (ConstExpr b) ^ "]"
| to_mathematica (Uminus a) = "(-" ^ to_mathematica a ^ ")"
@@ -562,7 +562,7 @@
| to_maple (ExpLn a) = "ln(exp(" ^ to_maple a ^ "))"
| to_maple (Power (a, b)) = "(" ^ to_maple a ^ " ^ " ^
to_maple (ConstExpr b) ^ ")"
- | to_maple (Root (a, @{term "2::real"})) = "sqrt(" ^ to_maple a ^ ")"
+ | to_maple (Root (a, \<^term>\<open>2::real\<close>)) = "sqrt(" ^ to_maple a ^ ")"
| to_maple (Root (a, b)) = "root(" ^ to_maple a ^ ", " ^
to_maple (ConstExpr b) ^ ")"
| to_maple (Uminus a) = "(-" ^ to_maple a ^ ")"
@@ -594,7 +594,7 @@
| to_maxima (LnPowr (a, b)) = "log(" ^ to_maxima a ^ " ^ " ^ to_maxima b ^ ")"
| to_maxima (Power (a, b)) = "(" ^ to_maxima a ^ " ^ " ^
to_maxima (ConstExpr b) ^ ")"
- | to_maxima (Root (a, @{term "2::real"})) = "sqrt(" ^ to_maxima a ^ ")"
+ | to_maxima (Root (a, \<^term>\<open>2::real\<close>)) = "sqrt(" ^ to_maxima a ^ ")"
| to_maxima (Root (a, b)) = to_maxima a ^ "^(1/" ^
to_maxima (ConstExpr b) ^ ")"
| to_maxima (Uminus a) = "(-" ^ to_maxima a ^ ")"
@@ -626,7 +626,7 @@
| to_sympy (LnPowr (a, b)) = "log(" ^ to_sympy a ^ " ** " ^ to_sympy b ^ ")"
| to_sympy (Power (a, b)) = "(" ^ to_sympy a ^ " ** " ^
to_sympy (ConstExpr b) ^ ")"
- | to_sympy (Root (a, @{term "2::real"})) = "sqrt(" ^ to_sympy a ^ ")"
+ | to_sympy (Root (a, \<^term>\<open>2::real\<close>)) = "sqrt(" ^ to_sympy a ^ ")"
| to_sympy (Root (a, b)) = "root(" ^ to_sympy a ^ ", " ^ to_sympy (ConstExpr b) ^ ")"
| to_sympy (Uminus a) = "(-" ^ to_sympy a ^ ")"
| to_sympy (Inverse a) = "(1/(" ^ to_sympy a ^ "))"
@@ -657,7 +657,7 @@
| to_sage (LnPowr (a, b)) = "log(" ^ to_sage a ^ " ^ " ^ to_sage b ^ ")"
| to_sage (Power (a, b)) = "(" ^ to_sage a ^ " ^ " ^
to_sage (ConstExpr b) ^ ")"
- | to_sage (Root (a, @{term "2::real"})) = "sqrt(" ^ to_sage a ^ ")"
+ | to_sage (Root (a, \<^term>\<open>2::real\<close>)) = "sqrt(" ^ to_sage a ^ ")"
| to_sage (Root (a, b)) = to_sage a ^ "^(1/" ^ to_sage (ConstExpr b) ^ ")"
| to_sage (Uminus a) = "(-" ^ to_sage a ^ ")"
| to_sage (Inverse a) = "(1/(" ^ to_sage a ^ "))"
--- a/src/HOL/Real_Asymp/multiseries_expansion.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/multiseries_expansion.ML Sat Jan 05 17:24:33 2019 +0100
@@ -149,7 +149,7 @@
let
val oracles = get_sign_oracles (Context.Proof ctxt)
fun tac' {context = ctxt, concl, ...} =
- if Thm.term_of concl = @{term "HOL.Trueprop HOL.False"} then
+ if Thm.term_of concl = \<^term>\<open>HOL.Trueprop HOL.False\<close> then
no_tac
else
FIRST (map (fn tac => HEADGOAL (snd tac ctxt)) oracles)
@@ -178,7 +178,7 @@
datatype intyness = Nat of thm | Neg_Nat of thm | No_Nat
fun get_intyness ctxt ct =
- if Thm.typ_of_cterm ct = @{typ Real.real} then
+ if Thm.typ_of_cterm ct = \<^typ>\<open>Real.real\<close> then
let
val ctxt' = put_simpset HOL_basic_ss ctxt addsimps @{thms intyness_simps}
val conv =
@@ -187,12 +187,12 @@
| flip _ = No_Nat
fun get_intyness' ct =
case Thm.term_of ct of
- @{term "0::real"} => Nat @{thm intyness_0}
- | @{term "1::real"} => Nat @{thm intyness_1}
- | Const (@{const_name numeral}, _) $ _ =>
+ \<^term>\<open>0::real\<close> => Nat @{thm intyness_0}
+ | \<^term>\<open>1::real\<close> => Nat @{thm intyness_1}
+ | Const (\<^const_name>\<open>numeral\<close>, _) $ _ =>
Nat (Thm.reflexive (Thm.dest_arg ct) RS @{thm intyness_numeral})
- | Const (@{const_name uminus}, _) $ _ => flip (get_intyness' (Thm.dest_arg ct))
- | Const (@{const_name of_nat}, _) $ _ =>
+ | Const (\<^const_name>\<open>uminus\<close>, _) $ _ => flip (get_intyness' (Thm.dest_arg ct))
+ | Const (\<^const_name>\<open>of_nat\<close>, _) $ _ =>
Nat (Thm.reflexive (Thm.dest_arg ct) RS @{thm intyness_of_nat})
| _ => No_Nat
val thm = conv ct
@@ -229,40 +229,40 @@
val get_num = Thm.dest_arg o Thm.dest_arg
in
case Thm.term_of ct of
- Const (@{const_name Groups.zero}, _) => Even (inst @{thm even_zero} [])
- | Const (@{const_name Groups.one}, _) => Odd (inst @{thm odd_one} [])
- | Const (@{const_name Num.numeral_class.numeral}, _) $ @{term "Num.One"} =>
+ Const (\<^const_name>\<open>Groups.zero\<close>, _) => Even (inst @{thm even_zero} [])
+ | Const (\<^const_name>\<open>Groups.one\<close>, _) => Odd (inst @{thm odd_one} [])
+ | Const (\<^const_name>\<open>Num.numeral_class.numeral\<close>, _) $ \<^term>\<open>Num.One\<close> =>
Odd (inst @{thm odd_Numeral1} [])
- | Const (@{const_name Num.numeral_class.numeral}, _) $ (@{term "Num.Bit0"} $ _) =>
+ | Const (\<^const_name>\<open>Num.numeral_class.numeral\<close>, _) $ (\<^term>\<open>Num.Bit0\<close> $ _) =>
Even (inst @{thm even_numeral} [get_num ct])
- | Const (@{const_name Num.numeral_class.numeral}, _) $ (@{term "Num.Bit1"} $ _) =>
+ | Const (\<^const_name>\<open>Num.numeral_class.numeral\<close>, _) $ (\<^term>\<open>Num.Bit1\<close> $ _) =>
Odd (inst @{thm odd_numeral} [get_num ct])
- | Const (@{const_name Groups.uminus}, _) $ _ => (
+ | Const (\<^const_name>\<open>Groups.uminus\<close>, _) $ _ => (
case get_parity (Thm.dest_arg ct) of
Even thm => Even (@{thm even_uminusI} OF [thm])
| Odd thm => Odd (@{thm odd_uminusI} OF [thm])
| _ => Unknown_Parity)
- | Const (@{const_name Groups.plus}, _) $ _ $ _ => (
+ | Const (\<^const_name>\<open>Groups.plus\<close>, _) $ _ $ _ => (
case apply2 get_parity (Thm.dest_binop ct) of
(Even thm1, Even thm2) => Even (@{thm even_addI(1)} OF [thm1, thm2])
| (Odd thm1, Odd thm2) => Even (@{thm even_addI(2)} OF [thm1, thm2])
| (Even thm1, Odd thm2) => Odd (@{thm odd_addI(1)} OF [thm1, thm2])
| (Odd thm1, Even thm2) => Odd (@{thm odd_addI(2)} OF [thm1, thm2])
| _ => Unknown_Parity)
- | Const (@{const_name Groups.minus}, _) $ _ $ _ => (
+ | Const (\<^const_name>\<open>Groups.minus\<close>, _) $ _ $ _ => (
case apply2 get_parity (Thm.dest_binop ct) of
(Even thm1, Even thm2) => Even (@{thm even_diffI(1)} OF [thm1, thm2])
| (Odd thm1, Odd thm2) => Even (@{thm even_diffI(2)} OF [thm1, thm2])
| (Even thm1, Odd thm2) => Odd (@{thm odd_diffI(1)} OF [thm1, thm2])
| (Odd thm1, Even thm2) => Odd (@{thm odd_diffI(2)} OF [thm1, thm2])
| _ => Unknown_Parity)
- | Const (@{const_name Groups.times}, _) $ _ $ _ => (
+ | Const (\<^const_name>\<open>Groups.times\<close>, _) $ _ $ _ => (
case apply2 get_parity (Thm.dest_binop ct) of
(Even thm1, _) => Even (@{thm even_multI(1)} OF [thm1])
| (_, Even thm2) => Even (@{thm even_multI(2)} OF [thm2])
| (Odd thm1, Odd thm2) => Odd (@{thm odd_multI} OF [thm1, thm2])
| _ => Unknown_Parity)
- | Const (@{const_name Power.power}, _) $ _ $ _ =>
+ | Const (\<^const_name>\<open>Power.power\<close>, _) $ _ $ _ =>
let
val (a, n) = Thm.dest_binop ct
in
@@ -295,9 +295,9 @@
(* Returns the leading coefficient of the given expansion. This coefficient is a multiseries. *)
fun try_get_coeff expr =
case expr of
- Const (@{const_name MS}, _) $ (
- Const (@{const_name MSLCons}, _) $ (
- Const (@{const_name Pair}, _) $ c $ _) $ _) $ _ =>
+ Const (\<^const_name>\<open>MS\<close>, _) $ (
+ Const (\<^const_name>\<open>MSLCons\<close>, _) $ (
+ Const (\<^const_name>\<open>Pair\<close>, _) $ c $ _) $ _) $ _ =>
SOME c
| _ => NONE
@@ -318,15 +318,15 @@
(* Returns the list of exponents of the leading term *)
fun get_exponents exp =
- if fastype_of exp = @{typ real} then
+ if fastype_of exp = \<^typ>\<open>real\<close> then
[]
else
get_exponent exp :: get_exponents (get_coeff exp)
(* Returns the function that the expansion corresponds to *)
fun get_eval expr =
- if fastype_of expr = @{typ real} then
- Abs ("x", @{typ real}, expr)
+ if fastype_of expr = \<^typ>\<open>real\<close> then
+ Abs ("x", \<^typ>\<open>real\<close>, expr)
else
expr |> dest_comb |> snd
@@ -337,7 +337,7 @@
let
val ctxt = Lazy_Eval.get_ctxt ectxt
val goal =
- betapply (@{term "\<lambda>f::real \<Rightarrow> real. eventually (\<lambda>x. f x = 0) at_top"}, t)
+ betapply (\<^term>\<open>\<lambda>f::real \<Rightarrow> real. eventually (\<lambda>x. f x = 0) at_top\<close>, t)
|> HOLogic.mk_Trueprop
fun tac {context = ctxt, ...} =
HEADGOAL (Method.insert_tac ctxt (get_facts ectxt))
@@ -362,21 +362,21 @@
fun zeroness_oracle fail mode ectxt exp =
let
val ctxt = Lazy_Eval.get_ctxt ectxt
- val eq = (exp, @{term "0::real"}) |> HOLogic.mk_eq
+ val eq = (exp, \<^term>\<open>0::real\<close>) |> HOLogic.mk_eq
val goal1 = (IsZero, eq |> HOLogic.mk_Trueprop)
val goal2 =
case mode of
SOME Pos_Trim =>
- (IsPos, @{term "(<) (0::real)"} $ exp |> HOLogic.mk_Trueprop)
+ (IsPos, \<^term>\<open>(<) (0::real)\<close> $ exp |> HOLogic.mk_Trueprop)
| SOME Sgn_Trim =>
- (IsPos, @{term "(<) (0::real)"} $ exp |> HOLogic.mk_Trueprop)
+ (IsPos, \<^term>\<open>(<) (0::real)\<close> $ exp |> HOLogic.mk_Trueprop)
| SOME Neg_Trim =>
- (IsNeg, betapply (@{term "\<lambda>x. x < (0::real)"}, exp) |> HOLogic.mk_Trueprop)
+ (IsNeg, betapply (\<^term>\<open>\<lambda>x. x < (0::real)\<close>, exp) |> HOLogic.mk_Trueprop)
| _ =>
(IsNonZero, eq |> HOLogic.mk_not |> HOLogic.mk_Trueprop)
val goals =
(if mode = SOME Sgn_Trim then
- [(IsNeg, betapply (@{term "\<lambda>x. x < (0::real)"}, exp) |> HOLogic.mk_Trueprop)]
+ [(IsNeg, betapply (\<^term>\<open>\<lambda>x. x < (0::real)\<close>, exp) |> HOLogic.mk_Trueprop)]
else
[])
val goals = goal2 :: goals
@@ -420,7 +420,7 @@
(* Tries to prove a given equality of real numbers. *)
fun try_prove_real_eq fail ectxt (lhs, rhs) =
- case zeroness_oracle false NONE ectxt (@{term "(-) :: real => _"} $ lhs $ rhs) of
+ case zeroness_oracle false NONE ectxt (\<^term>\<open>(-) :: real => _\<close> $ lhs $ rhs) of
(IsZero, SOME thm) => SOME (thm RS @{thm real_eqI})
| _ =>
if not fail then NONE else
@@ -443,14 +443,14 @@
(* Tries to prove a given eventual equality of real functions. *)
fun try_prove_ev_eq ectxt (f, g) =
let
- val t = Envir.beta_eta_contract (@{term "\<lambda>(f::real=>real) g x. f x - g x"} $ f $ g)
+ val t = Envir.beta_eta_contract (\<^term>\<open>\<lambda>(f::real=>real) g x. f x - g x\<close> $ f $ g)
in
Option.map (fn thm => thm RS @{thm eventually_diff_zero_imp_eq}) (ev_zeroness_oracle ectxt t)
end
-fun real_less a b = @{term "(<) :: real \<Rightarrow> real \<Rightarrow> bool"} $ a $ b
-fun real_eq a b = @{term "(=) :: real \<Rightarrow> real \<Rightarrow> bool"} $ a $ b
-fun real_neq a b = @{term "(\<noteq>) :: real \<Rightarrow> real \<Rightarrow> bool"} $ a $ b
+fun real_less a b = \<^term>\<open>(<) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ a $ b
+fun real_eq a b = \<^term>\<open>(=) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ a $ b
+fun real_neq a b = \<^term>\<open>(\<noteq>) :: real \<Rightarrow> real \<Rightarrow> bool\<close> $ a $ b
(* The hook that is called by the Lazy_Eval module whenever two real numbers have to be compared *)
fun real_sgn_hook ({pctxt = ctxt, facts, verbose, ...}) t =
@@ -470,10 +470,10 @@
| prove_first _ _ _ = raise Match
in
case t of
- @{term "(=) :: real => _"} $ a $ @{term "0 :: real"} =>
+ \<^term>\<open>(=) :: real => _\<close> $ a $ \<^term>\<open>0 :: real\<close> =>
let
val goals =
- map (fn c => HOLogic.mk_Trueprop (c a @{term "0 :: real"})) [real_neq, real_eq]
+ map (fn c => HOLogic.mk_Trueprop (c a \<^term>\<open>0 :: real\<close>)) [real_neq, real_eq]
fun err () =
let
val facts' = Net.content facts
@@ -487,7 +487,7 @@
in
prove_first err goals @{thms Eq_FalseI Eq_TrueI}
end
- | Const (@{const_name COMPARE}, _) $ a $ b =>
+ | Const (\<^const_name>\<open>COMPARE\<close>, _) $ a $ b =>
let
val goals = map HOLogic.mk_Trueprop [real_less a b, real_less b a, real_eq a b]
fun err () =
@@ -513,7 +513,7 @@
*)
fun get_constructors ctxt =
let
- val thms = Named_Theorems.get ctxt @{named_theorems exp_log_eval_constructor}
+ val thms = Named_Theorems.get ctxt \<^named_theorems>\<open>exp_log_eval_constructor\<close>
fun go _ [] acc = rev acc
| go f (x :: xs) acc =
case f x of
@@ -522,8 +522,8 @@
fun map_option f xs = go f xs []
fun dest_constructor thm =
case Thm.concl_of thm of
- Const (@{const_name HOL.Trueprop}, _) $
- (Const (@{const_name REAL_ASYMP_EVAL_CONSTRUCTOR}, _) $ Const (c, T)) =>
+ Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>REAL_ASYMP_EVAL_CONSTRUCTOR\<close>, _) $ Const (c, T)) =>
SOME (c, length (fst (strip_type T)))
| _ => NONE
in
@@ -535,7 +535,7 @@
*)
fun mk_eval_ctxt ctxt =
let
- val eval_eqs = (Named_Theorems.get ctxt @{named_theorems real_asymp_eval_eqs})
+ val eval_eqs = (Named_Theorems.get ctxt \<^named_theorems>\<open>real_asymp_eval_eqs\<close>)
val constructors = get_constructors ctxt
in
Lazy_Eval.mk_eval_ctxt ctxt constructors eval_eqs
@@ -547,9 +547,9 @@
let
val anypat = AnyPat ("_", 0)
in
- ConsPat (@{const_name MS},
- [ConsPat (@{const_name MSLCons},
- [ConsPat (@{const_name Pair}, [anypat, anypat]), anypat]), anypat])
+ ConsPat (\<^const_name>\<open>MS\<close>,
+ [ConsPat (\<^const_name>\<open>MSLCons\<close>,
+ [ConsPat (\<^const_name>\<open>Pair\<close>, [anypat, anypat]), anypat]), anypat])
end
(*
@@ -565,10 +565,10 @@
val exp' = eq_thm |> Thm.concl_of |> Logic.dest_equals |> snd
in
case exp' of
- Const (@{const_name MS}, _) $ (Const (@{const_name MSLCons}, _) $
- (Const (@{const_name Pair}, _) $ c $ _) $ _) $ _ =>
+ Const (\<^const_name>\<open>MS\<close>, _) $ (Const (\<^const_name>\<open>MSLCons\<close>, _) $
+ (Const (\<^const_name>\<open>Pair\<close>, _) $ c $ _) $ _) $ _ =>
(SOME c, @{thm expands_to_meta_eq_cong} OF [thm, eq_thm], eq_thm)
- | Const (@{const_name MS}, _) $ Const (@{const_name MSLNil}, _) $ _ =>
+ | Const (\<^const_name>\<open>MS\<close>, _) $ Const (\<^const_name>\<open>MSLNil\<close>, _) $ _ =>
(NONE, @{thm expands_to_meta_eq_cong} OF [thm, eq_thm], eq_thm)
| _ => raise TERM ("whnf_expansion", [exp'])
end
@@ -580,8 +580,8 @@
let
val f = get_expanded_fun thm
val T = fastype_of c
- val t = Const (@{const_name eval}, T --> @{typ "real \<Rightarrow> real"}) $ c
- val t = Term.betapply (Term.betapply (@{term "\<lambda>(f::real\<Rightarrow>real) g x. f x - g x"}, f), t)
+ val t = Const (\<^const_name>\<open>eval\<close>, T --> \<^typ>\<open>real \<Rightarrow> real\<close>) $ c
+ val t = Term.betapply (Term.betapply (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) g x. f x - g x\<close>, f), t)
in
case ev_zeroness_oracle ectxt t of
NONE => (NONE, thm)
@@ -597,7 +597,7 @@
(* Turns an expansion theorem into an expansion theorem for the leading coefficient. *)
fun expands_to_hd thm = thm RS
- (if fastype_of (get_expansion thm) = @{typ "real ms"} then
+ (if fastype_of (get_expansion thm) = \<^typ>\<open>real ms\<close> then
@{thm expands_to_hd'}
else
@{thm expands_to_hd})
@@ -622,9 +622,9 @@
val eq_thm = Simplifier.rewrite ctxt (Thm.cterm_of ctxt exp)
val trimmed_cong_thm =
case trimmed_thm |> concl_of' |> dest_fun of
- Const (@{const_name trimmed}, _) => @{thm trimmed_eq_cong}
- | Const (@{const_name trimmed_pos}, _) => @{thm trimmed_pos_eq_cong}
- | Const (@{const_name trimmed_neg}, _) => @{thm trimmed_neg_eq_cong}
+ Const (\<^const_name>\<open>trimmed\<close>, _) => @{thm trimmed_eq_cong}
+ | Const (\<^const_name>\<open>trimmed_pos\<close>, _) => @{thm trimmed_pos_eq_cong}
+ | Const (\<^const_name>\<open>trimmed_neg\<close>, _) => @{thm trimmed_neg_eq_cong}
| _ => raise THM ("simplify_trimmed_expansion", 2, [thm, trimmed_thm])
in
(@{thm expands_to_meta_eq_cong} OF [thm, eq_thm],
@@ -643,7 +643,7 @@
case c of
NONE => (thm, eq_thm)
| SOME c =>
- if fastype_of c = @{typ real} then
+ if fastype_of c = \<^typ>\<open>real\<close> then
(thm, eq_thm)
else
let
@@ -671,9 +671,9 @@
let
val exp = get_expansion thm
in
- if fastype_of exp = @{typ real} then
+ if fastype_of exp = \<^typ>\<open>real\<close> then
NONE
- else if fastype_of (get_coeff exp) = @{typ real} then
+ else if fastype_of (get_coeff exp) = \<^typ>\<open>real\<close> then
case zeroness_oracle fail (SOME Simple_Trim) ectxt (get_coeff exp) of
(IsZero, SOME zero_thm) => SOME (@{thm drop_zero_ms'} OF [zero_thm, thm])
| _ => NONE
@@ -681,7 +681,7 @@
let
val c = get_coeff exp
val T = fastype_of c
- val t = Const (@{const_name eval}, T --> @{typ "real \<Rightarrow> real"}) $ c
+ val t = Const (\<^const_name>\<open>eval\<close>, T --> \<^typ>\<open>real \<Rightarrow> real\<close>) $ c
in
case ev_zeroness_oracle ectxt t of
SOME zero_thm => SOME (@{thm expands_to_drop_zero} OF [zero_thm, thm])
@@ -701,7 +701,7 @@
val exp = get_expansion thm
val c = get_coeff exp
val t =
- if fastype_of c = @{typ real} then c else c |> dest_arg
+ if fastype_of c = \<^typ>\<open>real\<close> then c else c |> dest_arg
val t = simplify_term' (get_facts ectxt) ctxt t
val _ =
if #verbose (#ctxt ectxt) then
@@ -727,7 +727,7 @@
fun cstrip_assms ct =
case Thm.term_of ct of
- @{term "(==>)"} $ _ $ _ => cstrip_assms (snd (Thm.dest_implies ct))
+ \<^term>\<open>(==>)\<close> $ _ $ _ => cstrip_assms (snd (Thm.dest_implies ct))
| _ => ct
(*
@@ -787,9 +787,9 @@
let
val c = the c
val T = fastype_of c
- val t = Const (@{const_name eval}, T --> @{typ "real \<Rightarrow> real"}) $ c
+ val t = Const (\<^const_name>\<open>eval\<close>, T --> \<^typ>\<open>real \<Rightarrow> real\<close>) $ c
in
- if T = @{typ real} then (
+ if T = \<^typ>\<open>real\<close> then (
case zeroness_oracle fail mode ectxt c of
(IsZero, SOME zero_thm) =>
trim_expansion_while_greater strict es fail mode ectxt
@@ -826,7 +826,7 @@
| (_, NONE) => (thm, Trimmed (nz, NONE), thms)
end
end
- val minus = @{term "(-) :: real => real => real"}
+ val minus = \<^term>\<open>(-) :: real => real => real\<close>
in
case (c, es) of
(NONE, _) => (thm, Trimmed (IsZero, NONE), [])
@@ -842,7 +842,7 @@
| NONE => do_trim NONE |> @{apply 3(3)} (fn thms => (IsPos, pos_thm) :: thms))
| (IsNeg, SOME neg_thm) => (thm, Aborted LESS, [(IsNeg, neg_thm)])
| (IsZero, SOME zero_thm) =>
- if not strict andalso fastype_of c = @{typ real} then
+ if not strict andalso fastype_of c = \<^typ>\<open>real\<close> then
(thm, Aborted EQUAL, [(IsZero, zero_thm)])
else (
case try_drop_leading_term_ex false ectxt thm of
@@ -868,7 +868,7 @@
Determines the sign of an expansion that has already been trimmed.
*)
fun determine_trimmed_sgn ectxt exp =
- if fastype_of exp = @{typ real} then
+ if fastype_of exp = \<^typ>\<open>real\<close> then
(case zeroness_oracle true (SOME Sgn_Trim) ectxt exp of
(IsPos, SOME thm) => (IsPos, thm RS @{thm trimmed_pos_realI})
| (IsNeg, SOME thm) => (IsNeg, thm RS @{thm trimmed_neg_realI})
@@ -884,8 +884,8 @@
end
fun mk_compare_expansions_const T =
- Const (@{const_name compare_expansions},
- T --> T --> @{typ "cmp_result \<times> real \<times> real"})
+ Const (\<^const_name>\<open>compare_expansions\<close>,
+ T --> T --> \<^typ>\<open>cmp_result \<times> real \<times> real\<close>)
datatype comparison_result =
Cmp_Dominated of order * thm list * zeroness * trimmed_thm * expansion_thm * expansion_thm
@@ -900,13 +900,13 @@
| IsNeg => @{thm lift_trimmed_neg}
| _ => raise TERM ("Unexpected zeroness result in compare_expansions'", [])
val (e1, e2) = apply2 (get_expansion #> get_exponent) (thm1, thm2)
- val e = @{term "(-) :: real => _"} $ e1 $ e2
+ val e = \<^term>\<open>(-) :: real => _\<close> $ e1 $ e2
fun trim thm = trim_expansion true (SOME Sgn_Trim) ectxt (thm, basis)
val try_drop = Option.map (whnf_expansion ectxt #> #2) o try_drop_leading_term_ex false ectxt
fun handle_result ord zeroness trimmed_thm thm1 thm2 =
let
val (e1, e2) = apply2 (get_expansion #> get_exponent) (thm1, thm2)
- val e = @{term "(-) :: real => _"} $ e1 $ e2
+ val e = \<^term>\<open>(-) :: real => _\<close> $ e1 $ e2
val mode = if ord = LESS then Neg_Trim else Pos_Trim
in
case zeroness_oracle true (SOME mode) ectxt e of
@@ -975,15 +975,15 @@
@{thm compare_expansions_same_exp} OF [thm, prove_compare_expansions ord thms]
| prove_compare_expansions _ [] = raise Match
-val ev_zero_pos_thm = Eventuallize.eventuallize @{context}
+val ev_zero_pos_thm = Eventuallize.eventuallize \<^context>
@{lemma "\<forall>x::real. f x = 0 \<longrightarrow> g x > 0 \<longrightarrow> f x < g x" by auto} NONE
OF @{thms _ expands_to_imp_eventually_pos}
-val ev_zero_neg_thm = Eventuallize.eventuallize @{context}
+val ev_zero_neg_thm = Eventuallize.eventuallize \<^context>
@{lemma "\<forall>x::real. f x = 0 \<longrightarrow> g x < 0 \<longrightarrow> f x > g x" by auto} NONE
OF @{thms _ expands_to_imp_eventually_neg}
-val ev_zero_zero_thm = Eventuallize.eventuallize @{context}
+val ev_zero_zero_thm = Eventuallize.eventuallize \<^context>
@{lemma "\<forall>x::real. f x = 0 \<longrightarrow> g x = 0 \<longrightarrow> f x = g x" by auto} NONE
fun compare_expansions_trivial ectxt (thm1, thm2, basis) =
@@ -1065,7 +1065,7 @@
let
val ctxt = get_ctxt ectxt
fun lead_coeff exp =
- if fastype_of exp = @{typ real} then exp else lead_coeff (get_coeff exp)
+ if fastype_of exp = \<^typ>\<open>real\<close> then exp else lead_coeff (get_coeff exp)
val c = lead_coeff (get_expansion thm)
fun err () =
let
@@ -1108,7 +1108,7 @@
val e = trailing_exponent (get_expansion thm) basis
fun ln_expansion_aux trimmed_thm zero_thm thm basis =
let
- val t = betapply (@{term "\<lambda>(f::real \<Rightarrow> real) x. f x - 1 :: real"}, get_expanded_fun thm)
+ val t = betapply (\<^term>\<open>\<lambda>(f::real \<Rightarrow> real) x. f x - 1 :: real\<close>, get_expanded_fun thm)
in
case ev_zeroness_oracle ectxt t of
NONE => ln_expansion_aux' trimmed_thm zero_thm thm basis
@@ -1132,7 +1132,7 @@
(IsZero, SOME thm) =>
@{thm expands_to_ln_to_expands_to_ln_eval [OF expands_to_ln_aux_0]} OF [thm,c_thm]
| _ =>
- case try_prove_real_eq false ectxt (e, @{term "1::real"}) of
+ case try_prove_real_eq false ectxt (e, \<^term>\<open>1::real\<close>) of
SOME thm =>
@{thm expands_to_ln_to_expands_to_ln_eval [OF expands_to_ln_aux_1]}
OF [thm, wf_thm, c_thm, ln_thm]
@@ -1197,7 +1197,7 @@
fun insert_exp _ _ _ _ _ SEmpty = raise TERM ("insert_exp", [])
| insert_exp t ln_thm ln_smallo_thm ln_trimmed_thm lim_thm (SNE basis) =
let
- val head = Envir.beta_eta_contract (@{term "\<lambda>(f::real\<Rightarrow>real) x. exp (f x)"} $ t)
+ val head = Envir.beta_eta_contract (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. exp (f x)\<close> $ t)
val ln_smallo_thm = ln_smallo_thm RS @{thm ln_smallo_ln_exp}
val wf_thm = @{thm basis_wf_manyI} OF [lim_thm, ln_smallo_thm, get_basis_wf_thm' basis]
val basis' = SNE (SCons ({wf_thm = wf_thm, head = head},
@@ -1224,7 +1224,7 @@
val ln =
Option.map (fn x => (#ln_thm x, #trimmed_thm x)) (get_ln_info basis)
val ln = Option.map (fn (x, y) => retrim_pos_expansion ectxt (x, basis, y)) ln
- val es' = @{term "0::real"} :: (
+ val es' = \<^term>\<open>0::real\<close> :: (
case ln of
NONE => []
| SOME (ln_thm, _, _) => get_exponents (get_expansion ln_thm))
@@ -1257,7 +1257,7 @@
@{thms expands_to_exp_insert_neg}
|> map (fn thm' => thm' OF [thm, wf_thm, trimmed_thm, ln_smallo_thm])
val ln_smallo_thm = ln_smallo_thm RS @{thm basis_wf_insert_exp_uminus}
- val f' = Envir.beta_eta_contract (@{term "\<lambda>(f::real\<Rightarrow>real) x. -f x"} $ f)
+ val f' = Envir.beta_eta_contract (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. -f x\<close> $ f)
val basis' = insert_exp f' ln_thm' ln_smallo_thm ln_trimmed_thm lim_thm basis
in
(thm', basis')
@@ -1335,7 +1335,7 @@
| NONE => raise TERM ("TODO blargh", [])
val c =
case (thm, ln_thm) |> apply2 (get_expansion #> get_lead_coeff) of
- (c1, c2) => @{term "(/) :: real => _"} $ c1 $ c2
+ (c1, c2) => \<^term>\<open>(/) :: real => _\<close> $ c1 $ c2
val c = Thm.cterm_of (get_ctxt ectxt) c
val thm' =
@@ -1518,8 +1518,8 @@
thm
else
(* TODO Remove Debugging stuff *)
-let val _ = @{print} e
-val _ = @{print} (get_expanded_fun thm)
+let val _ = \<^print> e
+val _ = \<^print> (get_expanded_fun thm)
in
raise TERM ("check_expansion", [Thm.concl_of thm, expr_to_term e])
end
@@ -1539,7 +1539,7 @@
fun zero_expansion basis =
@{thm expands_to_zero} OF [get_basis_wf_thm basis, mk_expansion_level_eq_thm basis]
-fun const_expansion _ basis @{term "0 :: real"} = zero_expansion basis
+fun const_expansion _ basis \<^term>\<open>0 :: real\<close> = zero_expansion basis
| const_expansion ectxt basis t =
let
val ctxt = get_ctxt ectxt
@@ -1581,13 +1581,13 @@
@{thm expands_to_root_neg} OF [nz_thm, trimmed_thm, get_basis_wf_thm basis, thm]
| _ => raise TERM ("Unexpected zeroness result in root_expansion", [])
in
- case prove @{term "\<lambda>n::nat. n = 0"} of
+ case prove \<^term>\<open>\<lambda>n::nat. n = 0\<close> of
SOME zero_thm =>
@{thm expands_to_0th_root} OF
[zero_thm, get_basis_wf_thm basis, mk_expansion_level_eq_thm basis,
Thm.reflexive (Thm.cterm_of ctxt (get_expanded_fun thm))]
| NONE =>
- case prove @{term "\<lambda>n::nat. n > 0"} of
+ case prove \<^term>\<open>\<lambda>n::nat. n > 0\<close> of
NONE => err ()
| SOME nz_thm =>
case ev_zeroness_oracle ectxt (get_expanded_fun thm) of
@@ -1917,14 +1917,14 @@
| Finite_Limit of term
| Infinite_Limit of bool option
-fun is_empty_expansion (Const (@{const_name MS}, _) $ Const (@{const_name MSLNil}, _) $ _) = true
+fun is_empty_expansion (Const (\<^const_name>\<open>MS\<close>, _) $ Const (\<^const_name>\<open>MSLNil\<close>, _) $ _) = true
| is_empty_expansion _ = false
fun limit_of_expansion_aux ectxt basis thm =
let
val n = length (get_basis_list basis)
val (thm, res, e_thms) =
- trim_expansion_while_greater false (SOME (replicate n @{term "0::real"})) true
+ trim_expansion_while_greater false (SOME (replicate n \<^term>\<open>0::real\<close>)) true
(SOME Simple_Trim) ectxt (thm, basis)
val trimmed_thm = case res of Trimmed (_, trimmed_thm) => trimmed_thm | _ => NONE
val res = case res of Trimmed _ => GREATER | Aborted res => res
@@ -1934,7 +1934,7 @@
fun go thm _ _ [] = (
case zeroness_oracle false (SOME Simple_Trim) ectxt (get_expansion thm) of
(IsZero, _) => (Zero_Limit NONE, @{thm expands_to_real_imp_filterlim} OF [thm])
- | _ => (Finite_Limit @{term "0::real"}, @{thm expands_to_real_imp_filterlim} OF [thm]))
+ | _ => (Finite_Limit \<^term>\<open>0::real\<close>, @{thm expands_to_real_imp_filterlim} OF [thm]))
| go thm _ basis ((IsNeg, neg_thm) :: _) = (Zero_Limit NONE,
@{thm expands_to_neg_exponent_imp_filterlim} OF
[thm, get_basis_wf_thm basis, neg_thm RS @{thm compare_reals_diff_sgnD(1)}])
@@ -2010,7 +2010,7 @@
fun err () = raise TERM ("prove_at_infinity", [get_expanded_fun thm])
val (thm, _, SOME trimmed_thm) = trim_expansion true (SOME Simple_Trim) ectxt (thm, basis)
fun go basis thm trimmed_thm =
- if fastype_of (get_expansion thm) = @{typ "real"} then
+ if fastype_of (get_expansion thm) = \<^typ>\<open>real\<close> then
err ()
else
case zeroness_oracle true (SOME Pos_Trim) ectxt (get_exponent (get_expansion thm)) of
@@ -2035,7 +2035,7 @@
val trimmed_thm' = trimmed_thm RS
(if mode = Pos_Trim then @{thm trimmed_pos_imp_trimmed} else @{thm trimmed_neg_imp_trimmed})
fun go basis thm trimmed_thm =
- if fastype_of (get_expansion thm) = @{typ "real"} then
+ if fastype_of (get_expansion thm) = \<^typ>\<open>real\<close> then
err ()
else
case zeroness_oracle true (SOME Pos_Trim) ectxt (get_exponent (get_expansion thm)) of
@@ -2077,7 +2077,7 @@
fun err () = raise TERM (s, [get_expanded_fun thm])
val (thm, _, SOME trimmed_thm) = trim_expansion true (SOME mode) ectxt (thm, basis)
fun go basis thm =
- if fastype_of (get_expansion thm) = @{typ "real"} then
+ if fastype_of (get_expansion thm) = \<^typ>\<open>real\<close> then
err ()
else
case zeroness_oracle true (SOME Neg_Trim) ectxt (get_exponent (get_expansion thm)) of
@@ -2103,7 +2103,7 @@
let
fun simplify (a, b, c) = (a, simplify_expansion ectxt b, c)
fun go thm basis =
- if fastype_of (get_expansion thm) = @{typ "real"} then
+ if fastype_of (get_expansion thm) = \<^typ>\<open>real\<close> then
@{thm expands_to_real_imp_filterlim} OF [thm]
else
case whnf_expansion ectxt thm |> simplify of
@@ -2129,8 +2129,8 @@
let
val ((thm1, _, SOME trimmed_thm1), (thm2, _, SOME trimmed_thm2)) =
apply2 (trim_expansion true (SOME Simple_Trim) ectxt) ((thm1, basis), (thm2, basis))
- val pat = ConsPat (@{const_name Pair}, [ConsPat (@{const_name Lazy_Eval.cmp_result.EQ}, []),
- ConsPat (@{const_name Pair}, [AnyPat ("_", 0), AnyPat ("_", 0)])])
+ val pat = ConsPat (\<^const_name>\<open>Pair\<close>, [ConsPat (\<^const_name>\<open>Lazy_Eval.cmp_result.EQ\<close>, []),
+ ConsPat (\<^const_name>\<open>Pair\<close>, [AnyPat ("_", 0), AnyPat ("_", 0)])])
val (exp1, exp2) = apply2 get_expansion (thm1, thm2)
val T = fastype_of exp1
val t = mk_compare_expansions_const T $ exp1 $ exp2
@@ -2157,7 +2157,7 @@
fun print_trimming_error s ectxt exp =
let
val c = get_coeff exp
- val t = if fastype_of c = @{typ real} then c else get_eval c
+ val t = if fastype_of c = \<^typ>\<open>real\<close> then c else get_eval c
in
if #verbose (#ctxt ectxt) then
let
@@ -2233,18 +2233,18 @@
fun extract_terms (n, strict) ectxt basis t =
let
val bs = get_basis_list basis
- fun mk_constfun c = (Abs ("x", @{typ real}, c))
- val const_0 = mk_constfun @{term "0 :: real"}
- val const_1 = mk_constfun @{term "1 :: real"}
- fun uminus t = Term.betapply (@{term "\<lambda>(f::real\<Rightarrow>real) x. -f x"}, t)
+ fun mk_constfun c = (Abs ("x", \<^typ>\<open>real\<close>, c))
+ val const_0 = mk_constfun \<^term>\<open>0 :: real\<close>
+ val const_1 = mk_constfun \<^term>\<open>1 :: real\<close>
+ fun uminus t = Term.betapply (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. -f x\<close>, t)
fun betapply2 a b c = Term.betapply (Term.betapply (a, b), c)
fun mk_sum' [] acc = acc
| mk_sum' ((t, sgn) :: ts) acc = mk_sum' ts (
if sgn then
- betapply2 @{term "%(f::real=>real) g x. f x - g x"} acc t
+ betapply2 \<^term>\<open>%(f::real=>real) g x. f x - g x\<close> acc t
else
- betapply2 @{term "%(f::real=>real) g x. f x + g x"} acc t)
+ betapply2 \<^term>\<open>%(f::real=>real) g x. f x + g x\<close> acc t)
fun mk_sum [] = const_0
| mk_sum ((t, sgn) :: ts) = mk_sum' ts (if sgn then uminus t else t)
@@ -2253,41 +2253,41 @@
const_0
else if b aconv const_0 then
const_0
- else if a aconv @{term "\<lambda>_::real. 1 :: real"} then
+ else if a aconv \<^term>\<open>\<lambda>_::real. 1 :: real\<close> then
b
- else if b aconv @{term "\<lambda>_::real. 1 :: real"} then
+ else if b aconv \<^term>\<open>\<lambda>_::real. 1 :: real\<close> then
a
- else if a aconv @{term "\<lambda>_::real. -1 :: real"} then
- Term.betapply (@{term "\<lambda>(f::real\<Rightarrow>real) x. -f x"}, b)
- else if b aconv @{term "\<lambda>_::real. -1 :: real"} then
- Term.betapply (@{term "\<lambda>(f::real\<Rightarrow>real) x. -f x"}, a)
+ else if a aconv \<^term>\<open>\<lambda>_::real. -1 :: real\<close> then
+ Term.betapply (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. -f x\<close>, b)
+ else if b aconv \<^term>\<open>\<lambda>_::real. -1 :: real\<close> then
+ Term.betapply (\<^term>\<open>\<lambda>(f::real\<Rightarrow>real) x. -f x\<close>, a)
else
- Abs ("x", @{typ real}, @{term "(*) :: real => _"} $
+ Abs ("x", \<^typ>\<open>real\<close>, \<^term>\<open>(*) :: real => _\<close> $
(Term.betapply (a, Bound 0)) $ (Term.betapply (b, Bound 0)))
fun mk_powr b e =
- if e = @{term "0 :: real"} then
+ if e = \<^term>\<open>0 :: real\<close> then
const_1
else
let
val n = HOLogic.dest_number e |> snd
in
if n >= 0 then
- Term.betapply (Term.betapply (@{term "%(b::real=>real) e x. b x ^ e"}, b),
- HOLogic.mk_number @{typ nat} n)
+ Term.betapply (Term.betapply (\<^term>\<open>%(b::real=>real) e x. b x ^ e\<close>, b),
+ HOLogic.mk_number \<^typ>\<open>nat\<close> n)
else
- Term.betapply (Term.betapply (@{term "%(b::real=>real) e x. b x powr e"}, b), e)
+ Term.betapply (Term.betapply (\<^term>\<open>%(b::real=>real) e x. b x powr e\<close>, b), e)
end
handle TERM _ =>
- Term.betapply (Term.betapply (@{term "%(b::real=>real) e x. b x powr e"}, b), e)
+ Term.betapply (Term.betapply (\<^term>\<open>%(b::real=>real) e x. b x powr e\<close>, b), e)
fun mk_scale_elem b e acc =
let
val e = simplify_term ectxt e
in
- if e = @{term "0 :: real"} then
+ if e = \<^term>\<open>0 :: real\<close> then
acc
- else if e = @{term "1 :: real"} then
+ else if e = \<^term>\<open>1 :: real\<close> then
mk_mult acc b
else
mk_mult acc (mk_powr b e)
@@ -2300,10 +2300,10 @@
fun mk_summand c es =
let
- val es = mk_scale_elems bs es @{term "\<lambda>_::real. 1 :: real"}
+ val es = mk_scale_elems bs es \<^term>\<open>\<lambda>_::real. 1 :: real\<close>
in
case c of
- Const (@{const_name uminus}, _) $ c => ((c, true), es)
+ Const (\<^const_name>\<open>uminus\<close>, _) $ c => ((c, true), es)
| _ => ((c, false), es)
end
@@ -2312,21 +2312,21 @@
let
val c = simplify_term ectxt t
in
- if strict andalso c = @{term "0 :: real"} then
+ if strict andalso c = \<^term>\<open>0 :: real\<close> then
(acc, n)
else
(mk_summand c (rev es) :: acc, n - 1)
end
| go m es t acc n =
case Lazy_Eval.whnf ectxt t |> fst of
- Const (@{const_name MS}, _) $ cs $ _ =>
+ Const (\<^const_name>\<open>MS\<close>, _) $ cs $ _ =>
go' m es (simplify_term ectxt cs) acc n
| _ => raise TERM("extract_terms", [t])
and go' _ _ _ acc 0 = (acc, 0)
| go' m es cs acc n =
case Lazy_Eval.whnf ectxt cs |> fst of
- Const (@{const_name MSLNil}, _) => (acc, n)
- | Const (@{const_name MSLCons}, _) $ c $ cs => (
+ Const (\<^const_name>\<open>MSLNil\<close>, _) => (acc, n)
+ | Const (\<^const_name>\<open>MSLCons\<close>, _) $ c $ cs => (
case Lazy_Eval.whnf ectxt c |> fst |> HOLogic.dest_prod of
(c, e) =>
case go (m - 1) (e :: es) c acc n of
@@ -2336,7 +2336,7 @@
val (summands, error) =
if remaining = 0 then (rev (tl summands), SOME (snd (hd summands))) else (rev summands, NONE)
val summands = map (fn ((c, sgn), es) => (mk_mult (mk_constfun c) es, sgn)) summands
- val error = Option.map (fn err => Term.betapply (@{term "\<lambda>f::real\<Rightarrow>real. O(f)"}, err)) error
+ val error = Option.map (fn err => Term.betapply (\<^term>\<open>\<lambda>f::real\<Rightarrow>real. O(f)\<close>, err)) error
val expansion = mk_sum summands
in
(expansion, error)
--- a/src/HOL/Real_Asymp/multiseries_expansion_bounds.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/multiseries_expansion_bounds.ML Sat Jan 05 17:24:33 2019 +0100
@@ -92,7 +92,7 @@
thm OF [get_basis_wf_thm basis, mk_expansion_level_eq_thm basis]
end
-fun dest_eventually (Const (@{const_name "Filter.eventually"}, _) $ p $ f) = (p, f)
+fun dest_eventually (Const (\<^const_name>\<open>Filter.eventually\<close>, _) $ p $ f) = (p, f)
| dest_eventually t = raise TERM ("dest_eventually", [t])
fun dest_binop (f $ a $ b) = (f, a, b)
@@ -113,12 +113,12 @@
(Option.map (apsnd (fn thm => @{thm transfer_lower_bound} OF [thm, eq_thm])) lb,
Option.map (apsnd (fn thm => @{thm transfer_upper_bound} OF [thm, eq_thm])) ub)
-fun dest_le (@{term "(<=) :: real => _"} $ l $ r) = (l, r)
+fun dest_le (\<^term>\<open>(<=) :: real => _\<close> $ l $ r) = (l, r)
| dest_le t = raise TERM ("dest_le", [t])
fun abconv (t, t') = Envir.beta_eta_contract t aconv Envir.beta_eta_contract t'
-val realT = @{typ "Real.real"}
+val realT = \<^typ>\<open>Real.real\<close>
fun check_bounds e (Exact thm) = let val _ = check_expansion e thm in Exact thm end
| check_bounds e (Bounds bnds) =
@@ -167,7 +167,7 @@
fun trim_expansion_while_pos ectxt (thm, basis) =
let
val n = get_basis_size basis
- val es = SOME (replicate n @{term "0 :: real"})
+ val es = SOME (replicate n \<^term>\<open>0 :: real\<close>)
in
trim_expansion_while_greater false es false NONE ectxt (thm, basis)
end
@@ -189,7 +189,7 @@
val t = thm |> Thm.concl_of |> HOLogic.dest_Trueprop |> dest_comb |> fst |> dest_comb |> snd
in
case Envir.eta_long [] t of
- Abs (x, T, @{term "(<=) :: real => _"} $ lhs $ rhs) => Abs (x, T, f (lhs, rhs))
+ Abs (x, T, \<^term>\<open>(<=) :: real => _\<close> $ lhs $ rhs) => Abs (x, T, f (lhs, rhs))
| _ => raise THM ("get_expanded_fun_bounds", 0, [thm])
end
@@ -235,7 +235,7 @@
(thm1, mk_refl_thm ectxt (get_expanded_fun thm1), eq_thm RS @{thm eventually_eq_imp_le})
fun find_greater_expansion ectxt (thm1, thm2, basis) =
- case compare_expansions ectxt (@{print} (thm1, thm2, basis)) of
+ case compare_expansions ectxt (\<^print> (thm1, thm2, basis)) of
(LESS, less_thm, _, thm2) =>
(thm2, less_thm RS @{thm eventually_less_imp_le}, mk_refl_thm ectxt (get_expanded_fun thm2))
| (GREATER, gt_thm, thm1, _) =>
@@ -264,9 +264,9 @@
fun mk_nonstrict_thm [thm1, thm2] sgn_thm = (
case Thm.concl_of sgn_thm |> HOLogic.dest_Trueprop of
- Const (@{const_name "Filter.eventually"}, _) $ t $ _ => (
+ Const (\<^const_name>\<open>Filter.eventually\<close>, _) $ t $ _ => (
case Envir.eta_long [] t of
- Abs (_, _, Const (@{const_name "HOL.eq"}, _) $ _ $ _) => sgn_thm RS thm1
+ Abs (_, _, Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ _) => sgn_thm RS thm1
| _ => sgn_thm RS thm2)
| _ => sgn_thm RS thm2)
| mk_nonstrict_thm _ _ = raise Match
@@ -398,10 +398,10 @@
fun forget_trimmedness_sign trimmed_thm =
case Thm.concl_of trimmed_thm |> HOLogic.dest_Trueprop of
- Const (@{const_name Multiseries_Expansion.trimmed}, _) $ _ => trimmed_thm
- | Const (@{const_name Multiseries_Expansion.trimmed_pos}, _) $ _ =>
+ Const (\<^const_name>\<open>Multiseries_Expansion.trimmed\<close>, _) $ _ => trimmed_thm
+ | Const (\<^const_name>\<open>Multiseries_Expansion.trimmed_pos\<close>, _) $ _ =>
trimmed_thm RS @{thm trimmed_pos_imp_trimmed}
- | Const (@{const_name Multiseries_Expansion.trimmed_neg}, _) $ _ =>
+ | Const (\<^const_name>\<open>Multiseries_Expansion.trimmed_neg\<close>, _) $ _ =>
trimmed_thm RS @{thm trimmed_neg_imp_trimmed}
| _ => raise THM ("forget_trimmedness_sign", 0, [trimmed_thm])
@@ -428,10 +428,10 @@
fun trimmed_thm_to_inverse_sgn_thm basis thm trimmed_thm =
case trimmed_thm |> Thm.concl_of |> HOLogic.dest_Trueprop of
- Const (@{const_name "Multiseries_Expansion.trimmed_pos"}, _) $ _ =>
+ Const (\<^const_name>\<open>Multiseries_Expansion.trimmed_pos\<close>, _) $ _ =>
@{thm pos_imp_inverse_pos[eventuallized, OF expands_to_imp_eventually_pos]} OF
[get_basis_wf_thm basis, thm, trimmed_thm]
- | Const (@{const_name "Multiseries_Expansion.trimmed_neg"}, _) $ _ =>
+ | Const (\<^const_name>\<open>Multiseries_Expansion.trimmed_neg\<close>, _) $ _ =>
@{thm neg_imp_inverse_neg[eventuallized, OF expands_to_imp_eventually_neg]} OF
[get_basis_wf_thm basis, thm, trimmed_thm]
| _ => raise THM ("trimmed_thm_to_inverse_sgn_thm", 0, [trimmed_thm])
@@ -565,7 +565,7 @@
| ((true, _), (_, true)) => (
case find_greater_expansion ectxt (minus lthm, uthm, basis) of
(u'_thm, le_thm1, le_thm2) =>
- (mk_const_expansion ectxt basis @{term "0::real"}, u'_thm,
+ (mk_const_expansion ectxt basis \<^term>\<open>0::real\<close>, u'_thm,
@{thm indet_abs_bounds[eventuallized]} OF
[mk_nonpos_thm lsgn_thm, mk_nonneg_thm usgn_thm,
in_bounds_thm, le_thm1, le_thm2]))
@@ -623,7 +623,7 @@
end
fun compare_expansion_to_1 ectxt (thm, basis) =
- prove_asymptotic_relation ectxt (thm, const_expansion ectxt basis @{term "1 :: real"}, basis)
+ prove_asymptotic_relation ectxt (thm, const_expansion ectxt basis \<^term>\<open>1 :: real\<close>, basis)
fun powr_expansion_bounds_left ectxt basis
thm1 ((l2_thm, l2_le_thm), (u2_thm, u2_ge_thm)) =
@@ -795,7 +795,7 @@
| _ =>
let
val (uthm'', le_u'_thm1, le_u'_thm2) = find_greater_expansion ectxt
- (uthm', const_expansion ectxt basis @{term "1::real"}, basis)
+ (uthm', const_expansion ectxt basis \<^term>\<open>1::real\<close>, basis)
in
(uthm'', do_transfer 4
[mk_nonneg_thm l1_sgn_thm, ge_thm1, le_thm', le_u'_thm1, le_u'_thm2])
@@ -865,7 +865,7 @@
fun mk_lb (exp_thm, le_thm) =
let
val exp_thm' = @{thm expands_to_minus} OF
- [wf_thm, exp_thm, const_expansion ectxt basis @{term "1::real"}]
+ [wf_thm, exp_thm, const_expansion ectxt basis \<^term>\<open>1::real\<close>]
val le_thm = @{thm rfloor_bound(1)} OF [le_thm]
in
(exp_thm', le_thm)
@@ -883,7 +883,7 @@
fun mk_ub (exp_thm, le_thm) =
let
val exp_thm' = @{thm expands_to_add} OF
- [wf_thm, exp_thm, const_expansion ectxt basis @{term "1::real"}]
+ [wf_thm, exp_thm, const_expansion ectxt basis \<^term>\<open>1::real\<close>]
val le_thm = @{thm rceil_bound(2)} OF [le_thm]
in
(exp_thm', le_thm)
@@ -903,7 +903,7 @@
get_expanded_fun_bounds) (bnds1, bnds2)
val ge_lower_thm = @{thm natmod_trivial_lower_bound} OF [f, g]
fun minus1 thm = @{thm expands_to_minus} OF
- [get_basis_wf_thm basis, thm, const_expansion ectxt basis @{term "1::real"}]
+ [get_basis_wf_thm basis, thm, const_expansion ectxt basis \<^term>\<open>1::real\<close>]
fun find_upper uthm1 le1_thm u_nonneg_thm =
let
val upper1 = (uthm1, @{thm natmod_upper_bound'} OF [g, u_nonneg_thm, le1_thm])
@@ -1200,19 +1200,19 @@
val (lower, upper) = apply2 (Option.map aux) bnds
fun get_bound_exp (SOME (thm, _)) = SOME (get_expansion thm)
| get_bound_exp _ = NONE
- fun is_const (SOME (Const (@{const_name "Multiseries_Expansion.const_expansion"}, _) $ c'),
+ fun is_const (SOME (Const (\<^const_name>\<open>Multiseries_Expansion.const_expansion\<close>, _) $ c'),
c) = c aconv c'
| is_const _ = false
fun aconv' (SOME a, SOME b) = a aconv b
| aconv' _ = false
in
- if is_const (get_bound_exp lower, @{term "\<lambda>x::real. 1 :: real"}) then
+ if is_const (get_bound_exp lower, \<^term>\<open>\<lambda>x::real. 1 :: real\<close>) then
let
val SOME (lthm, ge_thm) = lower
in
Exact (@{thm eventually_sgn_ge_1D} OF [ge_thm, lthm])
end
- else if is_const (get_bound_exp upper, @{term "\<lambda>x::real. -1 :: real"}) then
+ else if is_const (get_bound_exp upper, \<^term>\<open>\<lambda>x::real. -1 :: real\<close>) then
let
val SOME (uthm, le_thm) = upper
in
@@ -1720,7 +1720,7 @@
fun lim_eq ectxt (l1, l2) = (l1 aconv l2) orelse
case (l1, l2) of
- (Const (@{const_name nhds}, _) $ a, Const (@{const_name nhds}, _) $ b) => (
+ (Const (\<^const_name>\<open>nhds\<close>, _) $ a, Const (\<^const_name>\<open>nhds\<close>, _) $ b) => (
case try_prove_real_eq false ectxt (a, b) of
SOME _ => true
| _ => false)
--- a/src/HOL/Real_Asymp/real_asymp.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/real_asymp.ML Sat Jan 05 17:24:33 2019 +0100
@@ -14,13 +14,13 @@
val basis = Asymptotic_Basis.default_basis
val prover =
case filter of
- Const (@{const_name "Topological_Spaces.nhds"}, _) $ _ => SOME Exp.prove_nhds
- | @{term "at (0 :: real)"} => SOME Exp.prove_at_0
- | @{term "at_left (0 :: real)"} => SOME Exp.prove_at_left_0
- | @{term "at_right (0 :: real)"} => SOME Exp.prove_at_right_0
- | @{term "at_infinity :: real filter"} => SOME Exp.prove_at_infinity
- | @{term "at_top :: real filter"} => SOME Exp.prove_at_top
- | @{term "at_bot :: real filter"} => SOME Exp.prove_at_bot
+ Const (\<^const_name>\<open>Topological_Spaces.nhds\<close>, _) $ _ => SOME Exp.prove_nhds
+ | \<^term>\<open>at (0 :: real)\<close> => SOME Exp.prove_at_0
+ | \<^term>\<open>at_left (0 :: real)\<close> => SOME Exp.prove_at_left_0
+ | \<^term>\<open>at_right (0 :: real)\<close> => SOME Exp.prove_at_right_0
+ | \<^term>\<open>at_infinity :: real filter\<close> => SOME Exp.prove_at_infinity
+ | \<^term>\<open>at_top :: real filter\<close> => SOME Exp.prove_at_top
+ | \<^term>\<open>at_bot :: real filter\<close> => SOME Exp.prove_at_bot
| _ => NONE
val lim_thm = Option.map (fn prover => prover ectxt (Exp.expand_term ectxt f basis)) prover
in
@@ -34,11 +34,11 @@
fun prove_eventually_at_top ectxt p =
case Envir.eta_long [] p of
- Abs (x, @{typ Real.real}, Const (rel, _) $ f $ g) => ((
+ Abs (x, \<^typ>\<open>Real.real\<close>, Const (rel, _) $ f $ g) => ((
let
- val (f, g) = apply2 (fn t => Abs (x, @{typ Real.real}, t)) (f, g)
- val _ = if rel = @{const_name "Orderings.less"}
- orelse rel = @{const_name "Orderings.less_eq"} then ()
+ val (f, g) = apply2 (fn t => Abs (x, \<^typ>\<open>Real.real\<close>, t)) (f, g)
+ val _ = if rel = \<^const_name>\<open>Orderings.less\<close>
+ orelse rel = \<^const_name>\<open>Orderings.less_eq\<close> then ()
else raise TERM ("prove_eventually_at_top", [p])
val ctxt = get_ctxt ectxt
val basis = Asymptotic_Basis.default_basis
@@ -58,10 +58,10 @@
val ([thm1, thm2], basis) = Exp.expand_terms ectxt [f, g] basis
val prover =
case l' of
- @{const_name smallo} => Exp.prove_smallo
- | @{const_name bigo} => Exp.prove_bigo
- | @{const_name bigtheta} => Exp.prove_bigtheta
- | @{const_name asymp_equiv} => Exp.prove_asymp_equiv
+ \<^const_name>\<open>smallo\<close> => Exp.prove_smallo
+ | \<^const_name>\<open>bigo\<close> => Exp.prove_bigo
+ | \<^const_name>\<open>bigtheta\<close> => Exp.prove_bigtheta
+ | \<^const_name>\<open>asymp_equiv\<close> => Exp.prove_asymp_equiv
| _ => raise TERM ("prove_landau", [f, g])
in
HEADGOAL (resolve_tac ctxt [prover ectxt (thm1, thm2, basis)])
@@ -95,9 +95,9 @@
fun conv (x, ctxt) =
let
val thms1 =
- Named_Theorems.get ctxt @{named_theorems real_asymp_nat_reify}
+ Named_Theorems.get ctxt \<^named_theorems>\<open>real_asymp_nat_reify\<close>
val thms2 =
- Named_Theorems.get ctxt @{named_theorems real_asymp_int_reify}
+ Named_Theorems.get ctxt \<^named_theorems>\<open>real_asymp_int_reify\<close>
val ctxt' = put_simpset HOL_basic_ss ctxt addsimps (thms1 @ thms2)
in
repeat'_conv (
@@ -117,14 +117,14 @@
val conv = preproc_exp_log_natintfun_conv ctxt
val conv =
case Thm.term_of goal of
- @{term "HOL.Trueprop"} $ t => (case t of
- Const (@{const_name Filter.filterlim}, _) $ _ $ _ $ _ =>
+ \<^term>\<open>HOL.Trueprop\<close> $ t => (case t of
+ Const (\<^const_name>\<open>Filter.filterlim\<close>, _) $ _ $ _ $ _ =>
Conv.fun_conv (Conv.fun_conv (Conv.arg_conv conv))
- | Const (@{const_name Filter.eventually}, _) $ _ $ _ =>
+ | Const (\<^const_name>\<open>Filter.eventually\<close>, _) $ _ $ _ =>
Conv.fun_conv (Conv.arg_conv conv)
- | Const (@{const_name Set.member}, _) $ _ $ (_ $ _ $ _) =>
+ | Const (\<^const_name>\<open>Set.member\<close>, _) $ _ $ (_ $ _ $ _) =>
Conv.combination_conv (Conv.arg_conv conv) (Conv.arg_conv conv)
- | Const (@{const_name Landau_Symbols.asymp_equiv}, _) $ _ $ _ $ _ =>
+ | Const (\<^const_name>\<open>Landau_Symbols.asymp_equiv\<close>, _) $ _ $ _ $ _ =>
Conv.combination_conv (Conv.fun_conv (Conv.arg_conv conv)) conv
| _ => Conv.all_conv)
| _ => Conv.all_conv
@@ -145,13 +145,13 @@
fun prove_eventually ectxt p filter =
case filter of
- @{term "Filter.at_top :: real filter"} => (prove_eventually_at_top ectxt p
+ \<^term>\<open>Filter.at_top :: real filter\<close> => (prove_eventually_at_top ectxt p
handle TERM _ => no_tac | THM _ => no_tac)
| _ => HEADGOAL (CONVERSION (Conv.rewrs_conv eventually_substs)
THEN' tac' (#verbose (#ctxt ectxt)) (Inr ectxt))
and prove_limit ectxt f filter filter' =
case filter' of
- @{term "Filter.at_top :: real filter"} => (prove_limit_at_top ectxt f filter
+ \<^term>\<open>Filter.at_top :: real filter\<close> => (prove_limit_at_top ectxt f filter
handle TERM _ => no_tac | THM _ => no_tac)
| _ => HEADGOAL (CONVERSION (Conv.rewrs_conv filterlim_substs)
THEN' tac' (#verbose (#ctxt ectxt)) (Inr ectxt))
@@ -168,15 +168,15 @@
| Inr ectxt => ectxt
in
case Thm.term_of goal of
- @{term "HOL.Trueprop"} $ t => ((case t of
- @{term "Filter.filterlim :: (real \<Rightarrow> real) \<Rightarrow> _"} $ f $ filter $ filter' =>
+ \<^term>\<open>HOL.Trueprop\<close> $ t => ((case t of
+ \<^term>\<open>Filter.filterlim :: (real \<Rightarrow> real) \<Rightarrow> _\<close> $ f $ filter $ filter' =>
(prove_limit ectxt f filter filter' handle TERM _ => no_tac | THM _ => no_tac)
- | @{term "Filter.eventually :: (real \<Rightarrow> bool) \<Rightarrow> _"} $ p $ filter =>
+ | \<^term>\<open>Filter.eventually :: (real \<Rightarrow> bool) \<Rightarrow> _\<close> $ p $ filter =>
(prove_eventually ectxt p filter handle TERM _ => no_tac | THM _ => no_tac)
- | @{term "Set.member :: (real => real) => _"} $ f $
- (l $ @{term "at_top :: real filter"} $ g) =>
+ | \<^term>\<open>Set.member :: (real => real) => _\<close> $ f $
+ (l $ \<^term>\<open>at_top :: real filter\<close> $ g) =>
(prove_landau ectxt l f g handle TERM _ => no_tac | THM _ => no_tac)
- | (l as @{term "Landau_Symbols.asymp_equiv :: (real\<Rightarrow>real)\<Rightarrow>_"}) $ f $ _ $ g =>
+ | (l as \<^term>\<open>Landau_Symbols.asymp_equiv :: (real\<Rightarrow>real)\<Rightarrow>_\<close>) $ f $ _ $ g =>
(prove_landau ectxt l f g handle TERM _ => no_tac | THM _ => no_tac)
| _ => no_tac) THEN distinct_subgoals_tac)
| _ => no_tac
--- a/src/HOL/Real_Asymp/real_asymp_diag.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Real_Asymp/real_asymp_diag.ML Sat Jan 05 17:24:33 2019 +0100
@@ -19,63 +19,63 @@
open Lazy_Eval
open Multiseries_Expansion
-fun pretty_limit _ (Const (@{const_name "at_top"}, _)) = Pretty.str "\<infinity>"
- | pretty_limit _ (Const (@{const_name "at_bot"}, _)) = Pretty.str "-\<infinity>"
- | pretty_limit _ (Const (@{const_name "at_infinity"}, _)) = Pretty.str "\<plusminus>\<infinity>"
- | pretty_limit ctxt (Const (@{const_name "at_within"}, _) $ c $
- (Const (@{const_name "greaterThan"}, _) $ _)) =
+fun pretty_limit _ (Const (\<^const_name>\<open>at_top\<close>, _)) = Pretty.str "\<infinity>"
+ | pretty_limit _ (Const (\<^const_name>\<open>at_bot\<close>, _)) = Pretty.str "-\<infinity>"
+ | pretty_limit _ (Const (\<^const_name>\<open>at_infinity\<close>, _)) = Pretty.str "\<plusminus>\<infinity>"
+ | pretty_limit ctxt (Const (\<^const_name>\<open>at_within\<close>, _) $ c $
+ (Const (\<^const_name>\<open>greaterThan\<close>, _) $ _)) =
Pretty.block [Syntax.pretty_term ctxt c, Pretty.str "\<^sup>+"]
- | pretty_limit ctxt (Const (@{const_name "at_within"}, _) $ c $
- (Const (@{const_name "lessThan"}, _) $ _)) =
+ | pretty_limit ctxt (Const (\<^const_name>\<open>at_within\<close>, _) $ c $
+ (Const (\<^const_name>\<open>lessThan\<close>, _) $ _)) =
Pretty.block [Syntax.pretty_term ctxt c, Pretty.str "\<^sup>-"]
- | pretty_limit ctxt (Const (@{const_name "at_within"}, _) $ c $ Const ("UNIV", _)) =
+ | pretty_limit ctxt (Const (\<^const_name>\<open>at_within\<close>, _) $ c $ Const ("UNIV", _)) =
Syntax.pretty_term ctxt c
- | pretty_limit ctxt (Const (@{const_name "nhds"}, _) $ c) =
+ | pretty_limit ctxt (Const (\<^const_name>\<open>nhds\<close>, _) $ c) =
Syntax.pretty_term ctxt c
| pretty_limit _ t = raise TERM ("pretty_limit", [t])
fun reduce_to_at_top flt t = Envir.beta_eta_contract (
case flt of
- @{term "at_top :: real filter"} => t
- | @{term "at_bot :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (-x)"}, t)
- | @{term "at_left 0 :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (-inverse x)"}, t)
- | @{term "at_right 0 :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (inverse x)"}, t)
- | @{term "at_within :: real => _"} $ c $ (@{term "greaterThan :: real \<Rightarrow> _"} $ c') =>
+ \<^term>\<open>at_top :: real filter\<close> => t
+ | \<^term>\<open>at_bot :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (-x)\<close>, t)
+ | \<^term>\<open>at_left 0 :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (-inverse x)\<close>, t)
+ | \<^term>\<open>at_right 0 :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (inverse x)\<close>, t)
+ | \<^term>\<open>at_within :: real => _\<close> $ c $ (\<^term>\<open>greaterThan :: real \<Rightarrow> _\<close> $ c') =>
if c aconv c' then
- Term.betapply (Term.betapply (@{term "%(f::real\<Rightarrow>real) c x. f (c + inverse x)"}, t), c)
+ Term.betapply (Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) c x. f (c + inverse x)\<close>, t), c)
else
raise TERM ("Unsupported filter for real_limit", [flt])
- | @{term "at_within :: real => _"} $ c $ (@{term "lessThan :: real \<Rightarrow> _"} $ c') =>
+ | \<^term>\<open>at_within :: real => _\<close> $ c $ (\<^term>\<open>lessThan :: real \<Rightarrow> _\<close> $ c') =>
if c aconv c' then
- Term.betapply (Term.betapply (@{term "%(f::real\<Rightarrow>real) c x. f (c - inverse x)"}, t), c)
+ Term.betapply (Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) c x. f (c - inverse x)\<close>, t), c)
else
raise TERM ("Unsupported filter for real_limit", [flt])
| _ =>
raise TERM ("Unsupported filter for real_limit", [flt]))
-fun mk_uminus (@{term "uminus :: real => real"} $ c) = c
- | mk_uminus c = Term.betapply (@{term "uminus :: real => real"}, c)
+fun mk_uminus (\<^term>\<open>uminus :: real => real\<close> $ c) = c
+ | mk_uminus c = Term.betapply (\<^term>\<open>uminus :: real => real\<close>, c)
fun transfer_expansion_from_at_top' flt t = Envir.beta_eta_contract (
case flt of
- @{term "at_top :: real filter"} => t
- | @{term "at_bot :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (-x)"}, t)
- | @{term "at_left 0 :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (-inverse x)"}, t)
- | @{term "at_right 0 :: real filter"} =>
- Term.betapply (@{term "%(f::real\<Rightarrow>real) x. f (inverse x)"}, t)
- | @{term "at_within :: real => _"} $ c $ (@{term "greaterThan :: real \<Rightarrow> _"} $ c') =>
+ \<^term>\<open>at_top :: real filter\<close> => t
+ | \<^term>\<open>at_bot :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (-x)\<close>, t)
+ | \<^term>\<open>at_left 0 :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (-inverse x)\<close>, t)
+ | \<^term>\<open>at_right 0 :: real filter\<close> =>
+ Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) x. f (inverse x)\<close>, t)
+ | \<^term>\<open>at_within :: real => _\<close> $ c $ (\<^term>\<open>greaterThan :: real \<Rightarrow> _\<close> $ c') =>
if c aconv c' then
- Term.betapply (Term.betapply (@{term "%(f::real\<Rightarrow>real) c x. f (inverse (x - c))"}, t), c)
+ Term.betapply (Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) c x. f (inverse (x - c))\<close>, t), c)
else
raise TERM ("Unsupported filter for real_limit", [flt])
- | @{term "at_within :: real => _"} $ c $ (@{term "lessThan :: real \<Rightarrow> _"} $ c') =>
+ | \<^term>\<open>at_within :: real => _\<close> $ c $ (\<^term>\<open>lessThan :: real \<Rightarrow> _\<close> $ c') =>
if c aconv c' then
- Term.betapply (Term.betapply (@{term "%(f::real\<Rightarrow>real) c x. f (inverse (c - x))"}, t), c)
+ Term.betapply (Term.betapply (\<^term>\<open>%(f::real\<Rightarrow>real) c x. f (inverse (c - x))\<close>, t), c)
else
raise TERM ("Unsupported filter for real_limit", [flt])
| _ =>
@@ -84,31 +84,31 @@
fun transfer_expansion_from_at_top flt =
let
- fun go idx (t as (@{term "(powr) :: real => _"} $
- (@{term "inverse :: real \<Rightarrow> _"} $ Bound n) $ e)) =
+ fun go idx (t as (\<^term>\<open>(powr) :: real => _\<close> $
+ (\<^term>\<open>inverse :: real \<Rightarrow> _\<close> $ Bound n) $ e)) =
if n = idx then
- Envir.beta_eta_contract (@{term "(powr) :: real => _"} $ Bound n $ mk_uminus e)
+ Envir.beta_eta_contract (\<^term>\<open>(powr) :: real => _\<close> $ Bound n $ mk_uminus e)
else
t
- | go idx (t as (@{term "(powr) :: real => _"} $ (@{term "uminus :: real \<Rightarrow> real"} $
- (@{term "inverse :: real \<Rightarrow> _"} $ Bound n)) $ e)) =
+ | go idx (t as (\<^term>\<open>(powr) :: real => _\<close> $ (\<^term>\<open>uminus :: real \<Rightarrow> real\<close> $
+ (\<^term>\<open>inverse :: real \<Rightarrow> _\<close> $ Bound n)) $ e)) =
if n = idx then
- Envir.beta_eta_contract (@{term "(powr) :: real => _"} $
+ Envir.beta_eta_contract (\<^term>\<open>(powr) :: real => _\<close> $
(mk_uminus (Bound n)) $ mk_uminus e)
else
t
- | go idx (t as (@{term "(powr) :: real => _"} $ (@{term "inverse :: real \<Rightarrow> _"} $
- (@{term "(-) :: real \<Rightarrow> _"} $ Bound n $ c)) $ e)) =
+ | go idx (t as (\<^term>\<open>(powr) :: real => _\<close> $ (\<^term>\<open>inverse :: real \<Rightarrow> _\<close> $
+ (\<^term>\<open>(-) :: real \<Rightarrow> _\<close> $ Bound n $ c)) $ e)) =
if n = idx then
- Envir.beta_eta_contract (@{term "(powr) :: real => _"} $
- (@{term "(-) :: real => _"} $ Bound n $ c) $ mk_uminus e)
+ Envir.beta_eta_contract (\<^term>\<open>(powr) :: real => _\<close> $
+ (\<^term>\<open>(-) :: real => _\<close> $ Bound n $ c) $ mk_uminus e)
else
t
- | go idx (t as (@{term "(powr) :: real => _"} $ (@{term "inverse :: real \<Rightarrow> _"} $
- (@{term "(-) :: real \<Rightarrow> _"} $ c $ Bound n)) $ e)) =
+ | go idx (t as (\<^term>\<open>(powr) :: real => _\<close> $ (\<^term>\<open>inverse :: real \<Rightarrow> _\<close> $
+ (\<^term>\<open>(-) :: real \<Rightarrow> _\<close> $ c $ Bound n)) $ e)) =
if n = idx then
- Envir.beta_eta_contract (@{term "(powr) :: real => _"} $
- (@{term "(-) :: real => _"} $ c $ Bound n) $ mk_uminus e)
+ Envir.beta_eta_contract (\<^term>\<open>(powr) :: real => _\<close> $
+ (\<^term>\<open>(-) :: real => _\<close> $ c $ Bound n) $ mk_uminus e)
else
t
| go idx (s $ t) = go idx s $ go idx t
@@ -145,15 +145,15 @@
gen_limit
(fn ctxt =>
Syntax.parse_term ctxt
- #> Type.constraint @{typ "real => real"}
+ #> Type.constraint \<^typ>\<open>real => real\<close>
#> Syntax.check_term ctxt)
(fn ctxt => fn flt =>
case flt of
- NONE => @{term "at_top :: real filter"}
+ NONE => \<^term>\<open>at_top :: real filter\<close>
| SOME flt =>
flt
|> Syntax.parse_term ctxt
- |> Type.constraint @{typ "real filter"}
+ |> Type.constraint \<^typ>\<open>real filter\<close>
|> Syntax.check_term ctxt)
(fn ctxt => flat o flat o map (map (Proof_Context.get_fact ctxt o fst)))
(fn ctxt => pretty_limit_result ctxt #> Pretty.writeln)
@@ -181,7 +181,7 @@
case error of
NONE => exp
| SOME err =>
- Term.betapply (Term.betapply (@{term "expansion_with_remainder_term"}, exp), err)
+ Term.betapply (Term.betapply (\<^term>\<open>expansion_with_remainder_term\<close>, exp), err)
in
res ctxt (t, basis)
end
@@ -194,15 +194,15 @@
gen_expansion
(fn ctxt =>
Syntax.parse_term ctxt
- #> Type.constraint @{typ "real => real"}
+ #> Type.constraint \<^typ>\<open>real => real\<close>
#> Syntax.check_term ctxt)
(fn ctxt => fn flt =>
case flt of
- NONE => @{term "at_top :: real filter"}
+ NONE => \<^term>\<open>at_top :: real filter\<close>
| SOME flt =>
flt
|> Syntax.parse_term ctxt
- |> Type.constraint @{typ "real filter"}
+ |> Type.constraint \<^typ>\<open>real filter\<close>
|> Syntax.check_term ctxt)
(fn ctxt => flat o flat o map (map (Proof_Context.get_fact ctxt o fst)))
(fn ctxt => fn (exp, basis) =>
@@ -246,7 +246,7 @@
in
val _ =
- Outer_Syntax.command @{command_keyword real_limit}
+ Outer_Syntax.command \<^command_keyword>\<open>real_limit\<close>
"semi-automatically compute limits of real functions"
((Parse.term -- parse_opts limit_opts dflt_limit_opts) >>
(fn (t, {limit = flt, facts = thms}) =>
@@ -254,7 +254,7 @@
Real_Asymp_Diag.limit_cmd (Toplevel.context_of state) thms t flt))))
val _ =
- Outer_Syntax.command @{command_keyword real_expansion}
+ Outer_Syntax.command \<^command_keyword>\<open>real_expansion\<close>
"semi-automatically compute expansions of real functions"
(Parse.term -- parse_opts expansion_opts dflt_expansion_opts >>
(fn (t, {limit = flt, terms = n_strict, facts = thms}) =>
--- a/src/HOL/SET_Protocol/Cardholder_Registration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Cardholder_Registration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -56,7 +56,7 @@
2nd case: CR5, where KC3 encrypts NC3;
3rd case: CR6, where KC2 encrypts NC3;
4th case: CR6, where KC2 encrypts NonceCCA;
- 5th case: any use of @{term "priEK C"} (including CardSecret).
+ 5th case: any use of \<^term>\<open>priEK C\<close> (including CardSecret).
NB the only Nonces we need to keep secret are CardSecret and NonceCCA.
But we can't prove \<open>Nonce_compromise\<close> unless the relation covers ALL
nonces that the protocol keeps secret.\<close>
@@ -124,7 +124,7 @@
- certificates pertain to the CA that C contacted (this is done by
checking the signature).
C generates a fresh symmetric key KC1.
- The point of encrypting @{term "\<lbrace>Agent C, Nonce NC2, Hash (Pan(pan C))\<rbrace>"}
+ The point of encrypting \<^term>\<open>\<lbrace>Agent C, Nonce NC2, Hash (Pan(pan C))\<rbrace>\<close>
is not clear.\<close>
"[| evs3 \<in> set_cr; C = Cardholder k;
Nonce NC2 \<notin> used evs3;
@@ -141,7 +141,7 @@
| SET_CR4:
\<comment> \<open>RegFormRes:
CA responds sending NC2 back with a new nonce NCA, after checking that
- - the digital envelope is correctly encrypted by @{term "pubEK (CA i)"}
+ - the digital envelope is correctly encrypted by \<^term>\<open>pubEK (CA i)\<close>
- the entire message is encrypted with the same key found inside the
envelope (here, KC1)\<close>
"[| evs4 \<in> set_cr;
@@ -186,8 +186,8 @@
its signature certificate and the new cardholder signature
certificate. CA checks to have never certified the key proposed by C.
NOTE: In Merchant Registration, the corresponding rule (4)
- uses the "sign" primitive. The encryption below is actually @{term EncK},
- which is just @{term "Crypt K (sign SK X)"}.\<close>
+ uses the "sign" primitive. The encryption below is actually \<^term>\<open>EncK\<close>,
+ which is just \<^term>\<open>Crypt K (sign SK X)\<close>.\<close>
| SET_CR6:
"[| evs6 \<in> set_cr;
@@ -496,7 +496,7 @@
text\<open>This holds because if (priEK (CA i)) appears in any traffic then it must
- be known to the Spy, by @{term Spy_see_private_Key}\<close>
+ be known to the Spy, by \<^term>\<open>Spy_see_private_Key\<close>\<close>
lemma cardSK_neq_priEK:
"[|Key cardSK \<notin> analz (knows Spy evs);
Key cardSK \<in> parts (knows Spy evs);
@@ -913,7 +913,7 @@
text\<open>Lemma for message 6: either cardSK isn't a CA's private encryption key,
or if it is then (because it appears in traffic) that CA is bad,
and so the Spy knows that key already. Either way, we can simplify
- the expression @{term "analz (insert (Key cardSK) X)"}.\<close>
+ the expression \<^term>\<open>analz (insert (Key cardSK) X)\<close>.\<close>
lemma msg6_cardSK_disj:
"[|Gets A \<lbrace>Crypt K \<lbrace>c, n, k', Key cardSK, X\<rbrace>, Y\<rbrace>
\<in> set evs; evs \<in> set_cr |]
@@ -954,7 +954,7 @@
text\<open>Confidentiality of the PAN\@. Maybe we could combine the statements of
- this theorem with @{term analz_image_pan}, requiring a single induction but
+ this theorem with \<^term>\<open>analz_image_pan\<close>, requiring a single induction but
a much more difficult proof.\<close>
lemma pan_confidentiality:
"[| Pan (pan C) \<in> analz(knows Spy evs); C \<noteq>Spy; evs \<in> set_cr|]
--- a/src/HOL/SET_Protocol/Event_SET.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Event_SET.thy Sat Jan 05 17:24:33 2019 +0100
@@ -89,7 +89,7 @@
by auto
text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
- on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
+ on whether \<^term>\<open>A=Spy\<close> and whether \<^term>\<open>A\<in>bad\<close>\<close>
lemma knows_Spy_Notes [simp]:
"knows Spy (Notes A X # evs) =
(if A\<in>bad then insert X (knows Spy evs) else knows Spy evs)"
@@ -130,7 +130,7 @@
parts.Body [elim_format]
-subsection\<open>The Function @{term used}\<close>
+subsection\<open>The Function \<^term>\<open>used\<close>\<close>
lemma parts_knows_Spy_subset_used: "parts (knows Spy evs) \<subseteq> used evs"
apply (induct_tac "evs")
@@ -168,10 +168,10 @@
used_Nil [simp del] used_Cons [simp del]
-text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) \<longrightarrow> P"}
+text\<open>For proving theorems of the form \<^term>\<open>X \<notin> analz (knows Spy evs) \<longrightarrow> P\<close>
New events added by induction to "evs" are discarded. Provided
this information isn't needed, the proof will be much shorter, since
- it will omit complicated reasoning about @{term analz}.\<close>
+ it will omit complicated reasoning about \<^term>\<open>analz\<close>.\<close>
lemmas analz_mono_contra =
knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
--- a/src/HOL/SET_Protocol/Merchant_Registration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Merchant_Registration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,7 +13,7 @@
text\<open>Copmpared with Cardholder Reigstration, \<open>KeyCryptKey\<close> is not
needed: no session key encrypts another. Instead we
prove the "key compromise" theorems for sets KK that contain no private
- encryption keys (@{term "priEK C"}).\<close>
+ encryption keys (\<^term>\<open>priEK C\<close>).\<close>
inductive_set
@@ -383,7 +383,7 @@
subsubsection\<open>The merchant's certificates really were created by the CA,
provided the CA is uncompromised\<close>
-text\<open>The assumption @{term "CA i \<noteq> RCA"} is required: step 2 uses
+text\<open>The assumption \<^term>\<open>CA i \<noteq> RCA\<close> is required: step 2 uses
certificates of the same form.\<close>
lemma certificate_merSK_valid_lemma [intro]:
"[|Crypt (priSK (CA i)) \<lbrace>Agent M, Key merSK, onlySig\<rbrace>
--- a/src/HOL/SET_Protocol/Message_SET.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Message_SET.thy Sat Jan 05 17:24:33 2019 +0100
@@ -21,12 +21,12 @@
text\<open>Collapses redundant cases in the huge protocol proofs\<close>
lemmas disj_simps = disj_comms disj_left_absorb disj_assoc
-text\<open>Effective with assumptions like @{term "K \<notin> range pubK"} and
- @{term "K \<notin> invKey`range pubK"}\<close>
+text\<open>Effective with assumptions like \<^term>\<open>K \<notin> range pubK\<close> and
+ \<^term>\<open>K \<notin> invKey`range pubK\<close>\<close>
lemma notin_image_iff: "(y \<notin> f`I) = (\<forall>i\<in>I. f i \<noteq> y)"
by blast
-text\<open>Effective with the assumption @{term "KK \<subseteq> - (range(invKey o pubK))"}\<close>
+text\<open>Effective with the assumption \<^term>\<open>KK \<subseteq> - (range(invKey o pubK))\<close>\<close>
lemma disjoint_image_iff: "(A \<subseteq> - (f`I)) = (\<forall>i\<in>I. f i \<notin> A)"
by blast
@@ -263,7 +263,7 @@
text\<open>This allows \<open>blast\<close> to simplify occurrences of
- @{term "parts(G\<union>H)"} in the assumption.\<close>
+ \<^term>\<open>parts(G\<union>H)\<close> in the assumption.\<close>
declare parts_Un [THEN equalityD1, THEN subsetD, THEN UnE, elim!]
--- a/src/HOL/SET_Protocol/Public_SET.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Public_SET.thy Sat Jan 05 17:24:33 2019 +0100
@@ -31,7 +31,7 @@
abbreviation "priSK A == invKey (pubSK A)"
text\<open>By freeness of agents, no two agents have the same key. Since
- @{term "True\<noteq>False"}, no agent has the same signing and encryption keys.\<close>
+ \<^term>\<open>True\<noteq>False\<close>, no agent has the same signing and encryption keys.\<close>
specification (publicKey)
injective_publicKey:
@@ -367,7 +367,7 @@
"for proving possibility theorems"
-subsection\<open>Specialized Rewriting for Theorems About @{term analz} and Image\<close>
+subsection\<open>Specialized Rewriting for Theorems About \<^term>\<open>analz\<close> and Image\<close>
lemma insert_Key_singleton: "insert (Key K) H = Key ` {K} \<union> H"
by blast
@@ -411,7 +411,7 @@
keysFor (insert A X) = keysFor (insert B X)"
by auto
-subsubsection\<open>Special Simplification Rules for @{term signCert}\<close>
+subsubsection\<open>Special Simplification Rules for \<^term>\<open>signCert\<close>\<close>
text\<open>Avoids duplicating X and its components!\<close>
lemma parts_insert_signCert:
@@ -428,7 +428,7 @@
lemma keysFor_insert_signCert: "keysFor (insert (signCert K X) H) = keysFor H"
by (simp add: signCert_def)
-text\<open>Controlled rewrite rules for @{term signCert}, just the definitions
+text\<open>Controlled rewrite rules for \<^term>\<open>signCert\<close>, just the definitions
of the others. Encryption primitives are just expanded, despite their huge
redundancy!\<close>
lemmas abbrev_simps [simp] =
@@ -477,7 +477,7 @@
by (unfold EXcrypt_def, blast)
-subsection\<open>Lemmas to Simplify Expressions Involving @{term analz}\<close>
+subsection\<open>Lemmas to Simplify Expressions Involving \<^term>\<open>analz\<close>\<close>
lemma analz_knows_absorb:
"Key K \<in> analz (knows Spy evs)
@@ -511,7 +511,7 @@
"[|Key K \<in> parts {X}; Says A B X \<in> set evs|] ==> Key K \<in> used evs"
by (blast intro: parts_trans dest!: Says_imp_knows_Spy [THEN parts.Inj])
-text\<open>A useful rewrite rule with @{term analz_image_keys_simps}\<close>
+text\<open>A useful rewrite rule with \<^term>\<open>analz_image_keys_simps\<close>\<close>
lemma Crypt_notin_image_Key: "Crypt K X \<notin> Key ` KK"
by auto
--- a/src/HOL/SET_Protocol/Purchase.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SET_Protocol/Purchase.thy Sat Jan 05 17:24:33 2019 +0100
@@ -152,7 +152,7 @@
| PReqS:
\<comment> \<open>SIGNED Purchase request. Page 77 of Formal Protocol Desc.
We could specify the equation
- @{term "PIReqSigned = \<lbrace> PIDualSigned, OIDualSigned \<rbrace>"}, since the
+ \<^term>\<open>PIReqSigned = \<lbrace> PIDualSigned, OIDualSigned \<rbrace>\<close>, since the
Formal Desc. gives PIHead the same format in the unsigned case.
However, there's little point, as P treats the signed and
unsigned cases differently.\<close>
@@ -431,8 +431,8 @@
evs \<in> set_pur|] ==> priEK C \<in> KK | C \<in> bad"
by auto
-text\<open>trivial proof because @{term"priEK C"} never appears even in
- @{term "parts evs"}.\<close>
+text\<open>trivial proof because \<^term>\<open>priEK C\<close> never appears even in
+ \<^term>\<open>parts evs\<close>.\<close>
lemma analz_image_priEK:
"evs \<in> set_pur ==>
(Key (priEK C) \<in> analz (Key`KK \<union> (knows Spy evs))) =
@@ -734,7 +734,7 @@
evs \<in> set_pur|] ==> \<exists>j. P = PG j"
by (erule rev_mp, erule set_pur.induct, simp_all)
-text\<open>If we trust M, then @{term LID_M} determines his choice of P
+text\<open>If we trust M, then \<^term>\<open>LID_M\<close> determines his choice of P
(Payment Gateway)\<close>
lemma goodM_gives_correct_PG:
"[| MsgPInitRes =
@@ -820,9 +820,9 @@
(Although the spec has SIGNED and UNSIGNED forms of AuthRes, they
send the same message to M.) The conclusion is weak: M is existentially
quantified! That is because Authorization Response does not refer to M, while
- the digital envelope weakens the link between @{term MsgAuthRes} and
- @{term"priSK M"}. Changing the precondition to refer to
- @{term "Crypt K (sign SK M)"} requires assuming @{term K} to be secure, since
+ the digital envelope weakens the link between \<^term>\<open>MsgAuthRes\<close> and
+ \<^term>\<open>priSK M\<close>. Changing the precondition to refer to
+ \<^term>\<open>Crypt K (sign SK M)\<close> requires assuming \<^term>\<open>K\<close> to be secure, since
otherwise the Spy could create that message.\<close>
theorem M_verifies_AuthRes:
"[| MsgAuthRes = \<lbrace>\<lbrace>Number LID_M, Number XID, Number PurchAmt\<rbrace>,
@@ -868,7 +868,7 @@
done
-text\<open>Unicity of @{term LID_M} between Merchant and Cardholder notes\<close>
+text\<open>Unicity of \<^term>\<open>LID_M\<close> between Merchant and Cardholder notes\<close>
lemma unique_LID_M:
"[|Notes (Merchant i) \<lbrace>Number LID_M, Agent P, Trans\<rbrace> \<in> set evs;
Notes C \<lbrace>Number LID_M, Agent M, Agent C, Number OD,
@@ -881,7 +881,7 @@
apply (force dest!: Notes_imp_parts_subset_used)
done
-text\<open>Unicity of @{term LID_M}, for two Merchant Notes events\<close>
+text\<open>Unicity of \<^term>\<open>LID_M\<close>, for two Merchant Notes events\<close>
lemma unique_LID_M2:
"[|Notes M \<lbrace>Number LID_M, Trans\<rbrace> \<in> set evs;
Notes M \<lbrace>Number LID_M, Trans'\<rbrace> \<in> set evs;
@@ -893,7 +893,7 @@
done
text\<open>Lemma needed below: for the case that
- if PRes is present, then @{term LID_M} has been used.\<close>
+ if PRes is present, then \<^term>\<open>LID_M\<close> has been used.\<close>
lemma signed_imp_used:
"[| Crypt (priSK M) (Hash X) \<in> parts (knows Spy evs);
M \<notin> bad; evs \<in> set_pur|] ==> parts {X} \<subseteq> used evs"
@@ -1051,8 +1051,8 @@
text\<open>When P sees a dual signature, he knows that it originated with C.
and was intended for M. This guarantee isn't useful to M, who never gets
- PIData. I don't see how to link @{term "PG j"} and \<open>LID_M\<close> without
- assuming @{term "M \<notin> bad"}.\<close>
+ PIData. I don't see how to link \<^term>\<open>PG j\<close> and \<open>LID_M\<close> without
+ assuming \<^term>\<open>M \<notin> bad\<close>.\<close>
theorem P_verifies_Signed_PReq:
"[| MsgDualSign = \<lbrace>Hash PIData, HOIData\<rbrace>;
PIData = \<lbrace>PIHead, PANData\<rbrace>;
@@ -1128,7 +1128,7 @@
text\<open>When P receives an AuthReq and a dual signature, he knows that C and M
agree on the essential details. PurchAmt however is never sent by M to
P; instead C and M both send
- @{term "HOD = Hash\<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>"}
+ \<^term>\<open>HOD = Hash\<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>\<close>
and P compares the two copies of HOD.
Agreement can't be proved for some things, including the symmetric keys
--- a/src/HOL/SMT_Examples/boogie.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SMT_Examples/boogie.ML Sat Jan 05 17:24:33 2019 +0100
@@ -39,7 +39,7 @@
fun add_type name (tds, fds, axs, vcs) =
let
- val T = TFree (isabelle_name name, @{sort type})
+ val T = TFree (isabelle_name name, \<^sort>\<open>type\<close>)
val tds' = Symtab.update (name, T) tds
in (tds', fds, axs, vcs) end
@@ -68,15 +68,15 @@
fun mk_var name T = Free ("V_" ^ isabelle_name name, T)
-fun mk_arrayT (Ts, T) = Type (@{type_name "fun"}, [HOLogic.mk_tupleT Ts, T])
+fun mk_arrayT (Ts, T) = Type (\<^type_name>\<open>fun\<close>, [HOLogic.mk_tupleT Ts, T])
fun mk_binary t (t1, t2) = t $ t1 $ t2
fun mk_nary _ t [] = t
| mk_nary f _ ts = uncurry (fold_rev f) (split_last ts)
-fun mk_distinct [] = @{const HOL.True}
- | mk_distinct [_] = @{const HOL.True}
+fun mk_distinct [] = \<^const>\<open>HOL.True\<close>
+ | mk_distinct [_] = \<^const>\<open>HOL.True\<close>
| mk_distinct (t :: ts) =
let
fun mk_noteq u u' =
@@ -87,23 +87,23 @@
let
val mT = Term.fastype_of m and kT = Term.fastype_of k
val vT = Term.fastype_of v
- in Const (@{const_name fun_upd}, mT --> kT --> vT --> mT) $ m $ k $ v end
+ in Const (\<^const_name>\<open>fun_upd\<close>, mT --> kT --> vT --> mT) $ m $ k $ v end
fun mk_quant q (Free (x, T)) t = q T $ absfree (x, T) t
| mk_quant _ t _ = raise TERM ("bad variable", [t])
-val patternT = @{typ "SMT.pattern"}
+val patternT = \<^typ>\<open>SMT.pattern\<close>
fun mk_pat t =
- Const (@{const_name "SMT.pat"}, Term.fastype_of t --> patternT) $ t
+ Const (\<^const_name>\<open>SMT.pat\<close>, Term.fastype_of t --> patternT) $ t
fun mk_pattern [] = raise TERM ("mk_pattern", [])
| mk_pattern ts = SMT_Util.mk_symb_list patternT (map mk_pat ts)
fun mk_trigger [] t = t
| mk_trigger pss t =
- @{term "SMT.trigger"} $
- SMT_Util.mk_symb_list @{typ "SMT.pattern SMT.symb_list"} (map mk_pattern pss) $ t
+ \<^term>\<open>SMT.trigger\<close> $
+ SMT_Util.mk_symb_list \<^typ>\<open>SMT.pattern SMT.symb_list\<close> (map mk_pattern pss) $ t
(* parser *)
@@ -112,36 +112,36 @@
let fun apply (xs, ls) = f ls |>> (fn x => x :: xs)
in funpow (as_int n) apply ([], ls) |>> rev end
-fun parse_type _ (["bool"] :: ls) = (@{typ bool}, ls)
- | parse_type _ (["int"] :: ls) = (@{typ int}, ls)
+fun parse_type _ (["bool"] :: ls) = (\<^typ>\<open>bool\<close>, ls)
+ | parse_type _ (["int"] :: ls) = (\<^typ>\<open>int\<close>, ls)
| parse_type cx (["array", arity] :: ls) =
repeat (parse_type cx) arity ls |>> mk_arrayT o split_last
| parse_type cx (("type-con" :: name :: _) :: ls) = (lookup_type cx name, ls)
| parse_type _ _ = error "Bad type"
-fun parse_expr _ (["true"] :: ls) = (@{term True}, ls)
- | parse_expr _ (["false"] :: ls) = (@{term False}, ls)
+fun parse_expr _ (["true"] :: ls) = (\<^term>\<open>True\<close>, ls)
+ | parse_expr _ (["false"] :: ls) = (\<^term>\<open>False\<close>, ls)
| parse_expr cx (["not"] :: ls) = parse_expr cx ls |>> HOLogic.mk_not
- | parse_expr cx (["and", n] :: ls) = parse_nary_expr cx n HOLogic.mk_conj @{term True} ls
- | parse_expr cx (["or", n] :: ls) = parse_nary_expr cx n HOLogic.mk_disj @{term False} ls
- | parse_expr cx (["implies"] :: ls) = parse_bin_expr cx (mk_binary @{term HOL.implies}) ls
+ | parse_expr cx (["and", n] :: ls) = parse_nary_expr cx n HOLogic.mk_conj \<^term>\<open>True\<close> ls
+ | parse_expr cx (["or", n] :: ls) = parse_nary_expr cx n HOLogic.mk_disj \<^term>\<open>False\<close> ls
+ | parse_expr cx (["implies"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>HOL.implies\<close>) ls
| parse_expr cx (["="] :: ls) = parse_bin_expr cx HOLogic.mk_eq ls
| parse_expr cx (["var", name] :: ls) = parse_type cx ls |>> mk_var name
| parse_expr cx (["fun", name, n] :: ls) =
let val (t, _) = lookup_func cx name
in repeat (parse_expr cx) n ls |>> curry Term.list_comb t end
| parse_expr cx (("label" :: _) :: ls) = parse_expr cx ls
- | parse_expr _ (["int-num", n] :: ls) = (HOLogic.mk_number @{typ int} (as_int n), ls)
- | parse_expr cx (["<"] :: ls) = parse_bin_expr cx (mk_binary @{term "(<) :: int => _"}) ls
- | parse_expr cx (["<="] :: ls) = parse_bin_expr cx (mk_binary @{term "(<=) :: int => _"}) ls
- | parse_expr cx ([">"] :: ls) = parse_bin_expr cx (mk_binary @{term "(<) :: int => _"}o swap) ls
+ | parse_expr _ (["int-num", n] :: ls) = (HOLogic.mk_number \<^typ>\<open>int\<close> (as_int n), ls)
+ | parse_expr cx (["<"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(<) :: int => _\<close>) ls
+ | parse_expr cx (["<="] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(<=) :: int => _\<close>) ls
+ | parse_expr cx ([">"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(<) :: int => _\<close>o swap) ls
| parse_expr cx ([">="] :: ls) =
- parse_bin_expr cx (mk_binary @{term "(<=) :: int => _"} o swap) ls
- | parse_expr cx (["+"] :: ls) = parse_bin_expr cx (mk_binary @{term "(+) :: int => _"}) ls
- | parse_expr cx (["-"] :: ls) = parse_bin_expr cx (mk_binary @{term "(-) :: int => _"}) ls
- | parse_expr cx (["*"] :: ls) = parse_bin_expr cx (mk_binary @{term "(*) :: int => _"}) ls
- | parse_expr cx (["/"] :: ls) = parse_bin_expr cx (mk_binary @{term boogie_div}) ls
- | parse_expr cx (["%"] :: ls) = parse_bin_expr cx (mk_binary @{term boogie_mod}) ls
+ parse_bin_expr cx (mk_binary \<^term>\<open>(<=) :: int => _\<close> o swap) ls
+ | parse_expr cx (["+"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(+) :: int => _\<close>) ls
+ | parse_expr cx (["-"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(-) :: int => _\<close>) ls
+ | parse_expr cx (["*"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>(*) :: int => _\<close>) ls
+ | parse_expr cx (["/"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>boogie_div\<close>) ls
+ | parse_expr cx (["%"] :: ls) = parse_bin_expr cx (mk_binary \<^term>\<open>boogie_mod\<close>) ls
| parse_expr cx (["select", n] :: ls) =
repeat (parse_expr cx) n ls
|>> (fn ts => hd ts $ HOLogic.mk_tuple (tl ts))
@@ -266,7 +266,7 @@
(* Isar command *)
val _ =
- Outer_Syntax.command @{command_keyword boogie_file}
+ Outer_Syntax.command \<^command_keyword>\<open>boogie_file\<close>
"prove verification condition from .b2i file"
(Resources.provide_parse_files "boogie_file" >> (fn files =>
Toplevel.theory (fn thy =>
--- a/src/HOL/SPARK/Manual/Reference.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/SPARK/Manual/Reference.thy Sat Jan 05 17:24:33 2019 +0100
@@ -23,9 +23,9 @@
\label{sec:spark-commands}
This section describes the syntax and effect of each of the commands provided
by HOL-\SPARK{}.
-@{rail \<open>
+\<^rail>\<open>
@'spark_open' name ('(' name ')')?
-\<close>}
+\<close>
Opens a new \SPARK{} verification environment and loads a \texttt{*.siv} file with VCs.
Alternatively, \texttt{*.vcg} files can be loaded using \isa{\isacommand{spark\_open\_vcg}}.
The corresponding \texttt{*.fdl} and \texttt{*.rls}
@@ -38,9 +38,9 @@
format \texttt{$p_1$\_\_$\ldots$\_\_$p_n$}. When working with projects consisting of several
packages, this is necessary in order for the verification environment to be able to map proof
functions and types defined in Isabelle to their \SPARK{} counterparts.
-@{rail \<open>
+\<^rail>\<open>
@'spark_proof_functions' ((name '=' term)+)
-\<close>}
+\<close>
Associates a proof function with the given name to a term. The name should be the full name
of the proof function as it appears in the \texttt{*.fdl} file, including the package prefix.
This command can be used both inside and outside a verification environment. The latter
@@ -48,11 +48,11 @@
or packages, whereas the former allows the given term to refer to the types generated
by \isa{\isacommand{spark\_open}} for record or enumeration types specified in the
\texttt{*.fdl} file.
-@{rail \<open>
+\<^rail>\<open>
@'spark_types' ((name '=' type (mapping?))+)
;
mapping: '('((name '=' name)+',')')'
-\<close>}
+\<close>
Associates a \SPARK{} type with the given name with an Isabelle type. This command can
only be used outside a verification environment. The given type must be either a record
or a datatype, where the names of fields or constructors must either match those of the
@@ -64,24 +64,24 @@
using Isabelle's commands for defining records or datatypes. Having introduced the
types, the proof functions can be defined in Isabelle. Finally, both the proof
functions and the types can be associated with their \SPARK{} counterparts.
-@{rail \<open>
+\<^rail>\<open>
@'spark_status' (('(proved)' | '(unproved)')?)
-\<close>}
+\<close>
Outputs the variables declared in the \texttt{*.fdl} file, the rules declared in
the \texttt{*.rls} file, and all VCs, together with their status (proved, unproved).
The output can be restricted to the proved or unproved VCs by giving the corresponding
option to the command.
-@{rail \<open>
+\<^rail>\<open>
@'spark_vc' name
-\<close>}
+\<close>
Initiates the proof of the VC with the given name. Similar to the standard
\isa{\isacommand{lemma}} or \isa{\isacommand{theorem}} commands, this command
must be followed by a sequence of proof commands. The command introduces the
hypotheses \texttt{H1} \dots \texttt{H$n$}, as well as the identifiers
\texttt{?C1} \dots \texttt{?C$m$} corresponding to the conclusions of the VC.
-@{rail \<open>
+\<^rail>\<open>
@'spark_end' '(incomplete)'?
-\<close>}
+\<close>
Closes the current verification environment. Unless the \texttt{incomplete}
option is given, all VCs must have been proved,
otherwise the command issues an error message. As a side effect, the command
@@ -101,7 +101,7 @@
subsection \<open>Integers\<close>
text \<open>
-The FDL type \texttt{integer} is modelled by the Isabelle type @{typ int}.
+The FDL type \texttt{integer} is modelled by the Isabelle type \<^typ>\<open>int\<close>.
While the FDL \texttt{mod} operator behaves in the same way as its Isabelle
counterpart, this is not the case for the \texttt{div} operator. As has already
been mentioned in \secref{sec:proving-vcs}, the \texttt{div} operator of \SPARK{}
@@ -146,7 +146,7 @@
\end{figure}
The bitwise logical operators of \SPARK{} and FDL are modelled by the operators
\<open>AND\<close>, \<open>OR\<close> and \<open>XOR\<close> from Isabelle's \<open>Word\<close> library,
-all of which have type @{typ "int \<Rightarrow> int \<Rightarrow> int"}. A list of properties of these
+all of which have type \<^typ>\<open>int \<Rightarrow> int \<Rightarrow> int\<close>. A list of properties of these
operators that are useful in proofs about \SPARK{} programs are shown in
\figref{fig:bitwise}
\<close>
@@ -163,7 +163,7 @@
\normalsize
\isacommand{datatype}\ $t$\ =\ $e_1$\ $\mid$\ $e_2$\ $\mid$\ \dots\ $\mid$\ $e_n$
\end{isabelle}
-The HOL-\SPARK{} environment defines a type class @{class spark_enum} that captures
+The HOL-\SPARK{} environment defines a type class \<^class>\<open>spark_enum\<close> that captures
the characteristic properties of all enumeration types. It provides the following
polymorphic functions and constants for all types \<open>'a\<close> of this type class:
\begin{flushleft}
@@ -174,14 +174,14 @@
@{term_type [mode=my_constrain] first_el} \\
@{term_type [mode=my_constrain] last_el}
\end{flushleft}
-In addition, @{class spark_enum} is a subclass of the @{class linorder} type class,
+In addition, \<^class>\<open>spark_enum\<close> is a subclass of the \<^class>\<open>linorder\<close> type class,
which allows the comparison operators \<open><\<close> and \<open>\<le>\<close> to be used on
enumeration types. The polymorphic operations shown above enjoy a number of
generic properties that hold for all enumeration types. These properties are
listed in \figref{fig:enum-generic-properties}.
Moreover, \figref{fig:enum-specific-properties} shows a list of properties
that are specific to each enumeration type $t$, such as the characteristic
-equations for @{term val} and @{term pos}.
+equations for \<^term>\<open>val\<close> and \<^term>\<open>pos\<close>.
\begin{figure}[t]
\begin{center}
\small
--- a/src/HOL/Statespace/DistinctTreeProver.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/DistinctTreeProver.thy Sat Jan 05 17:24:33 2019 +0100
@@ -44,8 +44,8 @@
set_of l \<inter> set_of r = {} \<and>
all_distinct l \<and> all_distinct r)"
-text \<open>Given a binary tree @{term "t"} for which
-@{const all_distinct} holds, given two different nodes contained in the tree,
+text \<open>Given a binary tree \<^term>\<open>t\<close> for which
+\<^const>\<open>all_distinct\<close> holds, given two different nodes contained in the tree,
we want to write a ML function that generates a logarithmic
certificate that the content of the nodes is distinct. We use the
following lemmas to achieve this.\<close>
@@ -85,16 +85,16 @@
text \<open>When deriving a state space from other ones, we create a new
name tree which contains all the names of the parent state spaces and
-assume the predicate @{const all_distinct}. We then prove that the new
+assume the predicate \<^const>\<open>all_distinct\<close>. We then prove that the new
locale interprets all parent locales. Hence we have to show that the
new distinctness assumption on all names implies the distinctness
assumptions of the parent locales. This proof is implemented in ML. We
do this efficiently by defining a kind of containment check of trees
by ``subtraction''. We subtract the parent tree from the new tree. If
-this succeeds we know that @{const all_distinct} of the new tree
-implies @{const all_distinct} of the parent tree. The resulting
-certificate is of the order @{term "n * log(m)"} where @{term "n"} is
-the size of the (smaller) parent tree and @{term "m"} the size of the
+this succeeds we know that \<^const>\<open>all_distinct\<close> of the new tree
+implies \<^const>\<open>all_distinct\<close> of the parent tree. The resulting
+certificate is of the order \<^term>\<open>n * log(m)\<close> where \<^term>\<open>n\<close> is
+the size of the (smaller) parent tree and \<^term>\<open>m\<close> the size of the
(bigger) new tree.\<close>
--- a/src/HOL/Statespace/StateSpaceEx.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/StateSpaceEx.thy Sat Jan 05 17:24:33 2019 +0100
@@ -32,9 +32,9 @@
text \<open>\noindent This resembles a \<^theory_text>\<open>record\<close> definition,
but introduces sophisticated locale
infrastructure instead of HOL type schemes. The resulting context
-postulates two distinct names @{term "n"} and @{term "b"} and
+postulates two distinct names \<^term>\<open>n\<close> and \<^term>\<open>b\<close> and
projection~/ injection functions that convert from abstract values to
-@{typ "nat"} and \<open>bool\<close>. The logical content of the locale is:\<close>
+\<^typ>\<open>nat\<close> and \<open>bool\<close>. The logical content of the locale is:\<close>
locale vars' =
fixes n::'name and b::'name
@@ -46,7 +46,7 @@
fixes project_bool::"'value \<Rightarrow> bool" and inject_bool::"bool \<Rightarrow> 'value"
assumes "\<And>b. project_bool (inject_bool b) = b"
-text \<open>\noindent The HOL predicate @{const "distinct"} describes
+text \<open>\noindent The HOL predicate \<^const>\<open>distinct\<close> describes
distinctness of all names in the context. Locale \<open>vars'\<close>
defines the raw logical content that is defined in the state space
locale. We also maintain non-logical context information to support
@@ -78,7 +78,7 @@
lemma (in vars) foo: "s<n := 2>\<cdot>b = s\<cdot>b" by simp
text \<open>\noindent Here the simplifier was able to refer to
-distinctness of @{term "b"} and @{term "n"} to solve the equation.
+distinctness of \<^term>\<open>b\<close> and \<^term>\<open>n\<close> to solve the equation.
The resulting lemma is also recorded in locale \<open>vars\<close> for
later use and is automatically propagated to all its interpretations.
Here is another example:\<close>
@@ -87,23 +87,22 @@
text \<open>\noindent The state space \<open>varsX\<close> imports two copies
of the state space \<open>vars\<close>, where one has the variables renamed
-to upper-case letters, and adds another variable @{term "x"} of type
-@{typ "'a"}. This type is fixed inside the state space but may get
+to upper-case letters, and adds another variable \<^term>\<open>x\<close> of type
+\<^typ>\<open>'a\<close>. This type is fixed inside the state space but may get
instantiated later on, analogous to type parameters of an ML-functor.
The distinctness assumption is now \<open>distinct [N, B, n, b, x]\<close>,
-from this we can derive both @{term "distinct [N,B]"} and @{term
-"distinct [n,b]"}, the distinction assumptions for the two versions of
+from this we can derive both \<^term>\<open>distinct [N,B]\<close> and \<^term>\<open>distinct [n,b]\<close>, the distinction assumptions for the two versions of
locale \<open>vars\<close> above. Moreover we have all necessary
projection and injection assumptions available. These assumptions
-together allow us to establish state space @{term "varsX"} as an
-interpretation of both instances of locale @{term "vars"}. Hence we
+together allow us to establish state space \<^term>\<open>varsX\<close> as an
+interpretation of both instances of locale \<^term>\<open>vars\<close>. Hence we
inherit both variants of theorem \<open>foo\<close>: \<open>s\<langle>N := 2\<rangle>\<cdot>B =
s\<cdot>B\<close> as well as \<open>s\<langle>n := 2\<rangle>\<cdot>b = s\<cdot>b\<close>. These are immediate
consequences of the locale interpretation action.
The declarations for syntax and the distinctness theorems also observe
the morphisms generated by the locale package due to the renaming
-@{term "n = N"}:\<close>
+\<^term>\<open>n = N\<close>:\<close>
lemma (in varsX) foo: "s\<langle>N := 2\<rangle>\<cdot>x = s\<cdot>x" by simp
--- a/src/HOL/Statespace/StateSpaceSyntax.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/StateSpaceSyntax.thy Sat Jan 05 17:24:33 2019 +0100
@@ -23,15 +23,15 @@
parse_translation
\<open>
- [(@{syntax_const "_statespace_lookup"}, StateSpace.lookup_tr),
- (@{syntax_const "_statespace_update"}, StateSpace.update_tr)]
+ [(\<^syntax_const>\<open>_statespace_lookup\<close>, StateSpace.lookup_tr),
+ (\<^syntax_const>\<open>_statespace_update\<close>, StateSpace.update_tr)]
\<close>
print_translation
\<open>
- [(@{const_syntax lookup}, StateSpace.lookup_tr'),
- (@{const_syntax update}, StateSpace.update_tr')]
+ [(\<^const_syntax>\<open>lookup\<close>, StateSpace.lookup_tr'),
+ (\<^const_syntax>\<open>update\<close>, StateSpace.update_tr')]
\<close>
end
--- a/src/HOL/Statespace/distinct_tree_prover.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/distinct_tree_prover.ML Sat Jan 05 17:24:33 2019 +0100
@@ -28,9 +28,9 @@
datatype direction = Left | Right;
-fun treeT T = Type (@{type_name tree}, [T]);
+fun treeT T = Type (\<^type_name>\<open>tree\<close>, [T]);
-fun mk_tree' e T n [] = Const (@{const_name Tip}, treeT T)
+fun mk_tree' e T n [] = Const (\<^const_name>\<open>Tip\<close>, treeT T)
| mk_tree' e T n xs =
let
val m = (n - 1) div 2;
@@ -38,20 +38,20 @@
val l = mk_tree' e T m xsl;
val r = mk_tree' e T (n-(m+1)) xsr;
in
- Const (@{const_name Node}, treeT T --> T --> HOLogic.boolT--> treeT T --> treeT T) $
- l $ e x $ @{term False} $ r
+ Const (\<^const_name>\<open>Node\<close>, treeT T --> T --> HOLogic.boolT--> treeT T --> treeT T) $
+ l $ e x $ \<^term>\<open>False\<close> $ r
end
fun mk_tree e T xs = mk_tree' e T (length xs) xs;
-fun dest_tree (Const (@{const_name Tip}, _)) = []
- | dest_tree (Const (@{const_name Node}, _) $ l $ e $ _ $ r) = dest_tree l @ e :: dest_tree r
+fun dest_tree (Const (\<^const_name>\<open>Tip\<close>, _)) = []
+ | dest_tree (Const (\<^const_name>\<open>Node\<close>, _) $ l $ e $ _ $ r) = dest_tree l @ e :: dest_tree r
| dest_tree t = raise TERM ("dest_tree", [t]);
-fun lin_find_tree e (Const (@{const_name Tip}, _)) = NONE
- | lin_find_tree e (Const (@{const_name Node}, _) $ l $ x $ _ $ r) =
+fun lin_find_tree e (Const (\<^const_name>\<open>Tip\<close>, _)) = NONE
+ | lin_find_tree e (Const (\<^const_name>\<open>Node\<close>, _) $ l $ x $ _ $ r) =
if e aconv x
then SOME []
else
@@ -63,8 +63,8 @@
| NONE => NONE))
| lin_find_tree e t = raise TERM ("find_tree: input not a tree", [t])
-fun bin_find_tree order e (Const (@{const_name Tip}, _)) = NONE
- | bin_find_tree order e (Const (@{const_name Node}, _) $ l $ x $ _ $ r) =
+fun bin_find_tree order e (Const (\<^const_name>\<open>Tip\<close>, _)) = NONE
+ | bin_find_tree order e (Const (\<^const_name>\<open>Node\<close>, _) $ l $ x $ _ $ r) =
(case order (e, x) of
EQUAL => SOME []
| LESS => Option.map (cons Left) (bin_find_tree order e l)
@@ -293,7 +293,7 @@
in (dest_TVar (Thm.typ_of alpha), #1 (dest_Var (Thm.term_of ct))) end;
in
-fun subtractProver ctxt (Const (@{const_name Tip}, T)) ct dist_thm =
+fun subtractProver ctxt (Const (\<^const_name>\<open>Tip\<close>, T)) ct dist_thm =
let
val ct' = dist_thm |> Thm.cprop_of |> Thm.dest_comb |> #2 |> Thm.dest_comb |> #2;
val [alphaI] = #2 (dest_Type T);
@@ -302,7 +302,7 @@
([(alpha, Thm.ctyp_of ctxt alphaI)],
[((v, treeT alphaI), ct')]) @{thm subtract_Tip}
end
- | subtractProver ctxt (Const (@{const_name Node}, nT) $ l $ x $ d $ r) ct dist_thm =
+ | subtractProver ctxt (Const (\<^const_name>\<open>Node\<close>, nT) $ l $ x $ d $ r) ct dist_thm =
let
val ct' = dist_thm |> Thm.cprop_of |> Thm.dest_comb |> #2 |> Thm.dest_comb |> #2;
val (_, [cl, _, _, cr]) = Drule.strip_comb ct;
@@ -342,8 +342,8 @@
fun distinctTree_tac names ctxt = SUBGOAL (fn (goal, i) =>
(case goal of
- Const (@{const_name Trueprop}, _) $
- (Const (@{const_name Not}, _) $ (Const (@{const_name HOL.eq}, _) $ x $ y)) =>
+ Const (\<^const_name>\<open>Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>Not\<close>, _) $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ x $ y)) =>
(case get_fst_success (neq_x_y ctxt x y) names of
SOME neq => resolve_tac ctxt [neq] i
| NONE => no_tac)
@@ -353,11 +353,11 @@
mk_solver "distinctFieldSolver" (distinctTree_tac names);
fun distinct_simproc names =
- Simplifier.make_simproc @{context} "DistinctTreeProver.distinct_simproc"
- {lhss = [@{term "x = y"}],
+ Simplifier.make_simproc \<^context> "DistinctTreeProver.distinct_simproc"
+ {lhss = [\<^term>\<open>x = y\<close>],
proc = fn _ => fn ctxt => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name HOL.eq}, _) $ x $ y =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, _) $ x $ y =>
Option.map (fn neq => @{thm neq_to_eq_False} OF [neq])
(get_fst_success (neq_x_y ctxt x y) names)
| _ => NONE)};
--- a/src/HOL/Statespace/state_fun.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/state_fun.ML Sat Jan 05 17:24:33 2019 +0100
@@ -21,8 +21,8 @@
structure StateFun: STATE_FUN =
struct
-val lookupN = @{const_name StateFun.lookup};
-val updateN = @{const_name StateFun.update};
+val lookupN = \<^const_name>\<open>StateFun.lookup\<close>;
+val updateN = \<^const_name>\<open>StateFun.update\<close>;
val sel_name = HOLogic.dest_string;
@@ -42,20 +42,20 @@
val conj_True = @{thm conj_True};
val conj_cong = @{thm conj_cong};
-fun isFalse (Const (@{const_name False}, _)) = true
+fun isFalse (Const (\<^const_name>\<open>False\<close>, _)) = true
| isFalse _ = false;
-fun isTrue (Const (@{const_name True}, _)) = true
+fun isTrue (Const (\<^const_name>\<open>True\<close>, _)) = true
| isTrue _ = false;
in
val lazy_conj_simproc =
- Simplifier.make_simproc @{context} "lazy_conj_simp"
- {lhss = [@{term "P & Q"}],
+ Simplifier.make_simproc \<^context> "lazy_conj_simp"
+ {lhss = [\<^term>\<open>P & Q\<close>],
proc = fn _ => fn ctxt => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name HOL.conj},_) $ P $ Q =>
+ Const (\<^const_name>\<open>HOL.conj\<close>,_) $ P $ Q =>
let
val P_P' = Simplifier.rewrite ctxt (Thm.cterm_of ctxt P);
val P' = P_P' |> Thm.prop_of |> Logic.dest_equals |> #2;
@@ -84,7 +84,7 @@
end;
val lookup_ss =
- simpset_of (put_simpset HOL_basic_ss @{context}
+ simpset_of (put_simpset HOL_basic_ss \<^context>
addsimps (@{thms list.inject} @ @{thms char.inject}
@ @{thms list.distinct} @ @{thms simp_thms}
@ [@{thm StateFun.lookup_update_id_same}, @{thm StateFun.id_id_cancel},
@@ -94,7 +94,7 @@
|> fold Simplifier.add_cong @{thms block_conj_cong});
val ex_lookup_ss =
- simpset_of (put_simpset HOL_ss @{context} addsimps @{thms StateFun.ex_id});
+ simpset_of (put_simpset HOL_ss \<^context> addsimps @{thms StateFun.ex_id});
structure Data = Generic_Data
@@ -109,41 +109,41 @@
val _ = Theory.setup (Context.theory_map (Data.put (lookup_ss, ex_lookup_ss, false)));
val lookup_simproc =
- Simplifier.make_simproc @{context} "lookup_simp"
- {lhss = [@{term "lookup d n (update d' c m v s)"}],
+ Simplifier.make_simproc \<^context> "lookup_simp"
+ {lhss = [\<^term>\<open>lookup d n (update d' c m v s)\<close>],
proc = fn _ => fn ctxt => fn ct =>
- (case Thm.term_of ct of (Const (@{const_name StateFun.lookup}, lT) $ destr $ n $
- (s as Const (@{const_name StateFun.update}, uT) $ _ $ _ $ _ $ _ $ _)) =>
+ (case Thm.term_of ct of (Const (\<^const_name>\<open>StateFun.lookup\<close>, lT) $ destr $ n $
+ (s as Const (\<^const_name>\<open>StateFun.update\<close>, uT) $ _ $ _ $ _ $ _ $ _)) =>
(let
val (_::_::_::_::sT::_) = binder_types uT;
val mi = Term.maxidx_of_term (Thm.term_of ct);
- fun mk_upds (Const (@{const_name StateFun.update}, uT) $ d' $ c $ m $ v $ s) =
+ fun mk_upds (Const (\<^const_name>\<open>StateFun.update\<close>, uT) $ d' $ c $ m $ v $ s) =
let
val (_ :: _ :: _ :: fT :: _ :: _) = binder_types uT;
val vT = domain_type fT;
val (s', cnt) = mk_upds s;
val (v', cnt') =
(case v of
- Const (@{const_name K_statefun}, KT) $ v'' =>
+ Const (\<^const_name>\<open>K_statefun\<close>, KT) $ v'' =>
(case v'' of
- (Const (@{const_name StateFun.lookup}, _) $
- (d as (Const (@{const_name Fun.id}, _))) $ n' $ _) =>
+ (Const (\<^const_name>\<open>StateFun.lookup\<close>, _) $
+ (d as (Const (\<^const_name>\<open>Fun.id\<close>, _))) $ n' $ _) =>
if d aconv c andalso n aconv m andalso m aconv n'
then (v,cnt) (* Keep value so that
lookup_update_id_same can fire *)
else
- (Const (@{const_name StateFun.K_statefun}, KT) $
+ (Const (\<^const_name>\<open>StateFun.K_statefun\<close>, KT) $
Var (("v", cnt), vT), cnt + 1)
| _ =>
- (Const (@{const_name StateFun.K_statefun}, KT) $
+ (Const (\<^const_name>\<open>StateFun.K_statefun\<close>, KT) $
Var (("v", cnt), vT), cnt + 1))
| _ => (v, cnt));
- in (Const (@{const_name StateFun.update}, uT) $ d' $ c $ m $ v' $ s', cnt') end
+ in (Const (\<^const_name>\<open>StateFun.update\<close>, uT) $ d' $ c $ m $ v' $ s', cnt') end
| mk_upds s = (Var (("s", mi + 1), sT), mi + 2);
val ct =
Thm.cterm_of ctxt
- (Const (@{const_name StateFun.lookup}, lT) $ destr $ n $ fst (mk_upds s));
+ (Const (\<^const_name>\<open>StateFun.lookup\<close>, lT) $ destr $ n $ fst (mk_upds s));
val basic_ss = #1 (Data.get (Context.Proof ctxt));
val ctxt' = ctxt |> Config.put simp_depth_limit 100 |> put_simpset basic_ss;
val thm = Simplifier.rewrite ctxt' ct;
@@ -160,7 +160,7 @@
val meta_ext = @{thm StateFun.meta_ext};
val ss' =
- simpset_of (put_simpset HOL_ss @{context} addsimps
+ simpset_of (put_simpset HOL_ss \<^context> addsimps
(@{thm StateFun.update_apply} :: @{thm Fun.o_apply} :: @{thms list.inject} @ @{thms char.inject}
@ @{thms list.distinct})
addsimprocs [lazy_conj_simproc, StateSpace.distinct_simproc]
@@ -169,11 +169,11 @@
in
val update_simproc =
- Simplifier.make_simproc @{context} "update_simp"
- {lhss = [@{term "update d c n v s"}],
+ Simplifier.make_simproc \<^context> "update_simp"
+ {lhss = [\<^term>\<open>update d c n v s\<close>],
proc = fn _ => fn ctxt => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name StateFun.update}, uT) $ _ $ _ $ _ $ _ $ _ =>
+ Const (\<^const_name>\<open>StateFun.update\<close>, uT) $ _ $ _ $ _ $ _ $ _ =>
let
val (_ :: _ :: _ :: _ :: sT :: _) = binder_types uT;
(*"('v => 'a1) => ('a2 => 'v) => 'n => ('a1 => 'a2) => ('n => 'v) => ('n => 'v)"*)
@@ -181,7 +181,7 @@
fun mk_comp f fT g gT =
let val T = domain_type fT --> range_type gT
- in (Const (@{const_name Fun.comp}, gT --> fT --> T) $ g $ f, T) end;
+ in (Const (\<^const_name>\<open>Fun.comp\<close>, gT --> fT --> T) $ g $ f, T) end;
fun mk_comps fs = foldl1 (fn ((f, fT), (g, gT)) => mk_comp g gT f fT) fs;
@@ -207,7 +207,7 @@
* updates again, the optimised term is constructed.
*)
fun mk_updterm already
- ((upd as Const (@{const_name StateFun.update}, uT)) $ d $ c $ n $ v $ s) =
+ ((upd as Const (\<^const_name>\<open>StateFun.update\<close>, uT)) $ d $ c $ n $ v $ s) =
let
fun rest already = mk_updterm already;
val (dT :: cT :: nT :: vT :: sT :: _) = binder_types uT;
@@ -234,7 +234,7 @@
val ((c', c'T), f', (d', d'T)) = merge_upds n comps';
val vT' = range_type d'T --> domain_type c'T;
val upd' =
- Const (@{const_name StateFun.update},
+ Const (\<^const_name>\<open>StateFun.update\<close>,
d'T --> c'T --> nT --> vT' --> sT --> sT);
in
(upd $ d $ c $ n $ kb $ trm, upd' $ d' $ c' $ n $ f' $ trm', kv :: vars,
@@ -274,8 +274,8 @@
in
val ex_lookup_eq_simproc =
- Simplifier.make_simproc @{context} "ex_lookup_eq_simproc"
- {lhss = [@{term "Ex t"}],
+ Simplifier.make_simproc \<^context> "ex_lookup_eq_simproc"
+ {lhss = [\<^term>\<open>Ex t\<close>],
proc = fn _ => fn ctxt => fn ct =>
let
val thy = Proof_Context.theory_of ctxt;
@@ -294,7 +294,7 @@
val x' = if not (Term.is_dependent x) then Bound 1 else raise TERM ("", [x]);
val n' = if not (Term.is_dependent n) then Bound 2 else raise TERM ("", [n]);
val sel' = lo $ d $ n' $ s;
- in (Const (@{const_name HOL.eq}, Teq) $ sel' $ x', hd (binder_types Teq), nT, swap) end;
+ in (Const (\<^const_name>\<open>HOL.eq\<close>, Teq) $ sel' $ x', hd (binder_types Teq), nT, swap) end;
fun dest_state (s as Bound 0) = s
| dest_state (s as (Const (sel, sT) $ Bound 0)) =
@@ -303,22 +303,22 @@
| dest_state s = raise TERM ("StateFun.ex_lookup_eq_simproc: not a record slector", [s]);
fun dest_sel_eq
- (Const (@{const_name HOL.eq}, Teq) $
- ((lo as (Const (@{const_name StateFun.lookup}, lT))) $ d $ n $ s) $ X) =
+ (Const (\<^const_name>\<open>HOL.eq\<close>, Teq) $
+ ((lo as (Const (\<^const_name>\<open>StateFun.lookup\<close>, lT))) $ d $ n $ s) $ X) =
(false, Teq, lT, lo, d, n, X, dest_state s)
| dest_sel_eq
- (Const (@{const_name HOL.eq}, Teq) $ X $
- ((lo as (Const (@{const_name StateFun.lookup}, lT))) $ d $ n $ s)) =
+ (Const (\<^const_name>\<open>HOL.eq\<close>, Teq) $ X $
+ ((lo as (Const (\<^const_name>\<open>StateFun.lookup\<close>, lT))) $ d $ n $ s)) =
(true, Teq, lT, lo, d, n, X, dest_state s)
| dest_sel_eq _ = raise TERM ("", []);
in
(case t of
- Const (@{const_name Ex}, Tex) $ Abs (s, T, t) =>
+ Const (\<^const_name>\<open>Ex\<close>, Tex) $ Abs (s, T, t) =>
(let
val (eq, eT, nT, swap) = mkeq (dest_sel_eq t) 0;
val prop =
Logic.list_all ([("n", nT), ("x", eT)],
- Logic.mk_equals (Const (@{const_name Ex}, Tex) $ Abs (s, T, eq), @{term True}));
+ Logic.mk_equals (Const (\<^const_name>\<open>Ex\<close>, Tex) $ Abs (s, T, eq), \<^term>\<open>True\<close>));
val thm = Drule.export_without_context (prove prop);
val thm' = if swap then swap_ex_eq OF [thm] else thm
in SOME thm' end handle TERM _ => NONE)
@@ -342,7 +342,7 @@
fun is_datatype thy = is_some o BNF_LFP_Compat.get_info thy [BNF_LFP_Compat.Keep_Nesting];
-fun mk_map @{type_name List.list} = Syntax.const @{const_name List.map}
+fun mk_map \<^type_name>\<open>List.list\<close> = Syntax.const \<^const_name>\<open>List.map\<close>
| mk_map n = Syntax.const ("StateFun.map_" ^ Long_Name.base_name n);
fun gen_constr_destr comp prfx thy (Type (T, [])) =
@@ -369,19 +369,19 @@
Syntax.const (deco prfx (implode (map mkName argTs) ^ mkUpper (Long_Name.base_name T)))
| gen_constr_destr thy _ _ T = raise (TYPE ("StateFun.gen_constr_destr", [T], []));
-val mk_constr = gen_constr_destr (fn a => fn b => Syntax.const @{const_name Fun.comp} $ a $ b) "";
-val mk_destr = gen_constr_destr (fn a => fn b => Syntax.const @{const_name Fun.comp} $ b $ a) "the_";
+val mk_constr = gen_constr_destr (fn a => fn b => Syntax.const \<^const_name>\<open>Fun.comp\<close> $ a $ b) "";
+val mk_destr = gen_constr_destr (fn a => fn b => Syntax.const \<^const_name>\<open>Fun.comp\<close> $ b $ a) "the_";
val _ =
Theory.setup
- (Attrib.setup @{binding statefun_simp}
+ (Attrib.setup \<^binding>\<open>statefun_simp\<close>
(Scan.succeed (Thm.declaration_attribute (fn thm => fn context =>
let
val ctxt = Context.proof_of context;
val (lookup_ss, ex_lookup_ss, simprocs_active) = Data.get context;
val (lookup_ss', ex_lookup_ss') =
(case Thm.concl_of thm of
- (_ $ ((Const (@{const_name Ex}, _) $ _))) =>
+ (_ $ ((Const (\<^const_name>\<open>Ex\<close>, _) $ _))) =>
(lookup_ss, simpset_map ctxt (Simplifier.add_simp thm) ex_lookup_ss)
| _ =>
(simpset_map ctxt (Simplifier.add_simp thm) lookup_ss, ex_lookup_ss));
--- a/src/HOL/Statespace/state_space.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Statespace/state_space.ML Sat Jan 05 17:24:33 2019 +0100
@@ -200,9 +200,9 @@
fun distinctTree_tac ctxt = SUBGOAL (fn (goal, i) =>
(case goal of
- Const (@{const_name Trueprop}, _) $
- (Const (@{const_name Not}, _) $
- (Const (@{const_name HOL.eq}, _) $ (x as Free _) $ (y as Free _))) =>
+ Const (\<^const_name>\<open>Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>Not\<close>, _) $
+ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (x as Free _) $ (y as Free _))) =>
(case neq_x_y ctxt x y of
SOME neq => resolve_tac ctxt [neq] i
| NONE => no_tac)
@@ -211,11 +211,11 @@
val distinctNameSolver = mk_solver "distinctNameSolver" distinctTree_tac;
val distinct_simproc =
- Simplifier.make_simproc @{context} "StateSpace.distinct_simproc"
- {lhss = [@{term "x = y"}],
+ Simplifier.make_simproc \<^context> "StateSpace.distinct_simproc"
+ {lhss = [\<^term>\<open>x = y\<close>],
proc = fn _ => fn ctxt => fn ct =>
(case Thm.term_of ct of
- Const (@{const_name HOL.eq},_) $ (x as Free _) $ (y as Free _) =>
+ Const (\<^const_name>\<open>HOL.eq\<close>,_) $ (x as Free _) $ (y as Free _) =>
Option.map (fn neq => DistinctTreeProver.neq_to_eq_False OF [neq])
(neq_x_y ctxt x y)
| _ => NONE)};
@@ -272,7 +272,7 @@
val assume =
((Binding.name dist_thm_name, [attr]),
[(HOLogic.Trueprop $
- (Const (@{const_name all_distinct}, Type (@{type_name tree}, [nameT]) --> HOLogic.boolT) $
+ (Const (\<^const_name>\<open>all_distinct\<close>, Type (\<^type_name>\<open>tree\<close>, [nameT]) --> HOLogic.boolT) $
DistinctTreeProver.mk_tree (fn n => Free (n, nameT)) nameT
(sort fast_string_ord all_comps)), [])]);
in
@@ -562,12 +562,12 @@
fun gen_lookup_tr ctxt s n =
(case get_comp (Context.Proof ctxt) n of
SOME (T, _) =>
- Syntax.const @{const_name StateFun.lookup} $
+ Syntax.const \<^const_name>\<open>StateFun.lookup\<close> $
Syntax.free (project_name T) $ Syntax.free n $ s
| NONE =>
if get_silent (Context.Proof ctxt)
- then Syntax.const @{const_name StateFun.lookup} $
- Syntax.const @{const_syntax undefined} $ Syntax.free n $ s
+ then Syntax.const \<^const_name>\<open>StateFun.lookup\<close> $
+ Syntax.const \<^const_syntax>\<open>undefined\<close> $ Syntax.free n $ s
else raise TERM ("StateSpace.gen_lookup_tr: component " ^ quote n ^ " not defined", []));
fun lookup_tr ctxt [s, x] =
@@ -588,19 +588,19 @@
fun gen_update_tr id ctxt n v s =
let
- fun pname T = if id then @{const_name Fun.id} else project_name T;
- fun iname T = if id then @{const_name Fun.id} else inject_name T;
+ fun pname T = if id then \<^const_name>\<open>Fun.id\<close> else project_name T;
+ fun iname T = if id then \<^const_name>\<open>Fun.id\<close> else inject_name T;
in
(case get_comp (Context.Proof ctxt) n of
SOME (T, _) =>
- Syntax.const @{const_name StateFun.update} $
+ Syntax.const \<^const_name>\<open>StateFun.update\<close> $
Syntax.free (pname T) $ Syntax.free (iname T) $
- Syntax.free n $ (Syntax.const @{const_name K_statefun} $ v) $ s
+ Syntax.free n $ (Syntax.const \<^const_name>\<open>K_statefun\<close> $ v) $ s
| NONE =>
if get_silent (Context.Proof ctxt) then
- Syntax.const @{const_name StateFun.update} $
- Syntax.const @{const_syntax undefined} $ Syntax.const @{const_syntax undefined} $
- Syntax.free n $ (Syntax.const @{const_name K_statefun} $ v) $ s
+ Syntax.const \<^const_name>\<open>StateFun.update\<close> $
+ Syntax.const \<^const_syntax>\<open>undefined\<close> $ Syntax.const \<^const_syntax>\<open>undefined\<close> $
+ Syntax.free n $ (Syntax.const \<^const_name>\<open>K_statefun\<close> $ v) $ s
else raise TERM ("StateSpace.gen_update_tr: component " ^ n ^ " not defined", []))
end;
@@ -611,7 +611,7 @@
fun update_tr' ctxt
[_ $ Free (prj, _), _ $ Free (inj, _), n as (_ $ Free (name, _)), (Const (k, _) $ v), s] =
- if Long_Name.base_name k = Long_Name.base_name @{const_name K_statefun} then
+ if Long_Name.base_name k = Long_Name.base_name \<^const_name>\<open>K_statefun\<close> then
(case get_comp (Context.Proof ctxt) name of
SOME (T, _) =>
if inj = inject_name T andalso prj = project_name T then
@@ -630,15 +630,15 @@
val type_insts =
Parse.typ >> single ||
- @{keyword "("} |-- Parse.!!! (Parse.list1 Parse.typ --| @{keyword ")"})
+ \<^keyword>\<open>(\<close> |-- Parse.!!! (Parse.list1 Parse.typ --| \<^keyword>\<open>)\<close>)
-val comp = Parse.name -- (@{keyword "::"} |-- Parse.!!! Parse.typ);
+val comp = Parse.name -- (\<^keyword>\<open>::\<close> |-- Parse.!!! Parse.typ);
fun plus1_unless test scan =
- scan ::: Scan.repeat (@{keyword "+"} |-- Scan.unless test (Parse.!!! scan));
+ scan ::: Scan.repeat (\<^keyword>\<open>+\<close> |-- Scan.unless test (Parse.!!! scan));
-val mapsto = @{keyword "="};
+val mapsto = \<^keyword>\<open>=\<close>;
val rename = Parse.name -- (mapsto |-- Parse.name);
-val renames = Scan.optional (@{keyword "["} |-- Parse.!!! (Parse.list1 rename --| @{keyword "]"})) [];
+val renames = Scan.optional (\<^keyword>\<open>[\<close> |-- Parse.!!! (Parse.list1 rename --| \<^keyword>\<open>]\<close>)) [];
val parent =
Parse_Spec.locale_prefix --
@@ -649,12 +649,12 @@
val statespace_decl =
Parse.type_args -- Parse.name --
- (@{keyword "="} |--
+ (\<^keyword>\<open>=\<close> |--
((Scan.repeat1 comp >> pair []) ||
(plus1_unless comp parent --
- Scan.optional (@{keyword "+"} |-- Parse.!!! (Scan.repeat1 comp)) [])));
+ Scan.optional (\<^keyword>\<open>+\<close> |-- Parse.!!! (Scan.repeat1 comp)) [])));
val _ =
- Outer_Syntax.command @{command_keyword statespace} "define state-space as locale context"
+ Outer_Syntax.command \<^command_keyword>\<open>statespace\<close> "define state-space as locale context"
(statespace_decl >> (fn ((args, name), (parents, comps)) =>
Toplevel.theory (define_statespace args name parents comps)));
--- a/src/HOL/TLA/Action.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Action.thy Sat Jan 05 17:24:33 2019 +0100
@@ -114,7 +114,7 @@
fun action_use ctxt th =
case Thm.concl_of th of
- Const _ $ (Const (@{const_name Valid}, _) $ _) =>
+ Const _ $ (Const (\<^const_name>\<open>Valid\<close>, _) $ _) =>
(flatten (action_unlift ctxt th) handle THM _ => th)
| _ => th;
\<close>
--- a/src/HOL/TLA/Buffer/DBuffer.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Buffer/DBuffer.thy Sat Jan 05 17:24:33 2019 +0100
@@ -59,7 +59,7 @@
apply (rule square_simulation)
apply clarsimp
apply (tactic
- \<open>action_simp_tac (@{context} addsimps (@{thm hd_append} :: @{thms db_defs})) [] [] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps (@{thm hd_append} :: @{thms db_defs})) [] [] 1\<close>)
done
--- a/src/HOL/TLA/Inc/Inc.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Inc/Inc.thy Sat Jan 05 17:24:33 2019 +0100
@@ -170,9 +170,9 @@
\<longrightarrow> (pc1 = #g \<leadsto> pc1 = #a)"
apply (rule SF1)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps @{thm angle_def} :: @{thms Psi_defs}) [] [] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps @{thm angle_def} :: @{thms Psi_defs}) [] [] 1\<close>)
(* reduce \<turnstile> \<box>A \<longrightarrow> \<diamond>Enabled B to \<turnstile> A \<longrightarrow> Enabled B *)
apply (auto intro!: InitDmd_gen [temp_use] N1_enabled_at_g [temp_use]
dest!: STL2_gen [temp_use] simp: Init_def)
@@ -191,8 +191,8 @@
"\<turnstile> \<box>[(N1 \<or> N2) \<and> \<not>beta1]_(x,y,sem,pc1,pc2) \<and> SF(N2)_(x,y,sem,pc1,pc2) \<and> \<box>#True
\<longrightarrow> (pc2 = #g \<leadsto> pc2 = #a)"
apply (rule SF1)
- apply (tactic \<open>action_simp_tac (@{context} addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
- apply (tactic \<open>action_simp_tac (@{context} addsimps @{thm angle_def} :: @{thms Psi_defs})
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps @{thm angle_def} :: @{thms Psi_defs})
[] [] 1\<close>)
apply (auto intro!: InitDmd_gen [temp_use] N2_enabled_at_g [temp_use]
dest!: STL2_gen [temp_use] simp add: Init_def)
@@ -211,9 +211,9 @@
\<longrightarrow> (pc2 = #b \<leadsto> pc2 = #g)"
apply (rule SF1)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps @{thm angle_def} :: @{thms Psi_defs}) [] [] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps @{thm angle_def} :: @{thms Psi_defs}) [] [] 1\<close>)
apply (auto intro!: InitDmd_gen [temp_use] N2_enabled_at_b [temp_use]
dest!: STL2_gen [temp_use] simp: Init_def)
done
@@ -253,9 +253,9 @@
\<and> SF(N1)_(x,y,sem,pc1,pc2) \<and> \<box> SF(N2)_(x,y,sem,pc1,pc2)
\<longrightarrow> (pc1 = #a \<leadsto> pc1 = #b)"
apply (rule SF1)
- apply (tactic \<open>action_simp_tac (@{context} addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps @{thms Psi_defs}) [] [@{thm squareE}] 1\<close>)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps (@{thm angle_def} :: @{thms Psi_defs})) [] [] 1\<close>)
+ \<open>action_simp_tac (\<^context> addsimps (@{thm angle_def} :: @{thms Psi_defs})) [] [] 1\<close>)
apply (clarsimp intro!: N1_enabled_at_both_a [THEN DmdImpl [temp_use]])
apply (auto intro!: BoxDmd2_simple [temp_use] N2_live [temp_use]
simp: split_box_conj more_temp_simps)
--- a/src/HOL/TLA/Intensional.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Intensional.thy Sat Jan 05 17:24:33 2019 +0100
@@ -240,7 +240,7 @@
fun hflatten t =
case Thm.concl_of t of
- Const _ $ (Const (@{const_name HOL.implies}, _) $ _ $ _) => hflatten (t RS mp)
+ Const _ $ (Const (\<^const_name>\<open>HOL.implies\<close>, _) $ _ $ _) => hflatten (t RS mp)
| _ => (hflatten (matchsome conjI t)) handle THM _ => zero_var_indexes t
in
hflatten t
@@ -248,7 +248,7 @@
fun int_use ctxt th =
case Thm.concl_of th of
- Const _ $ (Const (@{const_name Valid}, _) $ _) =>
+ Const _ $ (Const (\<^const_name>\<open>Valid\<close>, _) $ _) =>
(flatten (int_unlift ctxt th) handle THM _ => th)
| _ => th
\<close>
--- a/src/HOL/TLA/Memory/MemClerk.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Memory/MemClerk.thy Sat Jan 05 17:24:33 2019 +0100
@@ -85,7 +85,7 @@
lemma MClkFwd_enabled: "\<And>p. basevars (rtrner send!p, caller rcv!p, cst!p) \<Longrightarrow>
\<turnstile> Calling send p \<and> \<not>Calling rcv p \<and> cst!p = #clkA
\<longrightarrow> Enabled (MClkFwd send rcv cst p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm MClkFwd_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm MClkFwd_def},
@{thm ACall_def}, @{thm caller_def}, @{thm rtrner_def}]) [exI]
[@{thm base_enabled}, @{thm Pair_inject}] 1\<close>)
@@ -100,9 +100,9 @@
lemma MClkReply_enabled: "\<And>p. basevars (rtrner send!p, caller rcv!p, cst!p) \<Longrightarrow>
\<turnstile> Calling send p \<and> \<not>Calling rcv p \<and> cst!p = #clkB
\<longrightarrow> Enabled (<MClkReply send rcv cst p>_(cst!p, rtrner send!p, caller rcv!p))"
- apply (tactic \<open>action_simp_tac @{context}
+ apply (tactic \<open>action_simp_tac \<^context>
[@{thm MClkReply_change} RSN (2, @{thm enabled_mono})] [] 1\<close>)
- apply (tactic \<open>action_simp_tac (@{context} addsimps
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps
[@{thm MClkReply_def}, @{thm AReturn_def}, @{thm caller_def}, @{thm rtrner_def}])
[exI] [@{thm base_enabled}, @{thm Pair_inject}] 1\<close>)
done
--- a/src/HOL/TLA/Memory/Memory.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Memory/Memory.thy Sat Jan 05 17:24:33 2019 +0100
@@ -189,9 +189,9 @@
\<turnstile> Calling ch p \<and> (rs!p \<noteq> #NotAResult)
\<longrightarrow> Enabled (<MemReturn ch rs p>_(rtrner ch ! p, rs!p))"
apply (tactic
- \<open>action_simp_tac @{context} [@{thm MemReturn_change} RSN (2, @{thm enabled_mono}) ] [] 1\<close>)
+ \<open>action_simp_tac \<^context> [@{thm MemReturn_change} RSN (2, @{thm enabled_mono}) ] [] 1\<close>)
apply (tactic
- \<open>action_simp_tac (@{context} addsimps [@{thm MemReturn_def}, @{thm AReturn_def},
+ \<open>action_simp_tac (\<^context> addsimps [@{thm MemReturn_def}, @{thm AReturn_def},
@{thm rtrner_def}]) [exI] [@{thm base_enabled}, @{thm Pair_inject}] 1\<close>)
done
@@ -235,12 +235,12 @@
\<longrightarrow> Enabled (<RNext ch mm rs p>_(rtrner ch ! p, rs!p))"
apply (auto simp: enabled_disj [try_rewrite] intro!: RWRNext_enabled [temp_use])
apply (case_tac "arg (ch w p)")
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm Read_def},
- temp_rewrite @{context} @{thm enabled_ex}]) [@{thm ReadInner_enabled}, exI] [] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm Read_def},
+ temp_rewrite \<^context> @{thm enabled_ex}]) [@{thm ReadInner_enabled}, exI] [] 1\<close>)
apply (force dest: base_pair [temp_use])
apply (erule contrapos_np)
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm Write_def},
- temp_rewrite @{context} @{thm enabled_ex}])
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm Write_def},
+ temp_rewrite \<^context> @{thm enabled_ex}])
[@{thm WriteInner_enabled}, exI] [] 1\<close>)
done
--- a/src/HOL/TLA/Memory/MemoryImplementation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Memory/MemoryImplementation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -224,7 +224,7 @@
(but it can be a lot faster than the default setup)
*)
ML \<open>
- val config_fast_solver = Attrib.setup_config_bool @{binding fast_solver} (K false);
+ val config_fast_solver = Attrib.setup_config_bool \<^binding>\<open>fast_solver\<close> (K false);
val fast_solver = mk_solver "fast_solver" (fn ctxt =>
if Config.get ctxt config_fast_solver
then assume_tac ctxt ORELSE' (eresolve_tac ctxt [notE])
@@ -248,9 +248,9 @@
apply (rule historyI)
apply assumption+
apply (rule MI_base)
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HInit_def}]) [] [] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HInit_def}]) [] [] 1\<close>)
apply (erule fun_cong)
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HNext_def}])
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HNext_def}])
[@{thm busy_squareI}] [] 1\<close>)
apply (erule fun_cong)
done
@@ -350,9 +350,9 @@
lemma S1Hist: "\<turnstile> [HNext rmhist p]_(c p,r p,m p,rmhist!p) \<and> $(S1 rmhist p)
\<longrightarrow> unchanged (rmhist!p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HNext_def}, @{thm S_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HNext_def}, @{thm S_def},
@{thm S1_def}, @{thm MemReturn_def}, @{thm RPCFail_def}, @{thm MClkReply_def},
- @{thm AReturn_def}]) [] [temp_use @{context} @{thm squareE}] 1\<close>)
+ @{thm AReturn_def}]) [] [temp_use \<^context> @{thm squareE}] 1\<close>)
(* ------------------------------ State S2 ---------------------------------------- *)
@@ -366,7 +366,7 @@
lemma S2Forward: "\<turnstile> $(S2 rmhist p) \<and> MClkFwd memCh crCh cst p
\<and> unchanged (e p, r p, m p, rmhist!p)
\<longrightarrow> (S3 rmhist p)$"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm MClkFwd_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm MClkFwd_def},
@{thm ACall_def}, @{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def},
@{thm rtrner_def}, @{thm S_def}, @{thm S2_def}, @{thm S3_def}, @{thm Calling_def}]) [] [] 1\<close>)
@@ -403,7 +403,7 @@
lemma S3Forward: "\<turnstile> RPCFwd crCh rmCh rst p \<and> HNext rmhist p \<and> $(S3 rmhist p)
\<and> unchanged (e p, c p, m p)
\<longrightarrow> (S4 rmhist p)$ \<and> unchanged (rmhist!p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RPCFwd_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RPCFwd_def},
@{thm HNext_def}, @{thm MemReturn_def}, @{thm RPCFail_def},
@{thm MClkReply_def}, @{thm AReturn_def}, @{thm ACall_def}, @{thm e_def},
@{thm c_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm S_def},
@@ -412,7 +412,7 @@
lemma S3Fail: "\<turnstile> RPCFail crCh rmCh rst p \<and> $(S3 rmhist p) \<and> HNext rmhist p
\<and> unchanged (e p, c p, m p)
\<longrightarrow> (S6 rmhist p)$"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HNext_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HNext_def},
@{thm RPCFail_def}, @{thm AReturn_def}, @{thm e_def}, @{thm c_def},
@{thm m_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm MVOKBARF_def},
@{thm S_def}, @{thm S3_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1\<close>)
@@ -439,7 +439,7 @@
lemma S4ReadInner: "\<turnstile> ReadInner rmCh mm ires p l \<and> $(S4 rmhist p) \<and> unchanged (e p, c p, r p)
\<and> HNext rmhist p \<and> $(MemInv mm l)
\<longrightarrow> (S4 rmhist p)$ \<and> unchanged (rmhist!p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ReadInner_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ReadInner_def},
@{thm GoodRead_def}, @{thm BadRead_def}, @{thm HNext_def}, @{thm MemReturn_def},
@{thm RPCFail_def}, @{thm MClkReply_def}, @{thm AReturn_def}, @{thm e_def},
@{thm c_def}, @{thm r_def}, @{thm rtrner_def}, @{thm caller_def},
@@ -453,7 +453,7 @@
lemma S4WriteInner: "\<turnstile> WriteInner rmCh mm ires p l v \<and> $(S4 rmhist p) \<and> unchanged (e p, c p, r p) \<and> HNext rmhist p
\<longrightarrow> (S4 rmhist p)$ \<and> unchanged (rmhist!p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm WriteInner_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm WriteInner_def},
@{thm GoodWrite_def}, @{thm BadWrite_def}, @{thm HNext_def}, @{thm MemReturn_def},
@{thm RPCFail_def}, @{thm MClkReply_def}, @{thm AReturn_def}, @{thm e_def},
@{thm c_def}, @{thm r_def}, @{thm rtrner_def}, @{thm caller_def}, @{thm MVNROKBA_def},
@@ -492,14 +492,14 @@
lemma S5Reply: "\<turnstile> RPCReply crCh rmCh rst p \<and> $(S5 rmhist p) \<and> unchanged (e p, c p, m p,rmhist!p)
\<longrightarrow> (S6 rmhist p)$"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RPCReply_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RPCReply_def},
@{thm AReturn_def}, @{thm e_def}, @{thm c_def}, @{thm m_def}, @{thm MVOKBA_def},
@{thm MVOKBARF_def}, @{thm caller_def}, @{thm rtrner_def}, @{thm S_def},
@{thm S5_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1\<close>)
lemma S5Fail: "\<turnstile> RPCFail crCh rmCh rst p \<and> $(S5 rmhist p) \<and> unchanged (e p, c p, m p,rmhist!p)
\<longrightarrow> (S6 rmhist p)$"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RPCFail_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RPCFail_def},
@{thm AReturn_def}, @{thm e_def}, @{thm c_def}, @{thm m_def},
@{thm MVOKBARF_def}, @{thm caller_def}, @{thm rtrner_def},
@{thm S_def}, @{thm S5_def}, @{thm S6_def}, @{thm Calling_def}]) [] [] 1\<close>)
@@ -525,7 +525,7 @@
lemma S6Retry: "\<turnstile> MClkRetry memCh crCh cst p \<and> HNext rmhist p \<and> $S6 rmhist p
\<and> unchanged (e p,r p,m p)
\<longrightarrow> (S3 rmhist p)$ \<and> unchanged (rmhist!p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HNext_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HNext_def},
@{thm MClkReply_def}, @{thm MClkRetry_def}, @{thm ACall_def}, @{thm AReturn_def},
@{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def},
@{thm S_def}, @{thm S6_def}, @{thm S3_def}, @{thm Calling_def}]) [] [] 1\<close>)
@@ -533,7 +533,7 @@
lemma S6Reply: "\<turnstile> MClkReply memCh crCh cst p \<and> HNext rmhist p \<and> $S6 rmhist p
\<and> unchanged (e p,r p,m p)
\<longrightarrow> (S1 rmhist p)$"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm HNext_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm HNext_def},
@{thm MemReturn_def}, @{thm RPCFail_def}, @{thm AReturn_def}, @{thm MClkReply_def},
@{thm e_def}, @{thm r_def}, @{thm m_def}, @{thm caller_def}, @{thm rtrner_def},
@{thm S_def}, @{thm S6_def}, @{thm S1_def}, @{thm Calling_def}]) [] [] 1\<close>)
@@ -565,8 +565,8 @@
lemma Step1_2_1: "\<turnstile> [HNext rmhist p]_(c p,r p,m p, rmhist!p) \<and> ImpNext p
\<and> \<not>unchanged (e p, c p, r p, m p, rmhist!p) \<and> $S1 rmhist p
\<longrightarrow> (S2 rmhist p)$ \<and> ENext p \<and> unchanged (c p, r p, m p)"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context})
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>)
[@{thm S1ClerkUnch}, @{thm S1RPCUnch}, @{thm S1MemUnch}, @{thm S1Hist}]) 1\<close>)
using [[fast_solver]]
apply (auto elim!: squareE [temp_use] intro!: S1Env [temp_use])
@@ -576,8 +576,8 @@
\<and> \<not>unchanged (e p, c p, r p, m p, rmhist!p) \<and> $S2 rmhist p
\<longrightarrow> (S3 rmhist p)$ \<and> MClkFwd memCh crCh cst p
\<and> unchanged (e p, r p, m p, rmhist!p)"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context})
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>)
[@{thm S2EnvUnch}, @{thm S2RPCUnch}, @{thm S2MemUnch}, @{thm S2Hist}]) 1\<close>)
using [[fast_solver]]
apply (auto elim!: squareE [temp_use] intro!: S2Clerk [temp_use] S2Forward [temp_use])
@@ -587,11 +587,11 @@
\<and> \<not>unchanged (e p, c p, r p, m p, rmhist!p) \<and> $S3 rmhist p
\<longrightarrow> ((S4 rmhist p)$ \<and> RPCFwd crCh rmCh rst p \<and> unchanged (e p, c p, m p, rmhist!p))
\<or> ((S6 rmhist p)$ \<and> RPCFail crCh rmCh rst p \<and> unchanged (e p, c p, m p))"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context}) [@{thm S3EnvUnch}, @{thm S3ClerkUnch}, @{thm S3MemUnch}]) 1\<close>)
- apply (tactic \<open>action_simp_tac @{context} []
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>) [@{thm S3EnvUnch}, @{thm S3ClerkUnch}, @{thm S3MemUnch}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac \<^context> []
(@{thm squareE} ::
- map (temp_elim @{context}) [@{thm S3RPC}, @{thm S3Forward}, @{thm S3Fail}]) 1\<close>)
+ map (temp_elim \<^context>) [@{thm S3RPC}, @{thm S3Forward}, @{thm S3Fail}]) 1\<close>)
apply (auto dest!: S3Hist [temp_use])
done
@@ -601,11 +601,11 @@
\<longrightarrow> ((S4 rmhist p)$ \<and> Read rmCh mm ires p \<and> unchanged (e p, c p, r p, rmhist!p))
\<or> ((S4 rmhist p)$ \<and> (\<exists>l. Write rmCh mm ires p l) \<and> unchanged (e p, c p, r p, rmhist!p))
\<or> ((S5 rmhist p)$ \<and> MemReturn rmCh ires p \<and> unchanged (e p, c p, r p))"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context}) [@{thm S4EnvUnch}, @{thm S4ClerkUnch}, @{thm S4RPCUnch}]) 1\<close>)
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RNext_def}]) []
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>) [@{thm S4EnvUnch}, @{thm S4ClerkUnch}, @{thm S4RPCUnch}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RNext_def}]) []
(@{thm squareE} ::
- map (temp_elim @{context}) [@{thm S4Read}, @{thm S4Write}, @{thm S4Return}]) 1\<close>)
+ map (temp_elim \<^context>) [@{thm S4Read}, @{thm S4Write}, @{thm S4Return}]) 1\<close>)
apply (auto dest!: S4Hist [temp_use])
done
@@ -613,9 +613,9 @@
\<and> \<not>unchanged (e p, c p, r p, m p, rmhist!p) \<and> $S5 rmhist p
\<longrightarrow> ((S6 rmhist p)$ \<and> RPCReply crCh rmCh rst p \<and> unchanged (e p, c p, m p))
\<or> ((S6 rmhist p)$ \<and> RPCFail crCh rmCh rst p \<and> unchanged (e p, c p, m p))"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context}) [@{thm S5EnvUnch}, @{thm S5ClerkUnch}, @{thm S5MemUnch}, @{thm S5Hist}]) 1\<close>)
- apply (tactic \<open>action_simp_tac @{context} [] [@{thm squareE}, temp_elim @{context} @{thm S5RPC}] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>) [@{thm S5EnvUnch}, @{thm S5ClerkUnch}, @{thm S5MemUnch}, @{thm S5Hist}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac \<^context> [] [@{thm squareE}, temp_elim \<^context> @{thm S5RPC}] 1\<close>)
using [[fast_solver]]
apply (auto elim!: squareE [temp_use] dest!: S5Reply [temp_use] S5Fail [temp_use])
done
@@ -624,10 +624,10 @@
\<and> \<not>unchanged (e p, c p, r p, m p, rmhist!p) \<and> $S6 rmhist p
\<longrightarrow> ((S1 rmhist p)$ \<and> MClkReply memCh crCh cst p \<and> unchanged (e p, r p, m p))
\<or> ((S3 rmhist p)$ \<and> MClkRetry memCh crCh cst p \<and> unchanged (e p,r p,m p,rmhist!p))"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ImpNext_def}]) []
- (map (temp_elim @{context}) [@{thm S6EnvUnch}, @{thm S6RPCUnch}, @{thm S6MemUnch}]) 1\<close>)
- apply (tactic \<open>action_simp_tac @{context} []
- (@{thm squareE} :: map (temp_elim @{context}) [@{thm S6Clerk}, @{thm S6Retry}, @{thm S6Reply}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ImpNext_def}]) []
+ (map (temp_elim \<^context>) [@{thm S6EnvUnch}, @{thm S6RPCUnch}, @{thm S6MemUnch}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac \<^context> []
+ (@{thm squareE} :: map (temp_elim \<^context>) [@{thm S6Clerk}, @{thm S6Retry}, @{thm S6Reply}]) 1\<close>)
apply (auto dest: S6Hist [temp_use])
done
@@ -638,7 +638,7 @@
section "Initialization (Step 1.3)"
lemma Step1_3: "\<turnstile> S1 rmhist p \<longrightarrow> PInit (resbar rmhist) p"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm resbar_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm resbar_def},
@{thm PInit_def}, @{thm S_def}, @{thm S1_def}]) [] [] 1\<close>)
(* ----------------------------------------------------------------------
@@ -657,7 +657,7 @@
\<and> unchanged (e p, r p, m p, rmhist!p)
\<longrightarrow> unchanged (rtrner memCh!p, resbar rmhist!p)"
by (tactic \<open>action_simp_tac
- (@{context} addsimps [@{thm MClkFwd_def}, @{thm e_def}, @{thm r_def}, @{thm m_def},
+ (\<^context> addsimps [@{thm MClkFwd_def}, @{thm e_def}, @{thm r_def}, @{thm m_def},
@{thm resbar_def}, @{thm S_def}, @{thm S2_def}, @{thm S3_def}]) [] [] 1\<close>)
lemma Step1_4_3a: "\<turnstile> RPCFwd crCh rmCh rst p \<and> $S3 rmhist p \<and> (S4 rmhist p)$
@@ -665,7 +665,7 @@
\<longrightarrow> unchanged (rtrner memCh!p, resbar rmhist!p)"
apply clarsimp
apply (drule S3_excl [temp_use] S4_excl [temp_use])+
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm e_def},
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm e_def},
@{thm c_def}, @{thm m_def}, @{thm resbar_def}, @{thm S_def}, @{thm S3_def}]) [] [] 1\<close>)
done
@@ -684,11 +684,11 @@
\<longrightarrow> ReadInner memCh mm (resbar rmhist) p l"
apply clarsimp
apply (drule S4_excl [temp_use])+
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm ReadInner_def},
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm ReadInner_def},
@{thm GoodRead_def}, @{thm BadRead_def}, @{thm e_def}, @{thm c_def}, @{thm m_def}]) [] [] 1\<close>)
apply (auto simp: resbar_def)
apply (tactic \<open>ALLGOALS (action_simp_tac
- (@{context} addsimps [@{thm RPCRelayArg_def}, @{thm MClkRelayArg_def},
+ (\<^context> addsimps [@{thm RPCRelayArg_def}, @{thm MClkRelayArg_def},
@{thm S_def}, @{thm S4_def}, @{thm RdRequest_def}, @{thm MemInv_def}])
[] [@{thm impE}, @{thm MemValNotAResultE}])\<close>)
done
@@ -703,11 +703,11 @@
\<longrightarrow> WriteInner memCh mm (resbar rmhist) p l v"
apply clarsimp
apply (drule S4_excl [temp_use])+
- apply (tactic \<open>action_simp_tac (@{context} addsimps
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps
[@{thm WriteInner_def}, @{thm GoodWrite_def}, @{thm BadWrite_def}, @{thm e_def},
@{thm c_def}, @{thm m_def}]) [] [] 1\<close>)
apply (auto simp: resbar_def)
- apply (tactic \<open>ALLGOALS (action_simp_tac (@{context} addsimps
+ apply (tactic \<open>ALLGOALS (action_simp_tac (\<^context> addsimps
[@{thm RPCRelayArg_def}, @{thm MClkRelayArg_def}, @{thm S_def},
@{thm S4_def}, @{thm WrRequest_def}]) [] [])\<close>)
done
@@ -720,7 +720,7 @@
lemma Step1_4_4c: "\<turnstile> MemReturn rmCh ires p \<and> $S4 rmhist p \<and> (S5 rmhist p)$
\<and> unchanged (e p, c p, r p)
\<longrightarrow> unchanged (rtrner memCh!p, resbar rmhist!p)"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm e_def},
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm e_def},
@{thm c_def}, @{thm r_def}, @{thm resbar_def}]) [] [] 1\<close>)
apply (drule S4_excl [temp_use] S5_excl [temp_use])+
using [[fast_solver]]
@@ -750,11 +750,11 @@
\<longrightarrow> MemReturn memCh (resbar rmhist) p"
apply clarsimp
apply (drule S6_excl [temp_use])+
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm e_def},
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm e_def},
@{thm r_def}, @{thm m_def}, @{thm MClkReply_def}, @{thm MemReturn_def},
@{thm AReturn_def}, @{thm resbar_def}]) [] [] 1\<close>)
apply simp_all (* simplify if-then-else *)
- apply (tactic \<open>ALLGOALS (action_simp_tac (@{context} addsimps
+ apply (tactic \<open>ALLGOALS (action_simp_tac (\<^context> addsimps
[@{thm MClkReplyVal_def}, @{thm S6_def}, @{thm S_def}]) [] [@{thm MVOKBARFnotNR}])\<close>)
done
@@ -763,7 +763,7 @@
\<longrightarrow> MemFail memCh (resbar rmhist) p"
apply clarsimp
apply (drule S3_excl [temp_use])+
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm e_def}, @{thm r_def},
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm e_def}, @{thm r_def},
@{thm m_def}, @{thm MClkRetry_def}, @{thm MemFail_def}, @{thm resbar_def}]) [] [] 1\<close>)
apply (auto simp: S6_def S_def)
done
@@ -901,14 +901,14 @@
lemma S1_RNextdisabled: "\<turnstile> S1 rmhist p \<longrightarrow>
\<not>Enabled (<RNext memCh mm (resbar rmhist) p>_(rtrner memCh!p, resbar rmhist!p))"
- apply (tactic \<open>action_simp_tac (@{context} addsimps [@{thm angle_def},
- @{thm S_def}, @{thm S1_def}]) [notI] [@{thm enabledE}, temp_elim @{context} @{thm Memoryidle}] 1\<close>)
+ apply (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm angle_def},
+ @{thm S_def}, @{thm S1_def}]) [notI] [@{thm enabledE}, temp_elim \<^context> @{thm Memoryidle}] 1\<close>)
apply force
done
lemma S1_Returndisabled: "\<turnstile> S1 rmhist p \<longrightarrow>
\<not>Enabled (<MemReturn memCh (resbar rmhist) p>_(rtrner memCh!p, resbar rmhist!p))"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm angle_def}, @{thm MemReturn_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm angle_def}, @{thm MemReturn_def},
@{thm AReturn_def}, @{thm S_def}, @{thm S1_def}]) [notI] [@{thm enabledE}] 1\<close>)
lemma RNext_fair: "\<turnstile> \<box>\<diamond>S1 rmhist p
@@ -1087,7 +1087,7 @@
lemma MClkReplyS6:
"\<turnstile> $ImpInv rmhist p \<and> <MClkReply memCh crCh cst p>_(c p) \<longrightarrow> $S6 rmhist p"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm angle_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm angle_def},
@{thm MClkReply_def}, @{thm AReturn_def}, @{thm ImpInv_def}, @{thm S_def},
@{thm S1_def}, @{thm S2_def}, @{thm S3_def}, @{thm S4_def}, @{thm S5_def}]) [] [] 1\<close>)
@@ -1095,7 +1095,7 @@
apply (auto simp: c_def intro!: MClkReply_enabled [temp_use])
apply (cut_tac MI_base)
apply (blast dest: base_pair)
- apply (tactic \<open>ALLGOALS (action_simp_tac (@{context}
+ apply (tactic \<open>ALLGOALS (action_simp_tac (\<^context>
addsimps [@{thm S_def}, @{thm S6_def}]) [] [])\<close>)
done
@@ -1106,8 +1106,8 @@
apply (subgoal_tac "sigma \<Turnstile> \<box>\<diamond> (<MClkReply memCh crCh cst p>_ (c p))")
apply (erule InfiniteEnsures)
apply assumption
- apply (tactic \<open>action_simp_tac @{context} []
- (map (temp_elim @{context}) [@{thm MClkReplyS6}, @{thm S6MClkReply_successors}]) 1\<close>)
+ apply (tactic \<open>action_simp_tac \<^context> []
+ (map (temp_elim \<^context>) [@{thm MClkReplyS6}, @{thm S6MClkReply_successors}]) 1\<close>)
apply (auto simp: SF_def)
apply (erule contrapos_np)
apply (auto intro!: S6MClkReply_enabled [temp_use] elim!: STL4E [temp_use] DmdImplE [temp_use])
@@ -1193,8 +1193,8 @@
sigma \<Turnstile> \<box>\<diamond>S6 rmhist p \<longrightarrow> \<box>\<diamond>S1 rmhist p \<rbrakk>
\<Longrightarrow> sigma \<Turnstile> \<box>\<diamond>S1 rmhist p"
apply (rule classical)
- apply (tactic \<open>asm_lr_simp_tac (@{context} addsimps
- [temp_use @{context} @{thm NotBox}, temp_rewrite @{context} @{thm NotDmd}]) 1\<close>)
+ apply (tactic \<open>asm_lr_simp_tac (\<^context> addsimps
+ [temp_use \<^context> @{thm NotBox}, temp_rewrite \<^context> @{thm NotDmd}]) 1\<close>)
apply (auto elim!: leadsto_infinite [temp_use] mp dest!: DBImplBD [temp_use])
done
--- a/src/HOL/TLA/Memory/RPC.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/Memory/RPC.thy Sat Jan 05 17:24:33 2019 +0100
@@ -100,14 +100,14 @@
(* Enabledness of some actions *)
lemma RPCFail_enabled: "\<And>p. basevars (rtrner send!p, caller rcv!p, rst!p) \<Longrightarrow>
\<turnstile> \<not>Calling rcv p \<and> Calling send p \<longrightarrow> Enabled (RPCFail send rcv rst p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RPCFail_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RPCFail_def},
@{thm AReturn_def}, @{thm caller_def}, @{thm rtrner_def}]) [exI]
[@{thm base_enabled}, @{thm Pair_inject}] 1\<close>)
lemma RPCReply_enabled: "\<And>p. basevars (rtrner send!p, caller rcv!p, rst!p) \<Longrightarrow>
\<turnstile> \<not>Calling rcv p \<and> Calling send p \<and> rst!p = #rpcB
\<longrightarrow> Enabled (RPCReply send rcv rst p)"
- by (tactic \<open>action_simp_tac (@{context} addsimps [@{thm RPCReply_def},
+ by (tactic \<open>action_simp_tac (\<^context> addsimps [@{thm RPCReply_def},
@{thm AReturn_def}, @{thm caller_def}, @{thm rtrner_def}]) [exI]
[@{thm base_enabled}, @{thm Pair_inject}] 1\<close>)
--- a/src/HOL/TLA/TLA.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TLA/TLA.thy Sat Jan 05 17:24:33 2019 +0100
@@ -116,7 +116,7 @@
fun temp_use ctxt th =
case Thm.concl_of th of
- Const _ $ (Const (@{const_name Intensional.Valid}, _) $ _) =>
+ Const _ $ (Const (\<^const_name>\<open>Intensional.Valid\<close>, _) $ _) =>
((flatten (temp_unlift ctxt th)) handle THM _ => th)
| _ => th;
--- a/src/HOL/TPTP/ATP_Theory_Export.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/ATP_Theory_Export.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,8 +17,8 @@
ML \<open>
val do_it = false (* switch to "true" to generate the files *)
-val ctxt = @{context}
-val thy = @{theory Complex_Main}
+val ctxt = \<^context>
+val thy = \<^theory>\<open>Complex_Main\<close>
val infer_policy = (* Unchecked_Inferences *) No_Inferences
\<close>
--- a/src/HOL/TPTP/MaSh_Eval.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/MaSh_Eval.thy Sat Jan 05 17:24:33 2019 +0100
@@ -23,7 +23,7 @@
\<close>
ML \<open>
-val params = Sledgehammer_Commands.default_params @{theory} []
+val params = Sledgehammer_Commands.default_params \<^theory> []
val prob_dir = prefix ^ "mash_problems"
\<close>
@@ -36,7 +36,7 @@
ML \<open>
if do_it then
- evaluate_mash_suggestions @{context} params range (SOME prob_dir)
+ evaluate_mash_suggestions \<^context> params range (SOME prob_dir)
[prefix ^ "mepo_suggestions",
prefix ^ "mash_suggestions",
prefix ^ "mash_prover_suggestions",
--- a/src/HOL/TPTP/MaSh_Export_Base.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/MaSh_Export_Base.thy Sat Jan 05 17:24:33 2019 +0100
@@ -29,8 +29,8 @@
ML \<open>
val do_it = false (* switch to "true" to generate the files *)
-val thys = [@{theory List}]
-val params as {provers, ...} = Sledgehammer_Commands.default_params @{theory} []
+val thys = [\<^theory>\<open>List\<close>]
+val params as {provers, ...} = Sledgehammer_Commands.default_params \<^theory> []
val prover = hd provers
val range = (1, NONE)
val step = 1
--- a/src/HOL/TPTP/TPTP_Parser/tptp_interpret.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_interpret.ML Sat Jan 05 17:24:33 2019 +0100
@@ -196,7 +196,7 @@
val binding = mk_binding config type_name
val final_fqn = Sign.full_name thy binding
val tyargs =
- List.tabulate (arity, rpair @{sort type} o prefix "'" o string_of_int)
+ List.tabulate (arity, rpair \<^sort>\<open>type\<close> o prefix "'" o string_of_int)
val (_, thy') =
Typedecl.typedecl_global {final = true} (mk_binding config type_name, tyargs, NoSyn) thy
val typ_map_entry = (thf_type, (final_fqn, arity))
@@ -254,7 +254,7 @@
Atom_type (str, tys) => Atom_type (str, tys @ [fmlatype_to_type tm2]))
fun tfree_name_of_var_type str = "'" ^ Name.desymbolize (SOME false) str
-fun tfree_of_var_type str = TFree (tfree_name_of_var_type str, @{sort type})
+fun tfree_of_var_type str = TFree (tfree_name_of_var_type str, \<^sort>\<open>type\<close>)
fun interpret_type config thy type_map thf_ty =
let
@@ -273,11 +273,11 @@
in
case thf_ty of
Prod_type (thf_ty1, thf_ty2) =>
- Type (@{type_name prod},
+ Type (\<^type_name>\<open>prod\<close>,
[interpret_type config thy type_map thf_ty1,
interpret_type config thy type_map thf_ty2])
| Fn_type (thf_ty1, thf_ty2) =>
- Type (@{type_name fun},
+ Type (\<^type_name>\<open>fun\<close>,
[interpret_type config thy type_map thf_ty1,
interpret_type config thy type_map thf_ty2])
| Atom_type (str, thf_tys) =>
@@ -286,7 +286,7 @@
| Var_type str => tfree_of_var_type str
| Defined_type tptp_base_type =>
(case tptp_base_type of
- Type_Ind => @{typ ind}
+ Type_Ind => \<^typ>\<open>ind\<close>
| Type_Bool => HOLogic.boolT
| Type_Type => raise MISINTERPRET_TYPE ("No type interpretation", thf_ty)
(*FIXME these types are currently unsupported, so they're treated as
@@ -399,17 +399,16 @@
| And => HOLogic.conj
| Iff => HOLogic.eq_const HOLogic.boolT
| If => HOLogic.imp
- | Fi => @{term "\<lambda> x. \<lambda> y. y \<longrightarrow> x"}
+ | Fi => \<^term>\<open>\<lambda> x. \<lambda> y. y \<longrightarrow> x\<close>
| Xor =>
- @{term
- "\<lambda> x. \<lambda> y. \<not> (x \<longleftrightarrow> y)"}
- | Nor => @{term "\<lambda> x. \<lambda> y. \<not> (x \<or> y)"}
- | Nand => @{term "\<lambda> x. \<lambda> y. \<not> (x \<and> y)"}
+ \<^term>\<open>\<lambda> x. \<lambda> y. \<not> (x \<longleftrightarrow> y)\<close>
+ | Nor => \<^term>\<open>\<lambda> x. \<lambda> y. \<not> (x \<or> y)\<close>
+ | Nand => \<^term>\<open>\<lambda> x. \<lambda> y. \<not> (x \<and> y)\<close>
| Not => HOLogic.Not
| Op_Forall => HOLogic.all_const dummyT
| Op_Exists => HOLogic.exists_const dummyT
- | True => @{term "True"}
- | False => @{term "False"}
+ | True => \<^term>\<open>True\<close>
+ | False => \<^term>\<open>False\<close>
)
| TypeSymbol _ =>
raise MISINTERPRET_SYMBOL
@@ -429,7 +428,7 @@
(*As above, but for products*)
fun mtimes thy =
fold (fn x => fn y =>
- Sign.mk_const thy (@{const_name Pair}, [dummyT, dummyT]) $ y $ x)
+ Sign.mk_const thy (\<^const_name>\<open>Pair\<close>, [dummyT, dummyT]) $ y $ x)
fun mtimes' (args, thy) f =
let
@@ -485,11 +484,11 @@
end
(*Next batch of functions give info about Isabelle/HOL types*)
-fun is_fun (Type (@{type_name fun}, _)) = true
+fun is_fun (Type (\<^type_name>\<open>fun\<close>, _)) = true
| is_fun _ = false
-fun is_prod (Type (@{type_name prod}, _)) = true
+fun is_prod (Type (\<^type_name>\<open>prod\<close>, _)) = true
| is_prod _ = false
-fun dom_type (Type (@{type_name fun}, [ty1, _])) = ty1
+fun dom_type (Type (\<^type_name>\<open>fun\<close>, [ty1, _])) = ty1
fun is_prod_typed thy config symb =
let
fun symb_type const_name =
@@ -600,7 +599,7 @@
val ([t1, t2], thy'') =
fold_map (interpret_term formula_level config language const_map var_types type_map)
[tptp_t1, tptp_t2] thy'
- in (mk_n_fun 3 @{const_name If} $ t_fmla $ t1 $ t2, thy'') end
+ in (mk_n_fun 3 \<^const_name>\<open>If\<close> $ t_fmla $ t1 $ t2, thy'') end
| Term_Num (number_kind, num) =>
let
(*FIXME hack*)
@@ -718,7 +717,7 @@
let val (t, thy') =
interpret_formula config language const_map var_types type_map
(Quant (Lambda, bindings, tptp_formula)) thy
- in (Const (@{const_name The}, dummyT) $ t, thy') end
+ in (Const (\<^const_name>\<open>The\<close>, dummyT) $ t, thy') end
| Dep_Prod =>
raise MISINTERPRET_FORMULA ("Unsupported", tptp_fmla)
| Dep_Sum =>
@@ -968,7 +967,7 @@
in TPTP_Data.map (cons ((prob_name, result))) thy' end
val _ =
- Outer_Syntax.command @{command_keyword import_tptp} "import TPTP problem"
+ Outer_Syntax.command \<^command_keyword>\<open>import_tptp\<close> "import TPTP problem"
(Parse.path >> (fn name =>
Toplevel.theory (fn thy =>
let val path = Path.explode name
--- a/src/HOL/TPTP/TPTP_Parser/tptp_reconstruct.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_reconstruct.ML Sat Jan 05 17:24:33 2019 +0100
@@ -315,7 +315,7 @@
then should still be able to handle formulas like
(! X, X. F).*)
if x = bound_var andalso
- fst (dest_Const t1) = @{const_name All} then
+ fst (dest_Const t1) = \<^const_name>\<open>All\<close> then
(*Body might contain free variables, so bind them using "var_ctxt".
this involves replacing instances of Free with instances of Bound
at the right index.*)
@@ -413,7 +413,7 @@
end
(*FIXME currently assuming that we're only ever given a single binding each time this is called*)
- val _ = @{assert} (length bindings' = 1)
+ val _ = \<^assert> (length bindings' = 1)
in
fold safe_instantiate_bound bindings' ([], HOLogic.dest_Trueprop orig_parent_fmla)
@@ -450,8 +450,8 @@
case try HOLogic.dest_eq formula of
NONE => if strict then raise (UNPOLARISED formula)
else (formula, true)
- | SOME (x, p as @{term True}) => (x, true)
- | SOME (x, p as @{term False}) => (x, false)
+ | SOME (x, p as \<^term>\<open>True\<close>) => (x, true)
+ | SOME (x, p as \<^term>\<open>False\<close>) => (x, false)
| SOME (x, _) =>
if strict then raise (UNPOLARISED formula)
else (formula, true)
@@ -459,17 +459,17 @@
(*flattens a formula wrt associative operators*)
fun flatten formula_kind formula =
let
- fun is_conj (Const (@{const_name HOL.conj}, _) $ _ $ _) = true
+ fun is_conj (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ _ $ _) = true
| is_conj _ = false
- fun is_disj (Const (@{const_name HOL.disj}, _) $ _ $ _) = true
+ fun is_disj (Const (\<^const_name>\<open>HOL.disj\<close>, _) $ _ $ _) = true
| is_disj _ = false
- fun is_iff (Const (@{const_name HOL.eq}, ty) $ _ $ _) =
+ fun is_iff (Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ _ $ _) =
ty = ([HOLogic.boolT, HOLogic.boolT] ---> HOLogic.boolT)
| is_iff _ = false
fun flatten' formula acc =
case formula of
- Const (@{const_name HOL.conj}, _) $ t1 $ t2 =>
+ Const (\<^const_name>\<open>HOL.conj\<close>, _) $ t1 $ t2 =>
(case formula_kind of
Conjunctive _ =>
let
@@ -479,7 +479,7 @@
if is_conj t2 then flatten' t2 left else (t2 :: left)
end
| _ => formula :: acc)
- | Const (@{const_name HOL.disj}, _) $ t1 $ t2 =>
+ | Const (\<^const_name>\<open>HOL.disj\<close>, _) $ t1 $ t2 =>
(case formula_kind of
Disjunctive _ =>
let
@@ -489,7 +489,7 @@
if is_disj t2 then flatten' t2 left else (t2 :: left)
end
| _ => formula :: acc)
- | Const (@{const_name HOL.eq}, ty) $ t1 $ t2 =>
+ | Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ t1 $ t2 =>
if ty = ([HOLogic.boolT, HOLogic.boolT] ---> HOLogic.boolT) then
case formula_kind of
Biimplicational _ =>
@@ -601,7 +601,7 @@
fun build_inference_info rule_name parent_infos =
let
- val _ = @{assert} (not (null parent_infos))
+ val _ = \<^assert> (not (null parent_infos))
(*hypothesis formulas (with bindings already
instantiated during the proof-transformation
@@ -1316,8 +1316,8 @@
(*Extract the constant name, type, and its definition*)
fun get_defn_components
- (Const (@{const_name HOL.Trueprop}, _) $
- (Const (@{const_name HOL.eq}, _) $
+ (Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $
+ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $
Const (name, ty) $ t)) = ((name, ty), t)
@@ -1446,31 +1446,31 @@
- expands iff (and doesn't recur)*)
fun transform_fmla i fmla_t =
case fmla_t of
- Const (@{const_name "HOL.All"}, ty) $ Abs (s, ty', t') =>
+ Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t') =>
let
val (i', fmla_ts) = transform_fmla i t'
in
if i' > i then
(i' + 1,
map (fn t =>
- Const (@{const_name "HOL.All"}, ty) $ Abs (s, ty', t))
+ Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t))
fmla_ts)
else (i, [fmla_t])
end
- | Const (@{const_name "HOL.Ex"}, ty) $ Abs (s, ty', t') =>
+ | Const (\<^const_name>\<open>HOL.Ex\<close>, ty) $ Abs (s, ty', t') =>
if loose_bvar (t', 0) then
(i, [fmla_t])
else transform_fmla (i + 1) t'
- | @{term HOL.Not} $ (@{term HOL.Not} $ t') =>
+ | \<^term>\<open>HOL.Not\<close> $ (\<^term>\<open>HOL.Not\<close> $ t') =>
transform_fmla (i + 1) t'
- | @{term HOL.conj} $ t1 $ t2 =>
+ | \<^term>\<open>HOL.conj\<close> $ t1 $ t2 =>
let
val (i1, fmla_t1s) = transform_fmla (i + 1) t1
val (i2, fmla_t2s) = transform_fmla (i + 1) t2
in
(i1 + i2 - i, fmla_t1s @ fmla_t2s)
end
- | Const (@{const_name HOL.eq}, ty) $ t1 $ t2 =>
+ | Const (\<^const_name>\<open>HOL.eq\<close>, ty) $ t1 $ t2 =>
let
val (T1, (T2, res)) =
dest_funT ty
@@ -1500,7 +1500,7 @@
(node_name,
{role = TPTP_Syntax.Role_Plain,
fmla =
- HOLogic.mk_eq (target_fmla, @{term False}) (*polarise*)
+ HOLogic.mk_eq (target_fmla, \<^term>\<open>False\<close>) (*polarise*)
|> HOLogic.mk_Trueprop,
source_inf_opt =
SOME (TPTP_Proof.Inference (split_preprocessingK, [], [TPTP_Proof.Parent split_node_name]))})
@@ -1586,7 +1586,7 @@
fun remove_repeated_quantification seen t =
case t of
(*NOTE we're assuming that variables having the same name, have the same type throughout*)
- Const (@{const_name "HOL.All"}, ty) $ Abs (s, ty', t') =>
+ Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', t') =>
let
val (seen_so_far, seen') =
case AList.lookup (op =) seen s of
@@ -1598,7 +1598,7 @@
NONE => raise DROP_REPEATED_QUANTIFICATION
| SOME n =>
if n > seen_so_far then pre_final_t
- else Const (@{const_name "HOL.All"}, ty) $ Abs (s, ty', pre_final_t)
+ else Const (\<^const_name>\<open>HOL.All\<close>, ty) $ Abs (s, ty', pre_final_t)
in (final_t, final_seen) end
| _ => (t, seen)
--- a/src/HOL/TPTP/TPTP_Parser/tptp_reconstruct_library.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/TPTP_Parser/tptp_reconstruct_library.ML Sat Jan 05 17:24:33 2019 +0100
@@ -100,7 +100,7 @@
fun list_diff l1 l2 =
filter (fn x => forall (fn y => x <> y) l2) l1
-val _ = @{assert}
+val _ = \<^assert>
(list_diff [1,2,3] [2,4] = [1, 3])
(* [a,b] times_list [c,d] gives [[a,c,d], [b,c,d]] *)
@@ -179,13 +179,13 @@
fun prefix_intersection_list l1 l2 = prefix_intersection_list' ([], []) l1 l2
end;
-val _ = @{assert}
+val _ = \<^assert>
(prefix_intersection_list [1,2,3,4,5] [1,3,5] = [1, 3, 5, 2, 4]);
-val _ = @{assert}
+val _ = \<^assert>
(prefix_intersection_list [1,2,3,4,5] [] = [1,2,3,4,5]);
-val _ = @{assert}
+val _ = \<^assert>
(prefix_intersection_list [] [1,3,5] = [])
fun switch f y x = f x y
@@ -199,7 +199,7 @@
opt_list
[];
-val _ = @{assert}
+val _ = \<^assert>
([2,0,1] =
fold_options [NONE, SOME 1, NONE, SOME 0, NONE, NONE, SOME 2]);
@@ -250,7 +250,7 @@
|> apsnd break_list
|> (fn (xs, (y, ys)) => (y, xs @ ys))
-val _ = @{assert} (find_and_remove (curry (op =) 3) [0,1,2,3,4,5] = (3, [0,1,2,4,5]))
+val _ = \<^assert> (find_and_remove (curry (op =) 3) [0,1,2,3,4,5] = (3, [0,1,2,4,5]))
(** Functions on terms **)
@@ -259,21 +259,21 @@
and the body*)
local
(*Strip off HOL's All combinator if it's at the toplevel*)
- fun try_dest_All (Const (@{const_name HOL.All}, _) $ t) = t
- | try_dest_All (Const (@{const_name HOL.Trueprop}, _) $ t) = try_dest_All t
+ fun try_dest_All (Const (\<^const_name>\<open>HOL.All\<close>, _) $ t) = t
+ | try_dest_All (Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $ t) = try_dest_All t
| try_dest_All t = t
- val _ = @{assert}
- ((@{term "\<forall>x. (\<forall>y. P) = True"}
+ val _ = \<^assert>
+ ((\<^term>\<open>\<forall>x. (\<forall>y. P) = True\<close>
|> try_dest_All
|> Term.strip_abs_vars)
- = [("x", @{typ "'a"})])
+ = [("x", \<^typ>\<open>'a\<close>)])
- val _ = @{assert}
- ((@{prop "\<forall>x. (\<forall>y. P) = True"}
+ val _ = \<^assert>
+ ((\<^prop>\<open>\<forall>x. (\<forall>y. P) = True\<close>
|> try_dest_All
|> Term.strip_abs_vars)
- = [("x", @{typ "'a"})])
+ = [("x", \<^typ>\<open>'a\<close>)])
fun strip_top_All_vars' once acc t =
let
@@ -300,13 +300,13 @@
val _ =
let
val answer =
- ([("x", @{typ "'a"})],
- HOLogic.all_const @{typ "'a"} $
- (HOLogic.eq_const @{typ "'a"} $
- Free ("x", @{typ "'a"})))
+ ([("x", \<^typ>\<open>'a\<close>)],
+ HOLogic.all_const \<^typ>\<open>'a\<close> $
+ (HOLogic.eq_const \<^typ>\<open>'a\<close> $
+ Free ("x", \<^typ>\<open>'a\<close>)))
in
- @{assert}
- ((@{term "\<forall>x. All ((=) x)"}
+ \<^assert>
+ ((\<^term>\<open>\<forall>x. All ((=) x)\<close>
|> strip_top_All_vars)
= answer)
end
@@ -754,7 +754,7 @@
val conc_results =
TERMFUN (snd (*discard hypotheses*)
#> conc_pred_over_terms) i_opt st
- val _ = @{assert} (length hyp_results = length conc_results)
+ val _ = \<^assert> (length hyp_results = length conc_results)
in
if null hyp_results then true
else
--- a/src/HOL/TPTP/TPTP_Proof_Reconstruction.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/TPTP_Proof_Reconstruction.thy Sat Jan 05 17:24:33 2019 +0100
@@ -53,12 +53,12 @@
section "Setup"
ML \<open>
- val tptp_unexceptional_reconstruction = Attrib.setup_config_bool @{binding tptp_unexceptional_reconstruction} (K false)
+ val tptp_unexceptional_reconstruction = Attrib.setup_config_bool \<^binding>\<open>tptp_unexceptional_reconstruction\<close> (K false)
fun unexceptional_reconstruction ctxt = Config.get ctxt tptp_unexceptional_reconstruction
- val tptp_informative_failure = Attrib.setup_config_bool @{binding tptp_informative_failure} (K false)
+ val tptp_informative_failure = Attrib.setup_config_bool \<^binding>\<open>tptp_informative_failure\<close> (K false)
fun informative_failure ctxt = Config.get ctxt tptp_informative_failure
- val tptp_trace_reconstruction = Attrib.setup_config_bool @{binding tptp_trace_reconstruction} (K false)
- val tptp_max_term_size = Attrib.setup_config_int @{binding tptp_max_term_size} (K 0) (*0=infinity*)
+ val tptp_trace_reconstruction = Attrib.setup_config_bool \<^binding>\<open>tptp_trace_reconstruction\<close> (K false)
+ val tptp_max_term_size = Attrib.setup_config_int \<^binding>\<open>tptp_max_term_size\<close> (K 0) (*0=infinity*)
fun exceeds_tptp_max_term_size ctxt size =
let
@@ -545,7 +545,7 @@
(*FIXME LHS should be constant. Currently allow variables for testing. Probably should still allow Vars (but not Frees) since they'll act as intermediate values*)
fun conc_is_skolem_def t =
case t of
- Const (@{const_name HOL.eq}, _) $ t' $ (Const (@{const_name Hilbert_Choice.Eps}, _) $ _) =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ (Const (\<^const_name>\<open>Hilbert_Choice.Eps\<close>, _) $ _) =>
let
val (h, args) =
strip_comb t'
@@ -555,18 +555,18 @@
is_Free h orelse
is_Var h orelse
(is_Const h
- andalso (get_const_name h <> get_const_name @{term HOL.Ex})
- andalso (get_const_name h <> get_const_name @{term HOL.All})
- andalso (h <> @{term Hilbert_Choice.Eps})
- andalso (h <> @{term HOL.conj})
- andalso (h <> @{term HOL.disj})
- andalso (h <> @{term HOL.eq})
- andalso (h <> @{term HOL.implies})
- andalso (h <> @{term HOL.The})
- andalso (h <> @{term HOL.Ex1})
- andalso (h <> @{term HOL.Not})
- andalso (h <> @{term HOL.iff})
- andalso (h <> @{term HOL.not_equal}))
+ andalso (get_const_name h <> get_const_name \<^term>\<open>HOL.Ex\<close>)
+ andalso (get_const_name h <> get_const_name \<^term>\<open>HOL.All\<close>)
+ andalso (h <> \<^term>\<open>Hilbert_Choice.Eps\<close>)
+ andalso (h <> \<^term>\<open>HOL.conj\<close>)
+ andalso (h <> \<^term>\<open>HOL.disj\<close>)
+ andalso (h <> \<^term>\<open>HOL.eq\<close>)
+ andalso (h <> \<^term>\<open>HOL.implies\<close>)
+ andalso (h <> \<^term>\<open>HOL.The\<close>)
+ andalso (h <> \<^term>\<open>HOL.Ex1\<close>)
+ andalso (h <> \<^term>\<open>HOL.Not\<close>)
+ andalso (h <> \<^term>\<open>HOL.iff\<close>)
+ andalso (h <> \<^term>\<open>HOL.not_equal\<close>))
val args_property =
fold (fn t => fn b =>
b andalso is_Free t) args true
@@ -580,24 +580,24 @@
(*Hack used to detect if a Skolem definition, with an LHS Var, has had the LHS instantiated into an unacceptable term.*)
fun conc_is_bad_skolem_def t =
case t of
- Const (@{const_name HOL.eq}, _) $ t' $ (Const (@{const_name Hilbert_Choice.Eps}, _) $ _) =>
+ Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ (Const (\<^const_name>\<open>Hilbert_Choice.Eps\<close>, _) $ _) =>
let
val (h, args) = strip_comb t'
val get_const_name = dest_Const #> fst
val const_h_test =
if is_Const h then
- (get_const_name h = get_const_name @{term HOL.Ex})
- orelse (get_const_name h = get_const_name @{term HOL.All})
- orelse (h = @{term Hilbert_Choice.Eps})
- orelse (h = @{term HOL.conj})
- orelse (h = @{term HOL.disj})
- orelse (h = @{term HOL.eq})
- orelse (h = @{term HOL.implies})
- orelse (h = @{term HOL.The})
- orelse (h = @{term HOL.Ex1})
- orelse (h = @{term HOL.Not})
- orelse (h = @{term HOL.iff})
- orelse (h = @{term HOL.not_equal})
+ (get_const_name h = get_const_name \<^term>\<open>HOL.Ex\<close>)
+ orelse (get_const_name h = get_const_name \<^term>\<open>HOL.All\<close>)
+ orelse (h = \<^term>\<open>Hilbert_Choice.Eps\<close>)
+ orelse (h = \<^term>\<open>HOL.conj\<close>)
+ orelse (h = \<^term>\<open>HOL.disj\<close>)
+ orelse (h = \<^term>\<open>HOL.eq\<close>)
+ orelse (h = \<^term>\<open>HOL.implies\<close>)
+ orelse (h = \<^term>\<open>HOL.The\<close>)
+ orelse (h = \<^term>\<open>HOL.Ex1\<close>)
+ orelse (h = \<^term>\<open>HOL.Not\<close>)
+ orelse (h = \<^term>\<open>HOL.iff\<close>)
+ orelse (h = \<^term>\<open>HOL.not_equal\<close>)
else true
val h_property =
not (is_Free h) andalso
@@ -621,7 +621,7 @@
|> try_dest_Trueprop
in
case t' of
- Const (@{const_name HOL.eq}, _) $ t' $ (Const (@{const_name Hilbert_Choice.Eps}, _) $ _) => SOME t'
+ Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ (Const (\<^const_name>\<open>Hilbert_Choice.Eps\<close>, _) $ _) => SOME t'
| _ => NONE
end
@@ -706,7 +706,7 @@
fun skolem_const_info_of t =
case t of
- Const (@{const_name HOL.Trueprop}, _) $ (Const (@{const_name HOL.eq}, _) $ t' $ (Const (@{const_name Hilbert_Choice.Eps}, _) $ _)) =>
+ Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ (Const (\<^const_name>\<open>Hilbert_Choice.Eps\<close>, _) $ _)) =>
head_of t'
|> strip_abs_body (*since in general might have a skolem term, so we want to rip out the prefixing lambdas to get to the constant (which should be at head position)*)
|> head_of
@@ -761,7 +761,7 @@
there's no need to use this expensive matching.*)
fun find_skolem_term ctxt consts_candidate arity = fn st =>
let
- val _ = @{assert} (arity > 0)
+ val _ = \<^assert> (arity > 0)
val gls =
Thm.prop_of st
@@ -822,14 +822,14 @@
fun skolem_const_info_of t =
case t of
- Const (@{const_name HOL.Trueprop}, _) $ (Const (@{const_name HOL.eq}, _) $ lhs $ (Const (@{const_name Hilbert_Choice.Eps}, _) $ rhs)) =>
+ Const (\<^const_name>\<open>HOL.Trueprop\<close>, _) $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ (Const (\<^const_name>\<open>Hilbert_Choice.Eps\<close>, _) $ rhs)) =>
let
(*the parameters we will concern ourselves with*)
val params' =
Term.add_frees lhs []
|> distinct (op =)
(*check to make sure that params' <= params*)
- val _ = @{assert} (forall (member (op =) params) params')
+ val _ = \<^assert> (forall (member (op =) params) params')
val skolem_const_ty =
let
val (skolem_const_prety, no_params) =
@@ -837,7 +837,7 @@
|> apfst (dest_Var #> snd) (*head of lhs consists of a logical variable. we just want its type.*)
|> apsnd length
- val _ = @{assert} (length params = no_params)
+ val _ = \<^assert> (length params = no_params)
(*get value type of a function type after n arguments have been supplied*)
fun get_val_ty n ty =
@@ -988,35 +988,35 @@
(*n-ary decomposition. Code is based on the n-ary arg_cong generator*)
fun extuni_dec_n ctxt arity =
let
- val _ = @{assert} (arity > 0)
+ val _ = \<^assert> (arity > 0)
val is =
1 upto arity
|> map Int.toString
- val arg_tys = map (fn i => TFree ("arg" ^ i ^ "_ty", @{sort type})) is
- val res_ty = TFree ("res" ^ "_ty", @{sort type})
+ val arg_tys = map (fn i => TFree ("arg" ^ i ^ "_ty", \<^sort>\<open>type\<close>)) is
+ val res_ty = TFree ("res" ^ "_ty", \<^sort>\<open>type\<close>)
val f_ty = arg_tys ---> res_ty
val f = Free ("f", f_ty)
val xs = map (fn i =>
- Free ("x" ^ i, TFree ("arg" ^ i ^ "_ty", @{sort type}))) is
+ Free ("x" ^ i, TFree ("arg" ^ i ^ "_ty", \<^sort>\<open>type\<close>))) is
(*FIXME DRY principle*)
val ys = map (fn i =>
- Free ("y" ^ i, TFree ("arg" ^ i ^ "_ty", @{sort type}))) is
+ Free ("y" ^ i, TFree ("arg" ^ i ^ "_ty", \<^sort>\<open>type\<close>))) is
val hyp_lhs = list_comb (f, xs)
val hyp_rhs = list_comb (f, ys)
val hyp_eq =
HOLogic.eq_const res_ty $ hyp_lhs $ hyp_rhs
val hyp =
- HOLogic.eq_const HOLogic.boolT $ hyp_eq $ @{term False}
+ HOLogic.eq_const HOLogic.boolT $ hyp_eq $ \<^term>\<open>False\<close>
|> HOLogic.mk_Trueprop
fun conc_eq i =
let
- val ty = TFree ("arg" ^ i ^ "_ty", @{sort type})
+ val ty = TFree ("arg" ^ i ^ "_ty", \<^sort>\<open>type\<close>)
val x = Free ("x" ^ i, ty)
val y = Free ("y" ^ i, ty)
val eq = HOLogic.eq_const ty $ x $ y
in
- HOLogic.eq_const HOLogic.boolT $ eq $ @{term False}
+ HOLogic.eq_const HOLogic.boolT $ eq $ \<^term>\<open>False\<close>
end
val conc_disjs = map conc_eq is
@@ -1180,7 +1180,7 @@
ML \<open>
(*Conjunctive counterparts to Term.disjuncts_aux and Term.disjuncts*)
-fun conjuncts_aux (Const (@{const_name HOL.conj}, _) $ t $ t') conjs =
+fun conjuncts_aux (Const (\<^const_name>\<open>HOL.conj\<close>, _) $ t $ t') conjs =
conjuncts_aux t (conjuncts_aux t' conjs)
| conjuncts_aux t conjs = t :: conjs
@@ -1188,7 +1188,7 @@
(*HOL equivalent of Logic.strip_horn*)
local
- fun imp_strip_horn' acc (Const (@{const_name HOL.implies}, _) $ A $ B) =
+ fun imp_strip_horn' acc (Const (\<^const_name>\<open>HOL.implies\<close>, _) $ A $ B) =
imp_strip_horn' (A :: acc) B
| imp_strip_horn' acc t = (acc, t)
in
@@ -1220,7 +1220,7 @@
|> fst
(*hypothesis clause should be singleton*)
- val _ = @{assert} (length hypos = 1)
+ val _ = \<^assert> (length hypos = 1)
val (t, pol) = the_single hypos
|> try_dest_Trueprop
@@ -1229,7 +1229,7 @@
|> TPTP_Reconstruct.remove_polarity true
(*literal is negative*)
- val _ = @{assert} (not pol)
+ val _ = \<^assert> (not pol)
val (antes, conc) = imp_strip_horn t
@@ -1261,7 +1261,7 @@
validate it*)
fun mk_standard_cnf ctxt kind arity =
let
- val _ = @{assert} (arity > 0)
+ val _ = \<^assert> (arity > 0)
val vars =
1 upto (arity + 1)
|> map (fn i => Free ("x" ^ Int.toString i, HOLogic.boolT))
@@ -1272,8 +1272,8 @@
val conc =
fold
(curry HOLogic.mk_conj)
- (map (fn var => HOLogic.mk_eq (var, @{term True})) antecedents)
- (HOLogic.mk_eq (consequent, @{term False}))
+ (map (fn var => HOLogic.mk_eq (var, \<^term>\<open>True\<close>)) antecedents)
+ (HOLogic.mk_eq (consequent, \<^term>\<open>False\<close>))
val pre_hyp =
case kind of
@@ -1286,7 +1286,7 @@
| TPTP_Reconstruct.Implicational NONE =>
fold (curry HOLogic.mk_imp) antecedents consequent
- val hyp = HOLogic.mk_eq (pre_hyp, @{term False})
+ val hyp = HOLogic.mk_eq (pre_hyp, \<^term>\<open>False\<close>)
val t =
Logic.mk_implies (HOLogic.mk_Trueprop hyp, HOLogic.mk_Trueprop conc)
@@ -1431,16 +1431,16 @@
can_feature (LoopOnce loop_feats) l orelse
can_feature (InnerLoopOnce loop_feats) l;
-@{assert} (can_feature ConstsDiff [StripQuantifiers, ConstsDiff]);
+\<^assert> (can_feature ConstsDiff [StripQuantifiers, ConstsDiff]);
-@{assert}
+\<^assert>
(can_feature (CleanUp [RemoveHypothesesFromSkolemDefs])
[CleanUp [RemoveHypothesesFromSkolemDefs, RemoveDuplicates]]);
-@{assert}
+\<^assert>
(can_feature (Loop []) [Loop [Existential_Var]]);
-@{assert}
+\<^assert>
(not (can_feature (Loop []) [InnerLoopOnce [Existential_Var]]));
\<close>
@@ -1464,7 +1464,7 @@
else raise NO_LOOP_FEATS
end;
-@{assert}
+\<^assert>
(get_loop_feats [Loop [King_Cong, Break_Hypotheses, Existential_Free, Existential_Var, Universal]] =
[King_Cong, Break_Hypotheses, Existential_Free, Existential_Var, Universal])
\<close>
@@ -1505,7 +1505,7 @@
in
scrubup_tac st
|> break_seq
- |> tap (fn (_, rest) => @{assert} (null (Seq.list_of rest)))
+ |> tap (fn (_, rest) => \<^assert> (null (Seq.list_of rest)))
|> fst
|> TERMFUN (snd (*discard hypotheses*)
#> get_skolem_conc_const) NONE
@@ -1556,7 +1556,7 @@
val tacs =
map (fn t_s => (* FIXME proper context!? *)
- Rule_Insts.eres_inst_tac @{context} [((("x", 0), Position.none), t_s)] [] @{thm allE}
+ Rule_Insts.eres_inst_tac \<^context> [((("x", 0), Position.none), t_s)] [] @{thm allE}
THEN' assume_tac ctxt)
in
(TRY o eresolve_tac ctxt @{thms forall_pos_lift})
@@ -1651,20 +1651,20 @@
end
val interpreted_consts =
- [@{const_name HOL.All}, @{const_name HOL.Ex},
- @{const_name Hilbert_Choice.Eps},
- @{const_name HOL.conj},
- @{const_name HOL.disj},
- @{const_name HOL.eq},
- @{const_name HOL.implies},
- @{const_name HOL.The},
- @{const_name HOL.Ex1},
- @{const_name HOL.Not},
+ [\<^const_name>\<open>HOL.All\<close>, \<^const_name>\<open>HOL.Ex\<close>,
+ \<^const_name>\<open>Hilbert_Choice.Eps\<close>,
+ \<^const_name>\<open>HOL.conj\<close>,
+ \<^const_name>\<open>HOL.disj\<close>,
+ \<^const_name>\<open>HOL.eq\<close>,
+ \<^const_name>\<open>HOL.implies\<close>,
+ \<^const_name>\<open>HOL.The\<close>,
+ \<^const_name>\<open>HOL.Ex1\<close>,
+ \<^const_name>\<open>HOL.Not\<close>,
(* @{const_name HOL.iff}, *) (*FIXME do these exist?*)
(* @{const_name HOL.not_equal}, *)
- @{const_name HOL.False},
- @{const_name HOL.True},
- @{const_name Pure.imp}]
+ \<^const_name>\<open>HOL.False\<close>,
+ \<^const_name>\<open>HOL.True\<close>,
+ \<^const_name>\<open>Pure.imp\<close>]
fun strip_qtfrs_tac ctxt =
REPEAT_DETERM (HEADGOAL (resolve_tac ctxt @{thms allI}))
@@ -1779,8 +1779,8 @@
if length hyp_clauses > 1 then no_tac st
else
let
- fun literal_content (Const (@{const_name HOL.eq}, _) $ lhs $ (rhs as @{term True})) = SOME (lhs, rhs)
- | literal_content (Const (@{const_name HOL.eq}, _) $ lhs $ (rhs as @{term False})) = SOME (lhs, rhs)
+ fun literal_content (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ (rhs as \<^term>\<open>True\<close>)) = SOME (lhs, rhs)
+ | literal_content (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ (rhs as \<^term>\<open>False\<close>)) = SOME (lhs, rhs)
| literal_content t = NONE
val hyp_clause =
@@ -2231,7 +2231,7 @@
fun leo2_on_load (pannot : TPTP_Reconstruct.proof_annotation) thy =
let
val ctxt = Proof_Context.init_global thy
- val dud = ("", Binding.empty, @{term False})
+ val dud = ("", Binding.empty, \<^term>\<open>False\<close>)
val pre_skolem_defs =
nodes_by_inference (#meta pannot) "extcnf_forall_neg" @
nodes_by_inference (#meta pannot) "extuni_func"
--- a/src/HOL/TPTP/atp_problem_import.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/atp_problem_import.ML Sat Jan 05 17:24:33 2019 +0100
@@ -63,11 +63,11 @@
(** Nitpick **)
-fun aptrueprop f ((t0 as @{const Trueprop}) $ t1) = t0 $ f t1
+fun aptrueprop f ((t0 as \<^const>\<open>Trueprop\<close>) $ t1) = t0 $ f t1
| aptrueprop f t = f t
-fun is_legitimate_tptp_def (@{const Trueprop} $ t) = is_legitimate_tptp_def t
- | is_legitimate_tptp_def (Const (@{const_name HOL.eq}, _) $ t $ u) =
+fun is_legitimate_tptp_def (\<^const>\<open>Trueprop\<close> $ t) = is_legitimate_tptp_def t
+ | is_legitimate_tptp_def (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ u) =
(is_Const t orelse is_Free t) andalso not (exists_subterm (curry (op =) t) u)
| is_legitimate_tptp_def _ = false
@@ -102,7 +102,7 @@
val subst = []
in
Nitpick.pick_nits_in_term state params Nitpick.TPTP i n step subst defs nondefs
- (case conjs of conj :: _ => conj | [] => @{prop True});
+ (case conjs of conj :: _ => conj | [] => \<^prop>\<open>True\<close>);
()
end
@@ -124,7 +124,7 @@
("maxvars", "100000")]
in
Refute.refute_term ctxt params (defs @ nondefs)
- (case conjs of conj :: _ => conj | [] => @{prop True})
+ (case conjs of conj :: _ => conj | [] => \<^prop>\<open>True\<close>)
|> print_szs_of_outcome (not (null conjs))
end
@@ -148,9 +148,9 @@
fun nitpick_finite_oracle_tac ctxt timeout i th =
let
- fun is_safe (Type (@{type_name fun}, Ts)) = forall is_safe Ts
- | is_safe @{typ prop} = true
- | is_safe @{typ bool} = true
+ fun is_safe (Type (\<^type_name>\<open>fun\<close>, Ts)) = forall is_safe Ts
+ | is_safe \<^typ>\<open>prop\<close> = true
+ | is_safe \<^typ>\<open>bool\<close> = true
| is_safe _ = false
val conj = Thm.term_of (Thm.cprem_of th i)
@@ -187,7 +187,7 @@
val assm_ths0 = map (Skip_Proof.make_thm thy) assms
val ((assm_name, _), ctxt) = ctxt
|> Config.put Sledgehammer_Prover_ATP.atp_completish (if completeness > 0 then 3 else 0)
- |> Local_Theory.note ((@{binding thms}, []), assm_ths0)
+ |> Local_Theory.note ((\<^binding>\<open>thms\<close>, []), assm_ths0)
fun ref_of th = (Facts.named (Thm.derivation_name th), [])
val ref_of_assms = (Facts.named assm_name, [])
@@ -260,7 +260,7 @@
end
fun make_conj (defs, nondefs) conjs =
- Logic.list_implies (rev defs @ rev nondefs, case conjs of conj :: _ => conj | [] => @{prop False})
+ Logic.list_implies (rev defs @ rev nondefs, case conjs of conj :: _ => conj | [] => \<^prop>\<open>False\<close>)
fun print_szs_of_success conjs success =
writeln ("% SZS status " ^
--- a/src/HOL/TPTP/atp_theory_export.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/atp_theory_export.ML Sat Jan 05 17:24:33 2019 +0100
@@ -136,8 +136,8 @@
(* A fairly random selection of types used for monomorphizing. *)
val ground_types =
- [@{typ nat}, HOLogic.intT, HOLogic.realT, @{typ "nat => bool"}, @{typ bool},
- @{typ unit}]
+ [\<^typ>\<open>nat\<close>, HOLogic.intT, HOLogic.realT, \<^typ>\<open>nat => bool\<close>, \<^typ>\<open>bool\<close>,
+ \<^typ>\<open>unit\<close>]
fun ground_type_of_tvar _ [] tvar = raise TYPE ("ground_type_of_tvar", [TVar tvar], [])
| ground_type_of_tvar thy (T :: Ts) tvar =
@@ -147,7 +147,7 @@
fun monomorphize_term ctxt t =
let val thy = Proof_Context.theory_of ctxt in
t |> map_types (map_type_tvar (ground_type_of_tvar thy ground_types))
- handle TYPE _ => @{prop True}
+ handle TYPE _ => \<^prop>\<open>True\<close>
end
fun heading_sort_key heading =
@@ -170,7 +170,7 @@
|> map (fn ((_, loc), th) =>
((Thm.get_name_hint th, loc), th |> Thm.prop_of |> mono ? monomorphize_term ctxt))
|> generate_atp_problem ctxt true format Axiom type_enc Exporter combsN false false true []
- @{prop False}
+ \<^prop>\<open>False\<close>
|> #1 |> sort_by (heading_sort_key o fst)
val prelude = fst (split_last problem)
val name_tabs = Sledgehammer_Fact.build_name_tables Thm.get_name_hint facts
--- a/src/HOL/TPTP/mash_export.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/TPTP/mash_export.ML Sat Jan 05 17:24:33 2019 +0100
@@ -287,7 +287,7 @@
#> Sledgehammer_MePo.mepo_suggested_facts ctxt params max_suggs NONE hyp_ts concl_t)
fun generate_mash_suggestions algorithm ctxt =
- (Options.put_default @{system_option MaSh} algorithm;
+ (Options.put_default \<^system_option>\<open>MaSh\<close> algorithm;
Sledgehammer_MaSh.mash_unlearn ctxt;
generate_mepo_or_mash_suggestions
(fn ctxt => fn thy_name => fn params as {provers = prover :: _, ...} =>
--- a/src/HOL/Tools/ATP/atp_problem_generate.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/ATP/atp_problem_generate.ML Sat Jan 05 17:24:33 2019 +0100
@@ -703,14 +703,14 @@
fun is_lambda_free t =
(case t of
- @{const Not} $ t1 => is_lambda_free t1
+ \<^const>\<open>Not\<close> $ t1 => is_lambda_free t1
| Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, _, t') => is_lambda_free t'
| Const (\<^const_name>\<open>All\<close>, _) $ t1 => is_lambda_free t1
| Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, _, t') => is_lambda_free t'
| Const (\<^const_name>\<open>Ex\<close>, _) $ t1 => is_lambda_free t1
- | @{const HOL.conj} $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
- | @{const HOL.disj} $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
- | @{const HOL.implies} $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 => is_lambda_free t1 andalso is_lambda_free t2
| Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [\<^typ>\<open>bool\<close>, _])) $ t1 $ t2 =>
is_lambda_free t1 andalso is_lambda_free t2
| _ => not (exists_subterm (fn Abs _ => true | _ => false) t))
@@ -722,16 +722,16 @@
let
fun trans Ts t =
(case t of
- @{const Not} $ t1 => @{const Not} $ trans Ts t1
+ \<^const>\<open>Not\<close> $ t1 => \<^const>\<open>Not\<close> $ trans Ts t1
| (t0 as Const (\<^const_name>\<open>All\<close>, _)) $ Abs (s, T, t') =>
t0 $ Abs (s, T, trans (T :: Ts) t')
| (t0 as Const (\<^const_name>\<open>All\<close>, _)) $ t1 => trans Ts (t0 $ eta_expand Ts t1 1)
| (t0 as Const (\<^const_name>\<open>Ex\<close>, _)) $ Abs (s, T, t') =>
t0 $ Abs (s, T, trans (T :: Ts) t')
| (t0 as Const (\<^const_name>\<open>Ex\<close>, _)) $ t1 => trans Ts (t0 $ eta_expand Ts t1 1)
- | (t0 as @{const HOL.conj}) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
- | (t0 as @{const HOL.disj}) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
- | (t0 as @{const HOL.implies}) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
+ | (t0 as \<^const>\<open>HOL.conj\<close>) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
+ | (t0 as \<^const>\<open>HOL.disj\<close>) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
+ | (t0 as \<^const>\<open>HOL.implies\<close>) $ t1 $ t2 => t0 $ trans Ts t1 $ trans Ts t2
| (t0 as Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [\<^typ>\<open>bool\<close>, _]))) $ t1 $ t2 =>
t0 $ trans Ts t1 $ trans Ts t2
| _ =>
@@ -1197,17 +1197,17 @@
do_formula bs pos1 t1 ##>> do_formula bs pos2 t2 #>> uncurry (mk_aconn c)
and do_formula bs pos t =
(case t of
- @{const Trueprop} $ t1 => do_formula bs pos t1
- | @{const Not} $ t1 => do_formula bs (Option.map not pos) t1 #>> mk_anot
+ \<^const>\<open>Trueprop\<close> $ t1 => do_formula bs pos t1
+ | \<^const>\<open>Not\<close> $ t1 => do_formula bs (Option.map not pos) t1 #>> mk_anot
| Const (\<^const_name>\<open>All\<close>, _) $ Abs (s, T, t') => do_quant bs AForall pos s T t'
| (t0 as Const (\<^const_name>\<open>All\<close>, _)) $ t1 =>
do_formula bs pos (t0 $ eta_expand (map (snd o snd) bs) t1 1)
| Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (s, T, t') => do_quant bs AExists pos s T t'
| (t0 as Const (\<^const_name>\<open>Ex\<close>, _)) $ t1 =>
do_formula bs pos (t0 $ eta_expand (map (snd o snd) bs) t1 1)
- | @{const HOL.conj} $ t1 $ t2 => do_conn bs AAnd pos t1 pos t2
- | @{const HOL.disj} $ t1 $ t2 => do_conn bs AOr pos t1 pos t2
- | @{const HOL.implies} $ t1 $ t2 => do_conn bs AImplies (Option.map not pos) t1 pos t2
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 => do_conn bs AAnd pos t1 pos t2
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 => do_conn bs AOr pos t1 pos t2
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 => do_conn bs AImplies (Option.map not pos) t1 pos t2
| Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [\<^typ>\<open>bool\<close>, _])) $ t1 $ t2 =>
if iff_for_eq then do_conn bs AIff NONE t1 NONE t2 else do_term bs t
| _ => do_term bs t)
@@ -1264,7 +1264,7 @@
|> presimplify_term ctxt
|> HOLogic.dest_Trueprop
end
- handle TERM _ => @{const True}
+ handle TERM _ => \<^const>\<open>True\<close>
(* Satallax prefers "=" to "<=>" (for definitions) and Metis (CNF) requires "=" for technical
reasons. *)
@@ -1847,9 +1847,9 @@
end
in List.partition (curry (op =) Definition o #role) #>> reorder [] #> op @ end
-fun s_not_prop (@{const Trueprop} $ t) = @{const Trueprop} $ s_not t
- | s_not_prop (@{const Pure.imp} $ t $ \<^prop>\<open>False\<close>) = t
- | s_not_prop t = @{const Pure.imp} $ t $ \<^prop>\<open>False\<close>
+fun s_not_prop (\<^const>\<open>Trueprop\<close> $ t) = \<^const>\<open>Trueprop\<close> $ s_not t
+ | s_not_prop (\<^const>\<open>Pure.imp\<close> $ t $ \<^prop>\<open>False\<close>) = t
+ | s_not_prop t = \<^const>\<open>Pure.imp\<close> $ t $ \<^prop>\<open>False\<close>
fun translate_formulas ctxt prem_role format type_enc lam_trans presimp hyp_ts concl_t facts =
let
--- a/src/HOL/Tools/ATP/atp_proof_reconstruct.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/ATP/atp_proof_reconstruct.ML Sat Jan 05 17:24:33 2019 +0100
@@ -127,7 +127,7 @@
| (u, Const (\<^const_name>\<open>True\<close>, _)) => u
| (Const (\<^const_name>\<open>False\<close>, _), v) => s_not v
| (u, Const (\<^const_name>\<open>False\<close>, _)) => s_not u
- | _ => if u aconv v then @{const True} else t $ simplify_bool u $ simplify_bool v)
+ | _ => if u aconv v then \<^const>\<open>True\<close> else t $ simplify_bool u $ simplify_bool v)
| simplify_bool (t $ u) = simplify_bool t $ simplify_bool u
| simplify_bool (Abs (s, T, t)) = Abs (s, T, simplify_bool t)
| simplify_bool t = t
@@ -351,7 +351,7 @@
error "Isar proof reconstruction failed because the ATP proof contains unparsable \
\material"
else if String.isPrefix native_type_prefix s then
- @{const True} (* ignore TPTP type information (needed?) *)
+ \<^const>\<open>True\<close> (* ignore TPTP type information (needed?) *)
else if s = tptp_equal then
list_comb (Const (\<^const_name>\<open>HOL.eq\<close>, Type_Infer.anyT \<^sort>\<open>type\<close>),
map (do_term [] NONE) us)
@@ -372,7 +372,7 @@
(nth us (length us - 2))
end
else if s' = type_guard_name then
- @{const True} (* ignore type predicates *)
+ \<^const>\<open>True\<close> (* ignore type predicates *)
else
let
val new_skolem = String.isPrefix new_skolem_const_prefix s''
@@ -436,7 +436,7 @@
fun term_of_atom ctxt format type_enc textual sym_tab pos (u as ATerm ((s, _), _)) =
if String.isPrefix class_prefix s then
add_type_constraint pos (type_constraint_of_term ctxt u)
- #> pair @{const True}
+ #> pair \<^const>\<open>True\<close>
else
pair (term_of_atp ctxt format type_enc textual sym_tab (SOME \<^typ>\<open>bool\<close>) u)
@@ -614,8 +614,8 @@
fun repair_waldmeister_endgame proof =
let
- fun repair_tail (name, _, @{const Trueprop} $ t, rule, deps) =
- (name, Negated_Conjecture, @{const Trueprop} $ s_not t, rule, deps)
+ fun repair_tail (name, _, \<^const>\<open>Trueprop\<close> $ t, rule, deps) =
+ (name, Negated_Conjecture, \<^const>\<open>Trueprop\<close> $ s_not t, rule, deps)
fun repair_body [] = []
| repair_body ((line as ((num, _), _, _, _, _)) :: lines) =
if num = waldmeister_conjecture_num then map repair_tail (line :: lines)
--- a/src/HOL/Tools/ATP/atp_util.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/ATP/atp_util.ML Sat Jan 05 17:24:33 2019 +0100
@@ -264,39 +264,39 @@
Const (\<^const_name>\<open>Ex\<close>, T) $ Abs (s, T', s_not t')
| s_not (Const (\<^const_name>\<open>Ex\<close>, T) $ Abs (s, T', t')) =
Const (\<^const_name>\<open>All\<close>, T) $ Abs (s, T', s_not t')
- | s_not (@{const HOL.implies} $ t1 $ t2) = @{const HOL.conj} $ t1 $ s_not t2
- | s_not (@{const HOL.conj} $ t1 $ t2) =
- @{const HOL.disj} $ s_not t1 $ s_not t2
- | s_not (@{const HOL.disj} $ t1 $ t2) =
- @{const HOL.conj} $ s_not t1 $ s_not t2
- | s_not (@{const False}) = @{const True}
- | s_not (@{const True}) = @{const False}
- | s_not (@{const Not} $ t) = t
- | s_not t = @{const Not} $ t
+ | s_not (\<^const>\<open>HOL.implies\<close> $ t1 $ t2) = \<^const>\<open>HOL.conj\<close> $ t1 $ s_not t2
+ | s_not (\<^const>\<open>HOL.conj\<close> $ t1 $ t2) =
+ \<^const>\<open>HOL.disj\<close> $ s_not t1 $ s_not t2
+ | s_not (\<^const>\<open>HOL.disj\<close> $ t1 $ t2) =
+ \<^const>\<open>HOL.conj\<close> $ s_not t1 $ s_not t2
+ | s_not (\<^const>\<open>False\<close>) = \<^const>\<open>True\<close>
+ | s_not (\<^const>\<open>True\<close>) = \<^const>\<open>False\<close>
+ | s_not (\<^const>\<open>Not\<close> $ t) = t
+ | s_not t = \<^const>\<open>Not\<close> $ t
-fun s_conj (@{const True}, t2) = t2
- | s_conj (t1, @{const True}) = t1
- | s_conj (@{const False}, _) = @{const False}
- | s_conj (_, @{const False}) = @{const False}
+fun s_conj (\<^const>\<open>True\<close>, t2) = t2
+ | s_conj (t1, \<^const>\<open>True\<close>) = t1
+ | s_conj (\<^const>\<open>False\<close>, _) = \<^const>\<open>False\<close>
+ | s_conj (_, \<^const>\<open>False\<close>) = \<^const>\<open>False\<close>
| s_conj (t1, t2) = if t1 aconv t2 then t1 else HOLogic.mk_conj (t1, t2)
-fun s_disj (@{const False}, t2) = t2
- | s_disj (t1, @{const False}) = t1
- | s_disj (@{const True}, _) = @{const True}
- | s_disj (_, @{const True}) = @{const True}
+fun s_disj (\<^const>\<open>False\<close>, t2) = t2
+ | s_disj (t1, \<^const>\<open>False\<close>) = t1
+ | s_disj (\<^const>\<open>True\<close>, _) = \<^const>\<open>True\<close>
+ | s_disj (_, \<^const>\<open>True\<close>) = \<^const>\<open>True\<close>
| s_disj (t1, t2) = if t1 aconv t2 then t1 else HOLogic.mk_disj (t1, t2)
-fun s_imp (@{const True}, t2) = t2
- | s_imp (t1, @{const False}) = s_not t1
- | s_imp (@{const False}, _) = @{const True}
- | s_imp (_, @{const True}) = @{const True}
+fun s_imp (\<^const>\<open>True\<close>, t2) = t2
+ | s_imp (t1, \<^const>\<open>False\<close>) = s_not t1
+ | s_imp (\<^const>\<open>False\<close>, _) = \<^const>\<open>True\<close>
+ | s_imp (_, \<^const>\<open>True\<close>) = \<^const>\<open>True\<close>
| s_imp p = HOLogic.mk_imp p
-fun s_iff (@{const True}, t2) = t2
- | s_iff (t1, @{const True}) = t1
- | s_iff (@{const False}, t2) = s_not t2
- | s_iff (t1, @{const False}) = s_not t1
- | s_iff (t1, t2) = if t1 aconv t2 then @{const True} else HOLogic.eq_const HOLogic.boolT $ t1 $ t2
+fun s_iff (\<^const>\<open>True\<close>, t2) = t2
+ | s_iff (t1, \<^const>\<open>True\<close>) = t1
+ | s_iff (\<^const>\<open>False\<close>, t2) = s_not t2
+ | s_iff (t1, \<^const>\<open>False\<close>) = s_not t1
+ | s_iff (t1, t2) = if t1 aconv t2 then \<^const>\<open>True\<close> else HOLogic.eq_const HOLogic.boolT $ t1 $ t2
fun close_form t =
fold (fn ((s, i), T) => fn t' =>
@@ -352,11 +352,11 @@
fun unextensionalize_def t =
case t of
- @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs) =>
+ \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs) =>
(case strip_comb lhs of
(c as Const (_, T), args) =>
if forall is_Var args andalso not (has_duplicates (op =) args) then
- @{const Trueprop}
+ \<^const>\<open>Trueprop\<close>
$ (Const (\<^const_name>\<open>HOL.eq\<close>, T --> T --> \<^typ>\<open>bool\<close>)
$ c $ fold_rev lambda args rhs)
else
@@ -370,8 +370,8 @@
"Meson_Clausify".) *)
fun transform_elim_prop t =
case Logic.strip_imp_concl t of
- @{const Trueprop} $ Var (z, \<^typ>\<open>bool\<close>) =>
- subst_Vars [(z, @{const False})] t
+ \<^const>\<open>Trueprop\<close> $ Var (z, \<^typ>\<open>bool\<close>) =>
+ subst_Vars [(z, \<^const>\<open>False\<close>)] t
| Var (z, \<^typ>\<open>prop\<close>) => subst_Vars [(z, \<^prop>\<open>False\<close>)] t
| _ => t
--- a/src/HOL/Tools/BNF/bnf_fp_def_sugar.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_fp_def_sugar.ML Sat Jan 05 17:24:33 2019 +0100
@@ -2212,7 +2212,7 @@
let
fun mk_goal c cps gcorec n k disc =
mk_Trueprop_eq (disc $ (gcorec $ c),
- if n = 1 then @{const True}
+ if n = 1 then \<^const>\<open>True\<close>
else Library.foldr1 HOLogic.mk_conj (seq_conds mk_maybe_not n k cps));
val goalss = @{map 6} (map2 oooo mk_goal) cs cpss gcorecs ns kss discss;
--- a/src/HOL/Tools/BNF/bnf_fp_rec_sugar_transfer.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_fp_rec_sugar_transfer.ML Sat Jan 05 17:24:33 2019 +0100
@@ -42,7 +42,7 @@
let
fun instantiate_with_lambda thm =
let
- val prop as @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (Var (_, fT) $ _) $ _) =
+ val prop as \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (Var (_, fT) $ _) $ _) =
Thm.prop_of thm;
val T = range_type fT;
val j = Term.maxidx_of_term prop + 1;
--- a/src/HOL/Tools/BNF/bnf_gfp_grec_sugar.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_gfp_grec_sugar.ML Sat Jan 05 17:24:33 2019 +0100
@@ -385,7 +385,7 @@
val ssig_map_thms = #map_thms ssig_fp_bnf_sugar;
val all_algLam_alg_pointfuls = map (mk_pointful ctxt) all_algLam_algs;
- val @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs) = code_goal;
+ val \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ lhs $ rhs) = code_goal;
val (fun_t, args) = strip_comb lhs;
val closed_rhs = fold_rev lambda args rhs;
@@ -447,7 +447,7 @@
val fp_nesting_Ts = map T_of_bnf fp_nesting_bnfs;
- fun is_nullary_disc_def (@{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _
+ fun is_nullary_disc_def (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _
$ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ _))) = true
| is_nullary_disc_def (Const (\<^const_name>\<open>Pure.eq\<close>, _) $ _
$ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ _)) = true
@@ -512,7 +512,7 @@
val goal = mk_Trueprop_eq (fun_t, abs_curried_balanced arg_Ts algrho);
fun const_of_transfer thm =
- (case Thm.prop_of thm of @{const Trueprop} $ (_ $ cst $ _) => cst);
+ (case Thm.prop_of thm of \<^const>\<open>Trueprop\<close> $ (_ $ cst $ _) => cst);
val eq_algrho =
Goal.prove (*no sorry*) ctxt [] [] goal (fn {context = ctxt, prems = _} =>
@@ -590,7 +590,7 @@
fun derive_cong_ctr_intros ctxt cong_ctor_intro =
let
- val @{const Pure.imp} $ _ $ (@{const Trueprop} $ ((Rcong as _ $ _) $ _ $ (ctor $ _))) =
+ val \<^const>\<open>Pure.imp\<close> $ _ $ (\<^const>\<open>Trueprop\<close> $ ((Rcong as _ $ _) $ _ $ (ctor $ _))) =
Thm.prop_of cong_ctor_intro;
val fpT as Type (fpT_name, fp_argTs) = range_type (fastype_of ctor);
@@ -615,19 +615,19 @@
fun derive_cong_friend_intro ctxt cong_algrho_intro =
let
- val @{const Pure.imp} $ _ $ (@{const Trueprop} $ ((Rcong as _ $ _) $ _
+ val \<^const>\<open>Pure.imp\<close> $ _ $ (\<^const>\<open>Trueprop\<close> $ ((Rcong as _ $ _) $ _
$ ((algrho as Const (algrho_name, _)) $ _))) =
Thm.prop_of cong_algrho_intro;
val fpT as Type (_, fp_argTs) = range_type (fastype_of algrho);
- fun has_algrho (@{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ rhs)) =
+ fun has_algrho (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ _ $ rhs)) =
fst (dest_Const (head_of (strip_abs_body rhs))) = algrho_name;
val eq_algrho :: _ =
maps (filter (has_algrho o Thm.prop_of) o #eq_algrhos o snd) (all_friend_extras_of ctxt);
- val @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ friend0 $ _) = Thm.prop_of eq_algrho;
+ val \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ friend0 $ _) = Thm.prop_of eq_algrho;
val friend = mk_ctr fp_argTs friend0;
val goal = mk_cong_intro_ctr_or_friend_goal ctxt fpT Rcong friend;
@@ -654,8 +654,8 @@
let
val thy = Proof_Context.theory_of ctxt;
- val @{const Pure.imp} $ (@{const Trueprop} $ (_ $ Abs (_, _, _ $
- Abs (_, _, @{const implies} $ _ $ (_ $ (cong0 $ _) $ _ $ _))))) $ _ =
+ val \<^const>\<open>Pure.imp\<close> $ (\<^const>\<open>Trueprop\<close> $ (_ $ Abs (_, _, _ $
+ Abs (_, _, \<^const>\<open>implies\<close> $ _ $ (_ $ (cong0 $ _) $ _ $ _))))) $ _ =
Thm.prop_of dtor_coinduct;
val SOME {X as TVar ((X_s, _), _), fp_res = {dtor_ctors, ...}, pre_bnf,
@@ -820,7 +820,7 @@
|> curry (op ~~) (map (fn disc => disc $ lhs) discs);
fun mk_disc_iff_props props [] = props
- | mk_disc_iff_props _ ((lhs, @{const HOL.True}) :: _) = [lhs]
+ | mk_disc_iff_props _ ((lhs, \<^const>\<open>HOL.True\<close>) :: _) = [lhs]
| mk_disc_iff_props props ((lhs, rhs) :: views) =
mk_disc_iff_props ((HOLogic.mk_eq (lhs, rhs)) :: props) views;
in
@@ -2241,7 +2241,7 @@
val fun_T =
(case code_goal of
- @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ _) => fastype_of (head_of t)
+ \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ _) => fastype_of (head_of t)
| _ => ill_formed_equation_lhs_rhs lthy [code_goal]);
val fun_t = Const (fun_name, fun_T);
--- a/src/HOL/Tools/BNF/bnf_gfp_grec_sugar_util.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_gfp_grec_sugar_util.ML Sat Jan 05 17:24:33 2019 +0100
@@ -367,7 +367,7 @@
ctrXs_Tss
|> map_index (fn (i, Ts) =>
Abs (Name.uu, mk_tupleT_balanced Ts,
- if i + 1 = k then @{const HOL.True} else @{const HOL.False}))
+ if i + 1 = k then \<^const>\<open>HOL.True\<close> else \<^const>\<open>HOL.False\<close>))
|> mk_case_sumN_balanced
|> map_types substXYT
|> (fn tm => Library.foldl1 HOLogic.mk_comp [tm, rep, snd_const YpreT])
--- a/src/HOL/Tools/BNF/bnf_gfp_rec_sugar.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_gfp_rec_sugar.ML Sat Jan 05 17:24:33 2019 +0100
@@ -189,22 +189,22 @@
fun sort_list_duplicates xs = map snd (sort (int_ord o apply2 fst) xs);
-val mk_conjs = try (foldr1 HOLogic.mk_conj) #> the_default @{const True};
-val mk_disjs = try (foldr1 HOLogic.mk_disj) #> the_default @{const False};
+val mk_conjs = try (foldr1 HOLogic.mk_conj) #> the_default \<^const>\<open>True\<close>;
+val mk_disjs = try (foldr1 HOLogic.mk_disj) #> the_default \<^const>\<open>False\<close>;
val mk_dnf = mk_disjs o map mk_conjs;
-val conjuncts_s = filter_out (curry (op aconv) @{const True}) o HOLogic.conjuncts;
+val conjuncts_s = filter_out (curry (op aconv) \<^const>\<open>True\<close>) o HOLogic.conjuncts;
-fun s_not @{const True} = @{const False}
- | s_not @{const False} = @{const True}
- | s_not (@{const Not} $ t) = t
- | s_not (@{const conj} $ t $ u) = @{const disj} $ s_not t $ s_not u
- | s_not (@{const disj} $ t $ u) = @{const conj} $ s_not t $ s_not u
- | s_not t = @{const Not} $ t;
+fun s_not \<^const>\<open>True\<close> = \<^const>\<open>False\<close>
+ | s_not \<^const>\<open>False\<close> = \<^const>\<open>True\<close>
+ | s_not (\<^const>\<open>Not\<close> $ t) = t
+ | s_not (\<^const>\<open>conj\<close> $ t $ u) = \<^const>\<open>disj\<close> $ s_not t $ s_not u
+ | s_not (\<^const>\<open>disj\<close> $ t $ u) = \<^const>\<open>conj\<close> $ s_not t $ s_not u
+ | s_not t = \<^const>\<open>Not\<close> $ t;
val s_not_conj = conjuncts_s o s_not o mk_conjs;
-fun propagate_unit_pos u cs = if member (op aconv) cs u then [@{const False}] else cs;
+fun propagate_unit_pos u cs = if member (op aconv) cs u then [\<^const>\<open>False\<close>] else cs;
fun propagate_unit_neg not_u cs = remove (op aconv) not_u cs;
fun propagate_units css =
@@ -215,17 +215,17 @@
(map (propagate_unit_pos u) (uss @ css'))));
fun s_conjs cs =
- if member (op aconv) cs @{const False} then @{const False}
- else mk_conjs (remove (op aconv) @{const True} cs);
+ if member (op aconv) cs \<^const>\<open>False\<close> then \<^const>\<open>False\<close>
+ else mk_conjs (remove (op aconv) \<^const>\<open>True\<close> cs);
fun s_disjs ds =
- if member (op aconv) ds @{const True} then @{const True}
- else mk_disjs (remove (op aconv) @{const False} ds);
+ if member (op aconv) ds \<^const>\<open>True\<close> then \<^const>\<open>True\<close>
+ else mk_disjs (remove (op aconv) \<^const>\<open>False\<close> ds);
fun s_dnf css0 =
let val css = propagate_units css0 in
if null css then
- [@{const False}]
+ [\<^const>\<open>False\<close>]
else if exists null css then
[]
else
--- a/src/HOL/Tools/BNF/bnf_gfp_rec_sugar_tactics.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_gfp_rec_sugar_tactics.ML Sat Jan 05 17:24:33 2019 +0100
@@ -154,7 +154,7 @@
fun inst_split_eq ctxt split =
(case Thm.prop_of split of
- @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (Var (_, Type (_, [T, _])) $ _) $ _) =>
+ \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (Var (_, Type (_, [T, _])) $ _) $ _) =>
let
val s = Name.uu;
val eq = Abs (Name.uu, T, HOLogic.mk_eq (Free (s, T), Bound 0));
--- a/src/HOL/Tools/BNF/bnf_lfp_countable.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/BNF/bnf_lfp_countable.ML Sat Jan 05 17:24:33 2019 +0100
@@ -70,13 +70,13 @@
fun encode_sumN n k t =
Balanced_Tree.access {init = t,
- left = fn t => @{const sum_encode} $ (@{const Inl (nat, nat)} $ t),
- right = fn t => @{const sum_encode} $ (@{const Inr (nat, nat)} $ t)}
+ left = fn t => \<^const>\<open>sum_encode\<close> $ (@{const Inl (nat, nat)} $ t),
+ right = fn t => \<^const>\<open>sum_encode\<close> $ (@{const Inr (nat, nat)} $ t)}
n k;
fun encode_tuple [] = \<^term>\<open>0 :: nat\<close>
| encode_tuple ts =
- Balanced_Tree.make (fn (t, u) => @{const prod_encode} $ (@{const Pair (nat, nat)} $ u $ t)) ts;
+ Balanced_Tree.make (fn (t, u) => \<^const>\<open>prod_encode\<close> $ (@{const Pair (nat, nat)} $ u $ t)) ts;
fun mk_encode_funs ctxt fpTs ns ctrss0 recs0 =
let
@@ -181,7 +181,7 @@
|> map Thm.close_derivation
end;
-fun get_countable_goal_type_name (@{const Trueprop} $ (Const (\<^const_name>\<open>Ex\<close>, _)
+fun get_countable_goal_type_name (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>Ex\<close>, _)
$ Abs (_, Type (_, [Type (s, _), _]), Const (\<^const_name>\<open>inj_on\<close>, _) $ Bound 0
$ Const (\<^const_name>\<open>top\<close>, _)))) = s
| get_countable_goal_type_name _ = error "Wrong goal format for datatype countability tactic";
--- a/src/HOL/Tools/Ctr_Sugar/ctr_sugar.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Ctr_Sugar/ctr_sugar.ML Sat Jan 05 17:24:33 2019 +0100
@@ -299,11 +299,11 @@
fun name_of_disc t =
(case head_of t of
- Abs (_, _, @{const Not} $ (t' $ Bound 0)) =>
+ Abs (_, _, \<^const>\<open>Not\<close> $ (t' $ Bound 0)) =>
Long_Name.map_base_name (prefix not_prefix) (name_of_disc t')
| Abs (_, _, Const (\<^const_name>\<open>HOL.eq\<close>, _) $ Bound 0 $ t') =>
Long_Name.map_base_name (prefix is_prefix) (name_of_disc t')
- | Abs (_, _, @{const Not} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ Bound 0 $ t')) =>
+ | Abs (_, _, \<^const>\<open>Not\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ Bound 0 $ t')) =>
Long_Name.map_base_name (prefix (not_prefix ^ is_prefix)) (name_of_disc t')
| t' => name_of_const "discriminator" (perhaps (try domain_type)) t');
@@ -1033,7 +1033,7 @@
val disc_eq_case_thms =
let
- fun const_of_bool b = if b then @{const True} else @{const False};
+ fun const_of_bool b = if b then \<^const>\<open>True\<close> else \<^const>\<open>False\<close>;
fun mk_case_args n = map_index (fn (k, argTs) =>
fold_rev Term.absdummy argTs (const_of_bool (n = k))) ctr_Tss;
val goals = map_index (fn (n, udisc) =>
--- a/src/HOL/Tools/Lifting/lifting_def_code_dt.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Lifting/lifting_def_code_dt.ML Sat Jan 05 17:24:33 2019 +0100
@@ -401,7 +401,7 @@
let val (sel_rhs, wits) = mk_sel_case_args lthy ctr_Tss ks arg
in (arg |> #2, wits, list_comb (mk_sel_casex arg, sel_rhs)) end;
fun mk_dis_case_args args k = map (fn (k', arg) => (if k = k'
- then fold_rev Term.lambda arg @{const True} else fold_rev Term.lambda arg @{const False})) args;
+ then fold_rev Term.lambda arg \<^const>\<open>True\<close> else fold_rev Term.lambda arg \<^const>\<open>False\<close>)) args;
val sel_rhs = map (map mk_sel_rhs) sel_argss
val dis_rhs = map (fn k => list_comb (dis_casex, mk_dis_case_args (ks ~~ xss) k)) ks
val dis_qty = qty_isom --> HOLogic.boolT;
--- a/src/HOL/Tools/Metis/metis_reconstruct.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Metis/metis_reconstruct.ML Sat Jan 05 17:24:33 2019 +0100
@@ -222,10 +222,10 @@
end
end
-fun s_not (@{const Not} $ t) = t
+fun s_not (\<^const>\<open>Not\<close> $ t) = t
| s_not t = HOLogic.mk_not t
-fun simp_not_not (@{const Trueprop} $ t) = @{const Trueprop} $ simp_not_not t
- | simp_not_not (@{const Not} $ t) = s_not (simp_not_not t)
+fun simp_not_not (\<^const>\<open>Trueprop\<close> $ t) = \<^const>\<open>Trueprop\<close> $ simp_not_not t
+ | simp_not_not (\<^const>\<open>Not\<close> $ t) = s_not (simp_not_not t)
| simp_not_not t = t
val normalize_literal = simp_not_not o Envir.eta_contract
--- a/src/HOL/Tools/Nitpick/nitpick.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick.ML Sat Jan 05 17:24:33 2019 +0100
@@ -192,8 +192,8 @@
fun none_true assigns = forall (not_equal (SOME true) o snd) assigns
-fun has_lonely_bool_var (@{const Pure.conjunction}
- $ (@{const Trueprop} $ Free _) $ _) = true
+fun has_lonely_bool_var (\<^const>\<open>Pure.conjunction\<close>
+ $ (\<^const>\<open>Trueprop\<close> $ Free _) $ _) = true
| has_lonely_bool_var _ = false
val syntactic_sorts =
--- a/src/HOL/Tools/Nitpick/nitpick_hol.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_hol.ML Sat Jan 05 17:24:33 2019 +0100
@@ -331,16 +331,16 @@
else
s
-fun s_conj (t1, @{const True}) = t1
- | s_conj (@{const True}, t2) = t2
+fun s_conj (t1, \<^const>\<open>True\<close>) = t1
+ | s_conj (\<^const>\<open>True\<close>, t2) = t2
| s_conj (t1, t2) =
- if t1 = @{const False} orelse t2 = @{const False} then @{const False}
+ if t1 = \<^const>\<open>False\<close> orelse t2 = \<^const>\<open>False\<close> then \<^const>\<open>False\<close>
else HOLogic.mk_conj (t1, t2)
-fun s_disj (t1, @{const False}) = t1
- | s_disj (@{const False}, t2) = t2
+fun s_disj (t1, \<^const>\<open>False\<close>) = t1
+ | s_disj (\<^const>\<open>False\<close>, t2) = t2
| s_disj (t1, t2) =
- if t1 = @{const True} orelse t2 = @{const True} then @{const True}
+ if t1 = \<^const>\<open>True\<close> orelse t2 = \<^const>\<open>True\<close> then \<^const>\<open>True\<close>
else HOLogic.mk_disj (t1, t2)
fun strip_connective conn_t (t as (t0 $ t1 $ t2)) =
@@ -348,13 +348,13 @@
| strip_connective _ t = [t]
fun strip_any_connective (t as (t0 $ _ $ _)) =
- if t0 = @{const HOL.conj} orelse t0 = @{const HOL.disj} then
+ if t0 = \<^const>\<open>HOL.conj\<close> orelse t0 = \<^const>\<open>HOL.disj\<close> then
(strip_connective t0 t, t0)
else
- ([t], @{const Not})
- | strip_any_connective t = ([t], @{const Not})
-val conjuncts_of = strip_connective @{const HOL.conj}
-val disjuncts_of = strip_connective @{const HOL.disj}
+ ([t], \<^const>\<open>Not\<close>)
+ | strip_any_connective t = ([t], \<^const>\<open>Not\<close>)
+val conjuncts_of = strip_connective \<^const>\<open>HOL.conj\<close>
+val disjuncts_of = strip_connective \<^const>\<open>HOL.disj\<close>
(* When you add constants to these lists, make sure to handle them in
"Nitpick_Nut.nut_from_term", and perhaps in "Nitpick_Mono.consider_term" as
@@ -797,8 +797,8 @@
the (Quotient_Info.lookup_quotients thy s)
val partial =
case Thm.prop_of equiv_thm of
- @{const Trueprop} $ (Const (\<^const_name>\<open>equivp\<close>, _) $ _) => false
- | @{const Trueprop} $ (Const (\<^const_name>\<open>part_equivp\<close>, _) $ _) => true
+ \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>equivp\<close>, _) $ _) => false
+ | \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>part_equivp\<close>, _) $ _) => true
| _ => raise NOT_SUPPORTED "Ill-formed quotient type equivalence \
\relation theorem"
val Ts' = qtyp |> dest_Type |> snd
@@ -948,7 +948,7 @@
fold (fn (z as ((s, _), T)) => fn t' =>
Logic.all_const T $ Abs (s, T, abstract_over (Var z, t')))
(take (length zs' - length zs) zs')
- fun aux zs (@{const Pure.imp} $ t1 $ t2) =
+ fun aux zs (\<^const>\<open>Pure.imp\<close> $ t1 $ t2) =
let val zs' = Term.add_vars t1 zs in
close_up zs zs' (Logic.mk_implies (t1, aux zs' t2))
end
@@ -957,8 +957,8 @@
fun distinctness_formula T =
all_distinct_unordered_pairs_of
- #> map (fn (t1, t2) => @{const Not} $ (HOLogic.eq_const T $ t1 $ t2))
- #> List.foldr (s_conj o swap) @{const True}
+ #> map (fn (t1, t2) => \<^const>\<open>Not\<close> $ (HOLogic.eq_const T $ t1 $ t2))
+ #> List.foldr (s_conj o swap) \<^const>\<open>True\<close>
fun zero_const T = Const (\<^const_name>\<open>zero_class.zero\<close>, T)
fun suc_const T = Const (\<^const_name>\<open>Suc\<close>, T --> T)
@@ -986,7 +986,7 @@
SOME {abs_type, rep_type, Abs_name, ...} =>
[(Abs_name, varify_and_instantiate_type ctxt abs_type T rep_type --> T)]
| NONE =>
- if T = \<^typ>\<open>ind\<close> then [dest_Const @{const Zero_Rep}, dest_Const @{const Suc_Rep}]
+ if T = \<^typ>\<open>ind\<close> then [dest_Const \<^const>\<open>Zero_Rep\<close>, dest_Const \<^const>\<open>Suc_Rep\<close>]
else [])
| uncached_data_type_constrs _ _ = []
@@ -1145,8 +1145,8 @@
if t1' aconv t2 then \<^prop>\<open>True\<close> else t1 $ t2
| s_betapply _ (t1 as Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1', t2) =
if t1' aconv t2 then \<^term>\<open>True\<close> else t1 $ t2
- | s_betapply _ (Const (\<^const_name>\<open>If\<close>, _) $ @{const True} $ t1', _) = t1'
- | s_betapply _ (Const (\<^const_name>\<open>If\<close>, _) $ @{const False} $ _, t2) = t2
+ | s_betapply _ (Const (\<^const_name>\<open>If\<close>, _) $ \<^const>\<open>True\<close> $ t1', _) = t1'
+ | s_betapply _ (Const (\<^const_name>\<open>If\<close>, _) $ \<^const>\<open>False\<close> $ _, t2) = t2
| s_betapply Ts (Const (\<^const_name>\<open>Let\<close>,
Type (_, [bound_T, Type (_, [_, body_T])]))
$ t12 $ Abs (s, T, t13'), t2) =
@@ -1181,18 +1181,18 @@
fun discr_term_for_constr hol_ctxt (x as (s, T)) =
let val dataT = body_type T in
if s = \<^const_name>\<open>Suc\<close> then
- Abs (Name.uu, dataT, @{const Not} $ HOLogic.mk_eq (zero_const dataT, Bound 0))
+ Abs (Name.uu, dataT, \<^const>\<open>Not\<close> $ HOLogic.mk_eq (zero_const dataT, Bound 0))
else if length (data_type_constrs hol_ctxt dataT) >= 2 then
Const (discr_for_constr x)
else
- Abs (Name.uu, dataT, @{const True})
+ Abs (Name.uu, dataT, \<^const>\<open>True\<close>)
end
fun discriminate_value (hol_ctxt as {ctxt, ...}) x t =
case head_of t of
Const x' =>
- if x = x' then @{const True}
- else if is_nonfree_constr ctxt x' then @{const False}
+ if x = x' then \<^const>\<open>True\<close>
+ else if is_nonfree_constr ctxt x' then \<^const>\<open>False\<close>
else s_betapply [] (discr_term_for_constr hol_ctxt x, t)
| _ => s_betapply [] (discr_term_for_constr hol_ctxt x, t)
@@ -1380,9 +1380,9 @@
simplification rules (equational specifications). *)
fun term_under_def t =
case t of
- @{const Pure.imp} $ _ $ t2 => term_under_def t2
+ \<^const>\<open>Pure.imp\<close> $ _ $ t2 => term_under_def t2
| Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ _ => term_under_def t1
- | @{const Trueprop} $ t1 => term_under_def t1
+ | \<^const>\<open>Trueprop\<close> $ t1 => term_under_def t1
| Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _ => term_under_def t1
| Abs (_, _, t') => term_under_def t'
| t1 $ _ => term_under_def t1
@@ -1407,7 +1407,7 @@
val (lhs, rhs) =
case t of
Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2 => (t1, t2)
- | @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2) =>
+ | \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2) =>
(t1, t2)
| _ => raise TERM ("Nitpick_HOL.normalized_rhs_of", [t])
val args = strip_comb lhs |> snd
@@ -1485,11 +1485,11 @@
case t of
Const (\<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, _, t1) => lhs_of_equation t1
| Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ _ => SOME t1
- | @{const Pure.imp} $ _ $ t2 => lhs_of_equation t2
- | @{const Trueprop} $ t1 => lhs_of_equation t1
+ | \<^const>\<open>Pure.imp\<close> $ _ $ t2 => lhs_of_equation t2
+ | \<^const>\<open>Trueprop\<close> $ t1 => lhs_of_equation t1
| Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, _, t1) => lhs_of_equation t1
| Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _ => SOME t1
- | @{const HOL.implies} $ _ $ t2 => lhs_of_equation t2
+ | \<^const>\<open>HOL.implies\<close> $ _ $ t2 => lhs_of_equation t2
| _ => NONE
fun is_constr_pattern _ (Bound _) = true
@@ -1599,19 +1599,19 @@
(incr_boundvars 1 func_t, x),
discriminate_value hol_ctxt x (Bound 0)))
|> AList.group (op aconv)
- |> map (apsnd (List.foldl s_disj @{const False}))
+ |> map (apsnd (List.foldl s_disj \<^const>\<open>False\<close>))
|> sort (int_ord o apply2 (size_of_term o snd))
|> rev
in
if res_T = bool_T then
- if forall (member (op =) [@{const False}, @{const True}] o fst) cases then
+ if forall (member (op =) [\<^const>\<open>False\<close>, \<^const>\<open>True\<close>] o fst) cases then
case cases of
[(body_t, _)] => body_t
- | [_, (@{const True}, head_t2)] => head_t2
- | [_, (@{const False}, head_t2)] => @{const Not} $ head_t2
+ | [_, (\<^const>\<open>True\<close>, head_t2)] => head_t2
+ | [_, (\<^const>\<open>False\<close>, head_t2)] => \<^const>\<open>Not\<close> $ head_t2
| _ => raise BAD ("Nitpick_HOL.optimized_case_def", "impossible cases")
else
- @{const True} |> fold_rev (add_constr_case res_T) cases
+ \<^const>\<open>True\<close> |> fold_rev (add_constr_case res_T) cases
else
fst (hd cases) |> fold_rev (add_constr_case res_T) (tl cases)
end
@@ -1896,13 +1896,13 @@
in
Logic.list_implies (prems,
case concl of
- @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [T, _]))
+ \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [T, _]))
$ t1 $ t2) =>
- @{const Trueprop} $ extensional_equal j T t1 t2
- | @{const Trueprop} $ t' =>
- @{const Trueprop} $ HOLogic.mk_eq (t', @{const True})
+ \<^const>\<open>Trueprop\<close> $ extensional_equal j T t1 t2
+ | \<^const>\<open>Trueprop\<close> $ t' =>
+ \<^const>\<open>Trueprop\<close> $ HOLogic.mk_eq (t', \<^const>\<open>True\<close>)
| Const (\<^const_name>\<open>Pure.eq\<close>, Type (_, [T, _])) $ t1 $ t2 =>
- @{const Trueprop} $ extensional_equal j T t1 t2
+ \<^const>\<open>Trueprop\<close> $ extensional_equal j T t1 t2
| _ => (warning ("Ignoring " ^ quote tag ^ " for non-equation " ^
quote (Syntax.string_of_term ctxt t));
raise SAME ()))
@@ -1953,7 +1953,7 @@
end
fun ground_theorem_table thy =
- fold ((fn @{const Trueprop} $ t1 =>
+ fold ((fn \<^const>\<open>Trueprop\<close> $ t1 =>
is_ground_term t1 ? Inttab.map_default (hash_term t1, []) (cons t1)
| _ => I) o Thm.prop_of o snd) (Global_Theory.all_thms_of thy true) Inttab.empty
@@ -2018,13 +2018,13 @@
in
[Logic.mk_equals (normal_fun $ sel_a_t, sel_a_t),
Logic.list_implies
- ([@{const Not} $ (is_unknown_t $ normal_x),
- @{const Not} $ (is_unknown_t $ normal_y),
+ ([\<^const>\<open>Not\<close> $ (is_unknown_t $ normal_x),
+ \<^const>\<open>Not\<close> $ (is_unknown_t $ normal_y),
equiv_rel $ x_var $ y_var] |> map HOLogic.mk_Trueprop,
Logic.mk_equals (normal_x, normal_y)),
Logic.list_implies
- ([HOLogic.mk_Trueprop (@{const Not} $ (is_unknown_t $ normal_x)),
- HOLogic.mk_Trueprop (@{const Not} $ HOLogic.mk_eq (normal_x, x_var))],
+ ([HOLogic.mk_Trueprop (\<^const>\<open>Not\<close> $ (is_unknown_t $ normal_x)),
+ HOLogic.mk_Trueprop (\<^const>\<open>Not\<close> $ HOLogic.mk_eq (normal_x, x_var))],
HOLogic.mk_Trueprop (equiv_rel $ x_var $ normal_x))]
|> partial ? cons (HOLogic.mk_Trueprop (equiv_rel $ sel_a_t $ sel_a_t))
end
@@ -2034,7 +2034,7 @@
val xs = data_type_constrs hol_ctxt T
val pred_T = T --> bool_T
val iter_T = \<^typ>\<open>bisim_iterator\<close>
- val bisim_max = @{const bisim_iterator_max}
+ val bisim_max = \<^const>\<open>bisim_iterator_max\<close>
val n_var = Var (("n", 0), iter_T)
val n_var_minus_1 =
Const (\<^const_name>\<open>safe_The\<close>, (iter_T --> bool_T) --> iter_T)
@@ -2215,8 +2215,8 @@
fun repair_rec j (Const (\<^const_name>\<open>Ex\<close>, T1) $ Abs (s2, T2, t2')) =
Const (\<^const_name>\<open>Ex\<close>, T1)
$ Abs (s2, T2, repair_rec (j + 1) t2')
- | repair_rec j (@{const HOL.conj} $ t1 $ t2) =
- @{const HOL.conj} $ repair_rec j t1 $ repair_rec j t2
+ | repair_rec j (\<^const>\<open>HOL.conj\<close> $ t1 $ t2) =
+ \<^const>\<open>HOL.conj\<close> $ repair_rec j t1 $ repair_rec j t2
| repair_rec j t =
let val (head, args) = strip_comb t in
if head = Bound j then
@@ -2228,9 +2228,9 @@
val (nonrecs, recs) =
List.partition (curry (op =) 0 o num_occs_of_bound_in_term j)
(disjuncts_of body)
- val base_body = nonrecs |> List.foldl s_disj @{const False}
+ val base_body = nonrecs |> List.foldl s_disj \<^const>\<open>False\<close>
val step_body = recs |> map (repair_rec j)
- |> List.foldl s_disj @{const False}
+ |> List.foldl s_disj \<^const>\<open>False\<close>
in
(fold_rev Term.abs (tl xs) (incr_bv (~1, j, base_body))
|> ap_n_split (length arg_Ts) tuple_T bool_T,
@@ -2366,7 +2366,7 @@
[inductive_pred_axiom hol_ctxt x]
else case def_of_const thy def_tables x of
SOME def =>
- @{const Trueprop} $ HOLogic.mk_eq (Const x, def)
+ \<^const>\<open>Trueprop\<close> $ HOLogic.mk_eq (Const x, def)
|> equationalize_term ctxt "" |> the |> single
| NONE => [])
| psimps => psimps)
@@ -2374,7 +2374,7 @@
fun is_equational_fun_surely_complete hol_ctxt x =
case equational_fun_axioms hol_ctxt x of
- [@{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _)] =>
+ [\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _)] =>
strip_comb t1 |> snd |> forall is_Var
| _ => false
--- a/src/HOL/Tools/Nitpick/nitpick_model.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_model.ML Sat Jan 05 17:24:33 2019 +0100
@@ -333,8 +333,8 @@
else raise TYPE ("Nitpick_Model.format_fun.do_term", [T, T'], [])
in if T1' = T1 andalso T2' = T2 then t else do_fun T1' T2' T1 T2 t end
-fun truth_const_sort_key @{const True} = "0"
- | truth_const_sort_key @{const False} = "2"
+fun truth_const_sort_key \<^const>\<open>True\<close> = "0"
+ | truth_const_sort_key \<^const>\<open>False\<close> = "2"
| truth_const_sort_key _ = "1"
fun mk_tuple (Type (\<^type_name>\<open>prod\<close>, [T1, T2])) ts =
@@ -411,14 +411,14 @@
empty_const
| aux ((t1, t2) :: zs) =
aux zs
- |> t2 <> @{const False}
+ |> t2 <> \<^const>\<open>False\<close>
? curry (op $)
(insert_const
- $ (t1 |> t2 <> @{const True}
+ $ (t1 |> t2 <> \<^const>\<open>True\<close>
? curry (op $)
(Const (maybe_name, T --> T))))
in
- if forall (fn (_, t) => t <> @{const True} andalso t <> @{const False})
+ if forall (fn (_, t) => t <> \<^const>\<open>True\<close> andalso t <> \<^const>\<open>False\<close>)
tps then
Const (unknown, set_T)
else
@@ -516,7 +516,7 @@
| term_for_atom seen \<^typ>\<open>prop\<close> _ j k =
HOLogic.mk_Trueprop (term_for_atom seen bool_T bool_T j k)
| term_for_atom _ \<^typ>\<open>bool\<close> _ j _ =
- if j = 0 then @{const False} else @{const True}
+ if j = 0 then \<^const>\<open>False\<close> else \<^const>\<open>True\<close>
| term_for_atom seen T _ j k =
if T = nat_T then
HOLogic.mk_number nat_T j
--- a/src/HOL/Tools/Nitpick/nitpick_mono.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_mono.ML Sat Jan 05 17:24:33 2019 +0100
@@ -829,10 +829,10 @@
" \<turnstile> " ^ Syntax.string_of_term ctxt t ^
" : _?");
case t of
- @{const False} => (bool_M, accum ||> add_comp_frame (A Fls) Leq frame)
+ \<^const>\<open>False\<close> => (bool_M, accum ||> add_comp_frame (A Fls) Leq frame)
| Const (\<^const_name>\<open>None\<close>, T) =>
(mtype_for T, accum ||> add_comp_frame (A Fls) Leq frame)
- | @{const True} => (bool_M, accum ||> add_comp_frame (A Tru) Leq frame)
+ | \<^const>\<open>True\<close> => (bool_M, accum ||> add_comp_frame (A Tru) Leq frame)
| (t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ Bound 0 $ t2 =>
(* hack to exploit symmetry of equality when typing "insert" *)
(if t2 = Bound 0 then do_term \<^term>\<open>True\<close>
@@ -850,9 +850,9 @@
| \<^const_name>\<open>Ex\<close> =>
let val set_T = domain_type T in
do_term (Abs (Name.uu, set_T,
- @{const Not} $ (HOLogic.mk_eq
+ \<^const>\<open>Not\<close> $ (HOLogic.mk_eq
(Abs (Name.uu, domain_type set_T,
- @{const False}),
+ \<^const>\<open>False\<close>),
Bound 0)))) accum
end
| \<^const_name>\<open>HOL.eq\<close> => do_equals T accum
@@ -971,10 +971,10 @@
val (M', accum) =
do_term t' (accum |>> push_bound (V x) T M)
in (MFun (M, V x, M'), accum |>> pop_bound) end))
- | @{const Not} $ t1 => do_connect imp_spec t1 @{const False} accum
- | @{const conj} $ t1 $ t2 => do_connect conj_spec t1 t2 accum
- | @{const disj} $ t1 $ t2 => do_connect disj_spec t1 t2 accum
- | @{const implies} $ t1 $ t2 => do_connect imp_spec t1 t2 accum
+ | \<^const>\<open>Not\<close> $ t1 => do_connect imp_spec t1 \<^const>\<open>False\<close> accum
+ | \<^const>\<open>conj\<close> $ t1 $ t2 => do_connect conj_spec t1 t2 accum
+ | \<^const>\<open>disj\<close> $ t1 $ t2 => do_connect disj_spec t1 t2 accum
+ | \<^const>\<open>implies\<close> $ t1 $ t2 => do_connect imp_spec t1 t2 accum
| Const (\<^const_name>\<open>Let\<close>, _) $ t1 $ t2 =>
do_term (betapply (t2, t1)) accum
| t1 $ t2 =>
@@ -1060,7 +1060,7 @@
Const (s as \<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, T1, t1) =>
do_quantifier s T1 t1
| Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2 => do_equals t1 t2
- | @{const Trueprop} $ t1 => do_formula sn t1 accum
+ | \<^const>\<open>Trueprop\<close> $ t1 => do_formula sn t1 accum
| Const (s as \<^const_name>\<open>All\<close>, _) $ Abs (_, T1, t1) =>
do_quantifier s T1 t1
| Const (s as \<^const_name>\<open>Ex\<close>, T0) $ (t1 as Abs (_, T1, t1')) =>
@@ -1068,19 +1068,19 @@
Plus => do_quantifier s T1 t1'
| Minus =>
(* FIXME: Needed? *)
- do_term (@{const Not}
+ do_term (\<^const>\<open>Not\<close>
$ (HOLogic.eq_const (domain_type T0) $ t1
- $ Abs (Name.uu, T1, @{const False}))) accum)
+ $ Abs (Name.uu, T1, \<^const>\<open>False\<close>))) accum)
| Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2 => do_equals t1 t2
| Const (\<^const_name>\<open>Let\<close>, _) $ t1 $ t2 =>
do_formula sn (betapply (t2, t1)) accum
- | @{const Pure.conjunction} $ t1 $ t2 =>
+ | \<^const>\<open>Pure.conjunction\<close> $ t1 $ t2 =>
do_connect meta_conj_spec false t1 t2 accum
- | @{const Pure.imp} $ t1 $ t2 => do_connect meta_imp_spec true t1 t2 accum
- | @{const Not} $ t1 => do_connect imp_spec true t1 @{const False} accum
- | @{const conj} $ t1 $ t2 => do_connect conj_spec false t1 t2 accum
- | @{const disj} $ t1 $ t2 => do_connect disj_spec false t1 t2 accum
- | @{const implies} $ t1 $ t2 => do_connect imp_spec true t1 t2 accum
+ | \<^const>\<open>Pure.imp\<close> $ t1 $ t2 => do_connect meta_imp_spec true t1 t2 accum
+ | \<^const>\<open>Not\<close> $ t1 => do_connect imp_spec true t1 \<^const>\<open>False\<close> accum
+ | \<^const>\<open>conj\<close> $ t1 $ t2 => do_connect conj_spec false t1 t2 accum
+ | \<^const>\<open>disj\<close> $ t1 $ t2 => do_connect disj_spec false t1 t2 accum
+ | \<^const>\<open>implies\<close> $ t1 $ t2 => do_connect imp_spec true t1 t2 accum
| _ => do_term t accum
end
|> tap (fn (gamma, _) =>
@@ -1123,17 +1123,17 @@
and do_formula t accum =
case t of
Const (\<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, T1, t1) => do_all T1 t1 accum
- | @{const Trueprop} $ t1 => do_formula t1 accum
+ | \<^const>\<open>Trueprop\<close> $ t1 => do_formula t1 accum
| Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2 =>
consider_general_equals mdata true t1 t2 accum
- | @{const Pure.imp} $ t1 $ t2 => do_implies t1 t2 accum
- | @{const Pure.conjunction} $ t1 $ t2 =>
+ | \<^const>\<open>Pure.imp\<close> $ t1 $ t2 => do_implies t1 t2 accum
+ | \<^const>\<open>Pure.conjunction\<close> $ t1 $ t2 =>
fold (do_formula) [t1, t2] accum
| Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, T1, t1) => do_all T1 t1 accum
| Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2 =>
consider_general_equals mdata true t1 t2 accum
- | @{const conj} $ t1 $ t2 => fold (do_formula) [t1, t2] accum
- | @{const implies} $ t1 $ t2 => do_implies t1 t2 accum
+ | \<^const>\<open>conj\<close> $ t1 $ t2 => fold (do_formula) [t1, t2] accum
+ | \<^const>\<open>implies\<close> $ t1 $ t2 => do_implies t1 t2 accum
| _ => raise TERM ("Nitpick_Mono.consider_definitional_axiom.\
\do_formula", [t])
in do_formula t end
--- a/src/HOL/Tools/Nitpick/nitpick_preproc.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_preproc.ML Sat Jan 05 17:24:33 2019 +0100
@@ -38,13 +38,13 @@
let
fun aux def (Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2) =
aux def t1 andalso aux false t2
- | aux def (@{const Pure.imp} $ t1 $ t2) = aux false t1 andalso aux def t2
+ | aux def (\<^const>\<open>Pure.imp\<close> $ t1 $ t2) = aux false t1 andalso aux def t2
| aux def (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2) =
aux def t1 andalso aux false t2
- | aux def (@{const HOL.implies} $ t1 $ t2) = aux false t1 andalso aux def t2
+ | aux def (\<^const>\<open>HOL.implies\<close> $ t1 $ t2) = aux false t1 andalso aux def t2
| aux def (t1 $ t2) = aux def t1 andalso aux def t2
| aux def (t as Const (s, _)) =
- (not def orelse t <> @{const Suc}) andalso
+ (not def orelse t <> \<^const>\<open>Suc\<close>) andalso
not (member (op =)
[\<^const_name>\<open>Abs_Frac\<close>, \<^const_name>\<open>Rep_Frac\<close>,
\<^const_name>\<open>nat_gcd\<close>, \<^const_name>\<open>nat_lcm\<close>,
@@ -143,7 +143,7 @@
| _ => exists_subterm (curry (op =) (Var z)) t' ? insert (op =) T
fun box_var_in_def new_Ts old_Ts t (z as (_, T)) =
case t of
- @{const Trueprop} $ t1 => box_var_in_def new_Ts old_Ts t1 z
+ \<^const>\<open>Trueprop\<close> $ t1 => box_var_in_def new_Ts old_Ts t1 z
| Const (s0, _) $ t1 $ _ =>
if s0 = \<^const_name>\<open>Pure.eq\<close> orelse s0 = \<^const_name>\<open>HOL.eq\<close> then
let
@@ -190,30 +190,30 @@
do_quantifier new_Ts old_Ts polar s0 T0 s1 T1 t1
| Const (s0 as \<^const_name>\<open>Pure.eq\<close>, T0) $ t1 $ t2 =>
do_equals new_Ts old_Ts s0 T0 t1 t2
- | @{const Pure.imp} $ t1 $ t2 =>
- @{const Pure.imp} $ do_term new_Ts old_Ts (flip_polarity polar) t1
+ | \<^const>\<open>Pure.imp\<close> $ t1 $ t2 =>
+ \<^const>\<open>Pure.imp\<close> $ do_term new_Ts old_Ts (flip_polarity polar) t1
$ do_term new_Ts old_Ts polar t2
- | @{const Pure.conjunction} $ t1 $ t2 =>
- @{const Pure.conjunction} $ do_term new_Ts old_Ts polar t1
+ | \<^const>\<open>Pure.conjunction\<close> $ t1 $ t2 =>
+ \<^const>\<open>Pure.conjunction\<close> $ do_term new_Ts old_Ts polar t1
$ do_term new_Ts old_Ts polar t2
- | @{const Trueprop} $ t1 =>
- @{const Trueprop} $ do_term new_Ts old_Ts polar t1
- | @{const Not} $ t1 =>
- @{const Not} $ do_term new_Ts old_Ts (flip_polarity polar) t1
+ | \<^const>\<open>Trueprop\<close> $ t1 =>
+ \<^const>\<open>Trueprop\<close> $ do_term new_Ts old_Ts polar t1
+ | \<^const>\<open>Not\<close> $ t1 =>
+ \<^const>\<open>Not\<close> $ do_term new_Ts old_Ts (flip_polarity polar) t1
| Const (s0 as \<^const_name>\<open>All\<close>, T0) $ Abs (s1, T1, t1) =>
do_quantifier new_Ts old_Ts polar s0 T0 s1 T1 t1
| Const (s0 as \<^const_name>\<open>Ex\<close>, T0) $ Abs (s1, T1, t1) =>
do_quantifier new_Ts old_Ts polar s0 T0 s1 T1 t1
| Const (s0 as \<^const_name>\<open>HOL.eq\<close>, T0) $ t1 $ t2 =>
do_equals new_Ts old_Ts s0 T0 t1 t2
- | @{const HOL.conj} $ t1 $ t2 =>
- @{const HOL.conj} $ do_term new_Ts old_Ts polar t1
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 =>
+ \<^const>\<open>HOL.conj\<close> $ do_term new_Ts old_Ts polar t1
$ do_term new_Ts old_Ts polar t2
- | @{const HOL.disj} $ t1 $ t2 =>
- @{const HOL.disj} $ do_term new_Ts old_Ts polar t1
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 =>
+ \<^const>\<open>HOL.disj\<close> $ do_term new_Ts old_Ts polar t1
$ do_term new_Ts old_Ts polar t2
- | @{const HOL.implies} $ t1 $ t2 =>
- @{const HOL.implies} $ do_term new_Ts old_Ts (flip_polarity polar) t1
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 =>
+ \<^const>\<open>HOL.implies\<close> $ do_term new_Ts old_Ts (flip_polarity polar) t1
$ do_term new_Ts old_Ts polar t2
| Const (x as (s, T)) =>
if is_descr s then
@@ -335,11 +335,11 @@
case t of
(t0 as Const (\<^const_name>\<open>Pure.eq\<close>, _)) $ t1 $ t2 =>
do_eq_or_imp Ts true def t0 t1 t2 seen
- | (t0 as @{const Pure.imp}) $ t1 $ t2 =>
+ | (t0 as \<^const>\<open>Pure.imp\<close>) $ t1 $ t2 =>
if def then (t, []) else do_eq_or_imp Ts false def t0 t1 t2 seen
| (t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ t1 $ t2 =>
do_eq_or_imp Ts true def t0 t1 t2 seen
- | (t0 as @{const HOL.implies}) $ t1 $ t2 =>
+ | (t0 as \<^const>\<open>HOL.implies\<close>) $ t1 $ t2 =>
do_eq_or_imp Ts false def t0 t1 t2 seen
| Abs (s, T, t') =>
let val (t', seen) = do_term (T :: Ts) def t' [] seen in
@@ -402,11 +402,11 @@
| _ => I) t (K 0)
fun aux Ts careful ((t0 as Const (\<^const_name>\<open>Pure.eq\<close>, _)) $ t1 $ t2) =
aux_eq Ts careful true t0 t1 t2
- | aux Ts careful ((t0 as @{const Pure.imp}) $ t1 $ t2) =
+ | aux Ts careful ((t0 as \<^const>\<open>Pure.imp\<close>) $ t1 $ t2) =
t0 $ aux Ts false t1 $ aux Ts careful t2
| aux Ts careful ((t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ t1 $ t2) =
aux_eq Ts careful true t0 t1 t2
- | aux Ts careful ((t0 as @{const HOL.implies}) $ t1 $ t2) =
+ | aux Ts careful ((t0 as \<^const>\<open>HOL.implies\<close>) $ t1 $ t2) =
t0 $ aux Ts false t1 $ aux Ts careful t2
| aux Ts careful (Abs (s, T, t')) = Abs (s, T, aux (T :: Ts) careful t')
| aux Ts careful (t1 $ t2) = aux Ts careful t1 $ aux Ts careful t2
@@ -417,13 +417,13 @@
raise SAME ()
else if axiom andalso is_Var t2 andalso
num_occs_of_var (dest_Var t2) = 1 then
- @{const True}
+ \<^const>\<open>True\<close>
else case strip_comb t2 of
(* The first case is not as general as it could be. *)
(Const (\<^const_name>\<open>PairBox\<close>, _),
[Const (\<^const_name>\<open>fst\<close>, _) $ Var z1,
Const (\<^const_name>\<open>snd\<close>, _) $ Var z2]) =>
- if z1 = z2 andalso num_occs_of_var z1 = 2 then @{const True}
+ if z1 = z2 andalso num_occs_of_var z1 = 2 then \<^const>\<open>True\<close>
else raise SAME ()
| (Const (x as (s, T)), args) =>
let
@@ -454,25 +454,25 @@
(** Destruction of universal and existential equalities **)
-fun curry_assms (@{const Pure.imp} $ (@{const Trueprop}
- $ (@{const HOL.conj} $ t1 $ t2)) $ t3) =
+fun curry_assms (\<^const>\<open>Pure.imp\<close> $ (\<^const>\<open>Trueprop\<close>
+ $ (\<^const>\<open>HOL.conj\<close> $ t1 $ t2)) $ t3) =
curry_assms (Logic.list_implies ([t1, t2] |> map HOLogic.mk_Trueprop, t3))
- | curry_assms (@{const Pure.imp} $ t1 $ t2) =
- @{const Pure.imp} $ curry_assms t1 $ curry_assms t2
+ | curry_assms (\<^const>\<open>Pure.imp\<close> $ t1 $ t2) =
+ \<^const>\<open>Pure.imp\<close> $ curry_assms t1 $ curry_assms t2
| curry_assms t = t
val destroy_universal_equalities =
let
fun aux prems zs t =
case t of
- @{const Pure.imp} $ t1 $ t2 => aux_implies prems zs t1 t2
+ \<^const>\<open>Pure.imp\<close> $ t1 $ t2 => aux_implies prems zs t1 t2
| _ => Logic.list_implies (rev prems, t)
and aux_implies prems zs t1 t2 =
case t1 of
Const (\<^const_name>\<open>Pure.eq\<close>, _) $ Var z $ t' => aux_eq prems zs z t' t1 t2
- | @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ Var z $ t') =>
+ | \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ Var z $ t') =>
aux_eq prems zs z t' t1 t2
- | @{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ Var z) =>
+ | \<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t' $ Var z) =>
aux_eq prems zs z t' t1 t2
| _ => aux (t1 :: prems) (Term.add_vars t1 zs) t2
and aux_eq prems zs z t' t1 t2 =
@@ -528,7 +528,7 @@
fun kill [] [] ts = foldr1 s_conj ts
| kill (s :: ss) (T :: Ts) ts =
(case find_bound_assign ctxt (length ss) [] ts of
- SOME (_, []) => @{const True}
+ SOME (_, []) => \<^const>\<open>True\<close>
| SOME (arg_t, ts) =>
kill ss Ts (map (subst_one_bound (length ss)
(incr_bv (~1, length ss + 1, arg_t))) ts)
@@ -592,26 +592,26 @@
case t of
Const (s0 as \<^const_name>\<open>Pure.all\<close>, T0) $ Abs (s1, T1, t1) =>
do_quantifier s0 T0 s1 T1 t1
- | @{const Pure.imp} $ t1 $ t2 =>
- @{const Pure.imp} $ aux ss Ts js skolemizable (flip_polarity polar) t1
+ | \<^const>\<open>Pure.imp\<close> $ t1 $ t2 =>
+ \<^const>\<open>Pure.imp\<close> $ aux ss Ts js skolemizable (flip_polarity polar) t1
$ aux ss Ts js skolemizable polar t2
- | @{const Pure.conjunction} $ t1 $ t2 =>
- @{const Pure.conjunction} $ aux ss Ts js skolemizable polar t1
+ | \<^const>\<open>Pure.conjunction\<close> $ t1 $ t2 =>
+ \<^const>\<open>Pure.conjunction\<close> $ aux ss Ts js skolemizable polar t1
$ aux ss Ts js skolemizable polar t2
- | @{const Trueprop} $ t1 =>
- @{const Trueprop} $ aux ss Ts js skolemizable polar t1
- | @{const Not} $ t1 =>
- @{const Not} $ aux ss Ts js skolemizable (flip_polarity polar) t1
+ | \<^const>\<open>Trueprop\<close> $ t1 =>
+ \<^const>\<open>Trueprop\<close> $ aux ss Ts js skolemizable polar t1
+ | \<^const>\<open>Not\<close> $ t1 =>
+ \<^const>\<open>Not\<close> $ aux ss Ts js skolemizable (flip_polarity polar) t1
| Const (s0 as \<^const_name>\<open>All\<close>, T0) $ Abs (s1, T1, t1) =>
do_quantifier s0 T0 s1 T1 t1
| Const (s0 as \<^const_name>\<open>Ex\<close>, T0) $ Abs (s1, T1, t1) =>
do_quantifier s0 T0 s1 T1 t1
- | @{const HOL.conj} $ t1 $ t2 =>
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 =>
s_conj (apply2 (aux ss Ts js skolemizable polar) (t1, t2))
- | @{const HOL.disj} $ t1 $ t2 =>
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 =>
s_disj (apply2 (aux ss Ts js skolemizable polar) (t1, t2))
- | @{const HOL.implies} $ t1 $ t2 =>
- @{const HOL.implies} $ aux ss Ts js skolemizable (flip_polarity polar) t1
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 =>
+ \<^const>\<open>HOL.implies\<close> $ aux ss Ts js skolemizable (flip_polarity polar) t1
$ aux ss Ts js skolemizable polar t2
| (t0 as Const (\<^const_name>\<open>Let\<close>, _)) $ t1 $ t2 =>
t0 $ t1 $ aux ss Ts js skolemizable polar t2
@@ -622,8 +622,8 @@
let
val gfp = (fixpoint_kind_of_const thy def_tables x = Gfp)
val (pref, connective) =
- if gfp then (lbfp_prefix, @{const HOL.disj})
- else (ubfp_prefix, @{const HOL.conj})
+ if gfp then (lbfp_prefix, \<^const>\<open>HOL.disj\<close>)
+ else (ubfp_prefix, \<^const>\<open>HOL.conj\<close>)
fun pos () = unrolled_inductive_pred_const hol_ctxt gfp x
|> aux ss Ts js skolemizable polar
fun neg () = Const (pref ^ s, T)
@@ -653,8 +653,8 @@
(** Function specialization **)
-fun params_in_equation (@{const Pure.imp} $ _ $ t2) = params_in_equation t2
- | params_in_equation (@{const Trueprop} $ t1) = params_in_equation t1
+fun params_in_equation (\<^const>\<open>Pure.imp\<close> $ _ $ t2) = params_in_equation t2
+ | params_in_equation (\<^const>\<open>Trueprop\<close> $ t1) = params_in_equation t1
| params_in_equation (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ _) =
snd (strip_comb t1)
| params_in_equation _ = []
@@ -866,7 +866,7 @@
in
case t of
Const (\<^const_name>\<open>Pure.eq\<close>, _) $ (u as Free _) $ def => do_equals u def
- | @{const Trueprop}
+ | \<^const>\<open>Trueprop\<close>
$ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ (u as Free _) $ def) =>
do_equals u def
| _ => NONE
@@ -917,7 +917,7 @@
and add_def_axiom depth = add_axiom fst apfst true depth
and add_nondef_axiom depth = add_axiom snd apsnd false depth
and add_maybe_def_axiom depth t =
- (if head_of t <> @{const Pure.imp} then add_def_axiom
+ (if head_of t <> \<^const>\<open>Pure.imp\<close> then add_def_axiom
else add_nondef_axiom) depth t
and add_eq_axiom depth t =
(if is_constr_pattern_formula ctxt t then add_def_axiom
@@ -1104,10 +1104,10 @@
case t of
(t0 as Const (\<^const_name>\<open>All\<close>, T0)) $ Abs (s, T1, t1) =>
(case t1 of
- (t10 as @{const HOL.conj}) $ t11 $ t12 =>
+ (t10 as \<^const>\<open>HOL.conj\<close>) $ t11 $ t12 =>
t10 $ distribute_quantifiers (t0 $ Abs (s, T1, t11))
$ distribute_quantifiers (t0 $ Abs (s, T1, t12))
- | (t10 as @{const Not}) $ t11 =>
+ | (t10 as \<^const>\<open>Not\<close>) $ t11 =>
t10 $ distribute_quantifiers (Const (\<^const_name>\<open>Ex\<close>, T0)
$ Abs (s, T1, t11))
| t1 =>
@@ -1117,14 +1117,14 @@
t0 $ Abs (s, T1, distribute_quantifiers t1))
| (t0 as Const (\<^const_name>\<open>Ex\<close>, T0)) $ Abs (s, T1, t1) =>
(case distribute_quantifiers t1 of
- (t10 as @{const HOL.disj}) $ t11 $ t12 =>
+ (t10 as \<^const>\<open>HOL.disj\<close>) $ t11 $ t12 =>
t10 $ distribute_quantifiers (t0 $ Abs (s, T1, t11))
$ distribute_quantifiers (t0 $ Abs (s, T1, t12))
- | (t10 as @{const HOL.implies}) $ t11 $ t12 =>
+ | (t10 as \<^const>\<open>HOL.implies\<close>) $ t11 $ t12 =>
t10 $ distribute_quantifiers (Const (\<^const_name>\<open>All\<close>, T0)
$ Abs (s, T1, t11))
$ distribute_quantifiers (t0 $ Abs (s, T1, t12))
- | (t10 as @{const Not}) $ t11 =>
+ | (t10 as \<^const>\<open>Not\<close>) $ t11 =>
t10 $ distribute_quantifiers (Const (\<^const_name>\<open>All\<close>, T0)
$ Abs (s, T1, t11))
| t1 =>
--- a/src/HOL/Tools/Nunchaku/nunchaku.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nunchaku/nunchaku.ML Sat Jan 05 17:24:33 2019 +0100
@@ -131,7 +131,7 @@
fun none_true assigns = forall (curry (op <>) (SOME true) o snd) assigns;
-fun has_lonely_bool_var (@{const Pure.conjunction} $ (@{const Trueprop} $ Free _) $ _) = true
+fun has_lonely_bool_var (\<^const>\<open>Pure.conjunction\<close> $ (\<^const>\<open>Trueprop\<close> $ Free _) $ _) = true
| has_lonely_bool_var _ = false;
val syntactic_sorts =
--- a/src/HOL/Tools/Nunchaku/nunchaku_collect.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nunchaku/nunchaku_collect.ML Sat Jan 05 17:24:33 2019 +0100
@@ -269,7 +269,7 @@
val A = Logic.varifyT_global \<^typ>\<open>'a\<close>;
val absT = Type (\<^type_name>\<open>set\<close>, [A]);
val repT = A --> HOLogic.boolT;
- val pred = Abs (Name.uu, repT, @{const True});
+ val pred = Abs (Name.uu, repT, \<^const>\<open>True\<close>);
val abs = Const (\<^const_name>\<open>Collect\<close>, repT --> absT);
val rep = Const (\<^const_name>\<open>rmember\<close>, absT --> repT);
in
@@ -523,7 +523,7 @@
end;
fun lhs_of_equation (Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t $ _) = t
- | lhs_of_equation (@{const Trueprop} $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ _)) = t;
+ | lhs_of_equation (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ _)) = t;
fun specialize_definition_type thy x def0 =
let
@@ -681,7 +681,7 @@
fun defined_by (Const (\<^const_name>\<open>All\<close>, _) $ t) = defined_by t
| defined_by (Abs (_, _, t)) = defined_by t
- | defined_by (@{const implies} $ _ $ u) = defined_by u
+ | defined_by (\<^const>\<open>implies\<close> $ _ $ u) = defined_by u
| defined_by (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ _) = head_of t
| defined_by t = head_of t;
@@ -1007,14 +1007,14 @@
val (poly_axioms, mono_axioms0) = orphan_axioms_of ctxt
|> List.partition has_polymorphism;
- fun implicit_evals_of pol (@{const Not} $ t) = implicit_evals_of (not pol) t
- | implicit_evals_of pol (@{const implies} $ t $ u) =
+ fun implicit_evals_of pol (\<^const>\<open>Not\<close> $ t) = implicit_evals_of (not pol) t
+ | implicit_evals_of pol (\<^const>\<open>implies\<close> $ t $ u) =
(case implicit_evals_of pol u of
[] => implicit_evals_of (not pol) t
| ts => ts)
- | implicit_evals_of pol (@{const conj} $ t $ u) =
+ | implicit_evals_of pol (\<^const>\<open>conj\<close> $ t $ u) =
union (op aconv) (implicit_evals_of pol t) (implicit_evals_of pol u)
- | implicit_evals_of pol (@{const disj} $ t $ u) =
+ | implicit_evals_of pol (\<^const>\<open>disj\<close> $ t $ u) =
union (op aconv) (implicit_evals_of pol t) (implicit_evals_of pol u)
| implicit_evals_of false (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t $ u) =
distinct (op aconv) [t, u]
@@ -1054,7 +1054,7 @@
Syntax.string_of_term ctxt t ^ " : " ^ Syntax.string_of_typ ctxt (fastype_of t);
fun is_triv_wrt (Abs (_, _, body)) = is_triv_wrt body
- | is_triv_wrt @{const True} = true
+ | is_triv_wrt \<^const>\<open>True\<close> = true
| is_triv_wrt _ = false;
fun str_of_isa_type_spec ctxt {abs_typ, rep_typ, wrt, abs, rep} =
--- a/src/HOL/Tools/Nunchaku/nunchaku_reconstruct.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Nunchaku/nunchaku_reconstruct.ML Sat Jan 05 17:24:33 2019 +0100
@@ -166,7 +166,7 @@
else if id = nun_equals then
Const (\<^const_name>\<open>HOL.eq\<close>, typ_of ty)
else if id = nun_false then
- @{const False}
+ \<^const>\<open>False\<close>
else if id = nun_if then
Const (\<^const_name>\<open>If\<close>, typ_of ty)
else if id = nun_implies then
@@ -178,7 +178,7 @@
else if id = nun_unique_unsafe then
Const (\<^const_name>\<open>The_unsafe\<close>, typ_of ty)
else if id = nun_true then
- @{const True}
+ \<^const>\<open>True\<close>
else if String.isPrefix nun_dollar_anon_fun_prefix id then
let val j = Int.fromString (unprefix nun_dollar_anon_fun_prefix id) |> the_default ~1 in
Var ((anonymousN ^ nat_subscript (j + 1), 0), typ_of ty)
@@ -225,11 +225,11 @@
end
| term_of _ (NMatch _) = raise Fail "unexpected match";
- fun rewrite_numbers (t as @{const Suc} $ _) =
+ fun rewrite_numbers (t as \<^const>\<open>Suc\<close> $ _) =
(case try HOLogic.dest_nat t of
SOME n => HOLogic.mk_number \<^typ>\<open>nat\<close> n
| NONE => t)
- | rewrite_numbers (@{const Abs_Integ} $ (@{const Pair (nat, nat)} $ t $ u)) =
+ | rewrite_numbers (\<^const>\<open>Abs_Integ\<close> $ (@{const Pair (nat, nat)} $ t $ u)) =
HOLogic.mk_number \<^typ>\<open>int\<close> (HOLogic.dest_nat t - HOLogic.dest_nat u)
| rewrite_numbers (t $ u) = rewrite_numbers t $ rewrite_numbers u
| rewrite_numbers (Abs (s, T, t)) = Abs (s, T, rewrite_numbers t)
--- a/src/HOL/Tools/SMT/smt_replay_methods.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/smt_replay_methods.ML Sat Jan 05 17:24:33 2019 +0100
@@ -192,28 +192,28 @@
fun abstract_ter abs f t t1 t2 t3 =
abstract_sub t (abs t1 ##>> abs t2 ##>> abs t3 #>> (Scan.triple1 #> f))
-fun abstract_lit (@{const HOL.Not} $ t) = abstract_term t #>> HOLogic.mk_not
+fun abstract_lit (\<^const>\<open>HOL.Not\<close> $ t) = abstract_term t #>> HOLogic.mk_not
| abstract_lit t = abstract_term t
-fun abstract_not abs (t as @{const HOL.Not} $ t1) =
+fun abstract_not abs (t as \<^const>\<open>HOL.Not\<close> $ t1) =
abstract_sub t (abs t1 #>> HOLogic.mk_not)
| abstract_not _ t = abstract_lit t
-fun abstract_conj (t as @{const HOL.conj} $ t1 $ t2) =
+fun abstract_conj (t as \<^const>\<open>HOL.conj\<close> $ t1 $ t2) =
abstract_bin abstract_conj HOLogic.mk_conj t t1 t2
| abstract_conj t = abstract_lit t
-fun abstract_disj (t as @{const HOL.disj} $ t1 $ t2) =
+fun abstract_disj (t as \<^const>\<open>HOL.disj\<close> $ t1 $ t2) =
abstract_bin abstract_disj HOLogic.mk_disj t t1 t2
| abstract_disj t = abstract_lit t
fun abstract_prop (t as (c as @{const If (bool)}) $ t1 $ t2 $ t3) =
abstract_ter abstract_prop (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3
- | abstract_prop (t as @{const HOL.disj} $ t1 $ t2) =
+ | abstract_prop (t as \<^const>\<open>HOL.disj\<close> $ t1 $ t2) =
abstract_bin abstract_prop HOLogic.mk_disj t t1 t2
- | abstract_prop (t as @{const HOL.conj} $ t1 $ t2) =
+ | abstract_prop (t as \<^const>\<open>HOL.conj\<close> $ t1 $ t2) =
abstract_bin abstract_prop HOLogic.mk_conj t t1 t2
- | abstract_prop (t as @{const HOL.implies} $ t1 $ t2) =
+ | abstract_prop (t as \<^const>\<open>HOL.implies\<close> $ t1 $ t2) =
abstract_bin abstract_prop HOLogic.mk_imp t t1 t2
| abstract_prop (t as \<^term>\<open>HOL.eq :: bool => _\<close> $ t1 $ t2) =
abstract_bin abstract_prop HOLogic.mk_eq t t1 t2
@@ -227,8 +227,8 @@
abstract_sub t (abs t' #>> (fn u' => c $ Abs (s, T, u')))
| abs (t as (c as Const (\<^const_name>\<open>If\<close>, _)) $ t1 $ t2 $ t3) =
abstract_ter abs (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3
- | abs (t as @{const HOL.Not} $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not)
- | abs (t as @{const HOL.disj} $ t1 $ t2) =
+ | abs (t as \<^const>\<open>HOL.Not\<close> $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not)
+ | abs (t as \<^const>\<open>HOL.disj\<close> $ t1 $ t2) =
abstract_sub t (abs t1 ##>> abs t2 #>> HOLogic.mk_disj)
| abs (t as (c as Const (\<^const_name>\<open>uminus_class.uminus\<close>, _)) $ t1) =
abstract_sub t (abs t1 #>> (fn u => c $ u))
@@ -256,10 +256,10 @@
| (NONE, _) => abstract_term t cx))
in abs u end
-fun abstract_unit (t as (@{const HOL.Not} $ (@{const HOL.disj} $ t1 $ t2))) =
+fun abstract_unit (t as (\<^const>\<open>HOL.Not\<close> $ (\<^const>\<open>HOL.disj\<close> $ t1 $ t2))) =
abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>>
HOLogic.mk_not o HOLogic.mk_disj)
- | abstract_unit (t as (@{const HOL.disj} $ t1 $ t2)) =
+ | abstract_unit (t as (\<^const>\<open>HOL.disj\<close> $ t1 $ t2)) =
abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>>
HOLogic.mk_disj)
| abstract_unit (t as (Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2)) =
@@ -267,12 +267,12 @@
abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>>
HOLogic.mk_eq)
else abstract_lit t
- | abstract_unit (t as (@{const HOL.Not} $ Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2)) =
+ | abstract_unit (t as (\<^const>\<open>HOL.Not\<close> $ Const(\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2)) =
if fastype_of t1 = \<^typ>\<open>bool\<close> then
abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>>
HOLogic.mk_eq #>> HOLogic.mk_not)
else abstract_lit t
- | abstract_unit (t as (@{const HOL.Not} $ t1)) =
+ | abstract_unit (t as (\<^const>\<open>HOL.Not\<close> $ t1)) =
abstract_sub t (abstract_unit t1 #>> HOLogic.mk_not)
| abstract_unit t = abstract_lit t
--- a/src/HOL/Tools/SMT/smt_util.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/smt_util.ML Sat Jan 05 17:24:33 2019 +0100
@@ -133,10 +133,10 @@
(* terms *)
-fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u)
+fun dest_conj (\<^const>\<open>HOL.conj\<close> $ t $ u) = (t, u)
| dest_conj t = raise TERM ("not a conjunction", [t])
-fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u)
+fun dest_disj (\<^const>\<open>HOL.disj\<close> $ t $ u) = (t, u)
| dest_disj t = raise TERM ("not a disjunction", [t])
fun under_quant f t =
@@ -201,17 +201,17 @@
val dest_all_cbinders = repeat_yield (try o dest_cbinder)
-val mk_cprop = Thm.apply (Thm.cterm_of \<^context> @{const Trueprop})
+val mk_cprop = Thm.apply (Thm.cterm_of \<^context> \<^const>\<open>Trueprop\<close>)
fun dest_cprop ct =
(case Thm.term_of ct of
- @{const Trueprop} $ _ => Thm.dest_arg ct
+ \<^const>\<open>Trueprop\<close> $ _ => Thm.dest_arg ct
| _ => raise CTERM ("not a property", [ct]))
val equals = mk_const_pat \<^theory> \<^const_name>\<open>Pure.eq\<close> destT1
fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu
-val dest_prop = (fn @{const Trueprop} $ t => t | t => t)
+val dest_prop = (fn \<^const>\<open>Trueprop\<close> $ t => t | t => t)
fun term_of ct = dest_prop (Thm.term_of ct)
fun prop_of thm = dest_prop (Thm.prop_of thm)
@@ -241,7 +241,7 @@
fun prop_conv cv ct =
(case Thm.term_of ct of
- @{const Trueprop} $ _ => Conv.arg_conv cv ct
+ \<^const>\<open>Trueprop\<close> $ _ => Conv.arg_conv cv ct
| _ => raise CTERM ("not a property", [ct]))
end;
--- a/src/HOL/Tools/SMT/smtlib_interface.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/smtlib_interface.ML Sat Jan 05 17:24:33 2019 +0100
@@ -46,12 +46,12 @@
(\<^typ>\<open>bool\<close>, K (SOME ("Bool", [])), K (K NONE)),
(\<^typ>\<open>int\<close>, K (SOME ("Int", [])), int_num)] #>
fold (SMT_Builtin.add_builtin_fun' smtlibC) [
- (@{const True}, "true"),
- (@{const False}, "false"),
- (@{const Not}, "not"),
- (@{const HOL.conj}, "and"),
- (@{const HOL.disj}, "or"),
- (@{const HOL.implies}, "=>"),
+ (\<^const>\<open>True\<close>, "true"),
+ (\<^const>\<open>False\<close>, "false"),
+ (\<^const>\<open>Not\<close>, "not"),
+ (\<^const>\<open>HOL.conj\<close>, "and"),
+ (\<^const>\<open>HOL.disj\<close>, "or"),
+ (\<^const>\<open>HOL.implies\<close>, "=>"),
(@{const HOL.eq ('a)}, "="),
(@{const If ('a)}, "ite"),
(@{const less (int)}, "<"),
--- a/src/HOL/Tools/SMT/smtlib_proof.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/smtlib_proof.ML Sat Jan 05 17:24:33 2019 +0100
@@ -140,8 +140,8 @@
fun mk_less t1 t2 = mk_binary_pred \<^const_name>\<open>ord_class.less\<close> \<^sort>\<open>linorder\<close> t1 t2
fun mk_less_eq t1 t2 = mk_binary_pred \<^const_name>\<open>ord_class.less_eq\<close> \<^sort>\<open>linorder\<close> t1 t2
-fun core_term_parser (SMTLIB.Sym "true", _) = SOME @{const HOL.True}
- | core_term_parser (SMTLIB.Sym "false", _) = SOME @{const HOL.False}
+fun core_term_parser (SMTLIB.Sym "true", _) = SOME \<^const>\<open>HOL.True\<close>
+ | core_term_parser (SMTLIB.Sym "false", _) = SOME \<^const>\<open>HOL.False\<close>
| core_term_parser (SMTLIB.Sym "not", [t]) = SOME (HOLogic.mk_not t)
| core_term_parser (SMTLIB.Sym "and", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_conj) t ts)
| core_term_parser (SMTLIB.Sym "or", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_disj) t ts)
--- a/src/HOL/Tools/SMT/verit_proof.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/verit_proof.ML Sat Jan 05 17:24:33 2019 +0100
@@ -383,7 +383,7 @@
fun inline_assumption assumption assumption_id
(VeriT_Node {id, rule, prems, proof_ctxt, concl, bounds}) =
mk_node id rule (remove_assumption_id assumption_id prems) proof_ctxt
- (@{const Pure.imp} $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds
+ (\<^const>\<open>Pure.imp\<close> $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds
fun find_input_steps_and_inline [] = []
| find_input_steps_and_inline
(VeriT_Node {id = id', rule, prems, concl, bounds, ...} :: steps) =
--- a/src/HOL/Tools/SMT/z3_interface.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/z3_interface.ML Sat Jan 05 17:24:33 2019 +0100
@@ -51,8 +51,8 @@
val setup_builtins =
SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #>
- SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #>
- SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod")
+ SMT_Builtin.add_builtin_fun' smtlib_z3C (\<^const>\<open>z3div\<close>, "div") #>
+ SMT_Builtin.add_builtin_fun' smtlib_z3C (\<^const>\<open>z3mod\<close>, "mod")
in
val _ = Theory.setup (Context.theory_map (
@@ -116,13 +116,13 @@
| mk_builtin_num ctxt i T =
chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T
-val mk_true = Thm.cterm_of \<^context> (@{const Not} $ @{const False})
-val mk_false = Thm.cterm_of \<^context> @{const False}
-val mk_not = Thm.apply (Thm.cterm_of \<^context> @{const Not})
-val mk_implies = Thm.mk_binop (Thm.cterm_of \<^context> @{const HOL.implies})
+val mk_true = Thm.cterm_of \<^context> (\<^const>\<open>Not\<close> $ \<^const>\<open>False\<close>)
+val mk_false = Thm.cterm_of \<^context> \<^const>\<open>False\<close>
+val mk_not = Thm.apply (Thm.cterm_of \<^context> \<^const>\<open>Not\<close>)
+val mk_implies = Thm.mk_binop (Thm.cterm_of \<^context> \<^const>\<open>HOL.implies\<close>)
val mk_iff = Thm.mk_binop (Thm.cterm_of \<^context> @{const HOL.eq (bool)})
-val conj = Thm.cterm_of \<^context> @{const HOL.conj}
-val disj = Thm.cterm_of \<^context> @{const HOL.disj}
+val conj = Thm.cterm_of \<^context> \<^const>\<open>HOL.conj\<close>
+val disj = Thm.cterm_of \<^context> \<^const>\<open>HOL.disj\<close>
fun mk_nary _ cu [] = cu
| mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts)
@@ -148,8 +148,8 @@
val int0 = Numeral.mk_cnumber \<^ctyp>\<open>int\<close> 0
val mk_sub = Thm.mk_binop (Thm.cterm_of \<^context> @{const minus (int)})
val mk_mul = Thm.mk_binop (Thm.cterm_of \<^context> @{const times (int)})
-val mk_div = Thm.mk_binop (Thm.cterm_of \<^context> @{const z3div})
-val mk_mod = Thm.mk_binop (Thm.cterm_of \<^context> @{const z3mod})
+val mk_div = Thm.mk_binop (Thm.cterm_of \<^context> \<^const>\<open>z3div\<close>)
+val mk_mod = Thm.mk_binop (Thm.cterm_of \<^context> \<^const>\<open>z3mod\<close>)
val mk_lt = Thm.mk_binop (Thm.cterm_of \<^context> @{const less (int)})
val mk_le = Thm.mk_binop (Thm.cterm_of \<^context> @{const less_eq (int)})
--- a/src/HOL/Tools/SMT/z3_replay_methods.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/SMT/z3_replay_methods.ML Sat Jan 05 17:24:33 2019 +0100
@@ -319,10 +319,10 @@
(thm COMP_INCR intro_hyp_rule1)
handle THM _ => thm COMP_INCR intro_hyp_rule2
-fun negated_prop (@{const HOL.Not} $ t) = HOLogic.mk_Trueprop t
+fun negated_prop (\<^const>\<open>HOL.Not\<close> $ t) = HOLogic.mk_Trueprop t
| negated_prop t = HOLogic.mk_Trueprop (HOLogic.mk_not t)
-fun intro_hyps tab (t as @{const HOL.disj} $ t1 $ t2) cx =
+fun intro_hyps tab (t as \<^const>\<open>HOL.disj\<close> $ t1 $ t2) cx =
lookup_intro_hyps tab t (fold (intro_hyps tab) [t1, t2]) cx
| intro_hyps tab t cx =
lookup_intro_hyps tab t (fn _ => raise LEMMA ()) cx
@@ -380,7 +380,7 @@
fun def_axiom_disj ctxt t =
(case dest_prop t of
- @{const HOL.disj} $ u1 $ u2 =>
+ \<^const>\<open>HOL.disj\<close> $ u1 $ u2 =>
SMT_Replay_Methods.prove_abstract' ctxt t prop_tac (
SMT_Replay_Methods.abstract_prop u2 ##>> SMT_Replay_Methods.abstract_prop u1 #>>
HOLogic.mk_disj o swap)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Sat Jan 05 17:24:33 2019 +0100
@@ -83,8 +83,8 @@
fun is_rec_eq lhs = Term.exists_subterm (curry (op =) (head_of lhs))
-fun is_rec_def (@{const Trueprop} $ t) = is_rec_def t
- | is_rec_def (@{const Pure.imp} $ _ $ t2) = is_rec_def t2
+fun is_rec_def (\<^const>\<open>Trueprop\<close> $ t) = is_rec_def t
+ | is_rec_def (\<^const>\<open>Pure.imp\<close> $ _ $ t2) = is_rec_def t2
| is_rec_def (Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2) = is_rec_eq t1 t2
| is_rec_def (Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2) = is_rec_eq t1 t2
| is_rec_def _ = false
@@ -254,8 +254,8 @@
else
Interesting
- fun interest_of_prop _ (@{const Trueprop} $ t) = interest_of_bool t
- | interest_of_prop Ts (@{const Pure.imp} $ t $ u) =
+ fun interest_of_prop _ (\<^const>\<open>Trueprop\<close> $ t) = interest_of_bool t
+ | interest_of_prop Ts (\<^const>\<open>Pure.imp\<close> $ t $ u) =
combine_interests (interest_of_prop Ts t) (interest_of_prop Ts u)
| interest_of_prop Ts (Const (\<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, T, t)) =
if type_has_top_sort T then Deal_Breaker else interest_of_prop (T :: Ts) t
@@ -336,9 +336,9 @@
end
end
-fun normalize_eq (@{const Trueprop} $ (t as (t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ t1 $ t2)) =
+fun normalize_eq (\<^const>\<open>Trueprop\<close> $ (t as (t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ t1 $ t2)) =
if is_less_equal (Term_Ord.fast_term_ord (t1, t2)) then t else t0 $ t2 $ t1
- | normalize_eq (@{const Trueprop} $ (t as @{const Not}
+ | normalize_eq (\<^const>\<open>Trueprop\<close> $ (t as \<^const>\<open>Not\<close>
$ ((t0 as Const (\<^const_name>\<open>HOL.eq\<close>, _)) $ t1 $ t2))) =
if is_less_equal (Term_Ord.fast_term_ord (t1, t2)) then t else HOLogic.mk_not (t0 $ t2 $ t1)
| normalize_eq (Const (\<^const_name>\<open>Pure.eq\<close>, Type (_, [T, _])) $ t1 $ t2) =
@@ -381,7 +381,7 @@
fun struct_induct_rule_on th =
(case Logic.strip_horn (Thm.prop_of th) of
- (prems, @{const Trueprop} $ ((p as Var ((p_name, 0), _)) $ (a as Var (_, ind_T)))) =>
+ (prems, \<^const>\<open>Trueprop\<close> $ ((p as Var ((p_name, 0), _)) $ (a as Var (_, ind_T)))) =>
if not (is_TVar ind_T) andalso length prems > 1 andalso
exists (exists_subterm (curry (op aconv) p)) prems andalso
not (exists (exists_subterm (curry (op aconv) a)) prems) then
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML Sat Jan 05 17:24:33 2019 +0100
@@ -178,18 +178,18 @@
and do_formula t =
(case t of
Const (\<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, _, t') => do_formula t'
- | @{const Pure.imp} $ t1 $ t2 => do_formula t1 #> do_formula t2
+ | \<^const>\<open>Pure.imp\<close> $ t1 $ t2 => do_formula t1 #> do_formula t2
| Const (\<^const_name>\<open>Pure.eq\<close>, Type (_, [T, _])) $ t1 $ t2 =>
do_term_or_formula false T t1 #> do_term_or_formula true T t2
- | @{const Trueprop} $ t1 => do_formula t1
- | @{const False} => I
- | @{const True} => I
- | @{const Not} $ t1 => do_formula t1
+ | \<^const>\<open>Trueprop\<close> $ t1 => do_formula t1
+ | \<^const>\<open>False\<close> => I
+ | \<^const>\<open>True\<close> => I
+ | \<^const>\<open>Not\<close> $ t1 => do_formula t1
| Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, _, t') => do_formula t'
| Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, _, t') => do_formula t'
- | @{const HOL.conj} $ t1 $ t2 => do_formula t1 #> do_formula t2
- | @{const HOL.disj} $ t1 $ t2 => do_formula t1 #> do_formula t2
- | @{const HOL.implies} $ t1 $ t2 => do_formula t1 #> do_formula t2
+ | \<^const>\<open>HOL.conj\<close> $ t1 $ t2 => do_formula t1 #> do_formula t2
+ | \<^const>\<open>HOL.disj\<close> $ t1 $ t2 => do_formula t1 #> do_formula t2
+ | \<^const>\<open>HOL.implies\<close> $ t1 $ t2 => do_formula t1 #> do_formula t2
| Const (\<^const_name>\<open>HOL.eq\<close>, Type (_, [T, _])) $ t1 $ t2 =>
do_term_or_formula false T t1 #> do_term_or_formula true T t2
| Const (\<^const_name>\<open>If\<close>, Type (_, [_, Type (_, [T, _])])) $ t1 $ t2 $ t3 =>
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Sat Jan 05 17:24:33 2019 +0100
@@ -78,17 +78,17 @@
fun do_equals t1 t2 = is_bad_equal t1 t2 orelse is_bad_equal t2 t1
fun do_formula pos t =
(case (pos, t) of
- (_, @{const Trueprop} $ t1) => do_formula pos t1
+ (_, \<^const>\<open>Trueprop\<close> $ t1) => do_formula pos t1
| (true, Const (\<^const_name>\<open>Pure.all\<close>, _) $ Abs (_, _, t')) => do_formula pos t'
| (true, Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, _, t')) => do_formula pos t'
| (false, Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, _, t')) => do_formula pos t'
- | (_, @{const Pure.imp} $ t1 $ t2) =>
+ | (_, \<^const>\<open>Pure.imp\<close> $ t1 $ t2) =>
do_formula (not pos) t1 andalso (t2 = \<^prop>\<open>False\<close> orelse do_formula pos t2)
- | (_, @{const HOL.implies} $ t1 $ t2) =>
- do_formula (not pos) t1 andalso (t2 = @{const False} orelse do_formula pos t2)
- | (_, @{const Not} $ t1) => do_formula (not pos) t1
- | (true, @{const HOL.disj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
- | (false, @{const HOL.conj} $ t1 $ t2) => forall (do_formula pos) [t1, t2]
+ | (_, \<^const>\<open>HOL.implies\<close> $ t1 $ t2) =>
+ do_formula (not pos) t1 andalso (t2 = \<^const>\<open>False\<close> orelse do_formula pos t2)
+ | (_, \<^const>\<open>Not\<close> $ t1) => do_formula (not pos) t1
+ | (true, \<^const>\<open>HOL.disj\<close> $ t1 $ t2) => forall (do_formula pos) [t1, t2]
+ | (false, \<^const>\<open>HOL.conj\<close> $ t1 $ t2) => forall (do_formula pos) [t1, t2]
| (true, Const (\<^const_name>\<open>HOL.eq\<close>, _) $ t1 $ t2) => do_equals t1 t2
| (true, Const (\<^const_name>\<open>Pure.eq\<close>, _) $ t1 $ t2) => do_equals t1 t2
| _ => false)
--- a/src/HOL/Tools/Transfer/transfer.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/Transfer/transfer.ML Sat Jan 05 17:24:33 2019 +0100
@@ -571,8 +571,8 @@
Symtab.join (K or3) (tab1, tab2)
end
val tab = go [] p (t, u) Symtab.empty
- fun f (a, (true, false, false)) = SOME (a, @{const implies})
- | f (a, (false, true, false)) = SOME (a, @{const rev_implies})
+ fun f (a, (true, false, false)) = SOME (a, \<^const>\<open>implies\<close>)
+ | f (a, (false, true, false)) = SOME (a, \<^const>\<open>rev_implies\<close>)
| f (a, (true, true, _)) = SOME (a, HOLogic.eq_const HOLogic.boolT)
| f _ = NONE
in
--- a/src/HOL/Tools/set_comprehension_pointfree.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Tools/set_comprehension_pointfree.ML Sat Jan 05 17:24:33 2019 +0100
@@ -250,8 +250,8 @@
(map Pattern pats, Un (fm1', fm2'))
end;
-fun mk_formula vs (@{const HOL.conj} $ t1 $ t2) = merge_inter vs (mk_formula vs t1) (mk_formula vs t2)
- | mk_formula vs (@{const HOL.disj} $ t1 $ t2) = merge_union vs (mk_formula vs t1) (mk_formula vs t2)
+fun mk_formula vs (\<^const>\<open>HOL.conj\<close> $ t1 $ t2) = merge_inter vs (mk_formula vs t1) (mk_formula vs t2)
+ | mk_formula vs (\<^const>\<open>HOL.disj\<close> $ t1 $ t2) = merge_union vs (mk_formula vs t1) (mk_formula vs t2)
| mk_formula vs t = apfst single (mk_atom vs t)
fun strip_Int (Int (fm1, fm2)) = fm1 :: (strip_Int fm2)
--- a/src/HOL/Transitive_Closure.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Transitive_Closure.thy Sat Jan 05 17:24:33 2019 +0100
@@ -1224,7 +1224,7 @@
val trancl_rtrancl_trancl = @{thm trancl_rtrancl_trancl};
val rtrancl_trans = @{thm rtrancl_trans};
- fun decomp (@{const Trueprop} $ t) =
+ fun decomp (\<^const>\<open>Trueprop\<close> $ t) =
let
fun dec (Const (\<^const_name>\<open>Set.member\<close>, _) $ (Const (\<^const_name>\<open>Pair\<close>, _) $ a $ b) $ rel) =
let
@@ -1249,7 +1249,7 @@
val trancl_rtrancl_trancl = @{thm tranclp_rtranclp_tranclp};
val rtrancl_trans = @{thm rtranclp_trans};
- fun decomp (@{const Trueprop} $ t) =
+ fun decomp (\<^const>\<open>Trueprop\<close> $ t) =
let
fun dec (rel $ a $ b) =
let
--- a/src/HOL/Types_To_Sets/Examples/Linear_Algebra_On.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/Examples/Linear_Algebra_On.thy Sat Jan 05 17:24:33 2019 +0100
@@ -384,7 +384,7 @@
end
-subsection \<open>Transfer from type-based @{theory HOL.Modules} and @{theory HOL.Vector_Spaces}\<close>
+subsection \<open>Transfer from type-based \<^theory>\<open>HOL.Modules\<close> and \<^theory>\<open>HOL.Vector_Spaces\<close>\<close>
lemmas [transfer_rule] = right_total_fun_eq_transfer
and [transfer_rule del] = vimage_parametric
--- a/src/HOL/Types_To_Sets/Examples/Prerequisites.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/Examples/Prerequisites.thy Sat Jan 05 17:24:33 2019 +0100
@@ -92,7 +92,7 @@
ML \<open>
val _ = Outer_Syntax.local_theory' \<^command_keyword>\<open>lemmas_with\<close> "note theorems with (the same) attributes"
- (Parse.attribs --| @{keyword :} -- Parse_Spec.name_facts -- Parse.for_fixes
+ (Parse.attribs --| \<^keyword>\<open>:\<close> -- Parse_Spec.name_facts -- Parse.for_fixes
>> (fn (((attrs),facts), fixes) =>
#2 oo Specification.theorems_cmd Thm.theoremK
(map (apsnd (map (apsnd (fn xs => attrs@xs)))) facts) fixes))
--- a/src/HOL/Types_To_Sets/Examples/T2_Spaces.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/Examples/T2_Spaces.thy Sat Jan 05 17:24:33 2019 +0100
@@ -99,7 +99,7 @@
by blast
setup \<open>Sign.add_const_constraint
- (@{const_name "open"}, SOME @{typ "'a set \<Rightarrow> bool"})\<close>
+ (\<^const_name>\<open>open\<close>, SOME \<^typ>\<open>'a set \<Rightarrow> bool\<close>)\<close>
text\<open>The aforementioned development can be automated. The main part is already automated
by the transfer_prover.\<close>
@@ -153,7 +153,7 @@
qed
setup \<open>Sign.add_const_constraint
- (@{const_name "open"}, SOME @{typ "'a::topological_space set \<Rightarrow> bool"})\<close>
+ (\<^const_name>\<open>open\<close>, SOME \<^typ>\<open>'a::topological_space set \<Rightarrow> bool\<close>)\<close>
text\<open>The Final Result. We can compare the type-based and the set-based statement.\<close>
thm compact_imp_closed compact_imp_closed_set_based
--- a/src/HOL/Types_To_Sets/internalize_sort.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/internalize_sort.ML Sat Jan 05 17:24:33 2019 +0100
@@ -62,7 +62,7 @@
Thm.rule_attribute [] (fn context => fn thm =>
(snd (internalize_sort (Thm.ctyp_of (Context.proof_of context) tvar) thm)));
-val _ = Context.>> (Context.map_theory (Attrib.setup @{binding internalize_sort}
+val _ = Context.>> (Context.map_theory (Attrib.setup \<^binding>\<open>internalize_sort\<close>
(tvar >> internalize_sort_attr) "internalize a sort"));
end;
--- a/src/HOL/Types_To_Sets/local_typedef.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/local_typedef.ML Sat Jan 05 17:24:33 2019 +0100
@@ -23,9 +23,9 @@
(** BEGINNING OF THE CRITICAL CODE **)
-fun dest_typedef (Const (@{const_name Ex}, _) $ Abs (_, _,
- (Const (@{const_name Ex}, _) $ Abs (_, Abs_type,
- (Const (@{const_name type_definition}, _)) $ Bound 1 $ Bound 0 $ set)))) =
+fun dest_typedef (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, _,
+ (Const (\<^const_name>\<open>Ex\<close>, _) $ Abs (_, Abs_type,
+ (Const (\<^const_name>\<open>type_definition\<close>, _)) $ Bound 1 $ Bound 0 $ set)))) =
(Abs_type, set)
| dest_typedef t = raise TERM ("dest_typedef", [t]);
@@ -71,14 +71,14 @@
(** END OF THE CRITICAL CODE **)
val (_, cancel_type_definition_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding cancel_type_definition}, cancel_type_definition_cterm)));
+ (Thm.add_oracle (\<^binding>\<open>cancel_type_definition\<close>, cancel_type_definition_cterm)));
fun cancel_type_definition thm =
Drule.implies_elim_list (cancel_type_definition_oracle thm) (map Thm.assume (Thm.chyps_of thm));
val cancel_type_definition_attr = Thm.rule_attribute [] (K cancel_type_definition);
-val _ = Context.>> (Context.map_theory (Attrib.setup @{binding cancel_type_definition}
+val _ = Context.>> (Context.map_theory (Attrib.setup \<^binding>\<open>cancel_type_definition\<close>
(Scan.succeed cancel_type_definition_attr) "cancel a local type definition"));
end;
--- a/src/HOL/Types_To_Sets/unoverload_type.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/unoverload_type.ML Sat Jan 05 17:24:33 2019 +0100
@@ -56,7 +56,7 @@
fun unoverload_type_attr xs = Thm.rule_attribute [] (fn context => unoverload_type context xs)
-val _ = Context.>> (Context.map_theory (Attrib.setup @{binding unoverload_type}
+val _ = Context.>> (Context.map_theory (Attrib.setup \<^binding>\<open>unoverload_type\<close>
(Scan.lift (Scan.repeat Args.var) >> unoverload_type_attr)
"internalize and unoverload type class parameters"))
--- a/src/HOL/Types_To_Sets/unoverloading.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Types_To_Sets/unoverloading.ML Sat Jan 05 17:24:33 2019 +0100
@@ -120,7 +120,7 @@
(** END OF THE CRITICAL CODE **)
val (_, unoverload_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding unoverload},
+ (Thm.add_oracle (\<^binding>\<open>unoverload\<close>,
fn (const, thm) => unoverload_impl const thm)));
fun unoverload const thm = unoverload_oracle (const, thm);
@@ -134,7 +134,7 @@
then error ("The term is not a constant: " ^ Syntax.string_of_term ctxt tm)
else tm |> Logic.varify_types_global |> Thm.cterm_of ctxt);
-val _ = Context.>> (Context.map_theory (Attrib.setup @{binding unoverload}
+val _ = Context.>> (Context.map_theory (Attrib.setup \<^binding>\<open>unoverload\<close>
(const >> unoverload_attr) "unoverload an uninterpreted constant"));
end;
--- a/src/HOL/UNITY/Comp/Alloc.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Comp/Alloc.thy Sat Jan 05 17:24:33 2019 +0100
@@ -368,7 +368,7 @@
done
-subsubsection\<open>bijectivity of @{term sysOfClient}\<close>
+subsubsection\<open>bijectivity of \<^term>\<open>sysOfClient\<close>\<close>
lemma inj_sysOfClient [iff]: "inj sysOfClient"
apply (unfold sysOfClient_def)
@@ -396,7 +396,7 @@
done
-subsubsection\<open>bijectivity of @{term client_map}\<close>
+subsubsection\<open>bijectivity of \<^term>\<open>client_map\<close>\<close>
lemma inj_client_map [iff]: "inj client_map"
apply (unfold inj_on_def)
@@ -420,14 +420,14 @@
done
-text\<open>o-simprules for @{term client_map}\<close>
+text\<open>o-simprules for \<^term>\<open>client_map\<close>\<close>
lemma fst_o_client_map: "fst o client_map = non_dummy"
apply (unfold client_map_def)
apply (rule fst_o_funPair)
done
-ML \<open>ML_Thms.bind_thms ("fst_o_client_map'", make_o_equivs @{context} @{thm fst_o_client_map})\<close>
+ML \<open>ML_Thms.bind_thms ("fst_o_client_map'", make_o_equivs \<^context> @{thm fst_o_client_map})\<close>
declare fst_o_client_map' [simp]
lemma snd_o_client_map: "snd o client_map = clientState_d.dummy"
@@ -435,90 +435,90 @@
apply (rule snd_o_funPair)
done
-ML \<open>ML_Thms.bind_thms ("snd_o_client_map'", make_o_equivs @{context} @{thm snd_o_client_map})\<close>
+ML \<open>ML_Thms.bind_thms ("snd_o_client_map'", make_o_equivs \<^context> @{thm snd_o_client_map})\<close>
declare snd_o_client_map' [simp]
-subsection\<open>o-simprules for @{term sysOfAlloc} [MUST BE AUTOMATED]\<close>
+subsection\<open>o-simprules for \<^term>\<open>sysOfAlloc\<close> [MUST BE AUTOMATED]\<close>
lemma client_o_sysOfAlloc: "client o sysOfAlloc = fst o allocState_d.dummy "
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("client_o_sysOfAlloc'", make_o_equivs @{context} @{thm client_o_sysOfAlloc})\<close>
+ML \<open>ML_Thms.bind_thms ("client_o_sysOfAlloc'", make_o_equivs \<^context> @{thm client_o_sysOfAlloc})\<close>
declare client_o_sysOfAlloc' [simp]
lemma allocGiv_o_sysOfAlloc_eq: "allocGiv o sysOfAlloc = allocGiv"
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocGiv_o_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocGiv_o_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocGiv_o_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocGiv_o_sysOfAlloc_eq})\<close>
declare allocGiv_o_sysOfAlloc_eq' [simp]
lemma allocAsk_o_sysOfAlloc_eq: "allocAsk o sysOfAlloc = allocAsk"
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocAsk_o_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocAsk_o_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocAsk_o_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocAsk_o_sysOfAlloc_eq})\<close>
declare allocAsk_o_sysOfAlloc_eq' [simp]
lemma allocRel_o_sysOfAlloc_eq: "allocRel o sysOfAlloc = allocRel"
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocRel_o_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocRel_o_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocRel_o_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocRel_o_sysOfAlloc_eq})\<close>
declare allocRel_o_sysOfAlloc_eq' [simp]
-subsection\<open>o-simprules for @{term sysOfClient} [MUST BE AUTOMATED]\<close>
+subsection\<open>o-simprules for \<^term>\<open>sysOfClient\<close> [MUST BE AUTOMATED]\<close>
lemma client_o_sysOfClient: "client o sysOfClient = fst"
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("client_o_sysOfClient'", make_o_equivs @{context} @{thm client_o_sysOfClient})\<close>
+ML \<open>ML_Thms.bind_thms ("client_o_sysOfClient'", make_o_equivs \<^context> @{thm client_o_sysOfClient})\<close>
declare client_o_sysOfClient' [simp]
lemma allocGiv_o_sysOfClient_eq: "allocGiv o sysOfClient = allocGiv o snd "
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocGiv_o_sysOfClient_eq'", make_o_equivs @{context} @{thm allocGiv_o_sysOfClient_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocGiv_o_sysOfClient_eq'", make_o_equivs \<^context> @{thm allocGiv_o_sysOfClient_eq})\<close>
declare allocGiv_o_sysOfClient_eq' [simp]
lemma allocAsk_o_sysOfClient_eq: "allocAsk o sysOfClient = allocAsk o snd "
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocAsk_o_sysOfClient_eq'", make_o_equivs @{context} @{thm allocAsk_o_sysOfClient_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocAsk_o_sysOfClient_eq'", make_o_equivs \<^context> @{thm allocAsk_o_sysOfClient_eq})\<close>
declare allocAsk_o_sysOfClient_eq' [simp]
lemma allocRel_o_sysOfClient_eq: "allocRel o sysOfClient = allocRel o snd "
apply record_auto
done
-ML \<open>ML_Thms.bind_thms ("allocRel_o_sysOfClient_eq'", make_o_equivs @{context} @{thm allocRel_o_sysOfClient_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocRel_o_sysOfClient_eq'", make_o_equivs \<^context> @{thm allocRel_o_sysOfClient_eq})\<close>
declare allocRel_o_sysOfClient_eq' [simp]
lemma allocGiv_o_inv_sysOfAlloc_eq: "allocGiv o inv sysOfAlloc = allocGiv"
apply (simp add: o_def)
done
-ML \<open>ML_Thms.bind_thms ("allocGiv_o_inv_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocGiv_o_inv_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocGiv_o_inv_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocGiv_o_inv_sysOfAlloc_eq})\<close>
declare allocGiv_o_inv_sysOfAlloc_eq' [simp]
lemma allocAsk_o_inv_sysOfAlloc_eq: "allocAsk o inv sysOfAlloc = allocAsk"
apply (simp add: o_def)
done
-ML \<open>ML_Thms.bind_thms ("allocAsk_o_inv_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocAsk_o_inv_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocAsk_o_inv_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocAsk_o_inv_sysOfAlloc_eq})\<close>
declare allocAsk_o_inv_sysOfAlloc_eq' [simp]
lemma allocRel_o_inv_sysOfAlloc_eq: "allocRel o inv sysOfAlloc = allocRel"
apply (simp add: o_def)
done
-ML \<open>ML_Thms.bind_thms ("allocRel_o_inv_sysOfAlloc_eq'", make_o_equivs @{context} @{thm allocRel_o_inv_sysOfAlloc_eq})\<close>
+ML \<open>ML_Thms.bind_thms ("allocRel_o_inv_sysOfAlloc_eq'", make_o_equivs \<^context> @{thm allocRel_o_inv_sysOfAlloc_eq})\<close>
declare allocRel_o_inv_sysOfAlloc_eq' [simp]
lemma rel_inv_client_map_drop_map: "(rel o inv client_map o drop_map i o inv sysOfClient) =
@@ -526,7 +526,7 @@
apply (simp add: o_def drop_map_def)
done
-ML \<open>ML_Thms.bind_thms ("rel_inv_client_map_drop_map'", make_o_equivs @{context} @{thm rel_inv_client_map_drop_map})\<close>
+ML \<open>ML_Thms.bind_thms ("rel_inv_client_map_drop_map'", make_o_equivs \<^context> @{thm rel_inv_client_map_drop_map})\<close>
declare rel_inv_client_map_drop_map [simp]
lemma ask_inv_client_map_drop_map: "(ask o inv client_map o drop_map i o inv sysOfClient) =
@@ -534,7 +534,7 @@
apply (simp add: o_def drop_map_def)
done
-ML \<open>ML_Thms.bind_thms ("ask_inv_client_map_drop_map'", make_o_equivs @{context} @{thm ask_inv_client_map_drop_map})\<close>
+ML \<open>ML_Thms.bind_thms ("ask_inv_client_map_drop_map'", make_o_equivs \<^context> @{thm ask_inv_client_map_drop_map})\<close>
declare ask_inv_client_map_drop_map [simp]
@@ -548,7 +548,7 @@
val [Client_Increasing_ask, Client_Increasing_rel,
Client_Bounded, Client_Progress, Client_AllowedActs,
Client_preserves_giv, Client_preserves_dummy] =
- @{thm Client} |> simplify (@{context} addsimps @{thms client_spec_simps})
+ @{thm Client} |> simplify (\<^context> addsimps @{thms client_spec_simps})
|> list_of_Int;
ML_Thms.bind_thm ("Client_Increasing_ask", Client_Increasing_ask);
@@ -578,7 +578,7 @@
val [Network_Ask, Network_Giv, Network_Rel, Network_AllowedActs,
Network_preserves_allocGiv, Network_preserves_rel,
Network_preserves_ask] =
- @{thm Network} |> simplify (@{context} addsimps @{thms network_spec_simps})
+ @{thm Network} |> simplify (\<^context> addsimps @{thms network_spec_simps})
|> list_of_Int;
ML_Thms.bind_thm ("Network_Ask", Network_Ask);
@@ -609,7 +609,7 @@
val [Alloc_Increasing_0, Alloc_Safety, Alloc_Progress, Alloc_AllowedActs,
Alloc_preserves_allocRel, Alloc_preserves_allocAsk,
Alloc_preserves_dummy] =
- @{thm Alloc} |> simplify (@{context} addsimps @{thms alloc_spec_simps})
+ @{thm Alloc} |> simplify (\<^context> addsimps @{thms alloc_spec_simps})
|> list_of_Int;
ML_Thms.bind_thm ("Alloc_Increasing_0", Alloc_Increasing_0);
@@ -732,7 +732,7 @@
Scan.succeed (fn ctxt => SIMPLE_METHOD (rename_client_map_tac ctxt))
\<close>
-text\<open>Lifting \<open>Client_Increasing\<close> to @{term systemState}\<close>
+text\<open>Lifting \<open>Client_Increasing\<close> to \<^term>\<open>systemState\<close>\<close>
lemma rename_Client_Increasing: "i \<in> I
==> rename sysOfClient (plam x: I. rename client_map Client) \<in>
UNIV guarantees
@@ -901,7 +901,7 @@
text\<open>safety (1), step 4 (final result!)\<close>
theorem System_safety: "System \<in> system_safety"
apply (unfold system_safety_def)
- apply (tactic \<open>resolve_tac @{context} [Always_Int_rule [@{thm System_sum_bounded},
+ apply (tactic \<open>resolve_tac \<^context> [Always_Int_rule [@{thm System_sum_bounded},
@{thm Always_tokens_giv_le_allocGiv}, @{thm Always_tokens_allocRel_le_rel}] RS
@{thm Always_weaken}] 1\<close>)
apply auto
@@ -944,7 +944,7 @@
lemma System_Bounded_allocAsk: "System \<in> Always {s. \<forall>i<Nclients.
\<forall>elt \<in> set ((sub i o allocAsk) s). elt \<le> NbT}"
apply (auto simp add: Collect_all_imp_eq)
- apply (tactic \<open>resolve_tac @{context} [Always_Int_rule [@{thm Always_allocAsk_le_ask},
+ apply (tactic \<open>resolve_tac \<^context> [Always_Int_rule [@{thm Always_allocAsk_le_ask},
@{thm System_Bounded_ask}] RS @{thm Always_weaken}] 1\<close>)
apply (auto dest: set_mono)
done
--- a/src/HOL/UNITY/Comp/Priority.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Comp/Priority.thy Sat Jan 05 17:24:33 2019 +0100
@@ -254,8 +254,8 @@
text\<open>We have proved all (relevant) theorems given in the paper. We didn't
-assume any thing about the relation @{term r}. It is not necessary that
-@{term r} be a priority relation as assumed in the original proof. It
+assume any thing about the relation \<^term>\<open>r\<close>. It is not necessary that
+\<^term>\<open>r\<close> be a priority relation as assumed in the original proof. It
suffices that we start from a state which is finite and acyclic.\<close>
--- a/src/HOL/UNITY/Comp/Progress.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Comp/Progress.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,7 +18,7 @@
definition GG :: "int program" where
"GG = mk_total_program (UNIV, {range (\<lambda>x. (x, 2*x))}, UNIV)"
-subsubsection \<open>Calculating @{term "wens_set FF (atLeast k)"}\<close>
+subsubsection \<open>Calculating \<^term>\<open>wens_set FF (atLeast k)\<close>\<close>
lemma Domain_actFF: "Domain (range (\<lambda>x::int. (x, x + 1))) = UNIV"
by force
@@ -62,7 +62,7 @@
apply (simp add: wens_single_finite_FF)
done
-subsubsection \<open>Proving @{term "FF \<in> UNIV leadsTo atLeast (k::int)"}\<close>
+subsubsection \<open>Proving \<^term>\<open>FF \<in> UNIV leadsTo atLeast (k::int)\<close>\<close>
lemma atLeast_ensures: "FF \<in> atLeast (k - 1) ensures atLeast (k::int)"
apply (simp add: Progress.wens_FF [symmetric] wens_ensures)
--- a/src/HOL/UNITY/Extend.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Extend.thy Sat Jan 05 17:24:33 2019 +0100
@@ -167,7 +167,7 @@
end
-subsection\<open>@{term extend_set}: basic properties\<close>
+subsection\<open>\<^term>\<open>extend_set\<close>: basic properties\<close>
lemma project_set_iff [iff]:
"(x \<in> project_set h C) = (\<exists>y. h(x,y) \<in> C)"
@@ -215,7 +215,7 @@
apply (auto simp add: split_extended_all)
done
-subsection\<open>@{term project_set}: basic properties\<close>
+subsection\<open>\<^term>\<open>project_set\<close>: basic properties\<close>
(*project_set is simply image!*)
lemma project_set_eq: "project_set h C = f ` C"
@@ -262,7 +262,7 @@
by (auto simp: extend_set_def)
-subsection\<open>@{term extend_act}\<close>
+subsection\<open>\<^term>\<open>extend_act\<close>\<close>
(*Can't strengthen it to
((h(s,y), h(s',y')) \<in> extend_act h act) = ((s, s') \<in> act & y=y')
--- a/src/HOL/UNITY/Lift_prog.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Lift_prog.thy Sat Jan 05 17:24:33 2019 +0100
@@ -140,7 +140,7 @@
done
-subsection\<open>The Operator @{term lift_set}\<close>
+subsection\<open>The Operator \<^term>\<open>lift_set\<close>\<close>
lemma lift_set_empty [simp]: "lift_set i {} = {}"
by (unfold lift_set_def, auto)
--- a/src/HOL/UNITY/PPROD.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/PPROD.thy Sat Jan 05 17:24:33 2019 +0100
@@ -68,7 +68,7 @@
PLam I F \<in> transient A = (\<exists>i \<in> I. lift i (F i) \<in> transient A)"
by (simp add: JN_transient PLam_def)
-text\<open>This holds because the @{term "F j"} cannot change @{term "lift_set i"}\<close>
+text\<open>This holds because the \<^term>\<open>F j\<close> cannot change \<^term>\<open>lift_set i\<close>\<close>
lemma PLam_ensures:
"[| i \<in> I; F i \<in> (A \<times> UNIV) ensures (B \<times> UNIV);
\<forall>j. F j \<in> preserves snd |]
@@ -114,7 +114,7 @@
(*** guarantees properties ***)
-text\<open>This rule looks unsatisfactory because it refers to @{term lift}.
+text\<open>This rule looks unsatisfactory because it refers to \<^term>\<open>lift\<close>.
One must use
\<open>lift_guarantees_eq_lift_inv\<close> to rewrite the first subgoal and
something like \<open>lift_preserves_sub\<close> to rewrite the third. However
--- a/src/HOL/UNITY/ProgressSets.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/ProgressSets.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
theory ProgressSets imports Transformers begin
-subsection \<open>Complete Lattices and the Operator @{term cl}\<close>
+subsection \<open>Complete Lattices and the Operator \<^term>\<open>cl\<close>\<close>
definition lattice :: "'a set set => bool" where
\<comment> \<open>Meier calls them closure sets, but they are just complete lattices\<close>
@@ -59,8 +59,8 @@
lemma lattice_stable: "lattice {X. F \<in> stable X}"
by (simp add: lattice_def stable_def constrains_def, blast)
-text\<open>The next three results state that @{term "cl L r"} is the minimal
- element of @{term L} that includes @{term r}.\<close>
+text\<open>The next three results state that \<^term>\<open>cl L r\<close> is the minimal
+ element of \<^term>\<open>L\<close> that includes \<^term>\<open>r\<close>.\<close>
lemma cl_in_lattice: "lattice L ==> cl L r \<in> L"
by (simp add: lattice_def cl_def)
@@ -131,7 +131,7 @@
subsection \<open>Progress Sets and the Main Lemma\<close>
text\<open>A progress set satisfies certain closure conditions and is a
-simple way of including the set @{term "wens_set F B"}.\<close>
+simple way of including the set \<^term>\<open>wens_set F B\<close>.\<close>
definition closed :: "['a program, 'a set, 'a set, 'a set set] => bool" where
"closed F T B L == \<forall>M. \<forall>act \<in> Acts F. B\<subseteq>M & T\<inter>M \<in> L -->
@@ -146,12 +146,12 @@
==> T \<inter> (B \<union> wp act M) \<in> L"
by (simp add: closed_def)
-text\<open>Note: the formalization below replaces Meier's @{term q} by @{term B}
-and @{term m} by @{term X}.\<close>
+text\<open>Note: the formalization below replaces Meier's \<^term>\<open>q\<close> by \<^term>\<open>B\<close>
+and \<^term>\<open>m\<close> by \<^term>\<open>X\<close>.\<close>
text\<open>Part of the proof of the claim at the bottom of page 97. It's
proved separately because the argument requires a generalization over
-all @{term "act \<in> Acts F"}.\<close>
+all \<^term>\<open>act \<in> Acts F\<close>.\<close>
lemma lattice_awp_lemma:
assumes TXC: "T\<inter>X \<in> C" \<comment> \<open>induction hypothesis in theorem below\<close>
and BsubX: "B \<subseteq> X" \<comment> \<open>holds in inductive step\<close>
@@ -240,15 +240,15 @@
qed
text\<open>Proved on page 96 of Meier's thesis. The special case when
- @{term "T=UNIV"} states that every progress set for the program @{term F}
- and set @{term B} includes the set @{term "wens_set F B"}.\<close>
+ \<^term>\<open>T=UNIV\<close> states that every progress set for the program \<^term>\<open>F\<close>
+ and set \<^term>\<open>B\<close> includes the set \<^term>\<open>wens_set F B\<close>.\<close>
lemma progress_set_lemma:
"[|C \<in> progress_set F T B; r \<in> wens_set F B; F \<in> stable T|] ==> T\<inter>r \<in> C"
apply (simp add: progress_set_def, clarify)
apply (erule wens_set.induct)
txt\<open>Base\<close>
apply (simp add: Int_in_lattice)
- txt\<open>The difficult @{term wens} case\<close>
+ txt\<open>The difficult \<^term>\<open>wens\<close> case\<close>
apply (simp add: progress_induction_step)
txt\<open>Disjunctive case\<close>
apply (subgoal_tac "(\<Union>U\<in>W. T \<inter> U) \<in> C")
@@ -436,7 +436,7 @@
thus ?thesis by simp
qed
-text\<open>The ``Decoupling via @{term G'} Union Theorem''\<close>
+text\<open>The ``Decoupling via \<^term>\<open>G'\<close> Union Theorem''\<close>
theorem decoupling_via_aux:
assumes leadsTo: "F \<in> A leadsTo B"
and prog: "{X. G' \<in> stable X} \<in> progress_set F UNIV B"
@@ -455,7 +455,7 @@
subsection\<open>Composition Theorems Based on Monotonicity and Commutativity\<close>
-subsubsection\<open>Commutativity of @{term "cl L"} and assignment.\<close>
+subsubsection\<open>Commutativity of \<^term>\<open>cl L\<close> and assignment.\<close>
definition commutes :: "['a program, 'a set, 'a set, 'a set set] => bool" where
"commutes F T B L ==
\<forall>M. \<forall>act \<in> Acts F. B \<subseteq> M -->
@@ -496,7 +496,7 @@
-text\<open>Possibly move to Relation.thy, after @{term single_valued}\<close>
+text\<open>Possibly move to Relation.thy, after \<^term>\<open>single_valued\<close>\<close>
definition funof :: "[('a*'b)set, 'a] => 'b" where
"funof r == (\<lambda>x. THE y. (x,y) \<in> r)"
--- a/src/HOL/UNITY/Simple/NSP_Bad.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Simple/NSP_Bad.thy Sat Jan 05 17:24:33 2019 +0100
@@ -88,7 +88,7 @@
done
-subsection\<open>Inductive Proofs about @{term ns_public}\<close>
+subsection\<open>Inductive Proofs about \<^term>\<open>ns_public\<close>\<close>
lemma ns_constrainsI:
"(!!act s s'. [| act \<in> {Id, Fake, NS1, NS2, NS3};
--- a/src/HOL/UNITY/Transformers.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Transformers.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,8 +17,8 @@
theory Transformers imports Comp begin
-subsection\<open>Defining the Predicate Transformers @{term wp},
- @{term awp} and @{term wens}\<close>
+subsection\<open>Defining the Predicate Transformers \<^term>\<open>wp\<close>,
+ \<^term>\<open>awp\<close> and \<^term>\<open>wens\<close>\<close>
definition wp :: "[('a*'a) set, 'a set] => 'a set" where
\<comment> \<open>Dijkstra's weakest-precondition operator (for an individual command)\<close>
@@ -88,7 +88,7 @@
lemma wens_Id [simp]: "wens F Id B = B"
by (simp add: wens_def gfp_def wp_def awp_def, blast)
-text\<open>These two theorems justify the claim that @{term wens} returns the
+text\<open>These two theorems justify the claim that \<^term>\<open>wens\<close> returns the
weakest assertion satisfying the ensures property\<close>
lemma ensures_imp_wens: "F \<in> A ensures B ==> \<exists>act \<in> Acts F. A \<subseteq> wens F act B"
apply (simp add: wens_def ensures_def transient_def, clarify)
@@ -125,7 +125,7 @@
slow!\<close>
text\<open>Assertion (7): 4.18 in the thesis. NOTE that many of these results
-hold for an arbitrary action. We often do not require @{term "act \<in> Acts F"}\<close>
+hold for an arbitrary action. We often do not require \<^term>\<open>act \<in> Acts F\<close>\<close>
lemma stable_wens: "F \<in> stable A ==> F \<in> stable (wens F act A)"
apply (simp add: stable_def)
apply (drule constrains_Un [OF Diff_wens_constrains [of F act A]])
@@ -144,7 +144,7 @@
done
text\<open>Assertion (8): 4.21 in the thesis. Here we indeed require
- @{term "act \<in> Acts F"}\<close>
+ \<^term>\<open>act \<in> Acts F\<close>\<close>
lemma wens_Int_eq:
"[|T-B \<subseteq> awp F T; act \<in> Acts F|]
==> T \<inter> wens F act B = T \<inter> wens F act (T\<inter>B)"
@@ -202,9 +202,9 @@
lemma leadsTo_iff_wens_set: "(F \<in> A leadsTo B) = (\<exists>C \<in> wens_set F B. A \<subseteq> C)"
by (blast intro: leadsTo_imp_wens_set leadsTo_weaken_L wens_set_imp_leadsTo)
-text\<open>This is the result that requires the definition of @{term wens_set} to
- require @{term W} to be non-empty in the Unio case, for otherwise we should
- always have @{term "{} \<in> wens_set F B"}.\<close>
+text\<open>This is the result that requires the definition of \<^term>\<open>wens_set\<close> to
+ require \<^term>\<open>W\<close> to be non-empty in the Unio case, for otherwise we should
+ always have \<^term>\<open>{} \<in> wens_set F B\<close>.\<close>
lemma wens_set_imp_subset: "A \<in> wens_set F B ==> B \<subseteq> A"
apply (erule wens_set.induct)
apply (blast intro: wens_weakening [THEN subsetD])+
@@ -299,7 +299,7 @@
done
-subsection \<open>The Set @{term "wens_set F B"} for a Single-Assignment Program\<close>
+subsection \<open>The Set \<^term>\<open>wens_set F B\<close> for a Single-Assignment Program\<close>
text\<open>Thesis Section 4.3.3\<close>
text\<open>We start by proving laws about single-assignment programs\<close>
@@ -332,7 +332,7 @@
by (simp add: wens_def gfp_def wp_def, blast)
-text\<open>Next, we express the @{term "wens_set"} for single-assignment programs\<close>
+text\<open>Next, we express the \<^term>\<open>wens_set\<close> for single-assignment programs\<close>
definition wens_single_finite :: "[('a*'a) set, 'a set, nat] => 'a set" where
"wens_single_finite act B k == \<Union>i \<in> atMost k. (wp act ^^ i) B"
--- a/src/HOL/UNITY/UNITY.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/UNITY.thy Sat Jan 05 17:24:33 2019 +0100
@@ -283,7 +283,7 @@
lemma invariantI: "[| Init F \<subseteq> A; F \<in> stable A |] ==> F \<in> invariant A"
by (simp add: invariant_def)
-text\<open>Could also say @{term "invariant A \<inter> invariant B \<subseteq> invariant(A \<inter> B)"}\<close>
+text\<open>Could also say \<^term>\<open>invariant A \<inter> invariant B \<subseteq> invariant(A \<inter> B)\<close>\<close>
lemma invariant_Int:
"[| F \<in> invariant A; F \<in> invariant B |] ==> F \<in> invariant (A \<inter> B)"
by (auto simp add: invariant_def stable_Int)
--- a/src/HOL/UNITY/Union.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/Union.thy Sat Jan 05 17:24:33 2019 +0100
@@ -372,7 +372,7 @@
lemma OK_iff_Allowed: "OK I F = (\<forall>i \<in> I. \<forall>j \<in> I-{i}. F i \<in> Allowed(F j))"
by (auto simp add: OK_iff_ok ok_iff_Allowed)
-subsection\<open>@{term safety_prop}, for reasoning about
+subsection\<open>\<^term>\<open>safety_prop\<close>, for reasoning about
given instances of "ok"\<close>
lemma safety_prop_Acts_iff:
--- a/src/HOL/UNITY/WFair.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/UNITY/WFair.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,8 +36,8 @@
\<comment> \<open>This definition specifies conditional fairness. The rest of the theory
is generic to all forms of fairness. To get weak fairness, conjoin
- the inclusion below with @{term "A \<subseteq> Domain act"}, which specifies
- that the action is enabled over all of @{term A}.\<close>
+ the inclusion below with \<^term>\<open>A \<subseteq> Domain act\<close>, which specifies
+ that the action is enabled over all of \<^term>\<open>A\<close>.\<close>
transient :: "'a set => 'a program set" where
"transient A == {F. \<exists>act\<in>Acts F. act``A \<subseteq> -A}"
@@ -64,7 +64,7 @@
"A leadsTo B == {F. (A,B) \<in> leads F}"
definition wlt :: "['a program, 'a set] => 'a set" where
- \<comment> \<open>predicate transformer: the largest set that leads to @{term B}\<close>
+ \<comment> \<open>predicate transformer: the largest set that leads to \<^term>\<open>B\<close>\<close>
"wlt F B == \<Union>{A. F \<in> A leadsTo B}"
notation leadsTo (infixl "\<longmapsto>" 60)
--- a/src/HOL/Unix/Nested_Environment.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Unix/Nested_Environment.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,8 +10,8 @@
text \<open>
Consider a partial function @{term [source] "e :: 'a \<Rightarrow> 'b option"}; this may
- be understood as an \<^emph>\<open>environment\<close> mapping indexes @{typ 'a} to optional
- entry values @{typ 'b} (cf.\ the basic theory \<open>Map\<close> of Isabelle/HOL). This
+ be understood as an \<^emph>\<open>environment\<close> mapping indexes \<^typ>\<open>'a\<close> to optional
+ entry values \<^typ>\<open>'b\<close> (cf.\ the basic theory \<open>Map\<close> of Isabelle/HOL). This
basic idea is easily generalized to that of a \<^emph>\<open>nested environment\<close>, where
entries may be either basic values or again proper environments. Then each
entry is accessed by a \<^emph>\<open>path\<close>, i.e.\ a list of indexes leading to its
@@ -24,9 +24,9 @@
text \<open>
\<^medskip>
- In the type @{typ "('a, 'b, 'c) env"} the parameter @{typ 'a} refers to
- basic values (occurring in terminal positions), type @{typ 'b} to values
- associated with proper (inner) environments, and type @{typ 'c} with the
+ In the type \<^typ>\<open>('a, 'b, 'c) env\<close> the parameter \<^typ>\<open>'a\<close> refers to
+ basic values (occurring in terminal positions), type \<^typ>\<open>'b\<close> to values
+ associated with proper (inner) environments, and type \<^typ>\<open>'c\<close> with the
index type for branching. Note that there is no restriction on any of these
types. In particular, arbitrary branching may yield rather large
(transfinite) tree structures.
@@ -39,7 +39,7 @@
Lookup in nested environments works by following a given path of index
elements, leading to an optional result (a terminal value or nested
environment). A \<^emph>\<open>defined position\<close> within a nested environment is one where
- @{term lookup} at its path does not yield @{term None}.
+ \<^term>\<open>lookup\<close> at its path does not yield \<^term>\<open>None\<close>.
\<close>
primrec lookup :: "('a, 'b, 'c) env \<Rightarrow> 'c list \<Rightarrow> ('a, 'b, 'c) env option"
@@ -57,7 +57,7 @@
text \<open>
\<^medskip>
- The characteristic cases of @{term lookup} are expressed by the following
+ The characteristic cases of \<^term>\<open>lookup\<close> are expressed by the following
equalities.
\<close>
@@ -92,7 +92,7 @@
text \<open>
\<^medskip>
- Displaced @{term lookup} operations, relative to a certain base path prefix,
+ Displaced \<^term>\<open>lookup\<close> operations, relative to a certain base path prefix,
may be reduced as follows. There are two cases, depending whether the
environment actually extends far enough to follow the base path.
\<close>
@@ -178,7 +178,7 @@
text \<open>
\<^medskip>
- Successful @{term lookup} deeper down an environment structure means we are
+ Successful \<^term>\<open>lookup\<close> deeper down an environment structure means we are
able to peek further up as well. Note that this is basically just the
contrapositive statement of @{thm [source] lookup_append_none} above.
\<close>
@@ -194,8 +194,7 @@
qed
text \<open>
- The subsequent statement describes in more detail how a successful @{term
- lookup} with a non-empty path results in a certain situation at any upper
+ The subsequent statement describes in more detail how a successful \<^term>\<open>lookup\<close> with a non-empty path results in a certain situation at any upper
position.
\<close>
@@ -265,7 +264,7 @@
text \<open>
\<^medskip>
- The characteristic cases of @{term update} are expressed by the following
+ The characteristic cases of \<^term>\<open>update\<close> are expressed by the following
equalities.
\<close>
@@ -316,8 +315,8 @@
text \<open>
\<^medskip>
- The most basic correspondence of @{term lookup} and @{term update} states
- that after @{term update} at a defined position, subsequent @{term lookup}
+ The most basic correspondence of \<^term>\<open>lookup\<close> and \<^term>\<open>update\<close> states
+ that after \<^term>\<open>update\<close> at a defined position, subsequent \<^term>\<open>lookup\<close>
operations would yield the new value.
\<close>
@@ -364,10 +363,9 @@
text \<open>
\<^medskip>
- The properties of displaced @{term update} operations are analogous to those
- of @{term lookup} above. There are two cases: below an undefined position
- @{term update} is absorbed altogether, and below a defined positions @{term
- update} affects subsequent @{term lookup} operations in the obvious way.
+ The properties of displaced \<^term>\<open>update\<close> operations are analogous to those
+ of \<^term>\<open>lookup\<close> above. There are two cases: below an undefined position
+ \<^term>\<open>update\<close> is absorbed altogether, and below a defined positions \<^term>\<open>update\<close> affects subsequent \<^term>\<open>lookup\<close> operations in the obvious way.
\<close>
theorem update_append_none:
@@ -459,9 +457,8 @@
text \<open>
\<^medskip>
- Apparently, @{term update} does not affect the result of subsequent @{term
- lookup} operations at independent positions, i.e.\ in case that the paths
- for @{term update} and @{term lookup} fork at a certain point.
+ Apparently, \<^term>\<open>update\<close> does not affect the result of subsequent \<^term>\<open>lookup\<close> operations at independent positions, i.e.\ in case that the paths
+ for \<^term>\<open>update\<close> and \<^term>\<open>lookup\<close> fork at a certain point.
\<close>
theorem lookup_update_other:
--- a/src/HOL/Unix/Unix.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Unix/Unix.thy Sat Jan 05 17:24:33 2019 +0100
@@ -82,20 +82,19 @@
others :: perms
text \<open>
- For plain files @{term Readable} and @{term Writable} specify read and write
+ For plain files \<^term>\<open>Readable\<close> and \<^term>\<open>Writable\<close> specify read and write
access to the actual content, i.e.\ the string of text stored here. For
- directories @{term Readable} determines if the set of entry names may be
- accessed, and @{term Writable} controls the ability to create or delete any
+ directories \<^term>\<open>Readable\<close> determines if the set of entry names may be
+ accessed, and \<^term>\<open>Writable\<close> controls the ability to create or delete any
entries (both plain files or sub-directories).
- As another simplification, we ignore the @{term Executable} permission
+ As another simplification, we ignore the \<^term>\<open>Executable\<close> permission
altogether. In reality it would indicate executable plain files (also known
as ``binaries''), or control actual lookup of directory entries (recall that
- mere directory browsing is controlled via @{term Readable}). Note that the
+ mere directory browsing is controlled via \<^term>\<open>Readable\<close>). Note that the
latter means that in order to perform any file-system operation whatsoever,
- all directories encountered on the path would have to grant @{term
- Executable}. We ignore this detail and pretend that all directories give
- @{term Executable} permission to anybody.
+ all directories encountered on the path would have to grant \<^term>\<open>Executable\<close>. We ignore this detail and pretend that all directories give
+ \<^term>\<open>Executable\<close> permission to anybody.
\<close>
@@ -103,9 +102,9 @@
text \<open>
In order to model the general tree structure of a Unix file-system we use
- the arbitrarily branching datatype @{typ "('a, 'b, 'c) env"} from the
+ the arbitrarily branching datatype \<^typ>\<open>('a, 'b, 'c) env\<close> from the
standard library of Isabelle/HOL @{cite "Bauer-et-al:2002:HOL-Library"}.
- This type provides constructors @{term Val} and @{term Env} as follows:
+ This type provides constructors \<^term>\<open>Val\<close> and \<^term>\<open>Env\<close> as follows:
\<^medskip>
{\def\isastyleminor{\isastyle}
@@ -115,19 +114,18 @@
\end{tabular}}
\<^medskip>
- Here the parameter @{typ 'a} refers to plain values occurring at leaf
- positions, parameter @{typ 'b} to information kept with inner branch nodes,
- and parameter @{typ 'c} to the branching type of the tree structure. For our
- purpose we use the type instance with @{typ "att \<times> string"} (representing
- plain files), @{typ att} (for attributes of directory nodes), and @{typ
- name} (for the index type of directory nodes).
+ Here the parameter \<^typ>\<open>'a\<close> refers to plain values occurring at leaf
+ positions, parameter \<^typ>\<open>'b\<close> to information kept with inner branch nodes,
+ and parameter \<^typ>\<open>'c\<close> to the branching type of the tree structure. For our
+ purpose we use the type instance with \<^typ>\<open>att \<times> string\<close> (representing
+ plain files), \<^typ>\<open>att\<close> (for attributes of directory nodes), and \<^typ>\<open>name\<close> (for the index type of directory nodes).
\<close>
type_synonym "file" = "(att \<times> string, att, name) env"
text \<open>
\<^medskip>
- The HOL library also provides @{term lookup} and @{term update} operations
+ The HOL library also provides \<^term>\<open>lookup\<close> and \<^term>\<open>update\<close> operations
for general tree structures with the subsequent primitive recursive
characterizations.
@@ -147,7 +145,7 @@
without further notice.
\<^bigskip>
- Apparently, the elements of type @{typ "file"} contain an @{typ att}
+ Apparently, the elements of type \<^typ>\<open>file\<close> contain an \<^typ>\<open>att\<close>
component in either case. We now define a few auxiliary operations to
manipulate this field uniformly, following the conventions for record types
in Isabelle/HOL @{cite "Nipkow-et-al:2000:HOL"}.
@@ -203,8 +201,8 @@
text \<open>
The main internal file-system operation is access of a file by a user,
- requesting a certain set of permissions. The resulting @{typ "file option"}
- indicates if a file had been present at the corresponding @{typ path} and if
+ requesting a certain set of permissions. The resulting \<^typ>\<open>file option\<close>
+ indicates if a file had been present at the corresponding \<^typ>\<open>path\<close> and if
access was granted according to the permissions recorded within the
file-system.
@@ -228,14 +226,13 @@
\<^medskip>
Successful access to a certain file is the main prerequisite for
system-calls to be applicable (cf.\ \secref{sec:unix-trans}). Any
- modification of the file-system is then performed using the basic @{term
- update} operation.
+ modification of the file-system is then performed using the basic \<^term>\<open>update\<close> operation.
\<^medskip>
- We see that @{term access} is just a wrapper for the basic @{term lookup}
+ We see that \<^term>\<open>access\<close> is just a wrapper for the basic \<^term>\<open>lookup\<close>
function, with additional checking of attributes. Subsequently we establish
- a few auxiliary facts that stem from the primitive @{term lookup} used
- within @{term access}.
+ a few auxiliary facts that stem from the primitive \<^term>\<open>lookup\<close> used
+ within \<^term>\<open>access\<close>.
\<close>
lemma access_empty_lookup: "access root path uid {} = lookup root path"
@@ -289,10 +286,10 @@
| Readdir uid "name set" path
text \<open>
- The @{typ uid} field of an operation corresponds to the \<^emph>\<open>effective user id\<close>
+ The \<^typ>\<open>uid\<close> field of an operation corresponds to the \<^emph>\<open>effective user id\<close>
of the underlying process, although our model never mentions processes
explicitly. The other parameters are provided as arguments by the caller;
- the @{term path} one is common to all kinds of system-calls.
+ the \<^term>\<open>path\<close> one is common to all kinds of system-calls.
\<close>
primrec uid_of :: "operation \<Rightarrow> uid"
@@ -320,7 +317,7 @@
text \<open>
\<^medskip>
Note that we have omitted explicit \<open>Open\<close> and \<open>Close\<close> operations, pretending
- that @{term Read} and @{term Write} would already take care of this behind
+ that \<^term>\<open>Read\<close> and \<^term>\<open>Write\<close> would already take care of this behind
the scenes. Thus we have basically treated actual sequences of real
system-calls \<open>open\<close>--\<open>read\<close>/\<open>write\<close>--\<open>close\<close> as atomic.
@@ -399,7 +396,7 @@
text \<open>
The transition system \<open>root \<midarrow>x\<rightarrow> root'\<close> defined above determines a unique
- result @{term root'} from given @{term root} and @{term x} (this is holds
+ result \<^term>\<open>root'\<close> from given \<^term>\<open>root\<close> and \<^term>\<open>x\<close> (this is holds
rather trivially, since there is even only one clause for each operation).
This uniqueness statement will simplify our subsequent development to some
extent, since we only have to reason about a partial function rather than a
@@ -532,10 +529,9 @@
text \<open>
\<^medskip>
- The following fact shows how an invariant @{term Q} of single transitions
- with property @{term P} may be transferred to iterated transitions. The
- proof is rather obvious by rule induction over the definition of @{term
- "root \<Midarrow>xs\<Rightarrow> root'"}.
+ The following fact shows how an invariant \<^term>\<open>Q\<close> of single transitions
+ with property \<^term>\<open>P\<close> may be transferred to iterated transitions. The
+ proof is rather obvious by rule induction over the definition of \<^term>\<open>root \<Midarrow>xs\<Rightarrow> root'\<close>.
\<close>
lemma transitions_invariant:
@@ -653,16 +649,16 @@
theorem "u \<in> users \<Longrightarrow> can_exec (init users)
[Mkdir u perms [u, name]]"
apply (rule can_exec_cons)
- \<comment> \<open>back-chain @{term can_exec} (of @{term [source] Cons})\<close>
+ \<comment> \<open>back-chain \<^term>\<open>can_exec\<close> (of @{term [source] Cons})\<close>
apply (rule mkdir)
- \<comment> \<open>back-chain @{term Mkdir}\<close>
+ \<comment> \<open>back-chain \<^term>\<open>Mkdir\<close>\<close>
apply (force simp add: eval)+
- \<comment> \<open>solve preconditions of @{term Mkdir}\<close>
+ \<comment> \<open>solve preconditions of \<^term>\<open>Mkdir\<close>\<close>
apply (simp add: eval)
\<comment> \<open>peek at resulting dir (optional)\<close>
txt \<open>@{subgoals [display]}\<close>
apply (rule can_exec_nil)
- \<comment> \<open>back-chain @{term can_exec} (of @{term [source] Nil})\<close>
+ \<comment> \<open>back-chain \<^term>\<open>can_exec\<close> (of @{term [source] Nil})\<close>
done
text \<open>
@@ -758,13 +754,13 @@
qed
text \<open>
- Here @{prop "P x"} refers to the restriction on file-system operations that
+ Here \<^prop>\<open>P x\<close> refers to the restriction on file-system operations that
are admitted after having reached the critical configuration; according to
- the problem specification this will become @{prop "uid_of x = user\<^sub>1"} later
- on. Furthermore, @{term y} refers to the operations we claim to be
- impossible to perform afterwards, we will take @{term Rmdir} later. Moreover
- @{term Q} is a suitable (auxiliary) invariant over the file-system; choosing
- @{term Q} adequately is very important to make the proof work (see
+ the problem specification this will become \<^prop>\<open>uid_of x = user\<^sub>1\<close> later
+ on. Furthermore, \<^term>\<open>y\<close> refers to the operations we claim to be
+ impossible to perform afterwards, we will take \<^term>\<open>Rmdir\<close> later. Moreover
+ \<^term>\<open>Q\<close> is a suitable (auxiliary) invariant over the file-system; choosing
+ \<^term>\<open>Q\<close> adequately is very important to make the proof work (see
\secref{sec:unix-inv-lemmas}).
\<close>
@@ -804,8 +800,8 @@
definition "bogus_path = [user\<^sub>1, name\<^sub>1, name\<^sub>2]"
text \<open>
- The @{term bogus} operations are the ones that lead into the uncouth
- situation; @{term bogus_path} is the key position within the file-system
+ The \<^term>\<open>bogus\<close> operations are the ones that lead into the uncouth
+ situation; \<^term>\<open>bogus_path\<close> is the key position within the file-system
where things go awry.
\<close>
@@ -814,9 +810,9 @@
text \<open>
The following invariant over the root file-system describes the bogus
- situation in an abstract manner: located at a certain @{term path} within
+ situation in an abstract manner: located at a certain \<^term>\<open>path\<close> within
the file-system is a non-empty directory that is neither owned nor writable
- by @{term user\<^sub>1}.
+ by \<^term>\<open>user\<^sub>1\<close>.
\<close>
definition
@@ -831,14 +827,13 @@
will now establish the three key lemmas required to yield the final result.
\<^enum> The invariant is sufficiently strong to entail the pathological case
- that @{term user\<^sub>1} is unable to remove the (owned) directory at @{term
- "[user\<^sub>1, name\<^sub>1]"}.
+ that \<^term>\<open>user\<^sub>1\<close> is unable to remove the (owned) directory at \<^term>\<open>[user\<^sub>1, name\<^sub>1]\<close>.
- \<^enum> The invariant does hold after having executed the @{term bogus} list of
+ \<^enum> The invariant does hold after having executed the \<^term>\<open>bogus\<close> list of
operations (starting with an initial file-system configuration).
\<^enum> The invariant is preserved by any file-system operation performed by
- @{term user\<^sub>1} alone, without any help by other users.
+ \<^term>\<open>user\<^sub>1\<close> alone, without any help by other users.
As the invariant appears both as assumptions and conclusions in the course
of proof, its formulation is rather critical for the whole development to
@@ -885,9 +880,8 @@
text \<open>
\<^medskip>
At last we are left with the main effort to show that the ``bogosity''
- invariant is preserved by any file-system operation performed by @{term
- user\<^sub>1} alone. Note that this holds for any @{term path} given, the
- particular @{term bogus_path} is not required here.
+ invariant is preserved by any file-system operation performed by \<^term>\<open>user\<^sub>1\<close> alone. Note that this holds for any \<^term>\<open>path\<close> given, the
+ particular \<^term>\<open>bogus_path\<close> is not required here.
\<close>
lemma preserve_invariant:
--- a/src/HOL/Word/Tools/smt_word.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Word/Tools/smt_word.ML Sat Jan 05 17:24:33 2019 +0100
@@ -23,16 +23,16 @@
local
val smtlibC = SMTLIB_Interface.smtlibC
- val wordT = @{typ "'a::len word"}
+ val wordT = \<^typ>\<open>'a::len word\<close>
fun index1 s i = "(_ " ^ s ^ " " ^ string_of_int i ^ ")"
fun index2 s i j = "(_ " ^ s ^ " " ^ string_of_int i ^ " " ^ string_of_int j ^ ")"
- fun word_typ (Type (@{type_name word}, [T])) =
+ fun word_typ (Type (\<^type_name>\<open>word\<close>, [T])) =
Option.map (rpair [] o index1 "BitVec") (try dest_binT T)
| word_typ _ = NONE
- fun word_num (Type (@{type_name word}, [T])) k =
+ fun word_num (Type (\<^type_name>\<open>word\<close>, [T])) k =
Option.map (index1 ("bv" ^ string_of_int k)) (try dest_binT T)
| word_num _ _ = NONE
@@ -49,7 +49,7 @@
let val (m, _) = Term.dest_Const t
in SMT_Builtin.add_builtin_fun smtlibC (Term.dest_Const t, K (f m n)) end
- val mk_nat = HOLogic.mk_number @{typ nat}
+ val mk_nat = HOLogic.mk_number \<^typ>\<open>nat\<close>
fun mk_shift c [t, u] = Const c $ t $ mk_nat (snd (HOLogic.dest_number u))
| mk_shift c ts = raise TERM ("bad arguments", Const c :: ts)
@@ -100,32 +100,32 @@
val setup_builtins =
SMT_Builtin.add_builtin_typ smtlibC (wordT, word_typ, word_num) #>
fold (add_word_fun if_fixed_all) [
- (@{term "uminus :: 'a::len word \<Rightarrow> _"}, "bvneg"),
- (@{term "plus :: 'a::len word \<Rightarrow> _"}, "bvadd"),
- (@{term "minus :: 'a::len word \<Rightarrow> _"}, "bvsub"),
- (@{term "times :: 'a::len word \<Rightarrow> _"}, "bvmul"),
- (@{term "bitNOT :: 'a::len word \<Rightarrow> _"}, "bvnot"),
- (@{term "bitAND :: 'a::len word \<Rightarrow> _"}, "bvand"),
- (@{term "bitOR :: 'a::len word \<Rightarrow> _"}, "bvor"),
- (@{term "bitXOR :: 'a::len word \<Rightarrow> _"}, "bvxor"),
- (@{term "word_cat :: 'a::len word \<Rightarrow> _"}, "concat") ] #>
+ (\<^term>\<open>uminus :: 'a::len word \<Rightarrow> _\<close>, "bvneg"),
+ (\<^term>\<open>plus :: 'a::len word \<Rightarrow> _\<close>, "bvadd"),
+ (\<^term>\<open>minus :: 'a::len word \<Rightarrow> _\<close>, "bvsub"),
+ (\<^term>\<open>times :: 'a::len word \<Rightarrow> _\<close>, "bvmul"),
+ (\<^term>\<open>bitNOT :: 'a::len word \<Rightarrow> _\<close>, "bvnot"),
+ (\<^term>\<open>bitAND :: 'a::len word \<Rightarrow> _\<close>, "bvand"),
+ (\<^term>\<open>bitOR :: 'a::len word \<Rightarrow> _\<close>, "bvor"),
+ (\<^term>\<open>bitXOR :: 'a::len word \<Rightarrow> _\<close>, "bvxor"),
+ (\<^term>\<open>word_cat :: 'a::len word \<Rightarrow> _\<close>, "concat") ] #>
fold (add_word_fun shift) [
- (@{term "shiftl :: 'a::len word \<Rightarrow> _ "}, "bvshl"),
- (@{term "shiftr :: 'a::len word \<Rightarrow> _"}, "bvlshr"),
- (@{term "sshiftr :: 'a::len word \<Rightarrow> _"}, "bvashr") ] #>
+ (\<^term>\<open>shiftl :: 'a::len word \<Rightarrow> _ \<close>, "bvshl"),
+ (\<^term>\<open>shiftr :: 'a::len word \<Rightarrow> _\<close>, "bvlshr"),
+ (\<^term>\<open>sshiftr :: 'a::len word \<Rightarrow> _\<close>, "bvashr") ] #>
add_word_fun extract
- (@{term "slice :: _ \<Rightarrow> 'a::len word \<Rightarrow> _"}, "extract") #>
+ (\<^term>\<open>slice :: _ \<Rightarrow> 'a::len word \<Rightarrow> _\<close>, "extract") #>
fold (add_word_fun extend) [
- (@{term "ucast :: 'a::len word \<Rightarrow> _"}, "zero_extend"),
- (@{term "scast :: 'a::len word \<Rightarrow> _"}, "sign_extend") ] #>
+ (\<^term>\<open>ucast :: 'a::len word \<Rightarrow> _\<close>, "zero_extend"),
+ (\<^term>\<open>scast :: 'a::len word \<Rightarrow> _\<close>, "sign_extend") ] #>
fold (add_word_fun rotate) [
- (@{term word_rotl}, "rotate_left"),
- (@{term word_rotr}, "rotate_right") ] #>
+ (\<^term>\<open>word_rotl\<close>, "rotate_left"),
+ (\<^term>\<open>word_rotr\<close>, "rotate_right") ] #>
fold (add_word_fun if_fixed_args) [
- (@{term "less :: 'a::len word \<Rightarrow> _"}, "bvult"),
- (@{term "less_eq :: 'a::len word \<Rightarrow> _"}, "bvule"),
- (@{term word_sless}, "bvslt"),
- (@{term word_sle}, "bvsle") ]
+ (\<^term>\<open>less :: 'a::len word \<Rightarrow> _\<close>, "bvult"),
+ (\<^term>\<open>less_eq :: 'a::len word \<Rightarrow> _\<close>, "bvule"),
+ (\<^term>\<open>word_sless\<close>, "bvslt"),
+ (\<^term>\<open>word_sle\<close>, "bvsle") ]
end
--- a/src/HOL/Word/Tools/word_lib.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Word/Tools/word_lib.ML Sat Jan 05 17:24:33 2019 +0100
@@ -17,31 +17,31 @@
fun dest_binT T =
(case T of
- Type (@{type_name "Numeral_Type.num0"}, _) => 0
- | Type (@{type_name "Numeral_Type.num1"}, _) => 1
- | Type (@{type_name "Numeral_Type.bit0"}, [T]) => 2 * dest_binT T
- | Type (@{type_name "Numeral_Type.bit1"}, [T]) => 1 + 2 * dest_binT T
+ Type (\<^type_name>\<open>Numeral_Type.num0\<close>, _) => 0
+ | Type (\<^type_name>\<open>Numeral_Type.num1\<close>, _) => 1
+ | Type (\<^type_name>\<open>Numeral_Type.bit0\<close>, [T]) => 2 * dest_binT T
+ | Type (\<^type_name>\<open>Numeral_Type.bit1\<close>, [T]) => 1 + 2 * dest_binT T
| _ => raise TYPE ("dest_binT", [T], []))
-fun is_wordT (Type (@{type_name word}, _)) = true
+fun is_wordT (Type (\<^type_name>\<open>word\<close>, _)) = true
| is_wordT _ = false
-fun dest_wordT (Type (@{type_name word}, [T])) = dest_binT T
+fun dest_wordT (Type (\<^type_name>\<open>word\<close>, [T])) = dest_binT T
| dest_wordT T = raise TYPE ("dest_wordT", [T], [])
fun mk_bitT i T =
if i = 0
- then Type (@{type_name "Numeral_Type.bit0"}, [T])
- else Type (@{type_name "Numeral_Type.bit1"}, [T])
+ then Type (\<^type_name>\<open>Numeral_Type.bit0\<close>, [T])
+ else Type (\<^type_name>\<open>Numeral_Type.bit1\<close>, [T])
fun mk_binT size =
- if size = 0 then @{typ "Numeral_Type.num0"}
- else if size = 1 then @{typ "Numeral_Type.num1"}
+ if size = 0 then \<^typ>\<open>Numeral_Type.num0\<close>
+ else if size = 1 then \<^typ>\<open>Numeral_Type.num1\<close>
else let val (q, r) = Integer.div_mod size 2 in mk_bitT r (mk_binT q) end
fun mk_wordT size =
- if size >= 0 then Type (@{type_name "word"}, [mk_binT size])
+ if size >= 0 then Type (\<^type_name>\<open>word\<close>, [mk_binT size])
else raise TYPE ("mk_wordT: " ^ string_of_int size, [], [])
end
--- a/src/HOL/Word/WordBitwise.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/Word/WordBitwise.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
text \<open>Breaking up word equalities into equalities on their
bit lists. Equalities are generated and manipulated in the
- reverse order to @{const to_bl}.\<close>
+ reverse order to \<^const>\<open>to_bl\<close>.\<close>
lemma word_eq_rbl_eq: "x = y \<longleftrightarrow> rev (to_bl x) = rev (to_bl y)"
by simp
@@ -368,7 +368,7 @@
apply auto
done
-text \<open>Lemmas for unpacking @{term "rev (to_bl n)"} for numerals n and also
+text \<open>Lemmas for unpacking \<^term>\<open>rev (to_bl n)\<close> for numerals n and also
for irreducible values and expressions.\<close>
lemma rev_bin_to_bl_simps:
@@ -413,22 +413,22 @@
structure Word_Bitwise_Tac =
struct
-val word_ss = simpset_of @{theory_context Word};
+val word_ss = simpset_of \<^theory_context>\<open>Word\<close>;
fun mk_nat_clist ns =
- fold_rev (Thm.mk_binop @{cterm "Cons :: nat \<Rightarrow> _"})
- ns @{cterm "[] :: nat list"};
+ fold_rev (Thm.mk_binop \<^cterm>\<open>Cons :: nat \<Rightarrow> _\<close>)
+ ns \<^cterm>\<open>[] :: nat list\<close>;
fun upt_conv ctxt ct =
case Thm.term_of ct of
- (@{const upt} $ n $ m) =>
+ (\<^const>\<open>upt\<close> $ n $ m) =>
let
val (i, j) = apply2 (snd o HOLogic.dest_number) (n, m);
- val ns = map (Numeral.mk_cnumber @{ctyp nat}) (i upto (j - 1))
+ val ns = map (Numeral.mk_cnumber \<^ctyp>\<open>nat\<close>) (i upto (j - 1))
|> mk_nat_clist;
val prop =
- Thm.mk_binop @{cterm "(=) :: nat list \<Rightarrow> _"} ct ns
- |> Thm.apply @{cterm Trueprop};
+ Thm.mk_binop \<^cterm>\<open>(=) :: nat list \<Rightarrow> _\<close> ct ns
+ |> Thm.apply \<^cterm>\<open>Trueprop\<close>;
in
try (fn () =>
Goal.prove_internal ctxt [] prop
@@ -438,18 +438,18 @@
| _ => NONE;
val expand_upt_simproc =
- Simplifier.make_simproc @{context} "expand_upt"
- {lhss = [@{term "upt x y"}], proc = K upt_conv};
+ Simplifier.make_simproc \<^context> "expand_upt"
+ {lhss = [\<^term>\<open>upt x y\<close>], proc = K upt_conv};
fun word_len_simproc_fn ctxt ct =
(case Thm.term_of ct of
- Const (@{const_name len_of}, _) $ t =>
+ Const (\<^const_name>\<open>len_of\<close>, _) $ t =>
(let
val T = fastype_of t |> dest_Type |> snd |> the_single
- val n = Numeral.mk_cnumber @{ctyp nat} (Word_Lib.dest_binT T);
+ val n = Numeral.mk_cnumber \<^ctyp>\<open>nat\<close> (Word_Lib.dest_binT T);
val prop =
- Thm.mk_binop @{cterm "(=) :: nat \<Rightarrow> _"} ct n
- |> Thm.apply @{cterm Trueprop};
+ Thm.mk_binop \<^cterm>\<open>(=) :: nat \<Rightarrow> _\<close> ct n
+ |> Thm.apply \<^cterm>\<open>Trueprop\<close>;
in
Goal.prove_internal ctxt [] prop (K (simp_tac (put_simpset word_ss ctxt) 1))
|> mk_meta_eq |> SOME
@@ -457,8 +457,8 @@
| _ => NONE);
val word_len_simproc =
- Simplifier.make_simproc @{context} "word_len"
- {lhss = [@{term "len_of x"}], proc = K word_len_simproc_fn};
+ Simplifier.make_simproc \<^context> "word_len"
+ {lhss = [\<^term>\<open>len_of x\<close>], proc = K word_len_simproc_fn};
(* convert 5 or nat 5 to Suc 4 when n_sucs = 1, Suc (Suc 4) when n_sucs = 2,
or just 5 (discarding nat) when n_sucs = 0 *)
@@ -467,10 +467,10 @@
let
val (f $ arg) = Thm.term_of ct;
val n =
- (case arg of @{term nat} $ n => n | n => n)
+ (case arg of \<^term>\<open>nat\<close> $ n => n | n => n)
|> HOLogic.dest_number |> snd;
val (i, j) = if n > n_sucs then (n_sucs, n - n_sucs) else (n, 0);
- val arg' = funpow i HOLogic.mk_Suc (HOLogic.mk_number @{typ nat} j);
+ val arg' = funpow i HOLogic.mk_Suc (HOLogic.mk_number \<^typ>\<open>nat\<close> j);
val _ = if arg = arg' then raise TERM ("", []) else ();
fun propfn g =
HOLogic.mk_eq (g arg, g arg')
@@ -485,30 +485,30 @@
end handle TERM _ => NONE;
fun nat_get_Suc_simproc n_sucs ts =
- Simplifier.make_simproc @{context} "nat_get_Suc"
- {lhss = map (fn t => t $ @{term "n :: nat"}) ts,
+ Simplifier.make_simproc \<^context> "nat_get_Suc"
+ {lhss = map (fn t => t $ \<^term>\<open>n :: nat\<close>) ts,
proc = K (nat_get_Suc_simproc_fn n_sucs)};
val no_split_ss =
- simpset_of (put_simpset HOL_ss @{context}
+ simpset_of (put_simpset HOL_ss \<^context>
|> Splitter.del_split @{thm if_split});
val expand_word_eq_sss =
- (simpset_of (put_simpset HOL_basic_ss @{context} addsimps
+ (simpset_of (put_simpset HOL_basic_ss \<^context> addsimps
@{thms word_eq_rbl_eq word_le_rbl word_less_rbl word_sle_rbl word_sless_rbl}),
map simpset_of [
- put_simpset no_split_ss @{context} addsimps
+ put_simpset no_split_ss \<^context> addsimps
@{thms rbl_word_plus rbl_word_and rbl_word_or rbl_word_not
rbl_word_neg bl_word_sub rbl_word_xor
rbl_word_cat rbl_word_slice rbl_word_scast
rbl_word_ucast rbl_shiftl rbl_shiftr rbl_sshiftr
rbl_word_if},
- put_simpset no_split_ss @{context} addsimps
+ put_simpset no_split_ss \<^context> addsimps
@{thms to_bl_numeral to_bl_neg_numeral to_bl_0 rbl_word_1},
- put_simpset no_split_ss @{context} addsimps
+ put_simpset no_split_ss \<^context> addsimps
@{thms rev_rev_ident rev_replicate rev_map to_bl_upt word_size}
addsimprocs [word_len_simproc],
- put_simpset no_split_ss @{context} addsimps
+ put_simpset no_split_ss \<^context> addsimps
@{thms list.simps split_conv replicate.simps list.map
zip_Cons_Cons zip_Nil drop_Suc_Cons drop_0 drop_Nil
foldr.simps map2_Cons map2_Nil takefill_Suc_Cons
@@ -518,11 +518,11 @@
rev_bl_order_simps}
addsimprocs [expand_upt_simproc,
nat_get_Suc_simproc 4
- [@{term replicate}, @{term "takefill x"},
- @{term drop}, @{term "bin_to_bl"},
- @{term "takefill_last x"},
- @{term "drop_nonempty x"}]],
- put_simpset no_split_ss @{context} addsimps @{thms xor3_simps carry_simps if_bool_simps}
+ [\<^term>\<open>replicate\<close>, \<^term>\<open>takefill x\<close>,
+ \<^term>\<open>drop\<close>, \<^term>\<open>bin_to_bl\<close>,
+ \<^term>\<open>takefill_last x\<close>,
+ \<^term>\<open>drop_nonempty x\<close>]],
+ put_simpset no_split_ss \<^context> addsimps @{thms xor3_simps carry_simps if_bool_simps}
])
fun tac ctxt =
--- a/src/HOL/ex/Adhoc_Overloading_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Adhoc_Overloading_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -82,7 +82,7 @@
subsection \<open>Adhoc Overloading inside Locales\<close>
text \<open>As example we use permutations that are parametrized over an
-atom type @{typ "'a"}.\<close>
+atom type \<^typ>\<open>'a\<close>.\<close>
definition perms :: "('a \<Rightarrow> 'a) set" where
"perms = {f. bij f \<and> finite {x. f x \<noteq> x}}"
@@ -178,7 +178,7 @@
consts PERMUTE :: "'a perm \<Rightarrow> 'b \<Rightarrow> 'b" (infixr "\<bullet>" 75)
-text \<open>Then we add a locale for types @{typ 'b} that support
+text \<open>Then we add a locale for types \<^typ>\<open>'b\<close> that support
appliciation of permutations.\<close>
locale permute =
fixes permute :: "'a perm \<Rightarrow> 'b \<Rightarrow> 'b"
--- a/src/HOL/ex/Antiquote.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Antiquote.thy Sat Jan 05 17:24:33 2019 +0100
@@ -21,11 +21,11 @@
parse_translation
\<open>[Syntax_Trans.quote_antiquote_tr
- @{syntax_const "_Expr"} @{const_syntax var} @{const_syntax Expr}]\<close>
+ \<^syntax_const>\<open>_Expr\<close> \<^const_syntax>\<open>var\<close> \<^const_syntax>\<open>Expr\<close>]\<close>
print_translation
\<open>[Syntax_Trans.quote_antiquote_tr'
- @{syntax_const "_Expr"} @{const_syntax var} @{const_syntax Expr}]\<close>
+ \<^syntax_const>\<open>_Expr\<close> \<^const_syntax>\<open>var\<close> \<^const_syntax>\<open>Expr\<close>]\<close>
term "EXPR (a + b + c)"
term "EXPR (a + b + c + VAR x + VAR y + 1)"
--- a/src/HOL/ex/Arith_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Arith_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -13,25 +13,24 @@
distribution. This file merely contains some additional tests and special
corner cases. Some rather technical remarks:
- @{ML Lin_Arith.simple_tac} is a very basic version of the tactic. It performs no
+ \<^ML>\<open>Lin_Arith.simple_tac\<close> is a very basic version of the tactic. It performs no
meta-to-object-logic conversion, and only some splitting of operators.
- @{ML Lin_Arith.tac} performs meta-to-object-logic conversion, full
+ \<^ML>\<open>Lin_Arith.tac\<close> performs meta-to-object-logic conversion, full
splitting of operators, and NNF normalization of the goal. The \<open>arith\<close>
method combines them both, and tries other methods (e.g.~\<open>presburger\<close>)
as well. This is the one that you should use in your proofs!
- An \<open>arith\<close>-based simproc is available as well (see @{ML
- Lin_Arith.simproc}), which---for performance
- reasons---however does even less splitting than @{ML Lin_Arith.simple_tac}
+ An \<open>arith\<close>-based simproc is available as well (see \<^ML>\<open>Lin_Arith.simproc\<close>), which---for performance
+ reasons---however does even less splitting than \<^ML>\<open>Lin_Arith.simple_tac\<close>
at the moment (namely inequalities only). (On the other hand, it
- does take apart conjunctions, which @{ML Lin_Arith.simple_tac} currently
+ does take apart conjunctions, which \<^ML>\<open>Lin_Arith.simple_tac\<close> currently
does not do.)
\<close>
-subsection \<open>Splitting of Operators: @{term max}, @{term min}, @{term abs},
- @{term minus}, @{term nat}, @{term modulo},
- @{term divide}\<close>
+subsection \<open>Splitting of Operators: \<^term>\<open>max\<close>, \<^term>\<open>min\<close>, \<^term>\<open>abs\<close>,
+ \<^term>\<open>minus\<close>, \<^term>\<open>nat\<close>, \<^term>\<open>modulo\<close>,
+ \<^term>\<open>divide\<close>\<close>
lemma "(i::nat) <= max i j"
by linarith
--- a/src/HOL/ex/Ballot.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Ballot.thy Sat Jan 05 17:24:33 2019 +0100
@@ -140,7 +140,7 @@
lemma all_countings: "all_countings a b = (a + b) choose a"
unfolding all_countings_set by (simp add: n_subsets)
-subsection \<open>Facts About @{term valid_countings}\<close>
+subsection \<open>Facts About \<^term>\<open>valid_countings\<close>\<close>
subsubsection \<open>Non-Recursive Cases\<close>
@@ -255,7 +255,7 @@
"valid_countings a b = (if a + b = 0 then 1 else ((a - b) * ((a + b) choose a)) div (a + b))"
by (simp add: valid_countings[symmetric] valid_countings_a_0)
-subsection \<open>Relation Between @{term valid_countings} and @{term all_countings}\<close>
+subsection \<open>Relation Between \<^term>\<open>valid_countings\<close> and \<^term>\<open>all_countings\<close>\<close>
lemma main_nat: "(a + b) * valid_countings a b = (a - b) * all_countings a b"
unfolding valid_countings all_countings ..
--- a/src/HOL/ex/CTL.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/CTL.thy Sat Jan 05 17:24:33 2019 +0100
@@ -36,7 +36,7 @@
\<^smallskip>
The CTL path operators are more interesting; they are based on an arbitrary,
but fixed model \<open>\<M>\<close>, which is simply a transition relation over states
- @{typ 'a}.
+ \<^typ>\<open>'a\<close>.
\<close>
axiomatization \<M> :: "('a \<times> 'a) set"
@@ -115,8 +115,8 @@
by (simp add: lfp_gfp)
text \<open>
- In order to give dual fixed point representations of @{term "\<^bold>A\<^bold>F p"} and
- @{term "\<^bold>A\<^bold>G p"}:
+ In order to give dual fixed point representations of \<^term>\<open>\<^bold>A\<^bold>F p\<close> and
+ \<^term>\<open>\<^bold>A\<^bold>G p\<close>:
\<close>
lemma AF_lfp: "\<^bold>A\<^bold>F p = lfp (\<lambda>s. p \<union> \<^bold>A\<^bold>X s)"
@@ -144,10 +144,9 @@
qed
text \<open>
- From the greatest fixed point definition of @{term "\<^bold>A\<^bold>G p"}, we derive as
- a consequence of the Knaster-Tarski theorem on the one hand that @{term
- "\<^bold>A\<^bold>G p"} is a fixed point of the monotonic function
- @{term "\<lambda>s. p \<inter> \<^bold>A\<^bold>X s"}.
+ From the greatest fixed point definition of \<^term>\<open>\<^bold>A\<^bold>G p\<close>, we derive as
+ a consequence of the Knaster-Tarski theorem on the one hand that \<^term>\<open>\<^bold>A\<^bold>G p\<close> is a fixed point of the monotonic function
+ \<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close>.
\<close>
lemma AG_fp: "\<^bold>A\<^bold>G p = p \<inter> \<^bold>A\<^bold>X \<^bold>A\<^bold>G p"
@@ -175,10 +174,9 @@
text \<open>
On the other hand, we have from the Knaster-Tarski fixed point theorem that
- any other post-fixed point of @{term "\<lambda>s. p \<inter> \<^bold>A\<^bold>X s"} is smaller than
- @{term "\<^bold>A\<^bold>G p"}. A post-fixed point is a set of states \<open>q\<close> such that @{term
- "q \<subseteq> p \<inter> \<^bold>A\<^bold>X q"}. This leads to the following co-induction principle for
- @{term "\<^bold>A\<^bold>G p"}.
+ any other post-fixed point of \<^term>\<open>\<lambda>s. p \<inter> \<^bold>A\<^bold>X s\<close> is smaller than
+ \<^term>\<open>\<^bold>A\<^bold>G p\<close>. A post-fixed point is a set of states \<open>q\<close> such that \<^term>\<open>q \<subseteq> p \<inter> \<^bold>A\<^bold>X q\<close>. This leads to the following co-induction principle for
+ \<^term>\<open>\<^bold>A\<^bold>G p\<close>.
\<close>
lemma AG_I: "q \<subseteq> p \<inter> \<^bold>A\<^bold>X q \<Longrightarrow> q \<subseteq> \<^bold>A\<^bold>G p"
@@ -200,7 +198,7 @@
by (simp only: AG_gfp, rule gfp_mono) auto
text \<open>
- The formula @{term "AG p"} implies @{term "AX p"} (we use substitution of
+ The formula \<^term>\<open>AG p\<close> implies \<^term>\<open>AX p\<close> (we use substitution of
\<open>\<subseteq>\<close> with monotonicity).
\<close>
@@ -233,7 +231,7 @@
\<^smallskip>
We now give an alternative characterization of the \<open>\<^bold>A\<^bold>G\<close> operator, which
describes the \<open>\<^bold>A\<^bold>G\<close> operator in an ``operational'' way by tree induction:
- In a state holds @{term "AG p"} iff in that state holds \<open>p\<close>, and in all
+ In a state holds \<^term>\<open>AG p\<close> iff in that state holds \<open>p\<close>, and in all
reachable states \<open>s\<close> follows from the fact that \<open>p\<close> holds in \<open>s\<close>, that \<open>p\<close>
also holds in all successor states of \<open>s\<close>. We use the co-induction principle
@{thm [source] AG_I} to establish this in a purely algebraic manner.
--- a/src/HOL/ex/Cartouche_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Cartouche_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
[OF \<open>x = y\<close>]}.\<close>
have \<open>x = y\<close>
- by (tactic \<open>resolve_tac @{context} @{thms \<open>x = y\<close>} 1\<close>)
+ by (tactic \<open>resolve_tac \<^context> @{thms \<open>x = y\<close>} 1\<close>)
\<comment> \<open>more cartouches involving ML\<close>
end
@@ -42,7 +42,7 @@
subsection \<open>Outer syntax: cartouche within command syntax\<close>
ML \<open>
- Outer_Syntax.command @{command_keyword cartouche} ""
+ Outer_Syntax.command \<^command_keyword>\<open>cartouche\<close> ""
(Parse.cartouche >> (fn s =>
Toplevel.keep (fn _ => writeln s)))
\<close>
@@ -61,17 +61,17 @@
if Symbol.is_ascii s then ord s
else if s = "\<newline>" then 10
else error ("String literal contains illegal symbol: " ^ quote s ^ Position.here pos);
- in list_comb (Syntax.const @{const_syntax Char}, String_Syntax.mk_bits_syntax 8 c) end;
+ in list_comb (Syntax.const \<^const_syntax>\<open>Char\<close>, String_Syntax.mk_bits_syntax 8 c) end;
- fun mk_string [] = Const (@{const_syntax Nil}, @{typ string})
+ fun mk_string [] = Const (\<^const_syntax>\<open>Nil\<close>, \<^typ>\<open>string\<close>)
| mk_string (s :: ss) =
- Syntax.const @{const_syntax Cons} $ mk_char s $ mk_string ss;
+ Syntax.const \<^const_syntax>\<open>Cons\<close> $ mk_char s $ mk_string ss;
in
fun string_tr content args =
let fun err () = raise TERM ("string_tr", args) in
(case args of
- [(c as Const (@{syntax_const "_constrain"}, _)) $ Free (s, _) $ p] =>
+ [(c as Const (\<^syntax_const>\<open>_constrain\<close>, _)) $ Free (s, _) $ p] =>
(case Term_Position.decode_position p of
SOME (pos, _) => c $ mk_string (content (s, pos)) $ p
| NONE => err ())
@@ -83,7 +83,7 @@
syntax "_cartouche_string" :: \<open>cartouche_position \<Rightarrow> string\<close> ("_")
parse_translation \<open>
- [(@{syntax_const "_cartouche_string"},
+ [(\<^syntax_const>\<open>_cartouche_string\<close>,
K (string_tr (Symbol_Pos.cartouche_content o Symbol_Pos.explode)))]
\<close>
@@ -100,7 +100,7 @@
syntax "_string_string" :: \<open>string_position \<Rightarrow> string\<close> ("_")
parse_translation \<open>
- [(@{syntax_const "_string_string"}, K (string_tr Lexicon.explode_string))]
+ [(\<^syntax_const>\<open>_string_string\<close>, K (string_tr Lexicon.explode_string))]
\<close>
term \<open>""\<close>
@@ -113,16 +113,15 @@
subsubsection \<open>Further nesting: antiquotations\<close>
ML \<open>
- @{term \<open>""\<close>};
- @{term \<open>"abc"\<close>};
- @{term \<open>"abc" @ "xyz"\<close>};
- @{term \<open>"\<newline>"\<close>};
- @{term \<open>"\001\010\100"\<close>};
+ \<^term>\<open>""\<close>;
+ \<^term>\<open>"abc"\<close>;
+ \<^term>\<open>"abc" @ "xyz"\<close>;
+ \<^term>\<open>"\<newline>"\<close>;
+ \<^term>\<open>"\001\010\100"\<close>;
\<close>
text \<open>
- @{ML
- \<open>
+ \<^ML>\<open>
(
@{term \<open>""\<close>};
@{term \<open>"abc"\<close>};
@@ -131,7 +130,6 @@
@{term \<open>"\001\010\100"\<close>}
)
\<close>
- }
\<close>
@@ -139,15 +137,14 @@
ML \<open>
Outer_Syntax.command
- @{command_keyword text_cartouche} ""
+ \<^command_keyword>\<open>text_cartouche\<close> ""
(Parse.opt_target -- Parse.input Parse.cartouche
>> Pure_Syn.document_command {markdown = true})
\<close>
text_cartouche
\<open>
- @{ML
- \<open>
+ \<^ML>\<open>
(
@{term \<open>""\<close>};
@{term \<open>"abc"\<close>};
@@ -156,7 +153,6 @@
@{term \<open>"\001\010\100"\<close>}
)
\<close>
- }
\<close>
@@ -192,17 +188,17 @@
\<close>
lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close>
- apply (ml_tactic \<open>resolve_tac @{context} @{thms impI} 1\<close>)
- apply (ml_tactic \<open>eresolve_tac @{context} @{thms conjE} 1\<close>)
- apply (ml_tactic \<open>resolve_tac @{context} @{thms conjI} 1\<close>)
- apply (ml_tactic \<open>ALLGOALS (assume_tac @{context})\<close>)
+ apply (ml_tactic \<open>resolve_tac \<^context> @{thms impI} 1\<close>)
+ apply (ml_tactic \<open>eresolve_tac \<^context> @{thms conjE} 1\<close>)
+ apply (ml_tactic \<open>resolve_tac \<^context> @{thms conjI} 1\<close>)
+ apply (ml_tactic \<open>ALLGOALS (assume_tac \<^context>)\<close>)
done
lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close> by (ml_tactic \<open>blast_tac ctxt 1\<close>)
ML \<open>@{lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close> by (ml_tactic \<open>blast_tac ctxt 1\<close>)}\<close>
-text \<open>@{ML \<open>@{lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close> by (ml_tactic \<open>blast_tac ctxt 1\<close>)}\<close>}\<close>
+text \<open>\<^ML>\<open>@{lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close> by (ml_tactic \<open>blast_tac ctxt 1\<close>)}\<close>\<close>
subsubsection \<open>Implicit version: method with special name "cartouche" (dynamic!)\<close>
@@ -213,17 +209,17 @@
\<close>
lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close>
- apply \<open>resolve_tac @{context} @{thms impI} 1\<close>
- apply \<open>eresolve_tac @{context} @{thms conjE} 1\<close>
- apply \<open>resolve_tac @{context} @{thms conjI} 1\<close>
- apply \<open>ALLGOALS (assume_tac @{context})\<close>
+ apply \<open>resolve_tac \<^context> @{thms impI} 1\<close>
+ apply \<open>eresolve_tac \<^context> @{thms conjE} 1\<close>
+ apply \<open>resolve_tac \<^context> @{thms conjI} 1\<close>
+ apply \<open>ALLGOALS (assume_tac \<^context>)\<close>
done
lemma \<open>A \<and> B \<longrightarrow> B \<and> A\<close>
- by (\<open>resolve_tac @{context} @{thms impI} 1\<close>,
- \<open>eresolve_tac @{context} @{thms conjE} 1\<close>,
- \<open>resolve_tac @{context} @{thms conjI} 1\<close>,
- \<open>assume_tac @{context} 1\<close>+)
+ by (\<open>resolve_tac \<^context> @{thms impI} 1\<close>,
+ \<open>eresolve_tac \<^context> @{thms conjE} 1\<close>,
+ \<open>resolve_tac \<^context> @{thms conjI} 1\<close>,
+ \<open>assume_tac \<^context> 1\<close>+)
subsection \<open>ML syntax\<close>
--- a/src/HOL/ex/Classical.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Classical.thy Sat Jan 05 17:24:33 2019 +0100
@@ -428,7 +428,7 @@
lemma "(\<forall>x y z. R(x,y) \<and> R(y,z) \<longrightarrow> R(x,z)) \<and>
(\<forall>x. \<exists>y. R(x,y)) \<longrightarrow>
\<not> (\<forall>x. P x = (\<forall>y. R(x,y) \<longrightarrow> \<not> P y))"
-by (tactic\<open>Meson.safe_best_meson_tac @{context} 1\<close>)
+by (tactic\<open>Meson.safe_best_meson_tac \<^context> 1\<close>)
\<comment> \<open>In contrast, \<open>meson\<close> is SLOW: 7.6s on griffon\<close>
@@ -722,7 +722,7 @@
(\<forall>x y. bird x \<and> snail y \<longrightarrow> \<not>eats x y) \<and>
(\<forall>x. (caterpillar x \<or> snail x) \<longrightarrow> (\<exists>y. plant y \<and> eats x y))
\<longrightarrow> (\<exists>x y. animal x \<and> animal y \<and> (\<exists>z. grain z \<and> eats y z \<and> eats x y))"
-by (tactic\<open>Meson.safe_best_meson_tac @{context} 1\<close>)
+by (tactic\<open>Meson.safe_best_meson_tac \<^context> 1\<close>)
\<comment> \<open>Nearly twice as fast as \<open>meson\<close>,
which performs iterative deepening rather than best-first search\<close>
--- a/src/HOL/ex/Code_Lazy_Demo.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Code_Lazy_Demo.thy Sat Jan 05 17:24:33 2019 +0100
@@ -6,7 +6,7 @@
"HOL-Library.RBT_Impl"
begin
-text \<open>This theory demonstrates the use of the @{theory "HOL-Library.Code_Lazy"} theory.\<close>
+text \<open>This theory demonstrates the use of the \<^theory>\<open>HOL-Library.Code_Lazy\<close> theory.\<close>
section \<open>Streams\<close>
@@ -93,7 +93,7 @@
(case rbt_iterator rbt of \<^bold>\<lbrakk>\<^bold>\<rbrakk> \<Rightarrow> None
| kv ### _ \<Rightarrow> Some kv)"
-value "find_min tree" \<comment> \<open>Observe that @{const rbt_iterator} is evaluated only for going down
+value "find_min tree" \<comment> \<open>Observe that \<^const>\<open>rbt_iterator\<close> is evaluated only for going down
to the first leaf, not for the whole tree (as seen by the ticks).\<close>
text \<open>With strict lists, the whole tree is converted into a list.\<close>
@@ -132,7 +132,7 @@
value [code] "mk_tree 10"
value [code] "let t = mk_tree 10; _ = subtree [True, True, False, False] t in t"
- \<comment> \<open>Since @{const mk_tree} shares the two subtrees of a node thanks to the let binding,
+ \<comment> \<open>Since \<^const>\<open>mk_tree\<close> shares the two subtrees of a node thanks to the let binding,
digging into one subtree spreads to the whole tree.\<close>
value [code] "let t = mk_tree 3; _ = subtree [True, True, False, False] t in t"
@@ -142,7 +142,7 @@
by simp
value [code] "mk_tree 10"
- \<comment> \<open>The recursive call to @{const mk_tree} is not guarded by a lazy constructor,
+ \<comment> \<open>The recursive call to \<^const>\<open>mk_tree\<close> is not guarded by a lazy constructor,
so all the suspensions are built up immediately.\<close>
lemma mk_tree_Suc [code]: "mk_tree (Suc n) = mk_tree n \<triangle> mk_tree n"
--- a/src/HOL/ex/Code_Timing.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Code_Timing.thy Sat Jan 05 17:24:33 2019 +0100
@@ -20,8 +20,8 @@
ML \<open>
local
- val ctxt = @{context};
- val consts = [@{const_name required_symbols}];
+ val ctxt = \<^context>;
+ val consts = [\<^const_name>\<open>required_symbols\<close>];
in
val simp = Code_Simp.static_conv
{ ctxt = ctxt, consts = consts, simpset = NONE };
@@ -31,23 +31,23 @@
\<close>
ML_val \<open>
- simp @{context} @{cterm "primes_upto 100"}
+ simp \<^context> \<^cterm>\<open>primes_upto 100\<close>
\<close>
ML_val \<open>
- simp @{context} @{cterm "primes_upto 200"}
+ simp \<^context> \<^cterm>\<open>primes_upto 200\<close>
\<close>
ML_val \<open>
- nbe @{context} @{cterm "primes_upto 200"}
+ nbe \<^context> \<^cterm>\<open>primes_upto 200\<close>
\<close>
ML_val \<open>
- nbe @{context} @{cterm "primes_upto 400"}
+ nbe \<^context> \<^cterm>\<open>primes_upto 400\<close>
\<close>
ML_val \<open>
- nbe @{context} @{cterm "primes_upto 600"}
+ nbe \<^context> \<^cterm>\<open>primes_upto 600\<close>
\<close>
end
--- a/src/HOL/ex/Commands.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Commands.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,7 +15,7 @@
subsection \<open>Diagnostic command: no state change\<close>
ML \<open>
- Outer_Syntax.command @{command_keyword print_test} "print term test"
+ Outer_Syntax.command \<^command_keyword>\<open>print_test\<close> "print term test"
(Parse.term >> (fn s => Toplevel.keep (fn st =>
let
val ctxt = Toplevel.context_of st;
@@ -31,10 +31,10 @@
subsection \<open>Old-style global theory declaration\<close>
ML \<open>
- Outer_Syntax.command @{command_keyword global_test} "test constant declaration"
+ Outer_Syntax.command \<^command_keyword>\<open>global_test\<close> "test constant declaration"
(Parse.binding >> (fn b => Toplevel.theory (fn thy =>
let
- val thy' = Sign.add_consts [(b, @{typ 'a}, NoSyn)] thy;
+ val thy' = Sign.add_consts [(b, \<^typ>\<open>'a\<close>, NoSyn)] thy;
in thy' end)));
\<close>
@@ -46,8 +46,8 @@
subsection \<open>Local theory specification\<close>
ML \<open>
- Outer_Syntax.local_theory @{command_keyword local_test} "test local definition"
- (Parse.binding -- (@{keyword "="} |-- Parse.term) >> (fn (b, s) => fn lthy =>
+ Outer_Syntax.local_theory \<^command_keyword>\<open>local_test\<close> "test local definition"
+ (Parse.binding -- (\<^keyword>\<open>=\<close> |-- Parse.term) >> (fn (b, s) => fn lthy =>
let
val t = Syntax.read_term lthy s;
val (def, lthy') = Local_Theory.define ((b, NoSyn), ((Thm.def_binding b, []), t)) lthy;
--- a/src/HOL/ex/Computations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Computations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -54,22 +54,22 @@
declare [[ML_source_trace = false]]
ML_val \<open>
- comp_nat @{context} @{term "fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0"}
- |> Syntax.string_of_term @{context}
+ comp_nat \<^context> \<^term>\<open>fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0\<close>
+ |> Syntax.string_of_term \<^context>
|> writeln
\<close>
ML_val \<open>
- comp_bool @{context} @{term "fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0 < fib (Suc (Suc 0))"}
+ comp_bool \<^context> \<^term>\<open>fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0 < fib (Suc (Suc 0))\<close>
\<close>
ML_val \<open>
- comp_check @{context} @{cprop "fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0 > fib (Suc (Suc 0))"}
+ comp_check \<^context> \<^cprop>\<open>fib (Suc (Suc (Suc 0)) * Suc (Suc (Suc 0))) + Suc 0 > fib (Suc (Suc 0))\<close>
\<close>
ML_val \<open>
- comp_numeral @{context} @{term "Suc 42 + 7"}
- |> Syntax.string_of_term @{context}
+ comp_numeral \<^context> \<^term>\<open>Suc 42 + 7\<close>
+ |> Syntax.string_of_term \<^context>
|> writeln
\<close>
--- a/src/HOL/ex/Datatype_Record_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Datatype_Record_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -22,7 +22,7 @@
lemma "\<lparr> a = 1, b = 2 \<rparr> = X 1 2" ..
-local_setup \<open>Datatype_Records.mk_update_defs @{type_name x}\<close>
+local_setup \<open>Datatype_Records.mk_update_defs \<^type_name>\<open>x\<close>\<close>
lemma "(X 1 2) \<lparr> b := 3 \<rparr> = X 1 3"
by (simp add: datatype_record_update)
--- a/src/HOL/ex/Dedekind_Real.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Dedekind_Real.thy Sat Jan 05 17:24:33 2019 +0100
@@ -170,7 +170,7 @@
thus ?thesis .
qed
-text \<open>preal lemmas instantiated to @{term "Rep_preal X"}\<close>
+text \<open>preal lemmas instantiated to \<^term>\<open>Rep_preal X\<close>\<close>
lemma mem_Rep_preal_Ex: "\<exists>x. x \<in> Rep_preal X"
thm preal_Ex_mem
@@ -356,7 +356,7 @@
text\<open>Multiplication of two positive reals gives a positive real.\<close>
-text\<open>Lemmas for proving positive reals multiplication set in @{typ preal}\<close>
+text\<open>Lemmas for proving positive reals multiplication set in \<^typ>\<open>preal\<close>\<close>
text\<open>Part 1 of Dedekind sections definition\<close>
lemma mult_set_not_empty:
@@ -921,8 +921,8 @@
subsection\<open>Subtraction for Positive Reals\<close>
-text\<open>Gleason prop. 9-3.5(iv), page 123: proving @{prop "A < B ==> \<exists>D. A + D =
-B"}. We define the claimed @{term D} and show that it is a positive real\<close>
+text\<open>Gleason prop. 9-3.5(iv), page 123: proving \<^prop>\<open>A < B ==> \<exists>D. A + D =
+B\<close>. We define the claimed \<^term>\<open>D\<close> and show that it is a positive real\<close>
text\<open>Part 1 of Dedekind sections definition\<close>
lemma diff_set_not_empty:
@@ -986,7 +986,7 @@
done
-text\<open>proving that @{term "R + D \<le> S"}\<close>
+text\<open>proving that \<^term>\<open>R + D \<le> S\<close>\<close>
lemma less_add_left_lemma:
assumes Rless: "R < S"
@@ -1011,7 +1011,7 @@
apply (blast intro: less_add_left_lemma)
done
-subsection\<open>proving that @{term "S \<le> R + D"} --- trickier\<close>
+subsection\<open>proving that \<^term>\<open>S \<le> R + D\<close> --- trickier\<close>
lemma lemma_sum_mem_Rep_preal_ex:
"x \<in> Rep_preal S ==> \<exists>e. 0 < e & x + e \<in> Rep_preal S"
@@ -1105,7 +1105,7 @@
qed
-subsection\<open>Completeness of type @{typ preal}\<close>
+subsection\<open>Completeness of type \<^typ>\<open>preal\<close>\<close>
text\<open>Prove that supremum is a cut\<close>
@@ -1265,8 +1265,8 @@
apply (blast dest: preal_trans_lemma)
done
-text\<open>Reduces equality of equivalence classes to the @{term realrel} relation:
- @{term "(realrel `` {x} = realrel `` {y}) = ((x,y) \<in> realrel)"}\<close>
+text\<open>Reduces equality of equivalence classes to the \<^term>\<open>realrel\<close> relation:
+ \<^term>\<open>(realrel `` {x} = realrel `` {y}) = ((x,y) \<in> realrel)\<close>\<close>
lemmas equiv_realrel_iff =
eq_equiv_class_iff [OF equiv_realrel UNIV_I UNIV_I]
@@ -1583,7 +1583,7 @@
by (simp only: real_sgn_def)
qed
-text\<open>The function @{term real_of_preal} requires many proofs, but it seems
+text\<open>The function \<^term>\<open>real_of_preal\<close> requires many proofs, but it seems
to be essential for proving completeness of the reals from that of the
positive reals.\<close>
--- a/src/HOL/ex/Erdoes_Szekeres.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Erdoes_Szekeres.thy Sat Jan 05 17:24:33 2019 +0100
@@ -8,7 +8,7 @@
imports Main
begin
-subsection \<open>Addition to @{theory HOL.Lattices_Big} Theory\<close>
+subsection \<open>Addition to \<^theory>\<open>HOL.Lattices_Big\<close> Theory\<close>
lemma Max_gr:
assumes "finite A"
@@ -16,7 +16,7 @@
shows "x < Max A"
using assms Max_ge less_le_trans by blast
-subsection \<open>Additions to @{theory HOL.Finite_Set} Theory\<close>
+subsection \<open>Additions to \<^theory>\<open>HOL.Finite_Set\<close> Theory\<close>
lemma obtain_subset_with_card_n:
assumes "n \<le> card S"
--- a/src/HOL/ex/Execute_Choice.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Execute_Choice.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,7 +14,7 @@
"valuesum m = (\<Sum>k \<in> Mapping.keys m. the (Mapping.lookup m k))"
text \<open>
- Not that instead of defining @{term valuesum} with choice, we define it
+ Not that instead of defining \<^term>\<open>valuesum\<close> with choice, we define it
directly and derive a description involving choice afterwards:
\<close>
@@ -58,7 +58,7 @@
text \<open>
Given \<open>valuesum_rec\<close> as initial description, we stepwise refine it to something executable;
- first, we formally insert the constructor @{term Mapping} and split the one equation into two,
+ first, we formally insert the constructor \<^term>\<open>Mapping\<close> and split the one equation into two,
where the second one provides the necessary context:
\<close>
@@ -72,7 +72,7 @@
As a side effect the precondition disappears (but note this has nothing to do with choice!).
The first equation deals with the uncritical empty case and can already be used for code generation.
- Using \<open>valuesum_choice\<close>, we are able to prove an executable version of @{term valuesum}:
+ Using \<open>valuesum_choice\<close>, we are able to prove an executable version of \<^term>\<open>valuesum\<close>:
\<close>
lemma valuesum_rec_exec [code]:
--- a/src/HOL/ex/Function_Growth.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Function_Growth.thy Sat Jan 05 17:24:33 2019 +0100
@@ -80,7 +80,7 @@
text \<open>
This yields \<open>f \<cong> g \<longleftrightarrow> f \<in> \<Theta>(g)\<close>. Concerning \<open>c\<^sub>1\<close> and \<open>c\<^sub>2\<close>
- restricted to @{typ nat}, see note above on \<open>(\<lesssim>)\<close>.
+ restricted to \<^typ>\<open>nat\<close>, see note above on \<open>(\<lesssim>)\<close>.
\<close>
lemma equiv_funI:
@@ -162,7 +162,7 @@
it occurs on the \emph{right} hand side of the \<open>(>)\<close>. The reason
is that the situation is dual to the definition of \<open>O\<close>: the definition
works since \<open>c\<close> may become arbitrary small. Since this is not possible
- within @{term \<nat>}, we push the coefficient to the left hand side instead such
+ within \<^term>\<open>\<nat>\<close>, we push the coefficient to the left hand side instead such
that it may become arbitrary big instead.
\<close>
@@ -291,7 +291,7 @@
has to be added yet.
\<close>
-text \<open>@{prop "(\<lambda>n. f n + k) \<cong> f"}\<close>
+text \<open>\<^prop>\<open>(\<lambda>n. f n + k) \<cong> f\<close>\<close>
lemma equiv_fun_mono_const:
assumes "mono f" and "\<exists>n. f n > 0"
@@ -384,7 +384,7 @@
(*lemma
"Discrete.log \<prec> Discrete.sqrt"
proof (rule less_fun_strongI)*)
-text \<open>@{prop "Discrete.log \<prec> Discrete.sqrt"}\<close>
+text \<open>\<^prop>\<open>Discrete.log \<prec> Discrete.sqrt\<close>\<close>
lemma
"Discrete.sqrt \<prec> id"
@@ -418,6 +418,6 @@
(*lemma
"(\<lambda>n. n ^ k) \<prec> (\<lambda>n. 2 ^ n)"
proof (rule less_fun_strongI)*)
-text \<open>@{prop "(\<lambda>n. n ^ k) \<prec> (\<lambda>n. 2 ^ n)"}\<close>
+text \<open>\<^prop>\<open>(\<lambda>n. n ^ k) \<prec> (\<lambda>n. 2 ^ n)\<close>\<close>
end
--- a/src/HOL/ex/Groebner_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Groebner_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -14,21 +14,21 @@
fixes x :: int
shows "x ^ 3 = x ^ 3"
apply (tactic \<open>ALLGOALS (CONVERSION
- (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv @{context}))))\<close>)
+ (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv \<^context>))))\<close>)
by (rule refl)
lemma
fixes x :: int
shows "(x - (-2))^5 = x ^ 5 + (10 * x ^ 4 + (40 * x ^ 3 + (80 * x\<^sup>2 + (80 * x + 32))))"
apply (tactic \<open>ALLGOALS (CONVERSION
- (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv @{context}))))\<close>)
+ (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv \<^context>))))\<close>)
by (rule refl)
schematic_goal
fixes x :: int
shows "(x - (-2))^5 * (y - 78) ^ 8 = ?X"
apply (tactic \<open>ALLGOALS (CONVERSION
- (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv @{context}))))\<close>)
+ (Conv.arg_conv (Conv.arg1_conv (Semiring_Normalizer.semiring_normalize_conv \<^context>))))\<close>)
by (rule refl)
lemma "((-3) ^ (Suc (Suc (Suc 0)))) == (X::'a::{comm_ring_1})"
--- a/src/HOL/ex/Hex_Bin_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Hex_Bin_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,7 +25,7 @@
lemma "- 0x0A = - 10" by (rule refl)
text \<open>
- Hex and bin numerals are printed as decimal: @{term "0b10"}
+ Hex and bin numerals are printed as decimal: \<^term>\<open>0b10\<close>
\<close>
term "0b10"
term "0x0A"
--- a/src/HOL/ex/Iff_Oracle.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Iff_Oracle.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,15 +12,15 @@
subsection \<open>Oracle declaration\<close>
text \<open>
- This oracle makes tautologies of the form @{prop "P \<longleftrightarrow> P \<longleftrightarrow> P \<longleftrightarrow> P"}.
+ This oracle makes tautologies of the form \<^prop>\<open>P \<longleftrightarrow> P \<longleftrightarrow> P \<longleftrightarrow> P\<close>.
The length is specified by an integer, which is checked to be even
and positive.
\<close>
oracle iff_oracle = \<open>
let
- fun mk_iff 1 = Var (("P", 0), @{typ bool})
- | mk_iff n = HOLogic.mk_eq (Var (("P", 0), @{typ bool}), mk_iff (n - 1));
+ fun mk_iff 1 = Var (("P", 0), \<^typ>\<open>bool\<close>)
+ | mk_iff n = HOLogic.mk_eq (Var (("P", 0), \<^typ>\<open>bool\<close>), mk_iff (n - 1));
in
fn (thy, n) =>
if n > 0 andalso n mod 2 = 0
@@ -32,23 +32,23 @@
subsection \<open>Oracle as low-level rule\<close>
-ML \<open>iff_oracle (@{theory}, 2)\<close>
-ML \<open>iff_oracle (@{theory}, 10)\<close>
+ML \<open>iff_oracle (\<^theory>, 2)\<close>
+ML \<open>iff_oracle (\<^theory>, 10)\<close>
ML \<open>
- Thm.peek_status (iff_oracle (@{theory}, 10));
- @{assert} (#oracle it);
+ Thm.peek_status (iff_oracle (\<^theory>, 10));
+ \<^assert> (#oracle it);
\<close>
text \<open>These oracle calls had better fail.\<close>
ML \<open>
- (iff_oracle (@{theory}, 5); error "Bad oracle")
+ (iff_oracle (\<^theory>, 5); error "Bad oracle")
handle Fail _ => writeln "Oracle failed, as expected"
\<close>
ML \<open>
- (iff_oracle (@{theory}, 1); error "Bad oracle")
+ (iff_oracle (\<^theory>, 1); error "Bad oracle")
handle Fail _ => writeln "Oracle failed, as expected"
\<close>
--- a/src/HOL/ex/LocaleTest2.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/LocaleTest2.thy Sat Jan 05 17:24:33 2019 +0100
@@ -466,7 +466,7 @@
qed
qed
-subsubsection \<open>Total order \<open><=\<close> on @{typ int}\<close>
+subsubsection \<open>Total order \<open><=\<close> on \<^typ>\<open>int\<close>\<close>
interpretation int: dpo "(<=) :: [int, int] => bool"
rewrites "(dpo.less (<=) (x::int) y) = (x < y)"
@@ -522,7 +522,7 @@
thm int.less_total text \<open>from dlo\<close>
-subsubsection \<open>Total order \<open><=\<close> on @{typ nat}\<close>
+subsubsection \<open>Total order \<open><=\<close> on \<^typ>\<open>nat\<close>\<close>
interpretation nat: dpo "(<=) :: [nat, nat] => bool"
rewrites "dpo.less (<=) (x::nat) y = (x < y)"
@@ -573,7 +573,7 @@
thm nat.less_total text \<open>from ldo\<close>
-subsubsection \<open>Lattice \<open>dvd\<close> on @{typ nat}\<close>
+subsubsection \<open>Lattice \<open>dvd\<close> on \<^typ>\<open>nat\<close>\<close>
interpretation nat_dvd: dpo "(dvd) :: [nat, nat] => bool"
rewrites "dpo.less (dvd) (x::nat) y = (x dvd y & x ~= y)"
--- a/src/HOL/ex/ML.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/ML.thy Sat Jan 05 17:24:33 2019 +0100
@@ -35,7 +35,7 @@
\<close>
ML \<open>length []\<close>
-ML \<open>@{assert} (length [] = 0)\<close>
+ML \<open>\<^assert> (length [] = 0)\<close>
text \<open>Formal entities from the surrounding context may be referenced as
@@ -43,9 +43,9 @@
term "1 + 1" \<comment> \<open>term within theory source\<close>
-ML \<open>@{term "1 + 1"} (* term as symbolic ML datatype value *)\<close>
+ML \<open>\<^term>\<open>1 + 1\<close> (* term as symbolic ML datatype value *)\<close>
-ML \<open>@{term "1 + (1::int)"}\<close>
+ML \<open>\<^term>\<open>1 + (1::int)\<close>\<close>
ML \<open>
@@ -53,7 +53,7 @@
val s = \<open>1 + 1\<close>;
(* read term via old-style string interface *)
- val t = Syntax.read_term @{context} (Syntax.implode_input s);
+ val t = Syntax.read_term \<^context> (Syntax.implode_input s);
\<close>
@@ -122,7 +122,7 @@
\<close>
text \<open>
- The @{ML_structure Par_List} module provides high-level combinators for
+ The \<^ML_structure>\<open>Par_List\<close> module provides high-level combinators for
parallel list operations.
\<close>
@@ -141,7 +141,7 @@
value "factorial 4" \<comment> \<open>evaluation via ML code generation in the background\<close>
declare [[ML_source_trace]]
-ML \<open>@{term "factorial 4"}\<close> \<comment> \<open>symbolic term in ML\<close>
+ML \<open>\<^term>\<open>factorial 4\<close>\<close> \<comment> \<open>symbolic term in ML\<close>
ML \<open>@{code "factorial"}\<close> \<comment> \<open>ML code from function specification\<close>
--- a/src/HOL/ex/Meson_Test.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Meson_Test.thy Sat Jan 05 17:24:33 2019 +0100
@@ -24,21 +24,21 @@
"(\<exists>x. P x) & (\<forall>x. L x --> ~ (M x & R x)) & (\<forall>x. P x --> (M x & L x)) & ((\<forall>x. P x --> Q x) | (\<exists>x. P x & R x)) --> (\<exists>x. Q x & P x)"
apply (rule ccontr)
ML_prf \<open>
- val ctxt = @{context};
- val prem25 = Thm.assume @{cprop "\<not> ?thesis"};
+ val ctxt = \<^context>;
+ val prem25 = Thm.assume \<^cprop>\<open>\<not> ?thesis\<close>;
val nnf25 = Meson.make_nnf ctxt prem25;
val xsko25 = Meson.skolemize ctxt nnf25;
\<close>
- apply (tactic \<open>cut_tac xsko25 1 THEN REPEAT (eresolve_tac @{context} [exE] 1)\<close>)
+ apply (tactic \<open>cut_tac xsko25 1 THEN REPEAT (eresolve_tac \<^context> [exE] 1)\<close>)
ML_val \<open>
- val ctxt = @{context};
+ val ctxt = \<^context>;
val [_, sko25] = #prems (#1 (Subgoal.focus ctxt 1 NONE (#goal @{Isar.goal})));
val clauses25 = Meson.make_clauses ctxt [sko25]; (*7 clauses*)
val horns25 = Meson.make_horns clauses25; (*16 Horn clauses*)
val go25 :: _ = Meson.gocls clauses25;
val ctxt' = fold Thm.declare_hyps (maps Thm.chyps_of (go25 :: horns25)) ctxt;
- Goal.prove ctxt' [] [] @{prop False} (fn _ =>
+ Goal.prove ctxt' [] [] \<^prop>\<open>False\<close> (fn _ =>
resolve_tac ctxt' [go25] 1 THEN
Meson.depth_prolog_tac ctxt' horns25);
\<close>
@@ -48,23 +48,23 @@
"((\<exists>x. p x) = (\<exists>x. q x)) & (\<forall>x. \<forall>y. p x & q y --> (r x = s y)) --> ((\<forall>x. p x --> r x) = (\<forall>x. q x --> s x))"
apply (rule ccontr)
ML_prf \<open>
- val ctxt = @{context};
- val prem26 = Thm.assume @{cprop "\<not> ?thesis"}
+ val ctxt = \<^context>;
+ val prem26 = Thm.assume \<^cprop>\<open>\<not> ?thesis\<close>
val nnf26 = Meson.make_nnf ctxt prem26;
val xsko26 = Meson.skolemize ctxt nnf26;
\<close>
- apply (tactic \<open>cut_tac xsko26 1 THEN REPEAT (eresolve_tac @{context} [exE] 1)\<close>)
+ apply (tactic \<open>cut_tac xsko26 1 THEN REPEAT (eresolve_tac \<^context> [exE] 1)\<close>)
ML_val \<open>
- val ctxt = @{context};
+ val ctxt = \<^context>;
val [_, sko26] = #prems (#1 (Subgoal.focus ctxt 1 NONE (#goal @{Isar.goal})));
val clauses26 = Meson.make_clauses ctxt [sko26];
- val _ = @{assert} (length clauses26 = 9);
+ val _ = \<^assert> (length clauses26 = 9);
val horns26 = Meson.make_horns clauses26;
- val _ = @{assert} (length horns26 = 24);
+ val _ = \<^assert> (length horns26 = 24);
val go26 :: _ = Meson.gocls clauses26;
val ctxt' = fold Thm.declare_hyps (maps Thm.chyps_of (go26 :: horns26)) ctxt;
- Goal.prove ctxt' [] [] @{prop False} (fn _ =>
+ Goal.prove ctxt' [] [] \<^prop>\<open>False\<close> (fn _ =>
resolve_tac ctxt' [go26] 1 THEN
Meson.depth_prolog_tac ctxt' horns26); (*7 ms*)
(*Proof is of length 107!!*)
@@ -75,23 +75,23 @@
"(\<forall>x. \<forall>y. q x y = (\<forall>z. p z x = (p z y::bool))) --> (\<forall>x. (\<forall>y. q x y = (q y x::bool)))"
apply (rule ccontr)
ML_prf \<open>
- val ctxt = @{context};
- val prem43 = Thm.assume @{cprop "\<not> ?thesis"};
+ val ctxt = \<^context>;
+ val prem43 = Thm.assume \<^cprop>\<open>\<not> ?thesis\<close>;
val nnf43 = Meson.make_nnf ctxt prem43;
val xsko43 = Meson.skolemize ctxt nnf43;
\<close>
- apply (tactic \<open>cut_tac xsko43 1 THEN REPEAT (eresolve_tac @{context} [exE] 1)\<close>)
+ apply (tactic \<open>cut_tac xsko43 1 THEN REPEAT (eresolve_tac \<^context> [exE] 1)\<close>)
ML_val \<open>
- val ctxt = @{context};
+ val ctxt = \<^context>;
val [_, sko43] = #prems (#1 (Subgoal.focus ctxt 1 NONE (#goal @{Isar.goal})));
val clauses43 = Meson.make_clauses ctxt [sko43];
- val _ = @{assert} (length clauses43 = 6);
+ val _ = \<^assert> (length clauses43 = 6);
val horns43 = Meson.make_horns clauses43;
- val _ = @{assert} (length horns43 = 16);
+ val _ = \<^assert> (length horns43 = 16);
val go43 :: _ = Meson.gocls clauses43;
val ctxt' = fold Thm.declare_hyps (maps Thm.chyps_of (go43 :: horns43)) ctxt;
- Goal.prove ctxt' [] [] @{prop False} (fn _ =>
+ Goal.prove ctxt' [] [] \<^prop>\<open>False\<close> (fn _ =>
resolve_tac ctxt' [go43] 1 THEN
Meson.best_prolog_tac ctxt' Meson.size_of_subgoals horns43); (*7ms*)
\<close>
--- a/src/HOL/ex/Multiquote.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Multiquote.thy Sat Jan 05 17:24:33 2019 +0100
@@ -19,20 +19,20 @@
parse_translation \<open>
let
- fun antiquote_tr i (Const (@{syntax_const "_antiquote"}, _) $
- (t as Const (@{syntax_const "_antiquote"}, _) $ _)) = skip_antiquote_tr i t
- | antiquote_tr i (Const (@{syntax_const "_antiquote"}, _) $ t) =
+ fun antiquote_tr i (Const (\<^syntax_const>\<open>_antiquote\<close>, _) $
+ (t as Const (\<^syntax_const>\<open>_antiquote\<close>, _) $ _)) = skip_antiquote_tr i t
+ | antiquote_tr i (Const (\<^syntax_const>\<open>_antiquote\<close>, _) $ t) =
antiquote_tr i t $ Bound i
| antiquote_tr i (t $ u) = antiquote_tr i t $ antiquote_tr i u
| antiquote_tr i (Abs (x, T, t)) = Abs (x, T, antiquote_tr (i + 1) t)
| antiquote_tr _ a = a
- and skip_antiquote_tr i ((c as Const (@{syntax_const "_antiquote"}, _)) $ t) =
+ and skip_antiquote_tr i ((c as Const (\<^syntax_const>\<open>_antiquote\<close>, _)) $ t) =
c $ skip_antiquote_tr i t
| skip_antiquote_tr i t = antiquote_tr i t;
fun quote_tr [t] = Abs ("s", dummyT, antiquote_tr 0 (Term.incr_boundvars 1 t))
| quote_tr ts = raise TERM ("quote_tr", ts);
- in [(@{syntax_const "_quote"}, K quote_tr)] end
+ in [(\<^syntax_const>\<open>_quote\<close>, K quote_tr)] end
\<close>
text \<open>basic examples\<close>
--- a/src/HOL/ex/PER.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/PER.thy Sat Jan 05 17:24:33 2019 +0100
@@ -149,7 +149,7 @@
text \<open>
The quotient type \<open>'a quot\<close> consists of all
- \emph{equivalence classes} over elements of the base type @{typ 'a}.
+ \emph{equivalence classes} over elements of the base type \<^typ>\<open>'a\<close>.
\<close>
definition "quot = {{x. a \<sim> x}| a::'a::partial_equiv. True}"
--- a/src/HOL/ex/Primrec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Primrec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -76,7 +76,7 @@
by (blast intro: ack_less_mono2 less_le_trans)
-text \<open>PROPERTY A 4'? Extra lemma needed for @{term CONSTANT} case, constant functions\<close>
+text \<open>PROPERTY A 4'? Extra lemma needed for \<^term>\<open>CONSTANT\<close> case, constant functions\<close>
lemma less_ack1 [iff]: "i < ack i j"
apply (induct i)
@@ -91,8 +91,7 @@
by (induct j) simp_all
-text \<open>PROPERTY A 9. The unary \<open>1\<close> and \<open>2\<close> in @{term
- ack} is essential for the rewriting.\<close>
+text \<open>PROPERTY A 9. The unary \<open>1\<close> and \<open>2\<close> in \<^term>\<open>ack\<close> is essential for the rewriting.\<close>
lemma ack_2 [simp]: "ack (Suc (Suc 0)) j = 2 * j + 3"
by (induct j) simp_all
@@ -170,7 +169,7 @@
"hd0 (m # ms) = m"
-text \<open>Inductive definition of the set of primitive recursive functions of type @{typ "nat list => nat"}.\<close>
+text \<open>Inductive definition of the set of primitive recursive functions of type \<^typ>\<open>nat list => nat\<close>.\<close>
definition SC :: "nat list => nat" where
"SC l = Suc (hd0 l)"
@@ -191,7 +190,7 @@
(case l of
[] => 0
| x # l' => rec_nat (f l') (\<lambda>y r. g (r # y # l')) x)"
- \<comment> \<open>Note that @{term g} is applied first to @{term "PREC f g y"} and then to @{term y}!\<close>
+ \<comment> \<open>Note that \<^term>\<open>g\<close> is applied first to \<^term>\<open>PREC f g y\<close> and then to \<^term>\<open>y\<close>!\<close>
inductive PRIMREC :: "(nat list => nat) => bool" where
SC: "PRIMREC SC" |
@@ -241,7 +240,7 @@
done
-text \<open>@{term COMP} case\<close>
+text \<open>\<^term>\<open>COMP\<close> case\<close>
lemma COMP_map_aux: "\<forall>f \<in> set fs. PRIMREC f \<and> (\<exists>kf. \<forall>l. f l < ack kf (sum_list l))
==> \<exists>k. \<forall>l. sum_list (map (\<lambda>f. f l) fs) < ack k (sum_list l)"
@@ -262,7 +261,7 @@
done
-text \<open>@{term PREC} case\<close>
+text \<open>\<^term>\<open>PREC\<close> case\<close>
lemma PREC_case_aux:
"\<forall>l. f l + sum_list l < ack kf (sum_list l) ==>
--- a/src/HOL/ex/Radix_Sort.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Radix_Sort.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
text \<open>The \<open>Radix_Sort\<close> locale provides a sorting function \<open>radix_sort\<close> that sorts
lists of lists. It is parameterized by a sorting function \<open>sort1 f\<close> that also sorts
lists of lists, but only w.r.t. the column selected by \<open>f\<close>.
-Working with lists, \<open>f\<close> is instantiated with @{term"\<lambda>xs. xs ! n"} to select the \<open>n\<close>-th element.
+Working with lists, \<open>f\<close> is instantiated with \<^term>\<open>\<lambda>xs. xs ! n\<close> to select the \<open>n\<close>-th element.
A more efficient implementation would sort lists of arrays because arrays support
constant time access to every element.\<close>
--- a/src/HOL/ex/Records.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Records.thy Sat Jan 05 17:24:33 2019 +0100
@@ -198,7 +198,7 @@
subsubsection \<open>Non-coercive structural subtyping\<close>
text \<open>
- Term @{term foo11} has type @{typ cpoint}, not type @{typ point} ---
+ Term \<^term>\<open>foo11\<close> has type \<^typ>\<open>cpoint\<close>, not type \<^typ>\<open>point\<close> ---
Great!
\<close>
@@ -215,7 +215,7 @@
ypos' :: nat
text \<open>
- \noindent May not apply @{term getX} to @{term [source] "(| xpos' =
+ \noindent May not apply \<^term>\<open>getX\<close> to @{term [source] "(| xpos' =
2, ypos' = 0 |)"} -- type error.
\<close>
@@ -245,49 +245,49 @@
(generalized) predicate on the record is passed as parameter that
decides whether or how `deep' to split the record. It can peek on the
subterm starting at the quantified occurrence of the record (including
-the quantifier). The value @{ML "0"} indicates no split, a value
-greater @{ML "0"} splits up to the given bound of record extension and
-finally the value @{ML "~1"} completely splits the record.
+the quantifier). The value \<^ML>\<open>0\<close> indicates no split, a value
+greater \<^ML>\<open>0\<close> splits up to the given bound of record extension and
+finally the value \<^ML>\<open>~1\<close> completely splits the record.
@{ML [source] "Record.split_simp_tac"} additionally takes a list of
equations for simplification and can also split fixed record variables.
\<close>
lemma "(\<forall>r. P (xpos r)) \<longrightarrow> (\<forall>x. P x)"
- apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss @{context}
+ apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss \<^context>
addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<forall>r. P (xpos r)) \<longrightarrow> (\<forall>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (xpos r)) \<longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss @{context}
+ apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss \<^context>
addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (xpos r)) \<longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "\<And>r. P (xpos r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss @{context}
+ apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss \<^context>
addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply auto
done
lemma "\<And>r. P (xpos r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
lemma "P (xpos r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
@@ -298,7 +298,7 @@
assume pre: "P (xpos r)"
then have "\<exists>x. P x"
apply -
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
}
@@ -309,7 +309,7 @@
illustrated by the following lemma.\<close>
lemma "\<exists>r. xpos r = x"
- apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss @{context}
+ apply (tactic \<open>simp_tac (put_simpset HOL_basic_ss \<^context>
addsimprocs [Record.ex_sel_eq_simproc]) 1\<close>)
done
--- a/src/HOL/ex/Reflection_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Reflection_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,8 +12,8 @@
text \<open>
Consider an HOL type \<open>\<sigma>\<close>, the structure of which is not recongnisable
-on the theory level. This is the case of @{typ bool}, arithmetical terms such as @{typ int},
-@{typ real} etc \dots In order to implement a simplification on terms of type \<open>\<sigma>\<close> we
+on the theory level. This is the case of \<^typ>\<open>bool\<close>, arithmetical terms such as \<^typ>\<open>int\<close>,
+\<^typ>\<open>real\<close> etc \dots In order to implement a simplification on terms of type \<open>\<sigma>\<close> we
often need its structure. Traditionnaly such simplifications are written in ML,
proofs are synthesized.
@@ -35,12 +35,12 @@
The method \<open>reflection\<close> uses \<open>reify\<close> and has a very similar signature:
\<open>reflection corr_thm eqs (t)\<close>. Here again \<open>eqs\<close> and \<open>(t)\<close>
are as described above and \<open>corr_thm\<close> is a theorem proving
-@{prop "I vs (f t) = I vs t"}. We assume that \<open>I\<close> is the interpretation
+\<^prop>\<open>I vs (f t) = I vs t\<close>. We assume that \<open>I\<close> is the interpretation
and \<open>f\<close> is some useful and executable simplification of type \<open>\<tau> \<Rightarrow> \<tau>\<close>.
-The method \<open>reflection\<close> applies reification and hence the theorem @{prop "t = I xs s"}
-and hence using \<open>corr_thm\<close> derives @{prop "t = I xs (f s)"}. It then uses
-normalization by equational rewriting to prove @{prop "f s = s'"} which almost finishes
-the proof of @{prop "t = t'"} where @{prop "I xs s' = t'"}.
+The method \<open>reflection\<close> applies reification and hence the theorem \<^prop>\<open>t = I xs s\<close>
+and hence using \<open>corr_thm\<close> derives \<^prop>\<open>t = I xs (f s)\<close>. It then uses
+normalization by equational rewriting to prove \<^prop>\<open>f s = s'\<close> which almost finishes
+the proof of \<^prop>\<open>t = t'\<close> where \<^prop>\<open>I xs s' = t'\<close>.
\<close>
text \<open>Example 1 : Propositional formulae and NNF.\<close>
@@ -81,7 +81,7 @@
apply (reify Ifm.simps)
oops
-text \<open>Method \<open>reify\<close> maps a @{typ bool} to an @{typ fm}. For this it needs the
+text \<open>Method \<open>reify\<close> maps a \<^typ>\<open>bool\<close> to an \<^typ>\<open>fm\<close>. For this it needs the
semantics of \<open>fm\<close>, i.e.\ the rewrite rules in \<open>Ifm.simps\<close>.\<close>
text \<open>You can also just pick up a subterm to reify.\<close>
@@ -115,12 +115,12 @@
| "nnf (NOT (NOT p)) = nnf p"
| "nnf (NOT p) = NOT p"
-text \<open>The correctness theorem of @{const nnf}: it preserves the semantics of @{typ fm}\<close>
+text \<open>The correctness theorem of \<^const>\<open>nnf\<close>: it preserves the semantics of \<^typ>\<open>fm\<close>\<close>
lemma nnf [reflection]:
"Ifm (nnf p) vs = Ifm p vs"
by (induct p rule: nnf.induct) auto
-text \<open>Now let's perform NNF using our @{const nnf} function defined above. First to the
+text \<open>Now let's perform NNF using our \<^const>\<open>nnf\<close> function defined above. First to the
whole subgoal.\<close>
lemma "A \<noteq> B \<and> (B \<longrightarrow> A \<noteq> (B \<or> C \<and> (B \<longrightarrow> A \<or> D))) \<longrightarrow> A \<or> B \<and> D"
apply (reflection Ifm.simps)
@@ -199,7 +199,7 @@
apply (reify Inum_eqs' ("1 * (2 * x + (y::nat) + 0 + 1)"))
oops
-text \<open>Okay, let's try reflection. Some simplifications on @{typ num} follow. You can
+text \<open>Okay, let's try reflection. Some simplifications on \<^typ>\<open>num\<close> follow. You can
skim until the main theorem \<open>linum\<close>.\<close>
fun lin_add :: "num \<Rightarrow> num \<Rightarrow> num"
--- a/src/HOL/ex/Refute_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Refute_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -15,8 +15,8 @@
lemma "P \<and> Q"
apply (rule conjI)
-refute [expect = genuine] 1 \<comment> \<open>refutes @{term "P"}\<close>
-refute [expect = genuine] 2 \<comment> \<open>refutes @{term "Q"}\<close>
+refute [expect = genuine] 1 \<comment> \<open>refutes \<^term>\<open>P\<close>\<close>
+refute [expect = genuine] 2 \<comment> \<open>refutes \<^term>\<open>Q\<close>\<close>
refute [expect = genuine] \<comment> \<open>equivalent to 'refute 1'\<close>
\<comment> \<open>here 'refute 3' would cause an exception, since we only have 2 subgoals\<close>
refute [maxsize = 5, expect = genuine] \<comment> \<open>we can override parameters ...\<close>
@@ -472,7 +472,7 @@
subsubsection \<open>Subtypes (typedef), typedecl\<close>
-text \<open>A completely unspecified non-empty subset of @{typ "'a"}:\<close>
+text \<open>A completely unspecified non-empty subset of \<^typ>\<open>'a\<close>:\<close>
definition "myTdef = insert (undefined::'a) (undefined::'a set)"
@@ -704,8 +704,8 @@
lemma "P Suc"
refute [maxsize = 3, expect = none]
-\<comment> \<open>@{term Suc} is a partial function (regardless of the size
- of the model), hence @{term "P Suc"} is undefined and no
+\<comment> \<open>\<^term>\<open>Suc\<close> is a partial function (regardless of the size
+ of the model), hence \<^term>\<open>P Suc\<close> is undefined and no
model will be found\<close>
oops
--- a/src/HOL/ex/Rewrite_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Rewrite_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -219,12 +219,12 @@
shows "\<And>a b :: nat. P ((a + 1) * (b + 1))"
apply (tactic \<open>
let
- val (x, ctxt) = yield_singleton Variable.add_fixes "x" @{context}
+ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context>
(* Note that the pattern order is reversed *)
val pat = [
- Rewrite.For [(x, SOME @{typ nat})],
+ Rewrite.For [(x, SOME \<^typ>\<open>nat\<close>)],
Rewrite.In,
- Rewrite.Term (@{const plus(nat)} $ Free (x, @{typ nat}) $ @{term "1 :: nat"}, [])]
+ Rewrite.Term (@{const plus(nat)} $ Free (x, \<^typ>\<open>nat\<close>) $ \<^term>\<open>1 :: nat\<close>, [])]
val to = NONE
in CCONVERSION (Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute}) 1 end
\<close>)
@@ -236,14 +236,14 @@
shows "Q (\<lambda>b :: int. P (\<lambda>a. a + b) (\<lambda>a. b + a))"
apply (tactic \<open>
let
- val (x, ctxt) = yield_singleton Variable.add_fixes "x" @{context}
+ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context>
val pat = [
Rewrite.Concl,
Rewrite.In,
- Rewrite.Term (Free ("Q", (@{typ "int"} --> TVar (("'b",0), [])) --> @{typ bool})
- $ Abs ("x", @{typ int}, Rewrite.mk_hole 1 (@{typ int} --> TVar (("'b",0), [])) $ Bound 0), [(x, @{typ int})]),
+ Rewrite.Term (Free ("Q", (\<^typ>\<open>int\<close> --> TVar (("'b",0), [])) --> \<^typ>\<open>bool\<close>)
+ $ Abs ("x", \<^typ>\<open>int\<close>, Rewrite.mk_hole 1 (\<^typ>\<open>int\<close> --> TVar (("'b",0), [])) $ Bound 0), [(x, \<^typ>\<open>int\<close>)]),
Rewrite.In,
- Rewrite.Term (@{const plus(int)} $ Free (x, @{typ int}) $ Var (("c", 0), @{typ int}), [])
+ Rewrite.Term (@{const plus(int)} $ Free (x, \<^typ>\<open>int\<close>) $ Var (("c", 0), \<^typ>\<open>int\<close>), [])
]
val to = NONE
in CCONVERSION (Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute}) 1 end
@@ -253,15 +253,15 @@
(* There is also conversion-like rewrite function: *)
ML \<open>
- val ct = @{cprop "Q (\<lambda>b :: int. P (\<lambda>a. a + b) (\<lambda>a. b + a))"}
- val (x, ctxt) = yield_singleton Variable.add_fixes "x" @{context}
+ val ct = \<^cprop>\<open>Q (\<lambda>b :: int. P (\<lambda>a. a + b) (\<lambda>a. b + a))\<close>
+ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context>
val pat = [
Rewrite.Concl,
Rewrite.In,
- Rewrite.Term (Free ("Q", (@{typ "int"} --> TVar (("'b",0), [])) --> @{typ bool})
- $ Abs ("x", @{typ int}, Rewrite.mk_hole 1 (@{typ int} --> TVar (("'b",0), [])) $ Bound 0), [(x, @{typ int})]),
+ Rewrite.Term (Free ("Q", (\<^typ>\<open>int\<close> --> TVar (("'b",0), [])) --> \<^typ>\<open>bool\<close>)
+ $ Abs ("x", \<^typ>\<open>int\<close>, Rewrite.mk_hole 1 (\<^typ>\<open>int\<close> --> TVar (("'b",0), [])) $ Bound 0), [(x, \<^typ>\<open>int\<close>)]),
Rewrite.In,
- Rewrite.Term (@{const plus(int)} $ Free (x, @{typ int}) $ Var (("c", 0), @{typ int}), [])
+ Rewrite.Term (@{const plus(int)} $ Free (x, \<^typ>\<open>int\<close>) $ Var (("c", 0), \<^typ>\<open>int\<close>), [])
]
val to = NONE
val th = Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute} ct
@@ -270,11 +270,11 @@
section \<open>Regression tests\<close>
ML \<open>
- val ct = @{cterm "(\<lambda>b :: int. (\<lambda>a. b + a))"}
- val (x, ctxt) = yield_singleton Variable.add_fixes "x" @{context}
+ val ct = \<^cterm>\<open>(\<lambda>b :: int. (\<lambda>a. b + a))\<close>
+ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context>
val pat = [
Rewrite.In,
- Rewrite.Term (@{const plus(int)} $ Var (("c", 0), @{typ int}) $ Var (("c", 0), @{typ int}), [])
+ Rewrite.Term (@{const plus(int)} $ Var (("c", 0), \<^typ>\<open>int\<close>) $ Var (("c", 0), \<^typ>\<open>int\<close>), [])
]
val to = NONE
val _ =
@@ -284,7 +284,7 @@
\<close>
ML \<open>
- Rewrite.params_pconv (Conv.all_conv |> K |> K) @{context} (Vartab.empty, []) @{cterm "\<And>x. PROP A"}
+ Rewrite.params_pconv (Conv.all_conv |> K |> K) \<^context> (Vartab.empty, []) \<^cterm>\<open>\<And>x. PROP A\<close>
\<close>
lemma
--- a/src/HOL/ex/SAT_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/SAT_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -534,9 +534,9 @@
| and_to_list fm acc = rev (fm :: acc)
val clauses = and_to_list prop_fm []
val terms = map (HOLogic.mk_Trueprop o Prop_Logic.term_of_prop_formula) clauses
- val cterms = map (Thm.cterm_of @{context}) terms
+ val cterms = map (Thm.cterm_of \<^context>) terms
val start = Timing.start ()
- val _ = SAT.rawsat_thm @{context} cterms
+ val _ = SAT.rawsat_thm \<^context> cterms
in
(Timing.result start, ! SAT.counter)
end;
--- a/src/HOL/ex/Simproc_Tests.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Simproc_Tests.thy Sat Jan 05 17:24:33 2019 +0100
@@ -27,21 +27,21 @@
fix a b c d :: nat
{
assume "b = Suc c" have "a + b = Suc (c + a)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_sums}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_sums\<close>]\<close>) fact
next
assume "b < Suc c" have "a + b < Suc (c + a)"
- by (tactic \<open>test @{context} [@{simproc natless_cancel_sums}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natless_cancel_sums\<close>]\<close>) fact
next
assume "b \<le> Suc c" have "a + b \<le> Suc (c + a)"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_sums}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_sums\<close>]\<close>) fact
next
assume "b - Suc c = d" have "a + b - Suc (c + a) = d"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_sums}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_sums\<close>]\<close>) fact
}
end
schematic_goal "\<And>(y::?'b::size). size (?x::?'a::size) \<le> size y + size ?x"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_sums}]\<close>) (rule le0)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_sums\<close>]\<close>) (rule le0)
(* TODO: test more simprocs with schematic variables *)
subsection \<open>Abelian group cancellation simprocs\<close>
@@ -50,10 +50,10 @@
fix a b c u :: "'a::ab_group_add"
{
assume "(a + 0) - (b + 0) = u" have "(a + c) - (b + c) = u"
- by (tactic \<open>test @{context} [@{simproc group_cancel_diff}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>group_cancel_diff\<close>]\<close>) fact
next
assume "a + 0 = b + 0" have "a + c = b + c"
- by (tactic \<open>test @{context} [@{simproc group_cancel_eq}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>group_cancel_eq\<close>]\<close>) fact
}
end
(* TODO: more tests for Groups.group_cancel_{add,diff,eq,less,le} *)
@@ -69,61 +69,61 @@
fix a b c d oo uu i j k l u v w x y z :: "'a::comm_ring_1"
{
assume "a + - b = u" have "(a + c) - (b + c) = u"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "10 + (2 * l + oo) = uu"
have "l + 2 + 2 + 2 + (l + 2) + (oo + 2) = uu"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-3 + (i + (j + k)) = y"
have "(i + j + 12 + k) - 15 = y"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "7 + (i + (j + k)) = y"
have "(i + j + 12 + k) - 5 = y"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-4 * (u * v) + (2 * x + y) = w"
have "(2*x - (u*v) + y) - v*3*u = w"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "2 * x * u * v + y = w"
have "(2*x*u*v + (u*v)*4 + y) - v*u*4 = w"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "3 * (u * v) + (2 * x * u * v + y) = w"
have "(2*x*u*v + (u*v)*4 + y) - v*u = w"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-3 * (u * v) + (- (x * u * v) + - y) = w"
have "u*v - (x*u*v + (u*v)*4 + y) = w"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "a + - c = d"
have "a + -(b+c) + b = d"
apply (simp only: minus_add_distrib)
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-2 * b + (a + - c) = d"
have "a + -(b+c) - b = d"
apply (simp only: minus_add_distrib)
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-7 + (i + (j + (k + (- u + - y)))) = z"
have "(i + j + -2 + k) - (u + 5 + y) = z"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "-27 + (i + (j + k)) = y"
have "(i + j + -12 + k) - 15 = y"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "27 + (i + (j + k)) = y"
have "(i + j + 12 + k) - -15 = y"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
next
assume "3 + (i + (j + k)) = y"
have "(i + j + -12 + k) - -15 = y"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>]\<close>) fact
}
end
@@ -133,20 +133,20 @@
fix i j k u vv w y z w' y' z' :: "'a::comm_ring_1"
{
assume "u = 0" have "2*u = u"
- by (tactic \<open>test @{context} [@{simproc inteq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>inteq_cancel_numerals\<close>]\<close>) fact
(* conclusion matches Rings.ring_1_no_zero_divisors_class.mult_cancel_right2 *)
next
assume "i + (j + k) = 3 + (u + y)"
have "(i + j + 12 + k) = u + 15 + y"
- by (tactic \<open>test @{context} [@{simproc inteq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>inteq_cancel_numerals\<close>]\<close>) fact
next
assume "7 + (j + (i + k)) = y"
have "(i + j*2 + 12 + k) = j + 5 + y"
- by (tactic \<open>test @{context} [@{simproc inteq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>inteq_cancel_numerals\<close>]\<close>) fact
next
assume "u + (6*z + (4*y + 6*w)) = 6*z' + (4*y' + (6*w' + vv))"
have "2*y + 3*z + 6*w + 2*y + 3*z + 2*u = 2*y' + 3*z' + 6*w' + 2*y' + 3*z' + u + vv"
- by (tactic \<open>test @{context} [@{simproc int_combine_numerals}, @{simproc inteq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_combine_numerals\<close>, \<^simproc>\<open>inteq_cancel_numerals\<close>]\<close>) fact
}
end
@@ -156,18 +156,18 @@
fix b c i j k u y :: "'a::linordered_idom"
{
assume "y < 2 * b" have "y - b < b"
- by (tactic \<open>test @{context} [@{simproc intless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>intless_cancel_numerals\<close>]\<close>) fact
next
assume "c + y < 4 * b" have "y - (3*b + c) < b - 2*c"
- by (tactic \<open>test @{context} [@{simproc intless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>intless_cancel_numerals\<close>]\<close>) fact
next
assume "i + (j + k) < 8 + (u + y)"
have "(i + j + -3 + k) < u + 5 + y"
- by (tactic \<open>test @{context} [@{simproc intless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>intless_cancel_numerals\<close>]\<close>) fact
next
assume "9 + (i + (j + k)) < u + y"
have "(i + j + 3 + k) < u + -6 + y"
- by (tactic \<open>test @{context} [@{simproc intless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>intless_cancel_numerals\<close>]\<close>) fact
}
end
@@ -177,22 +177,22 @@
fix x y :: "'a::{idom,ring_char_0}"
{
assume "3*x = 4*y" have "9*x = 12 * y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "-3*x = 4*y" have "-99*x = 132 * y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "111*x = -44*y" have "999*x = -396 * y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "11*x = 9*y" have "-99*x = -81 * y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "2*x = y" have "-2 * x = -1 * y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "2*x = y" have "-2 * x = -y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_numeral_factor\<close>]\<close>) fact
}
end
@@ -202,20 +202,20 @@
fix x y z :: "'a::{unique_euclidean_semiring,comm_ring_1,ring_char_0}"
{
assume "(3*x) div (4*y) = z" have "(9*x) div (12*y) = z"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_numeral_factors}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_numeral_factors\<close>]\<close>) fact
next
assume "(-3*x) div (4*y) = z" have "(-99*x) div (132*y) = z"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_numeral_factors}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_numeral_factors\<close>]\<close>) fact
next
assume "(111*x) div (-44*y) = z" have "(999*x) div (-396*y) = z"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_numeral_factors}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_numeral_factors\<close>]\<close>) fact
next
assume "(11*x) div (9*y) = z" have "(-99*x) div (-81*y) = z"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_numeral_factors}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_numeral_factors\<close>]\<close>) fact
next
assume "(2*x) div y = z"
have "(-2 * x) div (-1 * y) = z"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_numeral_factors}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_numeral_factors\<close>]\<close>) fact
}
end
@@ -225,22 +225,22 @@
fix x y :: "'a::linordered_idom"
{
assume "3*x < 4*y" have "9*x < 12 * y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "-3*x < 4*y" have "-99*x < 132 * y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "111*x < -44*y" have "999*x < -396 * y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "9*y < 11*x" have "-99*x < -81 * y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "y < 2*x" have "-2 * x < -y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "23*y < x" have "-x < -23 * y"
- by (tactic \<open>test @{context} [@{simproc ring_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_less_cancel_numeral_factor\<close>]\<close>) fact
}
end
@@ -250,28 +250,28 @@
fix x y :: "'a::linordered_idom"
{
assume "3*x \<le> 4*y" have "9*x \<le> 12 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "-3*x \<le> 4*y" have "-99*x \<le> 132 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "111*x \<le> -44*y" have "999*x \<le> -396 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "9*y \<le> 11*x" have "-99*x \<le> -81 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "y \<le> 2*x" have "-2 * x \<le> -1 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "23*y \<le> x" have "-x \<le> -23 * y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "y \<le> 0" have "0 \<le> y * -2"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "- x \<le> y" have "- (2 * x) \<le> 2*y"
- by (tactic \<open>test @{context} [@{simproc ring_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_le_cancel_numeral_factor\<close>]\<close>) fact
}
end
@@ -281,19 +281,19 @@
fix x y z :: "'a::{field,ring_char_0}"
{
assume "(3*x) / (4*y) = z" have "(9*x) / (12 * y) = z"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(-3*x) / (4*y) = z" have "(-99*x) / (132 * y) = z"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(111*x) / (-44*y) = z" have "(999*x) / (-396 * y) = z"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(11*x) / (9*y) = z" have "(-99*x) / (-81 * y) = z"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(2*x) / y = z" have "(-2 * x) / (-1 * y) = z"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_numeral_factor\<close>]\<close>) fact
}
end
@@ -303,22 +303,22 @@
fix a b c d k x y :: "'a::idom"
{
assume "k = 0 \<or> x = y" have "x*k = k*y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> 1 = y" have "k = k*y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
next
assume "b = 0 \<or> a*c = 1" have "a*(b*c) = b"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
next
assume "a = 0 \<or> b = 0 \<or> c = d*x" have "a*(b*c) = d*b*(x*a)"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> x = y" have "x*k = k*y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> 1 = y" have "k = k*y"
- by (tactic \<open>test @{context} [@{simproc ring_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>ring_eq_cancel_factor\<close>]\<close>) fact
}
end
@@ -329,19 +329,19 @@
{
assume "(if k = 0 then 0 else x div y) = uu"
have "(x*k) div (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_factor\<close>]\<close>) fact
next
assume "(if k = 0 then 0 else 1 div y) = uu"
have "(k) div (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_factor\<close>]\<close>) fact
next
assume "(if b = 0 then 0 else a * c) = uu"
have "(a*(b*c)) div b = uu"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_factor\<close>]\<close>) fact
next
assume "(if a = 0 then 0 else if b = 0 then 0 else c div (d * x)) = uu"
have "(a*(b*c)) div (d*b*(x*a)) = uu"
- by (tactic \<open>test @{context} [@{simproc int_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>int_div_cancel_factor\<close>]\<close>) fact
}
end
@@ -355,19 +355,19 @@
{
assume "(if k = 0 then 0 else x / y) = uu"
have "(x*k) / (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_factor\<close>]\<close>) fact
next
assume "(if k = 0 then 0 else 1 / y) = uu"
have "(k) / (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_factor\<close>]\<close>) fact
next
assume "(if b = 0 then 0 else a * c) = uu"
have "(a*(b*c)) / b = uu"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_factor\<close>]\<close>) fact
next
assume "(if a = 0 then 0 else if b = 0 then 0 else c / (d * x)) = uu"
have "(a*(b*c)) / (d*b*(x*a)) = uu"
- by (tactic \<open>test @{context} [@{simproc divide_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>divide_cancel_factor\<close>]\<close>) fact
}
end
@@ -382,23 +382,23 @@
fix x y z :: "'a::linordered_idom"
{
assume "0 < z \<Longrightarrow> x < y" have "0 < z \<Longrightarrow> x*z < y*z"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < z \<Longrightarrow> x < y" have "0 < z \<Longrightarrow> x*z < z*y"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < z \<Longrightarrow> x < y" have "0 < z \<Longrightarrow> z*x < y*z"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < z \<Longrightarrow> x < y" have "0 < z \<Longrightarrow> z*x < z*y"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_less_cancel_factor\<close>]\<close>) fact
next
txt "This simproc now uses the simplifier to prove that terms to
be canceled are positive/negative."
assume z_pos: "0 < z"
assume "x < y" have "z*x < z*y"
- by (tactic \<open>CHANGED (asm_simp_tac (put_simpset HOL_basic_ss @{context}
- addsimprocs [@{simproc linordered_ring_less_cancel_factor}]
+ by (tactic \<open>CHANGED (asm_simp_tac (put_simpset HOL_basic_ss \<^context>
+ addsimprocs [\<^simproc>\<open>linordered_ring_less_cancel_factor\<close>]
addsimps [@{thm z_pos}]) 1)\<close>) fact
}
end
@@ -409,10 +409,10 @@
fix x y z :: "'a::linordered_idom"
{
assume "0 < z \<Longrightarrow> x \<le> y" have "0 < z \<Longrightarrow> x*z \<le> y*z"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_le_cancel_factor\<close>]\<close>) fact
next
assume "0 < z \<Longrightarrow> x \<le> y" have "0 < z \<Longrightarrow> z*x \<le> z*y"
- by (tactic \<open>test @{context} [@{simproc linordered_ring_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>linordered_ring_le_cancel_factor\<close>]\<close>) fact
}
end
@@ -422,28 +422,28 @@
fix x y z uu :: "'a::{field,ring_char_0}"
{
assume "5 / 6 * x = uu" have "x / 2 + x / 3 = uu"
- by (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>) fact
next
assume "6 / 9 * x + y = uu" have "x / 3 + y + x / 3 = uu"
- by (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>) fact
next
assume "9 / 9 * x = uu" have "2 * x / 3 + x / 3 = uu"
- by (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>) fact
next
assume "y + z = uu"
have "x / 2 + y - 3 * x / 6 + z = uu"
- by (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>) fact
next
assume "1 / 15 * x + y = uu"
have "7 * x / 5 + y - 4 * x / 3 = uu"
- by (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>) fact
}
end
lemma
fixes x :: "'a::{linordered_field}"
shows "2/3 * x + x / 3 = uu"
-apply (tactic \<open>test @{context} [@{simproc field_combine_numerals}]\<close>)?
+apply (tactic \<open>test \<^context> [\<^simproc>\<open>field_combine_numerals\<close>]\<close>)?
oops \<comment> \<open>FIXME: test fails\<close>
subsection \<open>\<open>nat_combine_numerals\<close>\<close>
@@ -452,25 +452,25 @@
fix i j k m n u :: nat
{
assume "4*k = u" have "k + 3*k = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
next
(* FIXME "Suc (i + 3) \<equiv> i + 4" *)
assume "4 * Suc 0 + i = u" have "Suc (i + 3) = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
next
(* FIXME "Suc (i + j + 3 + k) \<equiv> i + j + 4 + k" *)
assume "4 * Suc 0 + (i + (j + k)) = u" have "Suc (i + j + 3 + k) = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
next
assume "2 * j + 4 * k = u" have "k + j + 3*k + j = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
next
assume "6 * Suc 0 + (5 * (i * j) + (4 * k + i)) = u"
have "Suc (j*i + i + k + 5 + 3*k + i*j*4) = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
next
assume "5 * (m * n) = u" have "(2*n*m) + (3*(m*n)) = u"
- by (tactic \<open>test @{context} [@{simproc nat_combine_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_combine_numerals\<close>]\<close>) fact
}
end
@@ -480,44 +480,44 @@
fix i j k l oo u uu vv w y z w' y' z' :: "nat"
{
assume "Suc 0 * u = 0" have "2*u = (u::nat)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * u = Suc 0" have "2*u = Suc (u)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "i + (j + k) = 3 * Suc 0 + (u + y)"
have "(i + j + 12 + k) = u + 15 + y"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "7 * Suc 0 + (i + (j + k)) = u + y"
have "(i + j + 12 + k) = u + 5 + y"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "11 * Suc 0 + (i + (j + k)) = u + y"
have "(i + j + 12 + k) = Suc (u + y)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "i + (j + k) = 2 * Suc 0 + (u + y)"
have "(i + j + 5 + k) = Suc (Suc (Suc (Suc (Suc (Suc (Suc (u + y)))))))"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * u + (2 * y + 3 * z) = Suc 0"
have "2*y + 3*z + 2*u = Suc (u)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * u + (2 * y + (3 * z + (6 * w + (2 * y + 3 * z)))) = Suc 0"
have "2*y + 3*z + 6*w + 2*y + 3*z + 2*u = Suc (u)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * u + (2 * y + (3 * z + (6 * w + (2 * y + 3 * z)))) =
2 * y' + (3 * z' + (6 * w' + (2 * y' + (3 * z' + vv))))"
have "2*y + 3*z + 6*w + 2*y + 3*z + 2*u =
2*y' + 3*z' + 6*w' + 2*y' + 3*z' + u + vv"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
next
assume "2 * u + (2 * z + (5 * Suc 0 + 2 * y)) = vv"
have "6 + 2*y + 3*z + 4*u = Suc (vv + 2*u + z)"
- by (tactic \<open>test @{context} [@{simproc nateq_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nateq_cancel_numerals\<close>]\<close>) fact
}
end
@@ -528,17 +528,17 @@
fix c i j k l m oo u uu vv w y z w' y' z' :: "nat"
{
assume "0 < j" have "(2*length xs < 2*length xs + j)"
- by (tactic \<open>test @{context} [@{simproc natless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natless_cancel_numerals\<close>]\<close>) fact
next
assume "0 < j" have "(2*length xs < length xs * 2 + j)"
- by (tactic \<open>test @{context} [@{simproc natless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natless_cancel_numerals\<close>]\<close>) fact
next
assume "i + (j + k) < u + y"
have "(i + j + 5 + k) < Suc (Suc (Suc (Suc (Suc (u + y)))))"
- by (tactic \<open>test @{context} [@{simproc natless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natless_cancel_numerals\<close>]\<close>) fact
next
assume "0 < Suc 0 * (m * n) + u" have "(2*n*m) < (3*(m*n)) + u"
- by (tactic \<open>test @{context} [@{simproc natless_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natless_cancel_numerals\<close>]\<close>) fact
}
end
@@ -550,22 +550,22 @@
{
assume "u + y \<le> 36 * Suc 0 + (i + (j + k))"
have "Suc (Suc (Suc (Suc (Suc (u + y))))) \<le> ((i + j) + 41 + k)"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_numerals\<close>]\<close>) fact
next
assume "5 * Suc 0 + (case length (f c) of 0 \<Rightarrow> 0 | Suc k \<Rightarrow> k) = 0"
have "(Suc (Suc (Suc (Suc (Suc (Suc (case length (f c) of 0 => 0 | Suc k => k)))))) \<le> Suc 0)"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_numerals\<close>]\<close>) fact
next
assume "6 + length l2 = 0" have "Suc (Suc (Suc (Suc (Suc (Suc (length l1 + length l2)))))) \<le> length l1"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_numerals\<close>]\<close>) fact
next
assume "5 + length l3 = 0"
have "( (Suc (Suc (Suc (Suc (Suc (length (compT P E A ST mxr e) + length l3)))))) \<le> length (compT P E A ST mxr e))"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_numerals\<close>]\<close>) fact
next
assume "5 + length (compT P E (A \<union> A' e) ST mxr c) = 0"
have "( (Suc (Suc (Suc (Suc (Suc (length (compT P E A ST mxr e) + length (compT P E (A Un A' e) ST mxr c))))))) \<le> length (compT P E A ST mxr e))"
- by (tactic \<open>test @{context} [@{simproc natle_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natle_cancel_numerals\<close>]\<close>) fact
}
end
@@ -576,58 +576,58 @@
fix c e i j k l oo u uu vv v w x y z zz w' y' z' :: "nat"
{
assume "i + (j + k) - 3 * Suc 0 = y" have "(i + j + 12 + k) - 15 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "7 * Suc 0 + (i + (j + k)) - 0 = y" have "(i + j + 12 + k) - 5 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "u - Suc 0 * Suc 0 = y" have "Suc u - 2 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * Suc 0 + u - 0 = y" have "Suc (Suc (Suc u)) - 2 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "Suc 0 * Suc 0 + (i + (j + k)) - 0 = y"
have "(i + j + 2 + k) - 1 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "i + (j + k) - Suc 0 * Suc 0 = y"
have "(i + j + 1 + k) - 2 = y"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "2 * x + y - 2 * (u * v) = w"
have "(2*x + (u*v) + y) - v*3*u = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "2 * x * u * v + (5 + y) - 0 = w"
have "(2*x*u*v + 5 + (u*v)*4 + y) - v*u*4 = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "3 * (u * v) + (2 * x * u * v + y) - 0 = w"
have "(2*x*u*v + (u*v)*4 + y) - v*u = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "3 * u + (2 + (2 * x * u * v + y)) - 0 = w"
have "Suc (Suc (2*x*u*v + u*4 + y)) - u = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "Suc (Suc 0 * (u * v)) - 0 = w"
have "Suc ((u*v)*4) - v*3*u = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "2 - 0 = w" have "Suc (Suc ((u*v)*3)) - v*3*u = w"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "17 * Suc 0 + (i + (j + k)) - (u + y) = zz"
have "(i + j + 32 + k) - (u + 15 + y) = zz"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
next
assume "u + y - 0 = v" have "Suc (Suc (Suc (Suc (Suc (u + y))))) - 5 = v"
- by (tactic \<open>test @{context} [@{simproc natdiff_cancel_numerals}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>natdiff_cancel_numerals\<close>]\<close>) fact
}
end
-subsection \<open>Factor-cancellation simprocs for type @{typ nat}\<close>
+subsection \<open>Factor-cancellation simprocs for type \<^typ>\<open>nat\<close>\<close>
text \<open>\<open>nat_eq_cancel_factor\<close>, \<open>nat_less_cancel_factor\<close>,
\<open>nat_le_cancel_factor\<close>, \<open>nat_divide_cancel_factor\<close>, and
@@ -637,90 +637,90 @@
fix a b c d k x y uu :: nat
{
assume "k = 0 \<or> x = y" have "x*k = k*y"
- by (tactic \<open>test @{context} [@{simproc nat_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_eq_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> Suc 0 = y" have "k = k*y"
- by (tactic \<open>test @{context} [@{simproc nat_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_eq_cancel_factor\<close>]\<close>) fact
next
assume "b = 0 \<or> a * c = Suc 0" have "a*(b*c) = b"
- by (tactic \<open>test @{context} [@{simproc nat_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_eq_cancel_factor\<close>]\<close>) fact
next
assume "a = 0 \<or> b = 0 \<or> c = d * x" have "a*(b*c) = d*b*(x*a)"
- by (tactic \<open>test @{context} [@{simproc nat_eq_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_eq_cancel_factor\<close>]\<close>) fact
next
assume "0 < k \<and> x < y" have "x*k < k*y"
- by (tactic \<open>test @{context} [@{simproc nat_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < k \<and> Suc 0 < y" have "k < k*y"
- by (tactic \<open>test @{context} [@{simproc nat_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < b \<and> a * c < Suc 0" have "a*(b*c) < b"
- by (tactic \<open>test @{context} [@{simproc nat_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < a \<and> 0 < b \<and> c < d * x" have "a*(b*c) < d*b*(x*a)"
- by (tactic \<open>test @{context} [@{simproc nat_less_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_less_cancel_factor\<close>]\<close>) fact
next
assume "0 < k \<longrightarrow> x \<le> y" have "x*k \<le> k*y"
- by (tactic \<open>test @{context} [@{simproc nat_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_le_cancel_factor\<close>]\<close>) fact
next
assume "0 < k \<longrightarrow> Suc 0 \<le> y" have "k \<le> k*y"
- by (tactic \<open>test @{context} [@{simproc nat_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_le_cancel_factor\<close>]\<close>) fact
next
assume "0 < b \<longrightarrow> a * c \<le> Suc 0" have "a*(b*c) \<le> b"
- by (tactic \<open>test @{context} [@{simproc nat_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_le_cancel_factor\<close>]\<close>) fact
next
assume "0 < a \<longrightarrow> 0 < b \<longrightarrow> c \<le> d * x" have "a*(b*c) \<le> d*b*(x*a)"
- by (tactic \<open>test @{context} [@{simproc nat_le_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_le_cancel_factor\<close>]\<close>) fact
next
assume "(if k = 0 then 0 else x div y) = uu" have "(x*k) div (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc nat_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_div_cancel_factor\<close>]\<close>) fact
next
assume "(if k = 0 then 0 else Suc 0 div y) = uu" have "k div (k*y) = uu"
- by (tactic \<open>test @{context} [@{simproc nat_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_div_cancel_factor\<close>]\<close>) fact
next
assume "(if b = 0 then 0 else a * c) = uu" have "(a*(b*c)) div (b) = uu"
- by (tactic \<open>test @{context} [@{simproc nat_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_div_cancel_factor\<close>]\<close>) fact
next
assume "(if a = 0 then 0 else if b = 0 then 0 else c div (d * x)) = uu"
have "(a*(b*c)) div (d*b*(x*a)) = uu"
- by (tactic \<open>test @{context} [@{simproc nat_div_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_div_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> x dvd y" have "(x*k) dvd (k*y)"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_factor\<close>]\<close>) fact
next
assume "k = 0 \<or> Suc 0 dvd y" have "k dvd (k*y)"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_factor\<close>]\<close>) fact
next
assume "b = 0 \<or> a * c dvd Suc 0" have "(a*(b*c)) dvd (b)"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_factor\<close>]\<close>) fact
next
assume "b = 0 \<or> Suc 0 dvd a * c" have "b dvd (a*(b*c))"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_factor\<close>]\<close>) fact
next
assume "a = 0 \<or> b = 0 \<or> c dvd d * x" have "(a*(b*c)) dvd (d*b*(x*a))"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_factor\<close>]\<close>) fact
}
end
-subsection \<open>Numeral-cancellation simprocs for type @{typ nat}\<close>
+subsection \<open>Numeral-cancellation simprocs for type \<^typ>\<open>nat\<close>\<close>
notepad begin
fix x y z :: nat
{
assume "3 * x = 4 * y" have "9*x = 12 * y"
- by (tactic \<open>test @{context} [@{simproc nat_eq_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_eq_cancel_numeral_factor\<close>]\<close>) fact
next
assume "3 * x < 4 * y" have "9*x < 12 * y"
- by (tactic \<open>test @{context} [@{simproc nat_less_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_less_cancel_numeral_factor\<close>]\<close>) fact
next
assume "3 * x \<le> 4 * y" have "9*x \<le> 12 * y"
- by (tactic \<open>test @{context} [@{simproc nat_le_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_le_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(3 * x) div (4 * y) = z" have "(9*x) div (12 * y) = z"
- by (tactic \<open>test @{context} [@{simproc nat_div_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_div_cancel_numeral_factor\<close>]\<close>) fact
next
assume "(3 * x) dvd (4 * y)" have "(9*x) dvd (12 * y)"
- by (tactic \<open>test @{context} [@{simproc nat_dvd_cancel_numeral_factor}]\<close>) fact
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>nat_dvd_cancel_numeral_factor\<close>]\<close>) fact
}
end
@@ -728,39 +728,39 @@
notepad begin
have "(10::int) div 3 = 3"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(10::int) mod 3 = 1"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(10::int) div -3 = -4"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(10::int) mod -3 = -2"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(-10::int) div 3 = -4"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(-10::int) mod 3 = 2"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(-10::int) div -3 = 3"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(-10::int) mod -3 = -1"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(8452::int) mod 3 = 1"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(59485::int) div 434 = 137"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "(1000006::int) mod 10 = 6"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "10000000 div 2 = (5000000::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "10000001 mod 2 = (1::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "10000055 div 32 = (312501::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "10000055 mod 32 = (23::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "100094 div 144 = (695::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
have "100094 mod 144 = (14::int)"
- by (tactic \<open>test @{context} [@{simproc numeral_divmod}]\<close>)
+ by (tactic \<open>test \<^context> [\<^simproc>\<open>numeral_divmod\<close>]\<close>)
end
end
--- a/src/HOL/ex/Sorting_Algorithms_Examples.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Sorting_Algorithms_Examples.thy Sat Jan 05 17:24:33 2019 +0100
@@ -20,14 +20,14 @@
ML \<open>
local
- val term_of_int_list = HOLogic.mk_list @{typ int}
- o map (HOLogic.mk_number @{typ int} o @{code integer_of_int});
+ val term_of_int_list = HOLogic.mk_list \<^typ>\<open>int\<close>
+ o map (HOLogic.mk_number \<^typ>\<open>int\<close> o @{code integer_of_int});
- fun raw_sort (ctxt, ct, ks) = Thm.mk_binop @{cterm "Pure.eq :: int list \<Rightarrow> int list \<Rightarrow> prop"}
+ fun raw_sort (ctxt, ct, ks) = Thm.mk_binop \<^cterm>\<open>Pure.eq :: int list \<Rightarrow> int list \<Rightarrow> prop\<close>
ct (Thm.cterm_of ctxt (term_of_int_list ks));
val (_, sort_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding sort}, raw_sort)));
+ (Thm.add_oracle (\<^binding>\<open>sort\<close>, raw_sort)));
in
@@ -43,22 +43,22 @@
declare [[code_timing]]
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "sort int_abs_reversed example_1"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>sort int_abs_reversed example_1\<close>\<close>
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "quicksort int_abs_reversed example_1"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>quicksort int_abs_reversed example_1\<close>\<close>
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "mergesort int_abs_reversed example_1"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>mergesort int_abs_reversed example_1\<close>\<close>
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "sort int_abs_reversed example_2"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>sort int_abs_reversed example_2\<close>\<close>
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "quicksort int_abs_reversed example_2"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>quicksort int_abs_reversed example_2\<close>\<close>
-ML_val \<open>sort_int_abs_reversed_conv @{context}
- @{cterm "mergesort int_abs_reversed example_2"}\<close>
+ML_val \<open>sort_int_abs_reversed_conv \<^context>
+ \<^cterm>\<open>mergesort int_abs_reversed example_2\<close>\<close>
end
--- a/src/HOL/ex/Sum_of_Powers.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Sum_of_Powers.thy Sat Jan 05 17:24:33 2019 +0100
@@ -5,7 +5,7 @@
imports Complex_Main
begin
-subsection \<open>Additions to @{theory HOL.Binomial} Theory\<close>
+subsection \<open>Additions to \<^theory>\<open>HOL.Binomial\<close> Theory\<close>
lemma (in field_char_0) one_plus_of_nat_neq_zero [simp]:
"1 + of_nat n \<noteq> 0"
--- a/src/HOL/ex/Termination.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Termination.thy Sat Jan 05 17:24:33 2019 +0100
@@ -10,7 +10,7 @@
begin
subsection \<open>Manually giving termination relations using \<open>relation\<close> and
-@{term measure}\<close>
+\<^term>\<open>measure\<close>\<close>
function sum :: "nat \<Rightarrow> nat \<Rightarrow> nat"
where
--- a/src/HOL/ex/ThreeDivides.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/ThreeDivides.thy Sat Jan 05 17:24:33 2019 +0100
@@ -57,7 +57,7 @@
directly to the decimal expansion of the natural numbers.\<close>
text \<open>Here we show that the first statement in the informal proof is
-true for all natural numbers. Note we are using @{term "D i"} to
+true for all natural numbers. Note we are using \<^term>\<open>D i\<close> to
denote the $i$'th element in a sequence of numbers.\<close>
lemma digit_diff_split:
@@ -103,7 +103,7 @@
text \<open>
We now present the final theorem of this section. For any
-sequence of numbers (defined by a function @{term "D :: (nat\<Rightarrow>nat)"}),
+sequence of numbers (defined by a function \<^term>\<open>D :: (nat\<Rightarrow>nat)\<close>),
we show that 3 divides the expansive sum $\sum{(D\;x)*10^x}$ over $x$
if and only if 3 divides the sum of the individual numbers
$\sum{D\;x}$.
@@ -115,7 +115,7 @@
have mono: "(\<Sum>x<nd. D x) \<le> (\<Sum>x<nd. D x * 10^x)"
by (rule sum_mono) simp
txt \<open>This lets us form the term
- @{term "(\<Sum>x<nd. D x * 10^x) - (\<Sum>x<nd. D x)"}\<close>
+ \<^term>\<open>(\<Sum>x<nd. D x * 10^x) - (\<Sum>x<nd. D x)\<close>\<close>
{
assume "3 dvd (\<Sum>x<nd. D x)"
--- a/src/HOL/ex/Tree23.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/Tree23.thy Sat Jan 05 17:24:33 2019 +0100
@@ -207,11 +207,9 @@
lemma bal_iff_full: "bal t \<longleftrightarrow> (\<exists>n. full n t)"
by (auto elim!: bal_imp_full full_imp_bal)
-text \<open>The @{term "add0"} function either preserves the height of the
-tree, or increases it by one. The constructor returned by the @{term
-"add"} function determines which: A return value of the form @{term
-"Stay t"} indicates that the height will be the same. A value of the
-form @{term "Sprout l p r"} indicates an increase in height.\<close>
+text \<open>The \<^term>\<open>add0\<close> function either preserves the height of the
+tree, or increases it by one. The constructor returned by the \<^term>\<open>add\<close> function determines which: A return value of the form \<^term>\<open>Stay t\<close> indicates that the height will be the same. A value of the
+form \<^term>\<open>Sprout l p r\<close> indicates an increase in height.\<close>
primrec gfull :: "nat \<Rightarrow> 'a growth \<Rightarrow> bool" where
"gfull n (Stay t) \<longleftrightarrow> full n t" |
@@ -220,7 +218,7 @@
lemma gfull_add: "full n t \<Longrightarrow> gfull n (add k y t)"
by (induct set: full, auto split: ord.split growth.split)
-text \<open>The @{term "add0"} operation preserves balance.\<close>
+text \<open>The \<^term>\<open>add0\<close> operation preserves balance.\<close>
lemma bal_add0: "bal t \<Longrightarrow> bal (add0 k y t)"
unfolding bal_iff_full add0_def
@@ -230,7 +228,7 @@
apply (auto intro: full.intros)
done
-text \<open>The @{term "add0"} operation preserves order.\<close>
+text \<open>The \<^term>\<open>add0\<close> operation preserves order.\<close>
lemma ord_cases:
fixes a b :: int obtains
@@ -279,7 +277,7 @@
lemma ord0_add0: "ord0 t \<Longrightarrow> ord0 (add0 k y t)"
by (simp add: ord0_def ord'_add0)
-text \<open>The @{term "del"} function preserves balance.\<close>
+text \<open>The \<^term>\<open>del\<close> function preserves balance.\<close>
lemma del_extra_simps:
"l \<noteq> Empty \<or> r \<noteq> Empty \<Longrightarrow>
--- a/src/HOL/ex/While_Combinator_Example.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/While_Combinator_Example.thy Sat Jan 05 17:24:33 2019 +0100
@@ -9,7 +9,7 @@
imports "HOL-Library.While_Combinator"
begin
-text \<open>Computation of the @{term lfp} on finite sets via
+text \<open>Computation of the \<^term>\<open>lfp\<close> on finite sets via
iteration.\<close>
theorem lfp_conv_while:
--- a/src/HOL/ex/veriT_Preprocessing.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/HOL/ex/veriT_Preprocessing.thy Sat Jan 05 17:24:33 2019 +0100
@@ -47,7 +47,7 @@
val left_prems = map2 (curry Ctr_Sugar_Util.mk_Trueprop_eq) ts us;
val right_prem = Ctr_Sugar_Util.mk_Trueprop_eq (list_comb (p, us), q);
val concl = Ctr_Sugar_Util.mk_Trueprop_eq
- (Const (@{const_name Let}, tuple_T --> lambda_T --> B) $ tuple_t $ lambda_t, q);
+ (Const (\<^const_name>\<open>Let\<close>, tuple_T --> lambda_T --> B) $ tuple_t $ lambda_t, q);
val goal = Logic.list_implies (left_prems @ [right_prem], concl);
val vars = Variable.add_free_names ctxt goal [];
@@ -69,21 +69,21 @@
| Let of term list;
fun str_of_rule_name Refl = "Refl"
- | str_of_rule_name (Taut th) = "Taut[" ^ @{make_string} th ^ "]"
- | str_of_rule_name (Trans t) = "Trans[" ^ Syntax.string_of_term @{context} t ^ "]"
+ | str_of_rule_name (Taut th) = "Taut[" ^ \<^make_string> th ^ "]"
+ | str_of_rule_name (Trans t) = "Trans[" ^ Syntax.string_of_term \<^context> t ^ "]"
| str_of_rule_name Cong = "Cong"
| str_of_rule_name Bind = "Bind"
| str_of_rule_name Sko_Ex = "Sko_Ex"
| str_of_rule_name Sko_All = "Sko_All"
| str_of_rule_name (Let ts) =
- "Let[" ^ commas (map (Syntax.string_of_term @{context}) ts) ^ "]";
+ "Let[" ^ commas (map (Syntax.string_of_term \<^context>) ts) ^ "]";
datatype node = N of rule_name * node list;
fun lambda_count (Abs (_, _, t)) = lambda_count t + 1
| lambda_count ((t as Abs _) $ _) = lambda_count t - 1
- | lambda_count ((t as Const (@{const_name case_prod}, _) $ _) $ _) = lambda_count t - 1
- | lambda_count (Const (@{const_name case_prod}, _) $ t) = lambda_count t - 1
+ | lambda_count ((t as Const (\<^const_name>\<open>case_prod\<close>, _) $ _) $ _) = lambda_count t - 1
+ | lambda_count (Const (\<^const_name>\<open>case_prod\<close>, _) $ t) = lambda_count t - 1
| lambda_count _ = 0;
fun zoom apply =
@@ -96,19 +96,19 @@
let val (t', u') = zo 1 (T :: bound_Ts) (t, u) in
(t' $ arg, u')
end
- | zo 0 bound_Ts ((t as Const (@{const_name case_prod}, _) $ _) $ arg, u) =
+ | zo 0 bound_Ts ((t as Const (\<^const_name>\<open>case_prod\<close>, _) $ _) $ arg, u) =
let val (t', u') = zo 1 bound_Ts (t, u) in
(t' $ arg, u')
end
| zo 0 bound_Ts tu = apply bound_Ts tu
- | zo n bound_Ts (Const (@{const_name case_prod},
- Type (@{type_name fun}, [Type (@{type_name fun}, [A, Type (@{type_name fun}, [B, _])]),
- Type (@{type_name fun}, [AB, _])])) $ t, u) =
+ | zo n bound_Ts (Const (\<^const_name>\<open>case_prod\<close>,
+ Type (\<^type_name>\<open>fun\<close>, [Type (\<^type_name>\<open>fun\<close>, [A, Type (\<^type_name>\<open>fun\<close>, [B, _])]),
+ Type (\<^type_name>\<open>fun\<close>, [AB, _])])) $ t, u) =
let
val (t', u') = zo (n + 1) bound_Ts (t, u);
val C = range_type (range_type (fastype_of t'));
in
- (Const (@{const_name case_prod}, (A --> B --> C) --> AB --> C) $ t', u')
+ (Const (\<^const_name>\<open>case_prod\<close>, (A --> B --> C) --> AB --> C) $ t', u')
end
| zo n bound_Ts (Abs (s, T, t), u) =
let val (t', u') = zo (n - 1) (T :: bound_Ts) (t, u) in
@@ -130,26 +130,26 @@
fun apply_Bind (lhs, rhs) =
(case (lhs, rhs) of
- (Const (@{const_name All}, _) $ Abs (_, T, t), Const (@{const_name All}, _) $ Abs (s, U, u)) =>
+ (Const (\<^const_name>\<open>All\<close>, _) $ Abs (_, T, t), Const (\<^const_name>\<open>All\<close>, _) $ Abs (s, U, u)) =>
(Abs (s, T, t), Abs (s, U, u))
- | (Const (@{const_name Ex}, _) $ t, Const (@{const_name Ex}, _) $ u) => (t, u)
+ | (Const (\<^const_name>\<open>Ex\<close>, _) $ t, Const (\<^const_name>\<open>Ex\<close>, _) $ u) => (t, u)
| _ => raise TERM ("apply_Bind", [lhs, rhs]));
fun apply_Sko_Ex (lhs, rhs) =
(case lhs of
- Const (@{const_name Ex}, _) $ (t as Abs (_, T, _)) =>
+ Const (\<^const_name>\<open>Ex\<close>, _) $ (t as Abs (_, T, _)) =>
(t $ (HOLogic.choice_const T $ t), rhs)
| _ => raise TERM ("apply_Sko_Ex", [lhs]));
fun apply_Sko_All (lhs, rhs) =
(case lhs of
- Const (@{const_name All}, _) $ (t as Abs (s, T, body)) =>
+ Const (\<^const_name>\<open>All\<close>, _) $ (t as Abs (s, T, body)) =>
(t $ (HOLogic.choice_const T $ Abs (s, T, HOLogic.mk_not body)), rhs)
| _ => raise TERM ("apply_Sko_All", [lhs]));
fun apply_Let_left ts j (lhs, _) =
(case lhs of
- Const (@{const_name Let}, _) $ t $ _ =>
+ Const (\<^const_name>\<open>Let\<close>, _) $ t $ _ =>
let val ts0 = HOLogic.strip_tuple t in
(nth ts0 j, nth ts j)
end
@@ -158,7 +158,7 @@
fun apply_Let_right ts bound_Ts (lhs, rhs) =
let val t' = mk_tuple1 bound_Ts ts in
(case lhs of
- Const (@{const_name Let}, _) $ _ $ u => (u $ t', rhs)
+ Const (\<^const_name>\<open>Let\<close>, _) $ _ $ u => (u $ t', rhs)
| _ => raise TERM ("apply_Let_right", [lhs, rhs]))
end;
@@ -167,7 +167,7 @@
val goal = HOLogic.mk_Trueprop (HOLogic.mk_eq lrhs);
val ary = length prems;
- val _ = warning (Syntax.string_of_term @{context} goal);
+ val _ = warning (Syntax.string_of_term \<^context> goal);
val _ = warning (str_of_rule_name rule_name);
val parents =
@@ -219,163 +219,163 @@
ML \<open>
val proof0 =
- ((@{term "\<exists>x :: nat. p x"},
- @{term "p (SOME x :: nat. p x)"}),
+ ((\<^term>\<open>\<exists>x :: nat. p x\<close>,
+ \<^term>\<open>p (SOME x :: nat. p x)\<close>),
N (Sko_Ex, [N (Refl, [])]));
-reconstruct_proof @{context} proof0;
+reconstruct_proof \<^context> proof0;
\<close>
ML \<open>
val proof1 =
- ((@{term "\<not> (\<forall>x :: nat. \<exists>y :: nat. p x y)"},
- @{term "\<not> (\<exists>y :: nat. p (SOME x :: nat. \<not> (\<exists>y :: nat. p x y)) y)"}),
+ ((\<^term>\<open>\<not> (\<forall>x :: nat. \<exists>y :: nat. p x y)\<close>,
+ \<^term>\<open>\<not> (\<exists>y :: nat. p (SOME x :: nat. \<not> (\<exists>y :: nat. p x y)) y)\<close>),
N (Cong, [N (Sko_All, [N (Bind, [N (Refl, [])])])]));
-reconstruct_proof @{context} proof1;
+reconstruct_proof \<^context> proof1;
\<close>
ML \<open>
val proof2 =
- ((@{term "\<forall>x :: nat. \<exists>y :: nat. \<exists>z :: nat. p x y z"},
- @{term "\<forall>x :: nat. p x (SOME y :: nat. \<exists>z :: nat. p x y z)
- (SOME z :: nat. p x (SOME y :: nat. \<exists>z :: nat. p x y z) z)"}),
+ ((\<^term>\<open>\<forall>x :: nat. \<exists>y :: nat. \<exists>z :: nat. p x y z\<close>,
+ \<^term>\<open>\<forall>x :: nat. p x (SOME y :: nat. \<exists>z :: nat. p x y z)
+ (SOME z :: nat. p x (SOME y :: nat. \<exists>z :: nat. p x y z) z)\<close>),
N (Bind, [N (Sko_Ex, [N (Sko_Ex, [N (Refl, [])])])]));
-reconstruct_proof @{context} proof2
+reconstruct_proof \<^context> proof2
\<close>
ML \<open>
val proof3 =
- ((@{term "\<forall>x :: nat. \<exists>x :: nat. \<exists>x :: nat. p x x x"},
- @{term "\<forall>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x)"}),
+ ((\<^term>\<open>\<forall>x :: nat. \<exists>x :: nat. \<exists>x :: nat. p x x x\<close>,
+ \<^term>\<open>\<forall>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x)\<close>),
N (Bind, [N (Sko_Ex, [N (Sko_Ex, [N (Refl, [])])])]));
-reconstruct_proof @{context} proof3
+reconstruct_proof \<^context> proof3
\<close>
ML \<open>
val proof4 =
- ((@{term "\<forall>x :: nat. \<exists>x :: nat. \<exists>x :: nat. p x x x"},
- @{term "\<forall>x :: nat. \<exists>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x)"}),
+ ((\<^term>\<open>\<forall>x :: nat. \<exists>x :: nat. \<exists>x :: nat. p x x x\<close>,
+ \<^term>\<open>\<forall>x :: nat. \<exists>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x)\<close>),
N (Bind, [N (Bind, [N (Sko_Ex, [N (Refl, [])])])]));
-reconstruct_proof @{context} proof4
+reconstruct_proof \<^context> proof4
\<close>
ML \<open>
val proof5 =
- ((@{term "\<forall>x :: nat. q \<and> (\<exists>x :: nat. \<exists>x :: nat. p x x x)"},
- @{term "\<forall>x :: nat. q \<and>
- (\<exists>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x))"}),
+ ((\<^term>\<open>\<forall>x :: nat. q \<and> (\<exists>x :: nat. \<exists>x :: nat. p x x x)\<close>,
+ \<^term>\<open>\<forall>x :: nat. q \<and>
+ (\<exists>x :: nat. p (SOME x :: nat. p x x x) (SOME x. p x x x) (SOME x. p x x x))\<close>),
N (Bind, [N (Cong, [N (Refl, []), N (Bind, [N (Sko_Ex, [N (Refl, [])])])])]));
-reconstruct_proof @{context} proof5
+reconstruct_proof \<^context> proof5
\<close>
ML \<open>
val proof6 =
- ((@{term "\<not> (\<forall>x :: nat. p \<and> (\<exists>x :: nat. \<forall>x :: nat. q x x))"},
- @{term "\<not> (\<forall>x :: nat. p \<and>
- (\<exists>x :: nat. q (SOME x :: nat. \<not> q x x) (SOME x. \<not> q x x)))"}),
+ ((\<^term>\<open>\<not> (\<forall>x :: nat. p \<and> (\<exists>x :: nat. \<forall>x :: nat. q x x))\<close>,
+ \<^term>\<open>\<not> (\<forall>x :: nat. p \<and>
+ (\<exists>x :: nat. q (SOME x :: nat. \<not> q x x) (SOME x. \<not> q x x)))\<close>),
N (Cong, [N (Bind, [N (Cong, [N (Refl, []), N (Bind, [N (Sko_All, [N (Refl, [])])])])])]));
-reconstruct_proof @{context} proof6
+reconstruct_proof \<^context> proof6
\<close>
ML \<open>
val proof7 =
- ((@{term "\<not> \<not> (\<exists>x. p x)"},
- @{term "\<not> \<not> p (SOME x. p x)"}),
+ ((\<^term>\<open>\<not> \<not> (\<exists>x. p x)\<close>,
+ \<^term>\<open>\<not> \<not> p (SOME x. p x)\<close>),
N (Cong, [N (Cong, [N (Sko_Ex, [N (Refl, [])])])]));
-reconstruct_proof @{context} proof7
+reconstruct_proof \<^context> proof7
\<close>
ML \<open>
val proof8 =
- ((@{term "\<not> \<not> (let x = Suc x in x = 0)"},
- @{term "\<not> \<not> Suc x = 0"}),
- N (Cong, [N (Cong, [N (Let [@{term "Suc x"}], [N (Refl, []), N (Refl, [])])])]));
+ ((\<^term>\<open>\<not> \<not> (let x = Suc x in x = 0)\<close>,
+ \<^term>\<open>\<not> \<not> Suc x = 0\<close>),
+ N (Cong, [N (Cong, [N (Let [\<^term>\<open>Suc x\<close>], [N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof8
+reconstruct_proof \<^context> proof8
\<close>
ML \<open>
val proof9 =
- ((@{term "\<not> (let x = Suc x in x = 0)"},
- @{term "\<not> Suc x = 0"}),
- N (Cong, [N (Let [@{term "Suc x"}], [N (Refl, []), N (Refl, [])])]));
+ ((\<^term>\<open>\<not> (let x = Suc x in x = 0)\<close>,
+ \<^term>\<open>\<not> Suc x = 0\<close>),
+ N (Cong, [N (Let [\<^term>\<open>Suc x\<close>], [N (Refl, []), N (Refl, [])])]));
-reconstruct_proof @{context} proof9
+reconstruct_proof \<^context> proof9
\<close>
ML \<open>
val proof10 =
- ((@{term "\<exists>x :: nat. p (x + 0)"},
- @{term "\<exists>x :: nat. p x"}),
+ ((\<^term>\<open>\<exists>x :: nat. p (x + 0)\<close>,
+ \<^term>\<open>\<exists>x :: nat. p x\<close>),
N (Bind, [N (Cong, [N (Taut @{thm add_0_right}, [])])]));
-reconstruct_proof @{context} proof10;
+reconstruct_proof \<^context> proof10;
\<close>
ML \<open>
val proof11 =
- ((@{term "\<not> (let (x, y) = (Suc y, Suc x) in y = 0)"},
- @{term "\<not> Suc x = 0"}),
- N (Cong, [N (Let [@{term "Suc y"}, @{term "Suc x"}], [N (Refl, []), N (Refl, []),
+ ((\<^term>\<open>\<not> (let (x, y) = (Suc y, Suc x) in y = 0)\<close>,
+ \<^term>\<open>\<not> Suc x = 0\<close>),
+ N (Cong, [N (Let [\<^term>\<open>Suc y\<close>, \<^term>\<open>Suc x\<close>], [N (Refl, []), N (Refl, []),
N (Refl, [])])]));
-reconstruct_proof @{context} proof11
+reconstruct_proof \<^context> proof11
\<close>
ML \<open>
val proof12 =
- ((@{term "\<not> (let (x, y) = (Suc y, Suc x); (u, v, w) = (y, x, y) in w = 0)"},
- @{term "\<not> Suc x = 0"}),
- N (Cong, [N (Let [@{term "Suc y"}, @{term "Suc x"}], [N (Refl, []), N (Refl, []),
- N (Let [@{term "Suc x"}, @{term "Suc y"}, @{term "Suc x"}],
+ ((\<^term>\<open>\<not> (let (x, y) = (Suc y, Suc x); (u, v, w) = (y, x, y) in w = 0)\<close>,
+ \<^term>\<open>\<not> Suc x = 0\<close>),
+ N (Cong, [N (Let [\<^term>\<open>Suc y\<close>, \<^term>\<open>Suc x\<close>], [N (Refl, []), N (Refl, []),
+ N (Let [\<^term>\<open>Suc x\<close>, \<^term>\<open>Suc y\<close>, \<^term>\<open>Suc x\<close>],
[N (Refl, []), N (Refl, []), N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof12
+reconstruct_proof \<^context> proof12
\<close>
ML \<open>
val proof13 =
- ((@{term "\<not> \<not> (let x = Suc x in x = 0)"},
- @{term "\<not> \<not> Suc x = 0"}),
- N (Cong, [N (Cong, [N (Let [@{term "Suc x"}], [N (Refl, []), N (Refl, [])])])]));
+ ((\<^term>\<open>\<not> \<not> (let x = Suc x in x = 0)\<close>,
+ \<^term>\<open>\<not> \<not> Suc x = 0\<close>),
+ N (Cong, [N (Cong, [N (Let [\<^term>\<open>Suc x\<close>], [N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof13
+reconstruct_proof \<^context> proof13
\<close>
ML \<open>
val proof14 =
- ((@{term "let (x, y) = (f (a :: nat), b :: nat) in x > a"},
- @{term "f (a :: nat) > a"}),
- N (Let [@{term "f (a :: nat) :: nat"}, @{term "b :: nat"}],
+ ((\<^term>\<open>let (x, y) = (f (a :: nat), b :: nat) in x > a\<close>,
+ \<^term>\<open>f (a :: nat) > a\<close>),
+ N (Let [\<^term>\<open>f (a :: nat) :: nat\<close>, \<^term>\<open>b :: nat\<close>],
[N (Cong, [N (Refl, [])]), N (Refl, []), N (Refl, [])]));
-reconstruct_proof @{context} proof14
+reconstruct_proof \<^context> proof14
\<close>
ML \<open>
val proof15 =
- ((@{term "let x = (let y = g (z :: nat) in f (y :: nat)) in x = Suc 0"},
- @{term "f (g (z :: nat) :: nat) = Suc 0"}),
- N (Let [@{term "f (g (z :: nat) :: nat) :: nat"}],
- [N (Let [@{term "g (z :: nat) :: nat"}], [N (Refl, []), N (Refl, [])]), N (Refl, [])]));
+ ((\<^term>\<open>let x = (let y = g (z :: nat) in f (y :: nat)) in x = Suc 0\<close>,
+ \<^term>\<open>f (g (z :: nat) :: nat) = Suc 0\<close>),
+ N (Let [\<^term>\<open>f (g (z :: nat) :: nat) :: nat\<close>],
+ [N (Let [\<^term>\<open>g (z :: nat) :: nat\<close>], [N (Refl, []), N (Refl, [])]), N (Refl, [])]));
-reconstruct_proof @{context} proof15
+reconstruct_proof \<^context> proof15
\<close>
ML \<open>
val proof16 =
- ((@{term "a > Suc b"},
- @{term "a > Suc b"}),
- N (Trans @{term "a > Suc b"}, [N (Refl, []), N (Refl, [])]));
+ ((\<^term>\<open>a > Suc b\<close>,
+ \<^term>\<open>a > Suc b\<close>),
+ N (Trans \<^term>\<open>a > Suc b\<close>, [N (Refl, []), N (Refl, [])]));
-reconstruct_proof @{context} proof16
+reconstruct_proof \<^context> proof16
\<close>
thm Suc_1
@@ -384,98 +384,98 @@
ML \<open>
val proof17 =
- ((@{term "2 :: nat"},
- @{term "Suc (Suc 0) :: nat"}),
- N (Trans @{term "Suc 1"}, [N (Taut @{thm Suc_1[symmetric]}, []), N (Cong,
+ ((\<^term>\<open>2 :: nat\<close>,
+ \<^term>\<open>Suc (Suc 0) :: nat\<close>),
+ N (Trans \<^term>\<open>Suc 1\<close>, [N (Taut @{thm Suc_1[symmetric]}, []), N (Cong,
[N (Taut @{thm One_nat_def}, [])])]));
-reconstruct_proof @{context} proof17
+reconstruct_proof \<^context> proof17
\<close>
ML \<open>
val proof18 =
- ((@{term "let x = a in let y = b in Suc x + y"},
- @{term "Suc a + b"}),
- N (Trans @{term "let y = b in Suc a + y"},
- [N (Let [@{term "a :: nat"}], [N (Refl, []), N (Refl, [])]),
- N (Let [@{term "b :: nat"}], [N (Refl, []), N (Refl, [])])]));
+ ((\<^term>\<open>let x = a in let y = b in Suc x + y\<close>,
+ \<^term>\<open>Suc a + b\<close>),
+ N (Trans \<^term>\<open>let y = b in Suc a + y\<close>,
+ [N (Let [\<^term>\<open>a :: nat\<close>], [N (Refl, []), N (Refl, [])]),
+ N (Let [\<^term>\<open>b :: nat\<close>], [N (Refl, []), N (Refl, [])])]));
-reconstruct_proof @{context} proof18
+reconstruct_proof \<^context> proof18
\<close>
ML \<open>
val proof19 =
- ((@{term "\<forall>x. let x = f (x :: nat) :: nat in g x"},
- @{term "\<forall>x. g (f (x :: nat) :: nat)"}),
- N (Bind, [N (Let [@{term "f :: nat \<Rightarrow> nat"} $ Bound 0],
+ ((\<^term>\<open>\<forall>x. let x = f (x :: nat) :: nat in g x\<close>,
+ \<^term>\<open>\<forall>x. g (f (x :: nat) :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>f :: nat \<Rightarrow> nat\<close> $ Bound 0],
[N (Refl, []), N (Refl, [])])]));
-reconstruct_proof @{context} proof19
+reconstruct_proof \<^context> proof19
\<close>
ML \<open>
val proof20 =
- ((@{term "\<forall>x. let y = Suc 0 in let x = f (x :: nat) :: nat in g x"},
- @{term "\<forall>x. g (f (x :: nat) :: nat)"}),
- N (Bind, [N (Let [@{term "Suc 0"}], [N (Refl, []), N (Let [@{term "f (x :: nat) :: nat"}],
+ ((\<^term>\<open>\<forall>x. let y = Suc 0 in let x = f (x :: nat) :: nat in g x\<close>,
+ \<^term>\<open>\<forall>x. g (f (x :: nat) :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>Suc 0\<close>], [N (Refl, []), N (Let [\<^term>\<open>f (x :: nat) :: nat\<close>],
[N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof20
+reconstruct_proof \<^context> proof20
\<close>
ML \<open>
val proof21 =
- ((@{term "\<forall>x :: nat. let x = f x :: nat in let y = x in p y"},
- @{term "\<forall>z :: nat. p (f z :: nat)"}),
- N (Bind, [N (Let [@{term "f (z :: nat) :: nat"}],
- [N (Refl, []), N (Let [@{term "f (z :: nat) :: nat"}],
+ ((\<^term>\<open>\<forall>x :: nat. let x = f x :: nat in let y = x in p y\<close>,
+ \<^term>\<open>\<forall>z :: nat. p (f z :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>f (z :: nat) :: nat\<close>],
+ [N (Refl, []), N (Let [\<^term>\<open>f (z :: nat) :: nat\<close>],
[N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof21
+reconstruct_proof \<^context> proof21
\<close>
ML \<open>
val proof22 =
- ((@{term "\<forall>x :: nat. let x = f x :: nat in let y = x in p y"},
- @{term "\<forall>x :: nat. p (f x :: nat)"}),
- N (Bind, [N (Let [@{term "f (x :: nat) :: nat"}],
- [N (Refl, []), N (Let [@{term "f (x :: nat) :: nat"}],
+ ((\<^term>\<open>\<forall>x :: nat. let x = f x :: nat in let y = x in p y\<close>,
+ \<^term>\<open>\<forall>x :: nat. p (f x :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>f (x :: nat) :: nat\<close>],
+ [N (Refl, []), N (Let [\<^term>\<open>f (x :: nat) :: nat\<close>],
[N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof22
+reconstruct_proof \<^context> proof22
\<close>
ML \<open>
val proof23 =
- ((@{term "\<forall>x :: nat. let (x, a) = (f x :: nat, 0 ::nat) in let y = x in p y"},
- @{term "\<forall>z :: nat. p (f z :: nat)"}),
- N (Bind, [N (Let [@{term "f (z :: nat) :: nat"}, @{term "0 :: nat"}],
- [N (Refl, []), N (Refl, []), N (Let [@{term "f (z :: nat) :: nat"}],
+ ((\<^term>\<open>\<forall>x :: nat. let (x, a) = (f x :: nat, 0 ::nat) in let y = x in p y\<close>,
+ \<^term>\<open>\<forall>z :: nat. p (f z :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>f (z :: nat) :: nat\<close>, \<^term>\<open>0 :: nat\<close>],
+ [N (Refl, []), N (Refl, []), N (Let [\<^term>\<open>f (z :: nat) :: nat\<close>],
[N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof23
+reconstruct_proof \<^context> proof23
\<close>
ML \<open>
val proof24 =
- ((@{term "\<forall>x :: nat. let (x, a) = (f x :: nat, 0 ::nat) in let y = x in p y"},
- @{term "\<forall>x :: nat. p (f x :: nat)"}),
- N (Bind, [N (Let [@{term "f (x :: nat) :: nat"}, @{term "0 :: nat"}],
- [N (Refl, []), N (Refl, []), N (Let [@{term "f (x :: nat) :: nat"}],
+ ((\<^term>\<open>\<forall>x :: nat. let (x, a) = (f x :: nat, 0 ::nat) in let y = x in p y\<close>,
+ \<^term>\<open>\<forall>x :: nat. p (f x :: nat)\<close>),
+ N (Bind, [N (Let [\<^term>\<open>f (x :: nat) :: nat\<close>, \<^term>\<open>0 :: nat\<close>],
+ [N (Refl, []), N (Refl, []), N (Let [\<^term>\<open>f (x :: nat) :: nat\<close>],
[N (Refl, []), N (Refl, [])])])]));
-reconstruct_proof @{context} proof24
+reconstruct_proof \<^context> proof24
\<close>
ML \<open>
val proof25 =
- ((@{term "let vr0 = vr1 in let vr1 = vr2 in vr0 + vr1 + vr2 :: nat"},
- @{term "vr1 + vr2 + vr2 :: nat"}),
- N (Trans @{term "let vr1a = vr2 in vr1 + vr1a + vr2 :: nat"},
- [N (Let [@{term "vr1 :: nat"}], [N (Refl, []), N (Refl, [])]),
- N (Let [@{term "vr2 :: nat"}], [N (Refl, []), N (Refl, [])])]));
+ ((\<^term>\<open>let vr0 = vr1 in let vr1 = vr2 in vr0 + vr1 + vr2 :: nat\<close>,
+ \<^term>\<open>vr1 + vr2 + vr2 :: nat\<close>),
+ N (Trans \<^term>\<open>let vr1a = vr2 in vr1 + vr1a + vr2 :: nat\<close>,
+ [N (Let [\<^term>\<open>vr1 :: nat\<close>], [N (Refl, []), N (Refl, [])]),
+ N (Let [\<^term>\<open>vr2 :: nat\<close>], [N (Refl, []), N (Refl, [])])]));
-reconstruct_proof @{context} proof25
+reconstruct_proof \<^context> proof25
\<close>
end
--- a/src/Tools/Code/code_runtime.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Tools/Code/code_runtime.ML Sat Jan 05 17:24:33 2019 +0100
@@ -651,7 +651,7 @@
fun ml_computation_check_antiq raw_spec ctxt =
let
- val cTs = insert (op =) (dest_Const @{const holds}) (prep_spec ctxt raw_spec);
+ val cTs = insert (op =) (dest_Const \<^const>\<open>holds\<close>) (prep_spec ctxt raw_spec);
in (print_computation_check ctxt, register_computation cTs \<^typ>\<open>prop\<close> ctxt) end;
end; (*local*)
--- a/src/ZF/Inductive.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/ZF/Inductive.thy Sat Jan 05 17:24:33 2019 +0100
@@ -37,8 +37,8 @@
ML \<open>
structure Lfp =
struct
- val oper = @{const lfp}
- val bnd_mono = @{const bnd_mono}
+ val oper = \<^const>\<open>lfp\<close>
+ val bnd_mono = \<^const>\<open>bnd_mono\<close>
val bnd_monoI = @{thm bnd_monoI}
val subs = @{thm def_lfp_subset}
val Tarski = @{thm def_lfp_unfold}
@@ -47,8 +47,8 @@
structure Standard_Prod =
struct
- val sigma = @{const Sigma}
- val pair = @{const Pair}
+ val sigma = \<^const>\<open>Sigma\<close>
+ val pair = \<^const>\<open>Pair\<close>
val split_name = \<^const_name>\<open>split\<close>
val pair_iff = @{thm Pair_iff}
val split_eq = @{thm split}
@@ -61,10 +61,10 @@
structure Standard_Sum =
struct
- val sum = @{const sum}
- val inl = @{const Inl}
- val inr = @{const Inr}
- val elim = @{const case}
+ val sum = \<^const>\<open>sum\<close>
+ val inl = \<^const>\<open>Inl\<close>
+ val inr = \<^const>\<open>Inr\<close>
+ val elim = \<^const>\<open>case\<close>
val case_inl = @{thm case_Inl}
val case_inr = @{thm case_Inr}
val inl_iff = @{thm Inl_iff}
@@ -84,8 +84,8 @@
structure Gfp =
struct
- val oper = @{const gfp}
- val bnd_mono = @{const bnd_mono}
+ val oper = \<^const>\<open>gfp\<close>
+ val bnd_mono = \<^const>\<open>bnd_mono\<close>
val bnd_monoI = @{thm bnd_monoI}
val subs = @{thm def_gfp_subset}
val Tarski = @{thm def_gfp_unfold}
@@ -94,8 +94,8 @@
structure Quine_Prod =
struct
- val sigma = @{const QSigma}
- val pair = @{const QPair}
+ val sigma = \<^const>\<open>QSigma\<close>
+ val pair = \<^const>\<open>QPair\<close>
val split_name = \<^const_name>\<open>qsplit\<close>
val pair_iff = @{thm QPair_iff}
val split_eq = @{thm qsplit}
@@ -108,10 +108,10 @@
structure Quine_Sum =
struct
- val sum = @{const qsum}
- val inl = @{const QInl}
- val inr = @{const QInr}
- val elim = @{const qcase}
+ val sum = \<^const>\<open>qsum\<close>
+ val inl = \<^const>\<open>QInl\<close>
+ val inr = \<^const>\<open>QInr\<close>
+ val elim = \<^const>\<open>qcase\<close>
val case_inl = @{thm qcase_QInl}
val case_inr = @{thm qcase_QInr}
val inl_iff = @{thm QInl_iff}
--- a/src/ZF/Tools/datatype_package.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/ZF/Tools/datatype_package.ML Sat Jan 05 17:24:33 2019 +0100
@@ -56,7 +56,7 @@
Syntax.string_of_term_global \<^theory>\<open>IFOL\<close> t));
val rec_names = (*nat doesn't have to be added*)
\<^const_name>\<open>nat\<close> :: map (#1 o dest_Const) rec_hds
- val u = if co then @{const QUniv.quniv} else @{const Univ.univ}
+ val u = if co then \<^const>\<open>QUniv.quniv\<close> else \<^const>\<open>Univ.univ\<close>
val cs = (fold o fold) (fn (_, _, _, prems) => prems |> (fold o fold_aterms)
(fn t as Const (a, _) => if member (op =) rec_names a then I else insert (op =) t
| _ => I)) con_ty_lists [];
@@ -88,7 +88,7 @@
(** Define the constructors **)
(*The empty tuple is 0*)
- fun mk_tuple [] = @{const zero}
+ fun mk_tuple [] = \<^const>\<open>zero\<close>
| mk_tuple args = foldr1 (fn (t1, t2) => Pr.pair $ t1 $ t2) args;
fun mk_inject n k u = Balanced_Tree.access
@@ -162,7 +162,7 @@
Non-identifiers (e.g. infixes) get a name of the form f_op_nnn. **)
(*a recursive call for x is the application rec`x *)
- val rec_call = @{const apply} $ Free ("rec", \<^typ>\<open>i\<close>);
+ val rec_call = \<^const>\<open>apply\<close> $ Free ("rec", \<^typ>\<open>i\<close>);
(*look back down the "case args" (which have been reversed) to
determine the de Bruijn index*)
@@ -231,7 +231,7 @@
val recursor_def =
Misc_Legacy.mk_defpair
(recursor_tm,
- @{const Univ.Vrecursor} $
+ \<^const>\<open>Univ.Vrecursor\<close> $
absfree ("rec", \<^typ>\<open>i\<close>) (list_comb (case_const, recursor_cases)));
(* Build the new theory *)
--- a/src/ZF/Tools/inductive_package.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/ZF/Tools/inductive_package.ML Sat Jan 05 17:24:33 2019 +0100
@@ -115,7 +115,7 @@
(*The Part(A,h) terms -- compose injections to make h*)
fun mk_Part (Bound 0) = Free(X', Ind_Syntax.iT) (*no mutual rec, no Part needed*)
- | mk_Part h = @{const Part} $ Free(X', Ind_Syntax.iT) $ Abs (w', Ind_Syntax.iT, h);
+ | mk_Part h = \<^const>\<open>Part\<close> $ Free(X', Ind_Syntax.iT) $ Abs (w', Ind_Syntax.iT, h);
(*Access to balanced disjoint sums via injections*)
val parts = map mk_Part
@@ -289,7 +289,7 @@
SOME pred => prem :: FOLogic.mk_Trueprop (pred $ t) :: iprems
| NONE => (*possibly membership in M(rec_tm), for M monotone*)
let fun mk_sb (rec_tm,pred) =
- (rec_tm, @{const Collect} $ rec_tm $ pred)
+ (rec_tm, \<^const>\<open>Collect\<close> $ rec_tm $ pred)
in subst_free (map mk_sb ind_alist) prem :: iprems end)
| add_induct_prem ind_alist (prem,iprems) = prem :: iprems;
@@ -378,7 +378,7 @@
val qconcl =
List.foldr FOLogic.mk_all
(FOLogic.imp $
- (@{const mem} $ elem_tuple $ rec_tm)
+ (\<^const>\<open>mem\<close> $ elem_tuple $ rec_tm)
$ (list_comb (pfree, elem_frees))) elem_frees
in (CP.ap_split elem_type FOLogic.oT pfree,
qconcl)
@@ -388,7 +388,7 @@
(*Used to form simultaneous induction lemma*)
fun mk_rec_imp (rec_tm,pred) =
- FOLogic.imp $ (@{const mem} $ Bound 0 $ rec_tm) $
+ FOLogic.imp $ (\<^const>\<open>mem\<close> $ Bound 0 $ rec_tm) $
(pred $ Bound 0);
(*To instantiate the main induction rule*)
--- a/src/ZF/ind_syntax.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/ZF/ind_syntax.ML Sat Jan 05 17:24:33 2019 +0100
@@ -23,10 +23,10 @@
(*Creates All(%v.v:A --> P(v)) rather than Ball(A,P) *)
fun mk_all_imp (A,P) =
FOLogic.all_const iT $
- Abs("v", iT, FOLogic.imp $ (@{const mem} $ Bound 0 $ A) $
+ Abs("v", iT, FOLogic.imp $ (\<^const>\<open>mem\<close> $ Bound 0 $ A) $
Term.betapply(P, Bound 0));
-fun mk_Collect (a, D, t) = @{const Collect} $ D $ absfree (a, iT) t;
+fun mk_Collect (a, D, t) = \<^const>\<open>Collect\<close> $ D $ absfree (a, iT) t;
(*simple error-checking in the premises of an inductive definition*)
fun chk_prem rec_hd (Const (\<^const_name>\<open>conj\<close>, _) $ _ $ _) =
@@ -83,20 +83,20 @@
Logic.list_implies
(map FOLogic.mk_Trueprop prems,
FOLogic.mk_Trueprop
- (@{const mem} $ list_comb (Const (Sign.full_bname sg name, T), args)
+ (\<^const>\<open>mem\<close> $ list_comb (Const (Sign.full_bname sg name, T), args)
$ rec_tm))
in map mk_intr constructs end;
fun mk_all_intr_tms sg arg = flat (ListPair.map (mk_intr_tms sg) arg);
-fun mk_Un (t1, t2) = @{const Un} $ t1 $ t2;
+fun mk_Un (t1, t2) = \<^const>\<open>Un\<close> $ t1 $ t2;
(*Make a datatype's domain: form the union of its set parameters*)
fun union_params (rec_tm, cs) =
let val (_,args) = strip_comb rec_tm
fun is_ind arg = (type_of arg = iT)
in case filter is_ind (args @ cs) of
- [] => @{const zero}
+ [] => \<^const>\<open>zero\<close>
| u_args => Balanced_Tree.make mk_Un u_args
end;
--- a/src/ZF/int_arith.ML Sat Jan 05 17:00:43 2019 +0100
+++ b/src/ZF/int_arith.ML Sat Jan 05 17:24:33 2019 +0100
@@ -42,7 +42,7 @@
(*Utilities*)
-fun mk_numeral i = @{const integ_of} $ mk_bin i;
+fun mk_numeral i = \<^const>\<open>integ_of\<close> $ mk_bin i;
fun dest_numeral (Const(\<^const_name>\<open>integ_of\<close>, _) $ w) = dest_bin w
| dest_numeral t = raise TERM ("dest_numeral", [t]);
@@ -70,7 +70,7 @@
| dest_summing (pos, Const (\<^const_name>\<open>zdiff\<close>, _) $ t $ u, ts) =
dest_summing (pos, t, dest_summing (not pos, u, ts))
| dest_summing (pos, t, ts) =
- if pos then t::ts else @{const zminus} $ t :: ts;
+ if pos then t::ts else \<^const>\<open>zminus\<close> $ t :: ts;
fun dest_sum t = dest_summing (true, t, []);