# HG changeset patch # User wenzelm # Date 1445102794 -7200 # Node ID 5f2ddeb15c060f517c1f25a7e084b18b0b98c189 # Parent 987533262fc26558eb95c6218bfd04123f949837 clarified nesting of paragraphs: indentation is taken into account more uniformly; tuned; diff -r 987533262fc2 -r 5f2ddeb15c06 src/Doc/Implementation/ML.thy --- a/src/Doc/Implementation/ML.thy Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Doc/Implementation/ML.thy Sat Oct 17 19:26:34 2015 +0200 @@ -206,22 +206,18 @@ framework (\secref{sec:context} and \chref{ch:local-theory}) have firm naming conventions as follows: - \begin{itemize} - - \item theories are called @{ML_text thy}, rarely @{ML_text theory} + \<^item> theories are called @{ML_text thy}, rarely @{ML_text theory} (never @{ML_text thry}) - \item proof contexts are called @{ML_text ctxt}, rarely @{ML_text + \<^item> proof contexts are called @{ML_text ctxt}, rarely @{ML_text context} (never @{ML_text ctx}) - \item generic contexts are called @{ML_text context} - - \item local theories are called @{ML_text lthy}, except for local + \<^item> generic contexts are called @{ML_text context} + + \<^item> local theories are called @{ML_text lthy}, except for local theories that are treated as proof context (which is a semantic super-type) - \end{itemize} - Variations with primed or decimal numbers are always possible, as well as semantic prefixes like @{ML_text foo_thy} or @{ML_text bar_ctxt}, but the base conventions above need to be preserved. @@ -231,25 +227,21 @@ \<^item> The main logical entities (\secref{ch:logic}) have established naming convention as follows: - \begin{itemize} - - \item sorts are called @{ML_text S} - - \item types are called @{ML_text T}, @{ML_text U}, or @{ML_text + \<^item> sorts are called @{ML_text S} + + \<^item> types are called @{ML_text T}, @{ML_text U}, or @{ML_text ty} (never @{ML_text t}) - \item terms are called @{ML_text t}, @{ML_text u}, or @{ML_text + \<^item> terms are called @{ML_text t}, @{ML_text u}, or @{ML_text tm} (never @{ML_text trm}) - \item certified types are called @{ML_text cT}, rarely @{ML_text + \<^item> certified types are called @{ML_text cT}, rarely @{ML_text T}, with variants as for types - \item certified terms are called @{ML_text ct}, rarely @{ML_text + \<^item> certified terms are called @{ML_text ct}, rarely @{ML_text t}, with variants as for terms (never @{ML_text ctrm}) - \item theorems are called @{ML_text th}, or @{ML_text thm} - - \end{itemize} + \<^item> theorems are called @{ML_text th}, or @{ML_text thm} Proper semantic names override these conventions completely. For example, the left-hand side of an equation (as a term) can be called @@ -1376,15 +1368,11 @@ Isabelle-specific purposes with the following implicit substructures packed into the string content: - \begin{enumerate} - - \item sequence of Isabelle symbols (see also \secref{sec:symbols}), + \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}), with @{ML Symbol.explode} as key operation; - \item XML tree structure via YXML (see also @{cite "isabelle-system"}), + \<^enum> XML tree structure via YXML (see also @{cite "isabelle-system"}), with @{ML YXML.parse_body} as key operation. - - \end{enumerate} Note that Isabelle/ML string literals may refer Isabelle symbols like ``@{verbatim \}'' natively, \emph{without} escaping the backslash. This is a @@ -2140,21 +2128,19 @@ fork several futures simultaneously. 
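  A minimal sketch (assuming a hypothetical user function @{ML_text expensive}) of
  forking two tasks with all parameters at their documented defaults and retrieving
  the results with @{ML Future.join} as described at the end of this list:

    (*sketch only: "expensive" stands for some user-defined function*)
    val futures =
      Future.forks {name = "", group = NONE, deps = [], pri = 0, interrupts = true}
        [fn () => expensive 1, fn () => expensive 2];
    val results = map Future.join futures;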
The @{text params} consist of the following fields: - \begin{itemize} - - \item @{text "name : string"} (default @{ML "\"\""}) specifies a common name + \<^item> @{text "name : string"} (default @{ML "\"\""}) specifies a common name for the tasks of the forked futures, which serves diagnostic purposes. - \item @{text "group : Future.group option"} (default @{ML NONE}) specifies + \<^item> @{text "group : Future.group option"} (default @{ML NONE}) specifies an optional task group for the forked futures. @{ML NONE} means that a new sub-group of the current worker-thread task context is created. If this is not a worker thread, the group will be a new root in the group hierarchy. - \item @{text "deps : Future.task list"} (default @{ML "[]"}) specifies + \<^item> @{text "deps : Future.task list"} (default @{ML "[]"}) specifies dependencies on other future tasks, i.e.\ the adjacency relation in the global task queue. Dependencies on already finished tasks are ignored. - \item @{text "pri : int"} (default @{ML 0}) specifies a priority within the + \<^item> @{text "pri : int"} (default @{ML 0}) specifies a priority within the task queue. Typically there is only little deviation from the default priority @{ML 0}. @@ -2167,7 +2153,7 @@ priority tasks that are queued later need to wait until this (or another) worker thread becomes free again. - \item @{text "interrupts : bool"} (default @{ML true}) tells whether the + \<^item> @{text "interrupts : bool"} (default @{ML true}) tells whether the worker thread that processes the corresponding task is initially put into interruptible state. This state may change again while running, by modifying the thread attributes. @@ -2176,8 +2162,6 @@ the responsibility of the programmer that this special state is retained only briefly. - \end{itemize} - \<^descr> @{ML Future.join}~@{text x} retrieves the value of an already finished future, which may lead to an exception, according to the result of its previous evaluation. diff -r 987533262fc2 -r 5f2ddeb15c06 src/Doc/Isar_Ref/Generic.thy --- a/src/Doc/Isar_Ref/Generic.thy Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Doc/Isar_Ref/Generic.thy Sat Oct 17 19:26:34 2015 +0200 @@ -521,9 +521,7 @@ The Simplifier accepts the following formats for the @{text "lhs"} term: - \begin{enumerate} - - \item First-order patterns, considering the sublanguage of + \<^enum> First-order patterns, considering the sublanguage of application of constant operators to variable operands, without @{text "\"}-abstractions or functional variables. For example: @@ -531,7 +529,7 @@ @{text "(?x + ?y) + ?z \ ?x + (?y + ?z)"} \\ @{text "f (f ?x ?y) ?z \ f ?x (f ?y ?z)"} - \item Higher-order patterns in the sense of @{cite "nipkow-patterns"}. + \<^enum> Higher-order patterns in the sense of @{cite "nipkow-patterns"}. These are terms in @{text "\"}-normal form (this will always be the case unless you have done something strange) where each occurrence of an unknown is of the form @{text "?F x\<^sub>1 \ x\<^sub>n"}, where the @@ -541,7 +539,7 @@ or its symmetric form, since the @{text "rhs"} is also a higher-order pattern. - \item Physical first-order patterns over raw @{text "\"}-term + \<^enum> Physical first-order patterns over raw @{text "\"}-term structure without @{text "\\\"}-equality; abstractions and bound variables are treated like quasi-constant term material. @@ -554,8 +552,6 @@ rewrite rule of the second category since conditions can be arbitrary terms. - \end{enumerate} - \<^descr> @{attribute split} declares case split rules. 
\<^descr> @{attribute cong} declares congruence rules to the Simplifier @@ -1519,28 +1515,24 @@ inferences. It is faster and more powerful than the other classical reasoning tools, but has major limitations too. - \begin{itemize} - - \item It does not use the classical wrapper tacticals, such as the + \<^item> It does not use the classical wrapper tacticals, such as the integration with the Simplifier of @{method fastforce}. - \item It does not perform higher-order unification, as needed by the + \<^item> It does not perform higher-order unification, as needed by the rule @{thm [source=false] rangeI} in HOL. There are often alternatives to such rules, for example @{thm [source=false] range_eqI}. - \item Function variables may only be applied to parameters of the + \<^item> Function variables may only be applied to parameters of the subgoal. (This restriction arises because the prover does not use higher-order unification.) If other function variables are present then the prover will fail with the message @{verbatim [display] \Function unknown's argument not a bound variable\} - \item Its proof strategy is more general than @{method fast} but can + \<^item> Its proof strategy is more general than @{method fast} but can be slower. If @{method blast} fails or seems to be running forever, try @{method fast} and the other proof tools described below. - \end{itemize} - The optional integer argument specifies a bound for the number of unsafe steps used in a proof. By default, @{method blast} starts with a bound of 0 and increases it successively to 20. In contrast, diff -r 987533262fc2 -r 5f2ddeb15c06 src/Doc/Isar_Ref/HOL_Specific.thy --- a/src/Doc/Isar_Ref/HOL_Specific.thy Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Doc/Isar_Ref/HOL_Specific.thy Sat Oct 17 19:26:34 2015 +0200 @@ -595,16 +595,14 @@ order on the result type. By default, the following modes are defined: - \begin{description} - - \item @{text option} defines functions that map into the @{type + \<^descr> @{text option} defines functions that map into the @{type option} type. Here, the value @{term None} is used to model a non-terminating computation. Monotonicity requires that if @{term None} is returned by a recursive call, then the overall result must also be @{term None}. This is best achieved through the use of the monadic operator @{const "Option.bind"}. - \item @{text tailrec} defines functions with an arbitrary result + \<^descr> @{text tailrec} defines functions with an arbitrary result type and uses the slightly degenerated partial order where @{term "undefined"} is the bottom element. Now, monotonicity requires that if @{term undefined} is returned by a recursive call, then the @@ -613,8 +611,6 @@ is directly returned. Thus, this mode of operation allows the definition of arbitrary tail-recursive functions. - \end{description} - Experienced users may define new modes by instantiating the locale @{const "partial_function_definitions"} appropriately. @@ -1345,9 +1341,7 @@ \<^descr> @{command (HOL) "setup_lifting"} Sets up the Lifting package to work with a user-defined type. The command supports two modes. - \begin{enumerate} - - \item The first one is a low-level mode when the user must provide as a + \<^enum> The first one is a low-level mode when the user must provide as a first argument of @{command (HOL) "setup_lifting"} a quotient theorem @{term "Quotient R Abs Rep T"}. 
The package configures a transfer rule for equality, a domain transfer rules and sets up the @{command_def (HOL) @@ -1361,15 +1355,13 @@ Users generally will not prove the @{text Quotient} theorem manually for new types, as special commands exist to automate the process. - \item When a new subtype is defined by @{command (HOL) typedef}, @{command + \<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "lift_definition"} can be used in its second mode, where only the @{term type_definition} theorem @{term "type_definition Rep Abs A"} is used as an argument of the command. The command internally proves the corresponding @{term Quotient} theorem and registers it with @{command (HOL) setup_lifting} using its first mode. - \end{enumerate} - For quotients, the command @{command (HOL) quotient_type} can be used. The command defines a new quotient type and similarly to the previous case, the corresponding Quotient theorem is proved and registered by @{command @@ -1423,21 +1415,17 @@ code execution through series of internal type and lifting definitions if the return type @{text "\"} meets the following inductive conditions: - \begin{description} - - \item @{text "\"} is a type variable - - \item @{text "\ = \\<^sub>1 \ \\<^sub>n \"}, + \<^descr> @{text "\"} is a type variable + + \<^descr> @{text "\ = \\<^sub>1 \ \\<^sub>n \"}, where @{text "\"} is an abstract type constructor and @{text "\\<^sub>1 \ \\<^sub>n"} do not contain abstract types (i.e.\ @{typ "int dlist"} is allowed whereas @{typ "int dlist dlist"} not) - \item @{text "\ = \\<^sub>1 \ \\<^sub>n \"}, @{text "\"} is a type constructor that + \<^descr> @{text "\ = \\<^sub>1 \ \\<^sub>n \"}, @{text "\"} is a type constructor that was defined as a (co)datatype whose constructor argument types do not contain either non-free datatypes or the function type. - \end{description} - Integration with [@{attribute code} equation]: For total quotients, @{command (HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation. @@ -1878,9 +1866,7 @@ quickcheck uses exhaustive testing. A number of configuration options are supported for @{command (HOL) "quickcheck"}, notably: - \begin{description} - - \item[@{text tester}] specifies which testing approach to apply. + \<^descr>[@{text tester}] specifies which testing approach to apply. There are three testers, @{text exhaustive}, @{text random}, and @{text narrowing}. An unknown configuration option is treated as an argument to tester, making @{text "tester ="} optional. When @@ -1891,31 +1877,31 @@ quickcheck_random_active}, @{attribute quickcheck_narrowing_active} are set to true. - \item[@{text size}] specifies the maximum size of the search space + \<^descr>[@{text size}] specifies the maximum size of the search space for assignment values. - \item[@{text genuine_only}] sets quickcheck only to return genuine + \<^descr>[@{text genuine_only}] sets quickcheck only to return genuine counterexample, but not potentially spurious counterexamples due to underspecified functions. - \item[@{text abort_potential}] sets quickcheck to abort once it + \<^descr>[@{text abort_potential}] sets quickcheck to abort once it found a potentially spurious counterexample and to not continue to search for a further genuine counterexample. For this option to be effective, the @{text genuine_only} option must be set to false. 
- \item[@{text eval}] takes a term or a list of terms and evaluates + \<^descr>[@{text eval}] takes a term or a list of terms and evaluates these terms under the variable assignment found by quickcheck. This option is currently only supported by the default (exhaustive) tester. - \item[@{text iterations}] sets how many sets of assignments are + \<^descr>[@{text iterations}] sets how many sets of assignments are generated for each particular size. - \item[@{text no_assms}] specifies whether assumptions in + \<^descr>[@{text no_assms}] specifies whether assumptions in structured proofs should be ignored. - \item[@{text locale}] specifies how to process conjectures in + \<^descr>[@{text locale}] specifies how to process conjectures in a locale context, i.e.\ they can be interpreted or expanded. The option is a whitespace-separated list of the two words @{text interpret} and @{text expand}. The list determines the @@ -1924,91 +1910,86 @@ The option is only provided as attribute declaration, but not as parameter to the command. - \item[@{text timeout}] sets the time limit in seconds. - - \item[@{text default_type}] sets the type(s) generally used to + \<^descr>[@{text timeout}] sets the time limit in seconds. + + \<^descr>[@{text default_type}] sets the type(s) generally used to instantiate type variables. - \item[@{text report}] if set quickcheck reports how many tests + \<^descr>[@{text report}] if set quickcheck reports how many tests fulfilled the preconditions. - \item[@{text use_subtype}] if set quickcheck automatically lifts + \<^descr>[@{text use_subtype}] if set quickcheck automatically lifts conjectures to registered subtypes if possible, and tests the lifted conjecture. - \item[@{text quiet}] if set quickcheck does not output anything + \<^descr>[@{text quiet}] if set quickcheck does not output anything while testing. - \item[@{text verbose}] if set quickcheck informs about the current + \<^descr>[@{text verbose}] if set quickcheck informs about the current size and cardinality while testing. - \item[@{text expect}] can be used to check if the user's + \<^descr>[@{text expect}] can be used to check if the user's expectation was met (@{text no_expectation}, @{text no_counterexample}, or @{text counterexample}). - \end{description} - These option can be given within square brackets. Using the following type classes, the testers generate values and convert them back into Isabelle terms for displaying counterexamples. - \begin{description} - - \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive} - and @{class full_exhaustive} implement the testing. They take a - testing function as a parameter, which takes a value of type @{typ "'a"} - and optionally produces a counterexample, and a size parameter for the test values. - In @{class full_exhaustive}, the testing function parameter additionally - expects a lazy term reconstruction in the type @{typ Code_Evaluation.term} - of the tested value. - - The canonical implementation for @{text exhaustive} testers calls the given - testing function on all values up to the given size and stops as soon - as a counterexample is found. - - \item[@{text random}] The operation @{const Quickcheck_Random.random} - of the type class @{class random} generates a pseudo-random - value of the given size and a lazy term reconstruction of the value - in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator - is defined in theory @{theory Random}. 
- - \item[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"} - using the type classes @{class narrowing} and @{class partial_term_of}. - Variables in the current goal are initially represented as symbolic variables. - If the execution of the goal tries to evaluate one of them, the test engine - replaces it with refinements provided by @{const narrowing}. - Narrowing views every value as a sum-of-products which is expressed using the operations - @{const Quickcheck_Narrowing.cons} (embedding a value), - @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum). - The refinement should enable further evaluation of the goal. - - For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"} - can be recursively defined as - @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons []) + \<^descr>[@{text exhaustive}] The parameters of the type classes @{class exhaustive} + and @{class full_exhaustive} implement the testing. They take a + testing function as a parameter, which takes a value of type @{typ "'a"} + and optionally produces a counterexample, and a size parameter for the test values. + In @{class full_exhaustive}, the testing function parameter additionally + expects a lazy term reconstruction in the type @{typ Code_Evaluation.term} + of the tested value. + + The canonical implementation for @{text exhaustive} testers calls the given + testing function on all values up to the given size and stops as soon + as a counterexample is found. + + \<^descr>[@{text random}] The operation @{const Quickcheck_Random.random} + of the type class @{class random} generates a pseudo-random + value of the given size and a lazy term reconstruction of the value + in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator + is defined in theory @{theory Random}. + + \<^descr>[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"} + using the type classes @{class narrowing} and @{class partial_term_of}. + Variables in the current goal are initially represented as symbolic variables. + If the execution of the goal tries to evaluate one of them, the test engine + replaces it with refinements provided by @{const narrowing}. + Narrowing views every value as a sum-of-products which is expressed using the operations + @{const Quickcheck_Narrowing.cons} (embedding a value), + @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum). + The refinement should enable further evaluation of the goal. + + For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"} + can be recursively defined as + @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons []) + (Quickcheck_Narrowing.apply (Quickcheck_Narrowing.apply - (Quickcheck_Narrowing.apply - (Quickcheck_Narrowing.cons (op #)) - narrowing) - narrowing)"}. - If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty - list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively - refined if needed. - - To reconstruct counterexamples, the operation @{const partial_term_of} transforms - @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}. 
- The deep representation models symbolic variables as - @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to - @{const Code_Evaluation.Free}, and refined values as - @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"} - denotes the index in the sum of refinements. In the above example for lists, - @{term "0"} corresponds to @{term "[]"} and @{term "1"} - to @{term "op #"}. - - The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of} - such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor, - but it does not ensures consistency with @{const narrowing}. - \end{description} + (Quickcheck_Narrowing.cons (op #)) + narrowing) + narrowing)"}. + If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty + list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively + refined if needed. + + To reconstruct counterexamples, the operation @{const partial_term_of} transforms + @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}. + The deep representation models symbolic variables as + @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to + @{const Code_Evaluation.Free}, and refined values as + @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"} + denotes the index in the sum of refinements. In the above example for lists, + @{term "0"} corresponds to @{term "[]"} and @{term "1"} + to @{term "op #"}. + + The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of} + such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor, + but it does not ensures consistency with @{const narrowing}. \<^descr> @{command (HOL) "quickcheck_params"} changes @{command (HOL) "quickcheck"} configuration options persistently. @@ -2207,9 +2188,7 @@ @{cite \\S3.2\ "Chaieb-thesis"}. The method handles deals with two main classes of problems: - \begin{enumerate} - - \item Universal problems over multivariate polynomials in a + \<^enum> Universal problems over multivariate polynomials in a (semi)-ring/field/idom; the capabilities of the method are augmented according to properties of these structures. For this problem class the method is only complete for algebraically closed fields, since @@ -2219,7 +2198,7 @@ The problems can contain equations @{text "p = 0"} or inequations @{text "q \ 0"} anywhere within a universal problem statement. - \item All-exists problems of the following restricted (but useful) + \<^enum> All-exists problems of the following restricted (but useful) form: @{text [display] "\x\<^sub>1 \ x\<^sub>n. @@ -2232,8 +2211,6 @@ Here @{text "e\<^sub>1, \, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate polynomials only in the variables mentioned as arguments. - \end{enumerate} - The proof method is preceded by a simplification step, which may be modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}. This acts like declarations for the Simplifier diff -r 987533262fc2 -r 5f2ddeb15c06 src/Doc/Isar_Ref/Inner_Syntax.thy --- a/src/Doc/Isar_Ref/Inner_Syntax.thy Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Doc/Isar_Ref/Inner_Syntax.thy Sat Oct 17 19:26:34 2015 +0200 @@ -818,30 +818,26 @@ \<^item> Dummy variables (written as underscore) may occur in different roles. 
- \begin{description} - - \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an + \<^descr> A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an anonymous inference parameter, which is filled-in according to the most general type produced by the type-checking phase. - \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where + \<^descr> A bound ``@{text "_"}'' refers to a vacuous abstraction, where the body does not refer to the binding introduced here. As in the term @{term "\x _. x"}, which is @{text "\"}-equivalent to @{text "\x y. x"}. - \item A free ``@{text "_"}'' refers to an implicit outer binding. + \<^descr> A free ``@{text "_"}'' refers to an implicit outer binding. Higher definitional packages usually allow forms like @{text "f x _ = x"}. - \item A schematic ``@{text "_"}'' (within a term pattern, see + \<^descr> A schematic ``@{text "_"}'' (within a term pattern, see \secref{sec:term-decls}) refers to an anonymous variable that is implicitly abstracted over its context of locally bound variables. For example, this allows pattern matching of @{text "{x. f x = g x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by using both bound and schematic dummies. - \end{description} - \<^descr> The three literal dots ``@{verbatim "..."}'' may be also written as ellipsis symbol @{verbatim "\"}. In both cases this refers to a special schematic variable, which is bound in the @@ -870,12 +866,10 @@ current context. The output can be quite large; the most important sections are explained below. - \begin{description} - - \item @{text "lexicon"} lists the delimiters of the inner token + \<^descr> @{text "lexicon"} lists the delimiters of the inner token language; see \secref{sec:inner-lex}. - \item @{text "prods"} lists the productions of the underlying + \<^descr> @{text "prods"} lists the productions of the underlying priority grammar; see \secref{sec:priority-grammar}. The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text @@ -896,22 +890,20 @@ Priority information attached to chain productions is ignored; only the dummy value @{text "-1"} is displayed. - \item @{text "print modes"} lists the alternative print modes + \<^descr> @{text "print modes"} lists the alternative print modes provided by this grammar; see \secref{sec:print-modes}. - \item @{text "parse_rules"} and @{text "print_rules"} relate to + \<^descr> @{text "parse_rules"} and @{text "print_rules"} relate to syntax translations (macros); see \secref{sec:syn-trans}. - \item @{text "parse_ast_translation"} and @{text + \<^descr> @{text "parse_ast_translation"} and @{text "print_ast_translation"} list sets of constants that invoke translation functions for abstract syntax trees, which are only required in very special situations; see \secref{sec:tr-funs}. - \item @{text "parse_translation"} and @{text "print_translation"} + \<^descr> @{text "parse_translation"} and @{text "print_translation"} list the sets of constants that invoke regular translation functions; see \secref{sec:tr-funs}. - - \end{description} \ @@ -1167,20 +1159,17 @@ (@{verbatim "_"}). 
The latter correspond to nonterminal symbols @{text "A\<^sub>i"} derived from the argument types @{text "\\<^sub>i"} as follows: - \begin{itemize} - \item @{text "prop"} if @{text "\\<^sub>i = prop"} + \<^item> @{text "prop"} if @{text "\\<^sub>i = prop"} - \item @{text "logic"} if @{text "\\<^sub>i = (\)\"} for logical type + \<^item> @{text "logic"} if @{text "\\<^sub>i = (\)\"} for logical type constructor @{text "\ \ prop"} - \item @{text any} if @{text "\\<^sub>i = \"} for type variables + \<^item> @{text any} if @{text "\\<^sub>i = \"} for type variables - \item @{text "\"} if @{text "\\<^sub>i = \"} for nonterminal @{text "\"} + \<^item> @{text "\"} if @{text "\\<^sub>i = \"} for nonterminal @{text "\"} (syntactic type constructor) - \end{itemize} - Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the given list @{text "ps"}; missing priorities default to 0. @@ -1243,19 +1232,15 @@ AST rewrite rules @{text "(lhs, rhs)"} need to obey the following side-conditions: - \begin{itemize} - - \item Rules must be left linear: @{text "lhs"} must not contain + \<^item> Rules must be left linear: @{text "lhs"} must not contain repeated variables.\footnote{The deeper reason for this is that AST equality is not well-defined: different occurrences of the ``same'' AST could be decorated differently by accidental type-constraints or source position information, for example.} - \item Every variable in @{text "rhs"} must also occur in @{text + \<^item> Every variable in @{text "rhs"} must also occur in @{text "lhs"}. - \end{itemize} - \<^descr> @{command "no_translations"}~@{text rules} removes syntactic translation rules, which are interpreted in the same manner as for @{command "translations"} above. diff -r 987533262fc2 -r 5f2ddeb15c06 src/Doc/Isar_Ref/Spec.thy --- a/src/Doc/Isar_Ref/Spec.thy Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Doc/Isar_Ref/Spec.thy Sat Oct 17 19:26:34 2015 +0200 @@ -565,37 +565,33 @@ The @{text body} consists of context elements. - \begin{description} - - \item @{element "fixes"}~@{text "x :: \ (mx)"} declares a local + \<^descr> @{element "fixes"}~@{text "x :: \ (mx)"} declares a local parameter of type @{text \} and mixfix annotation @{text mx} (both are optional). The special syntax declaration ``@{text "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may be referenced implicitly in this context. - \item @{element "constrains"}~@{text "x :: \"} introduces a type + \<^descr> @{element "constrains"}~@{text "x :: \"} introduces a type constraint @{text \} on the local parameter @{text x}. This element is deprecated. The type constraint should be introduced in the @{keyword "for"} clause or the relevant @{element "fixes"} element. - \item @{element "assumes"}~@{text "a: \\<^sub>1 \ \\<^sub>n"} + \<^descr> @{element "assumes"}~@{text "a: \\<^sub>1 \ \\<^sub>n"} introduces local premises, similar to @{command "assume"} within a proof (cf.\ \secref{sec:proof-context}). - \item @{element "defines"}~@{text "a: x \ t"} defines a previously + \<^descr> @{element "defines"}~@{text "a: x \ t"} defines a previously declared parameter. This is similar to @{command "def"} within a proof (cf.\ \secref{sec:proof-context}), but @{element "defines"} takes an equational proposition instead of variable-term pair. The left-hand side of the equation may have additional arguments, e.g.\ ``@{element "defines"}~@{text "f x\<^sub>1 \ x\<^sub>n \ t"}''. 
- \item @{element "notes"}~@{text "a = b\<^sub>1 \ b\<^sub>n"} + \<^descr> @{element "notes"}~@{text "a = b\<^sub>1 \ b\<^sub>n"} reconsiders facts within a local context. Most notably, this may include arbitrary declarations in any attribute specifications included here, e.g.\ a local @{attribute simp} rule. - \end{description} - Both @{element "assumes"} and @{element "defines"} elements contribute to the locale specification. When defining an operation derived from the parameters, @{command "definition"} @@ -865,28 +861,20 @@ also \emph{rewrite definitions} may be specified. Semantically, a rewrite definition - \begin{itemize} - - \item produces a corresponding definition in + \<^item> produces a corresponding definition in the local theory's underlying target \emph{and} - \item augments the rewrite morphism with the equation + \<^item> augments the rewrite morphism with the equation stemming from the symmetric of the corresponding definition. - - \end{itemize} This is technically different to to a naive combination of a conventional definition and an explicit rewrite equation: - \begin{itemize} - - \item Definitions are parsed in the syntactic interpretation + \<^item> Definitions are parsed in the syntactic interpretation context, just like equations. - \item The proof needs not consider the equations stemming from + \<^item> The proof needs not consider the equations stemming from definitions -- they are proved implicitly by construction. - - \end{itemize} Rewrite definitions yield a pattern for introducing new explicit operations for existing terms after interpretation. diff -r 987533262fc2 -r 5f2ddeb15c06 src/Pure/Thy/markdown.ML --- a/src/Pure/Thy/markdown.ML Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Pure/Thy/markdown.ML Sat Oct 17 19:26:34 2015 +0200 @@ -22,13 +22,12 @@ val is_control: Symbol.symbol -> bool datatype kind = Itemize | Enumerate | Description val print_kind: kind -> string - type marker = {indent: int, kind: kind} type line val line_source: line -> Antiquote.text_antiquote list val line_content: line -> Antiquote.text_antiquote list val make_line: Antiquote.text_antiquote list -> line val empty_line: line - datatype block = Paragraph of line list | List of marker * block list + datatype block = Paragraph of line list | List of {indent: int, kind: kind, body: block list} val read_lines: line list -> block list val read_antiquotes: Antiquote.text_antiquote list -> block list val read_source: Input.source -> block list @@ -49,23 +48,25 @@ | print_kind Enumerate = "enumerate" | print_kind Description = "description"; -type marker = {indent: int, kind: kind}; - datatype line = Line of {source: Antiquote.text_antiquote list, - content: Antiquote.text_antiquote list, is_empty: bool, - marker: (marker * Position.T) option}; + indent: int, + item: kind option, + item_pos: Position.T, + content: Antiquote.text_antiquote list}; val eof_line = Line {source = [Antiquote.Text [(Symbol.eof, Position.none)]], - content = [], is_empty = false, marker = NONE}; + is_empty = false, indent = 0, item = NONE, item_pos = Position.none, content = []}; fun line_source (Line {source, ...}) = source; +fun line_is_empty (Line {is_empty, ...}) = is_empty; +fun line_indent (Line {indent, ...}) = indent; +fun line_item (Line {item, ...}) = item; +fun line_item_pos (Line {item_pos, ...}) = item_pos; fun line_content (Line {content, ...}) = content; -fun line_is_empty (Line {is_empty, ...}) = is_empty; -fun line_marker (Line {marker, ...}) = marker; (* make line *) @@ -86,24 +87,28 @@ val scan_marker 
= Scan.many is_space -- Symbol_Pos.scan_pos -- - (Symbol_Pos.$$ "\\<^item>" >> K Itemize || - Symbol_Pos.$$ "\\<^enum>" >> K Enumerate || - Symbol_Pos.$$ "\\<^descr>" >> K Description) - >> (fn ((spaces, pos), kind) => ({indent = length spaces, kind = kind}, pos)); + Scan.option + (Symbol_Pos.$$ "\\<^item>" >> K Itemize || + Symbol_Pos.$$ "\\<^enum>" >> K Enumerate || + Symbol_Pos.$$ "\\<^descr>" >> K Description) --| Scan.many is_space + >> (fn ((sp, pos), item) => (length sp, item, if is_some item then pos else Position.none)); fun read_marker (Antiquote.Text ss :: rest) = - (case Scan.finite Symbol_Pos.stopper (Scan.option scan_marker --| Scan.many is_space) ss of + (case Scan.finite Symbol_Pos.stopper scan_marker ss of (marker, []) => (marker, rest) | (marker, ss') => (marker, Antiquote.Text ss' :: rest)) - | read_marker source = (NONE, source); + | read_marker source = ((0, NONE, Position.none), source); in fun make_line source = let val _ = check_blanks source; - val (marker, content) = read_marker source; - in Line {source = source, content = content, is_empty = is_empty source, marker = marker} end; + val ((indent, item, item_pos), content) = read_marker source; + in + Line {source = source, is_empty = is_empty source, indent = indent, + item = item, item_pos = item_pos, content = content} + end; val empty_line = make_line []; @@ -112,46 +117,53 @@ (* document blocks *) -datatype block = Paragraph of line list | List of marker * block list; +datatype block = + Paragraph of line list | List of {indent: int, kind: kind, body: block list}; fun block_lines (Paragraph lines) = lines - | block_lines (List (_, blocks)) = maps block_lines blocks; + | block_lines (List {body, ...}) = maps block_lines body; fun block_range (Paragraph lines) = Antiquote.range (maps line_content lines) - | block_range (List (_, blocks)) = Antiquote.range (maps line_source (maps block_lines blocks)); + | block_range (List {body, ...}) = Antiquote.range (maps line_source (maps block_lines body)); + +fun block_indent (List {indent, ...}) = indent + | block_indent (Paragraph (line :: _)) = line_indent line + | block_indent _ = 0; + +fun block_list indent0 kind0 (List {indent, kind, body}) = + if indent0 = indent andalso kind0 = kind then SOME body else NONE + | block_list _ _ _ = NONE; + +val is_list = fn List _ => true | _ => false; (* read document *) local -fun add_span (opt_marker, body) document = - (case (opt_marker, document) of - (SOME marker, (list as List (list_marker, list_body)) :: rest) => - if marker = list_marker then - List (list_marker, body @ list_body) :: rest - else if #indent marker < #indent list_marker then - add_span (opt_marker, body @ [list]) rest - else - List (marker, body) :: document - | (SOME marker, _) => List (marker, body) :: document - | (NONE, _) => body @ document); +fun build (indent, item, rev_body) document = + (case (item, document) of + (SOME kind, block :: blocks) => + (case block_list indent kind block of + SOME list => List {indent = indent, kind = kind, body = fold cons rev_body list} :: blocks + | NONE => + if (if is_list block then indent < block_indent block else indent <= block_indent block) + then build (indent, item, block :: rev_body) blocks + else List {indent = indent, kind = kind, body = rev rev_body} :: document) + | (SOME kind, []) => [List {indent = indent, kind = kind, body = rev rev_body}] + | (NONE, _) => fold cons rev_body document); fun plain_line line = - not (line_is_empty line) andalso is_none (line_marker line) andalso line <> eof_line; - -val 
parse_paragraph = Scan.many1 plain_line >> Paragraph; + not (line_is_empty line) andalso is_none (line_item line) andalso line <> eof_line; -val parse_span = - parse_paragraph >> (fn par => (NONE, [par])) || - Scan.one (is_some o line_marker) -- Scan.many plain_line -- - Scan.repeat (Scan.one line_is_empty |-- parse_paragraph) >> - (fn ((line, lines), pars) => - (Option.map #1 (line_marker line), Paragraph (line :: lines) :: pars)); +val parse_paragraph = + Scan.one (fn line => line <> eof_line) -- Scan.many plain_line >> (fn (line, lines) => + let val block = Paragraph (line :: lines) + in (line_indent line, line_item line, [block]) end); val parse_document = - parse_span ::: Scan.repeat (Scan.option (Scan.one line_is_empty) |-- parse_span) - >> (fn spans => fold_rev add_span spans []); + parse_paragraph ::: Scan.repeat (Scan.option (Scan.one line_is_empty) |-- parse_paragraph) + >> (fn pars => fold_rev build pars []); in @@ -173,17 +185,16 @@ local -fun line_reports depth (Line {marker = SOME (_, pos), content, ...}) = - cons (pos, Markup.markdown_item depth) #> - append (text_reports content) - | line_reports _ _ = I; +fun line_reports depth line = + cons (line_item_pos line, Markup.markdown_item depth) #> + append (text_reports (line_content line)); fun block_reports depth block = (case block of Paragraph lines => cons (#1 (block_range block), Markup.markdown_paragraph) #> fold (line_reports depth) lines - | List ({kind, ...}, body) => + | List {kind, body, ...} => cons (#1 (block_range block), Markup.markdown_list (print_kind kind)) #> fold (block_reports (depth + 1)) body); diff -r 987533262fc2 -r 5f2ddeb15c06 src/Pure/Thy/thy_output.ML --- a/src/Pure/Thy/thy_output.ML Fri Oct 16 14:53:26 2015 +0200 +++ b/src/Pure/Thy/thy_output.ML Sat Oct 17 19:26:34 2015 +0200 @@ -195,8 +195,8 @@ fun output_blocks blocks = space_implode "\n\n" (map output_block blocks) and output_block (Markdown.Paragraph lines) = cat_lines (map (output_antiquotes o Markdown.line_source) lines) - | output_block (Markdown.List (marker, body)) = - let val env = Markdown.print_kind (#kind marker) in + | output_block (Markdown.List {kind, body, ...}) = + let val env = Markdown.print_kind kind in "%\n\\begin{" ^ env ^ "}%\n" ^ output_blocks body ^ "%\n\\end{" ^ env ^ "}%\n"
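(*A sketch, not part of this changeset, of the revised nesting of blocks: a more
  deeply indented enumeration ends up inside the body of the enclosing itemize
  block; Markdown.empty_line merely stands in for lines that would normally be
  built via Markdown.make_line from antiquoted source.

    val nested_example =
      Markdown.List {indent = 0, kind = Markdown.Itemize, body =
        [Markdown.Paragraph [Markdown.empty_line],
         Markdown.List {indent = 2, kind = Markdown.Enumerate, body =
           [Markdown.Paragraph [Markdown.empty_line]]}]};
*)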