# HG changeset patch # User wenzelm # Date 1445202032 -7200 # Node ID 6e789d198bbda0af6b3ba044b64605d36dc1e22d # Parent cd82b10239329c918866259c406d6631eba6d688# Parent e467ae7aa8086ac6009276ac7ce393596494fc21 merged diff -r cd82b1023932 -r 6e789d198bbd NEWS --- a/NEWS Sun Oct 18 17:25:13 2015 +0200 +++ b/NEWS Sun Oct 18 23:00:32 2015 +0200 @@ -22,9 +22,8 @@ * Toplevel theorem statement 'proposition' is another alias for 'theorem'. -* HTML presentation uses the standard IsabelleText font and Unicode -rendering of Isabelle symbols like Isabelle/Scala/jEdit. The former -print mode "HTML" looses its special meaning. +* There is a new short form for antiquotations with a single argument +that is a cartouche: \<^name>\...\ is equivalent to @{name \...\}. *** Prover IDE -- Isabelle/Scala/jEdit *** @@ -57,6 +56,10 @@ *** Document preparation *** +* HTML presentation uses the standard IsabelleText font and Unicode +rendering of Isabelle symbols like Isabelle/Scala/jEdit. The former +print mode "HTML" looses its special meaning. + * Commands 'paragraph' and 'subparagraph' provide additional section headings. Thus there are 6 levels of standard headings, as in HTML. @@ -79,6 +82,10 @@ 'text' (with antiquotations and control symbols). The key difference is the lack of the surrounding isabelle markup environment in output. +* Document antiquotations @{emph} and @{bold} output LaTeX source +recursively, adding appropriate text style markup. These are typically +used in the short form \<^emph>\...\ and \<^bold>\...\. + *** Isar *** diff -r cd82b1023932 -r 6e789d198bbd etc/symbols --- a/etc/symbols Sun Oct 18 17:25:13 2015 +0200 +++ b/etc/symbols Sun Oct 18 23:00:32 2015 +0200 @@ -359,7 +359,7 @@ \<^item> code: 0x0025aa group: control font: IsabelleText \<^enum> code: 0x0025b8 group: control font: IsabelleText \<^descr> code: 0x0027a7 group: control font: IsabelleText -#\<^emph> code: 0x002217 group: control font: IsabelleText +\<^emph> code: 0x002217 group: control font: IsabelleText \<^bold> code: 0x002759 group: control font: IsabelleText \<^sub> code: 0x0021e9 group: control font: IsabelleText \<^sup> code: 0x0021e7 group: control font: IsabelleText diff -r cd82b1023932 -r 6e789d198bbd lib/texinputs/isabelle.sty --- a/lib/texinputs/isabelle.sty Sun Oct 18 17:25:13 2015 +0200 +++ b/lib/texinputs/isabelle.sty Sun Oct 18 23:00:32 2015 +0200 @@ -44,6 +44,7 @@ \def\isactrlmedskip{\medskip} \def\isactrlbigskip{\bigskip} +\newcommand{\isaantiqcontrol}[1]{\isatt{{\char`\\}{\char`\<}{\char`\^}#1{\char`\>}}} \newenvironment{isaantiq}{{\isacharat\isacharbraceleft}}{{\isacharbraceright}} \newdimen\isa@parindent\newdimen\isa@parskip diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Eisbach/Manual.thy --- a/src/Doc/Eisbach/Manual.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Eisbach/Manual.thy Sun Oct 18 23:00:32 2015 +0200 @@ -58,7 +58,7 @@ text \ In this example, the facts @{text impI} and @{text conjE} are static. They are evaluated once when the method is defined and cannot be changed later. - This makes the method stable in the sense of \emph{static scoping}: naming + This makes the method stable in the sense of \<^emph>\static scoping\: naming another fact @{text impI} in a later context won't affect the behaviour of @{text "prop_solver\<^sub>1"}. \ @@ -103,7 +103,7 @@ A @{text "named theorem"} is a fact whose contents are produced dynamically within the current proof context. 
The Isar command @{command_ref "named_theorems"} provides simple access to this concept: it declares a - dynamic fact with corresponding \emph{attribute} for managing + dynamic fact with corresponding \<^emph>\attribute\ for managing this particular data slot in the context. \ @@ -171,10 +171,10 @@ section \Higher-order methods\ text \ - The \emph{structured concatenation} combinator ``@{text "method\<^sub>1 ; + The \<^emph>\structured concatenation\ combinator ``@{text "method\<^sub>1 ; method\<^sub>2"}'' was introduced in Isabelle2015, motivated by development of Eisbach. It is similar to ``@{text "method\<^sub>1, method\<^sub>2"}'', but @{text - method\<^sub>2} is invoked on on \emph{all} subgoals that have newly emerged from + method\<^sub>2} is invoked on on \<^emph>\all\ subgoals that have newly emerged from @{text method\<^sub>1}. This is useful to handle cases where the number of subgoals produced by a method is determined dynamically at run-time. \ @@ -192,7 +192,7 @@ method combinators with prefix syntax. For example, to more usefully exploit Isabelle's backtracking, the explicit requirement that a method solve all produced subgoals is frequently useful. This can easily be written as a - \emph{higher-order method} using ``@{text ";"}''. The @{keyword "methods"} + \<^emph>\higher-order method\ using ``@{text ";"}''. The @{keyword "methods"} keyword denotes method parameters that are other proof methods to be invoked by the method being defined. \ @@ -300,9 +300,9 @@ Matching allows methods to introspect the goal state, and to implement more explicit control flow. In the basic case, a term or fact @{text ts} is given - to match against as a \emph{match target}, along with a collection of + to match against as a \<^emph>\match target\, along with a collection of pattern-method pairs @{text "(p, m)"}: roughly speaking, when the pattern - @{text p} matches any member of @{text ts}, the \emph{inner} method @{text + @{text p} matches any member of @{text ts}, the \<^emph>\inner\ method @{text m} will be executed. \ @@ -327,8 +327,8 @@ text\ In the previous example we were able to match against an assumption out of the Isar proof state. In general, however, proof subgoals can be - \emph{unstructured}, with goal parameters and premises arising from rule - application. To address this, @{method match} uses \emph{subgoal focusing} + \<^emph>\unstructured\, with goal parameters and premises arising from rule + application. To address this, @{method match} uses \<^emph>\subgoal focusing\ to produce structured goals out of unstructured ones. In place of fact or term, we may give the keyword @{keyword_def "premises"} as the match target. This causes a subgoal @@ -467,11 +467,11 @@ subsubsection \Inner focusing\ text \ - Premises are \emph{accumulated} for the purposes of subgoal focusing. + Premises are \<^emph>\accumulated\ for the purposes of subgoal focusing. In contrast to using standard methods like @{method frule} within focused match, another @{method match} will have access to all the premises of the outer focus. - \ +\ lemma "A \ B \ A \ B" by (match premises in H: A \ \intro conjI, rule H, @@ -551,8 +551,8 @@ text \ The @{attribute of} attribute behaves similarly. It is worth noting, however, that the positional instantiation of @{attribute of} occurs against - the position of the variables as they are declared \emph{in the match - pattern}. + the position of the variables as they are declared \<^emph>\in the match + pattern\. 
\ lemma @@ -569,7 +569,7 @@ declared the @{typ 'a} slot first and the @{typ 'b} slot second. To get the dynamic behaviour of @{attribute of} we can choose to invoke it - \emph{unchecked}. This avoids trying to do any type inference for the + \<^emph>\unchecked\. This avoids trying to do any type inference for the provided parameters, instead storing them as their most general type and doing type matching at run-time. This, like @{attribute OF}, will throw errors if the expected slots don't exist or there is a type mismatch. @@ -605,7 +605,7 @@ text \ In all previous examples, @{method match} was only ever searching for a single rule or premise. Each local fact would therefore always have a length - of exactly one. We may, however, wish to find \emph{all} matching results. + of exactly one. We may, however, wish to find \<^emph>\all\ matching results. To achieve this, we can simply mark a given pattern with the @{text "(multi)"} argument. \ @@ -806,7 +806,7 @@ text \ The @{method match} method is not aware of the logical content of match targets. Each pattern is simply matched against the shallow structure of a - fact or term. Most facts are in \emph{normal form}, which curries premises + fact or term. Most facts are in \<^emph>\normal form\, which curries premises via meta-implication @{text "_ \ _"}. \ @@ -835,11 +835,11 @@ to @{term "A"} and @{term Q} to @{term "B \ C"}. Our pattern, with a concrete @{term "C"} in the conclusion, will fail to match this fact. - To express our desired match, we may \emph{uncurry} our rules before + To express our desired match, we may \<^emph>\uncurry\ our rules before matching against them. This forms a meta-conjunction of all premises in a fact, so that only one implication remains. For example the uncurried version of @{term "A \ B \ C"} is @{term "A &&& B \ C"}. This will now match - our desired pattern @{text "_ \ C"}, and can be \emph{curried} after the + our desired pattern @{text "_ \ C"}, and can be \<^emph>\curried\ after the match to put it back into normal form. \ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Eisbach/Preface.thy --- a/src/Doc/Eisbach/Preface.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Eisbach/Preface.thy Sun Oct 18 23:00:32 2015 +0200 @@ -5,7 +5,7 @@ begin text \ - \emph{Eisbach} is a collection of tools which form the basis for defining + \<^emph>\Eisbach\ is a collection of tools which form the basis for defining new proof methods in Isabelle/Isar~@{cite "Wenzel-PhD"}. It can be thought of as a ``proof method language'', but is more precisely an infrastructure for defining new proof methods out of existing ones. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Integration.thy --- a/src/Doc/Implementation/Integration.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Integration.thy Sun Oct 18 23:00:32 2015 +0200 @@ -9,7 +9,7 @@ section \Isar toplevel \label{sec:isar-toplevel}\ text \ - The Isar \emph{toplevel state} represents the outermost configuration that + The Isar \<^emph>\toplevel state\ represents the outermost configuration that is transformed by a sequence of transitions (commands) within a theory body. This is a pure value with pure functions acting on it in a timeless and stateless manner. Historically, the sequence of transitions was wrapped up @@ -149,8 +149,8 @@ In batch mode and within dumped logic images, the theory database maintains a collection of theories as a directed acyclic graph. 
A theory may refer to other theories as @{keyword "imports"}, or to auxiliary files via special - \emph{load commands} (e.g.\ @{command ML_file}). For each theory, the base - directory of its own theory file is called \emph{master directory}: this is + \<^emph>\load commands\ (e.g.\ @{command ML_file}). For each theory, the base + directory of its own theory file is called \<^emph>\master directory\: this is used as the relative location to refer to other files from that theory. \ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Isar.thy --- a/src/Doc/Implementation/Isar.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Isar.thy Sun Oct 18 23:00:32 2015 +0200 @@ -8,7 +8,7 @@ @{cite \\S2\ "isabelle-isar-ref"}) consists of three main categories of language elements: - \<^enum> Proof \emph{commands} define the primary language of + \<^enum> Proof \<^emph>\commands\ define the primary language of transactions of the underlying Isar/VM interpreter. Typical examples are @{command "fix"}, @{command "assume"}, @{command "show"}, @{command "proof"}, and @{command "qed"}. @@ -17,7 +17,7 @@ to expressions of structured proof text, such that both the machine and the human reader can give it a meaning as formal reasoning. - \<^enum> Proof \emph{methods} define a secondary language of mixed + \<^enum> Proof \<^emph>\methods\ define a secondary language of mixed forward-backward refinement steps involving facts and goals. Typical examples are @{method rule}, @{method unfold}, and @{method simp}. @@ -26,7 +26,7 @@ language, say as arguments to @{command "proof"}, @{command "qed"}, or @{command "by"}. - \<^enum> \emph{Attributes} define a tertiary language of small + \<^enum> \<^emph>\Attributes\ define a tertiary language of small annotations to theorems being defined or referenced. Attributes can modify both the context and the theorem. @@ -37,7 +37,7 @@ section \Proof commands\ -text \A \emph{proof command} is state transition of the Isar/VM +text \A \<^emph>\proof command\ is state transition of the Isar/VM proof interpreter. In principle, Isar proof commands could be defined in user-space as @@ -50,7 +50,7 @@ What can be done realistically is to define some diagnostic commands that inspect the general state of the Isar/VM, and report some feedback to the user. Typically this involves checking of the - linguistic \emph{mode} of a proof state, or peeking at the pending + linguistic \<^emph>\mode\ of a proof state, or peeking at the pending goals (if available). Another common application is to define a toplevel command that @@ -169,7 +169,7 @@ \ (cases \ goal)\<^sup>*\<^sup>*"} that operates on the full Isar goal configuration with context, goal facts, and tactical goal state and enumerates possible follow-up goal states, with the potential - addition of named extensions of the proof context (\emph{cases}). + addition of named extensions of the proof context (\<^emph>\cases\). The latter feature is rarely used. This means a proof method is like a structurally enhanced tactic @@ -178,8 +178,8 @@ additions. \<^item> Goal addressing is further limited either to operate - uniformly on \emph{all} subgoals, or specifically on the - \emph{first} subgoal. + uniformly on \<^emph>\all\ subgoals, or specifically on the + \<^emph>\first\ subgoal. Exception: old-style tactic emulations that are embedded into the method space, e.g.\ @{method rule_tac}. @@ -250,7 +250,7 @@ Empirically, any Isar proof method can be categorized as follows. 
- \<^enum> \emph{Special method with cases} with named context additions + \<^enum> \<^emph>\Special method with cases\ with named context additions associated with the follow-up goal state. Example: @{method "induct"}, which is also a ``raw'' method since it @@ -258,18 +258,18 @@ Pure conjunction (\secref{sec:logic-aux}), instead of separate subgoals (\secref{sec:tactical-goals}). - \<^enum> \emph{Structured method} with strong emphasis on facts outside + \<^enum> \<^emph>\Structured method\ with strong emphasis on facts outside the goal state. Example: @{method "rule"}, which captures the key ideas behind structured reasoning in Isar in its purest form. - \<^enum> \emph{Simple method} with weaker emphasis on facts, which are + \<^enum> \<^emph>\Simple method\ with weaker emphasis on facts, which are inserted into subgoals to emulate old-style tactical ``premises''. Examples: @{method "simp"}, @{method "blast"}, @{method "auto"}. - \<^enum> \emph{Old-style tactic emulation} with detailed numeric goal + \<^enum> \<^emph>\Old-style tactic emulation\ with detailed numeric goal addressing and explicit references to entities of the internal goal state (which are otherwise invisible from proper Isar proof text). The naming convention @{text "foo_tac"} makes this special @@ -477,7 +477,7 @@ \<^medskip> The technical treatment of rules from the context requires further attention. Above we rebuild a fresh @{ML_type simpset} from - the arguments and \emph{all} rules retrieved from the context on + the arguments and \<^emph>\all\ rules retrieved from the context on every invocation of the method. This does not scale to really large collections of rules, which easily emerges in the context of a big theory library, for example. @@ -495,12 +495,12 @@ section \Attributes \label{sec:attributes}\ -text \An \emph{attribute} is a function @{text "context \ thm \ +text \An \<^emph>\attribute\ is a function @{text "context \ thm \ context \ thm"}, which means both a (generic) context and a theorem can be modified simultaneously. In practice this mixed form is very - rare, instead attributes are presented either as \emph{declaration - attribute:} @{text "thm \ context \ context"} or \emph{rule - attribute:} @{text "context \ thm \ thm"}. + rare, instead attributes are presented either as \<^emph>\declaration + attribute:\ @{text "thm \ context \ context"} or \<^emph>\rule + attribute:\ @{text "context \ thm \ thm"}. Attributes can have additional arguments via concrete syntax. There is a collection of context-sensitive parsers for various logical diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Local_Theory.thy --- a/src/Doc/Implementation/Local_Theory.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Local_Theory.thy Sun Oct 18 23:00:32 2015 +0200 @@ -5,12 +5,12 @@ chapter \Local theory specifications \label{ch:local-theory}\ text \ - A \emph{local theory} combines aspects of both theory and proof + A \<^emph>\local theory\ combines aspects of both theory and proof context (cf.\ \secref{sec:context}), such that definitional specifications may be given relatively to parameters and assumptions. A local theory is represented as a regular proof - context, augmented by administrative data about the \emph{target - context}. + context, augmented by administrative data about the \<^emph>\target + context\. The target is usually derived from the background theory by adding local @{text "\"} and @{text "\"} elements, plus @@ -45,7 +45,7 @@ \secref{sec:variables}). 
These definitional primitives essentially act like @{text "let"}-bindings within a local context that may already contain earlier @{text "let"}-bindings and some initial - @{text "\"}-bindings. Thus we gain \emph{dependent definitions} + @{text "\"}-bindings. Thus we gain \<^emph>\dependent definitions\ that are relative to an initial axiomatic context. The following diagram illustrates this idea of axiomatic elements versus definitional elements: @@ -72,7 +72,7 @@ The cumulative sequence of @{text "\"} and @{text "\"} produced at package runtime is managed by the local theory - infrastructure by means of an \emph{auxiliary context}. Thus the + infrastructure by means of an \<^emph>\auxiliary context\. Thus the system holds up the impression of working within a fully abstract situation with hypothetical entities: @{text "\ c \ t"} always results in a literal fact @{text "\<^BG>c \ t\<^EN>"}, where @@ -113,7 +113,7 @@ \<^descr> @{ML Named_Target.init}~@{text "before_exit name thy"} initializes a local theory derived from the given background theory. - An empty name refers to a \emph{global theory} context, and a + An empty name refers to a \<^emph>\global theory\ context, and a non-empty name refers to a @{command locale} or @{command class} context (a fully-qualified internal name is expected here). This is useful for experimentation --- normally the Isar toplevel already @@ -132,7 +132,7 @@ Unless an explicit name binding is given for the RHS, the resulting fact will be called @{text "b_def"}. Any given attributes are applied to that same fact --- immediately in the auxiliary context - \emph{and} in any transformed versions stemming from target-specific + \<^emph>\and\ in any transformed versions stemming from target-specific policies or any later interpretations of results from the target context (think of @{command locale} and @{command interpretation}, for example). This means that attributes should be usually plain diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Logic.thy --- a/src/Doc/Implementation/Logic.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Logic.thy Sun Oct 18 23:00:32 2015 +0200 @@ -37,14 +37,14 @@ algebra; types are qualified by ordered type classes. \<^medskip> - A \emph{type class} is an abstract syntactic entity - declared in the theory context. The \emph{subclass relation} @{text + A \<^emph>\type class\ is an abstract syntactic entity + declared in the theory context. The \<^emph>\subclass relation\ @{text "c\<^sub>1 \ c\<^sub>2"} is specified by stating an acyclic generating relation; the transitive closure is maintained internally. The resulting relation is an ordering: reflexive, transitive, and antisymmetric. - A \emph{sort} is a list of type classes written as @{text "s = {c\<^sub>1, + A \<^emph>\sort\ is a list of type classes written as @{text "s = {c\<^sub>1, \, c\<^sub>m}"}, it represents symbolic intersection. Notationally, the curly braces are omitted for singleton intersections, i.e.\ any class @{text "c"} may be read as a sort @{text "{c}"}. The ordering @@ -57,20 +57,20 @@ in the current theory is the least element wrt.\ the sort ordering. \<^medskip> - A \emph{fixed type variable} is a pair of a basic name + A \<^emph>\fixed type variable\ is a pair of a basic name (starting with a @{text "'"} character) and a sort constraint, e.g.\ @{text "('a, s)"} which is usually printed as @{text "\\<^sub>s"}. 
- A \emph{schematic type variable} is a pair of an indexname and a + A \<^emph>\schematic type variable\ is a pair of an indexname and a sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually printed as @{text "?\\<^sub>s"}. - Note that \emph{all} syntactic components contribute to the identity + Note that \<^emph>\all\ syntactic components contribute to the identity of type variables: basic name, index, and sort constraint. The core logic handles type variables with the same name but different sorts as different, although the type-inference layer (which is outside the core) rejects anything like that. - A \emph{type constructor} @{text "\"} is a @{text "k"}-ary operator + A \<^emph>\type constructor\ @{text "\"} is a @{text "k"}-ary operator on types declared in the theory. Type constructor application is written postfix as @{text "(\\<^sub>1, \, \\<^sub>k)\"}. For @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"} @@ -80,17 +80,17 @@ right-associative infix @{text "\ \ \"} instead of @{text "(\, \)fun"}. - The logical category \emph{type} is defined inductively over type + The logical category \<^emph>\type\ is defined inductively over type variables and type constructors as follows: @{text "\ = \\<^sub>s | ?\\<^sub>s | (\\<^sub>1, \, \\<^sub>k)\"}. - A \emph{type abbreviation} is a syntactic definition @{text + A \<^emph>\type abbreviation\ is a syntactic definition @{text "(\<^vec>\)\ = \"} of an arbitrary type expression @{text "\"} over variables @{text "\<^vec>\"}. Type abbreviations appear as type constructors in the syntax, but are expanded before entering the logical core. - A \emph{type arity} declares the image behavior of a type + A \<^emph>\type arity\ declares the image behavior of a type constructor wrt.\ the algebra of sorts: @{text "\ :: (s\<^sub>1, \, s\<^sub>k)s"} means that @{text "(\\<^sub>1, \, \\<^sub>k)\"} is of sort @{text "s"} if every argument type @{text "\\<^sub>i"} is @@ -99,7 +99,7 @@ (\<^vec>s)c'"} for any @{text "c' \ c"}. \<^medskip> - The sort algebra is always maintained as \emph{coregular}, + The sort algebra is always maintained as \<^emph>\coregular\, which means that type arities are consistent with the subclass relation: for any type constructor @{text "\"}, and classes @{text "c\<^sub>1 \ c\<^sub>2"}, and arities @{text "\ :: @@ -238,7 +238,7 @@ have an explicit name and type in each occurrence. \<^medskip> - A \emph{bound variable} is a natural number @{text "b"}, + A \<^emph>\bound variable\ is a natural number @{text "b"}, which accounts for the number of intermediate binders between the variable occurrence in the body and its binding position. For example, the de-Bruijn term @{text "\\<^bsub>bool\<^esub>. \\<^bsub>bool\<^esub>. 1 \ 0"} would @@ -247,26 +247,26 @@ different de-Bruijn indices at different occurrences, depending on the nesting of abstractions. - A \emph{loose variable} is a bound variable that is outside the + A \<^emph>\loose variable\ is a bound variable that is outside the scope of local binders. The types (and names) for loose variables can be managed as a separate context, that is maintained as a stack of hypothetical binders. The core logic operates on closed terms, without any loose variables. - A \emph{fixed variable} is a pair of a basic name and a type, e.g.\ + A \<^emph>\fixed variable\ is a pair of a basic name and a type, e.g.\ @{text "(x, \)"} which is usually printed @{text "x\<^sub>\"} here. 
A - \emph{schematic variable} is a pair of an indexname and a type, + \<^emph>\schematic variable\ is a pair of an indexname and a type, e.g.\ @{text "((x, 0), \)"} which is likewise printed as @{text "?x\<^sub>\"}. \<^medskip> - A \emph{constant} is a pair of a basic name and a type, + A \<^emph>\constant\ is a pair of a basic name and a type, e.g.\ @{text "(c, \)"} which is usually printed as @{text "c\<^sub>\"} here. Constants are declared in the context as polymorphic families @{text "c :: \"}, meaning that all substitution instances @{text "c\<^sub>\"} for @{text "\ = \\"} are valid. - The vector of \emph{type arguments} of constant @{text "c\<^sub>\"} wrt.\ + The vector of \<^emph>\type arguments\ of constant @{text "c\<^sub>\"} wrt.\ the declaration @{text "c :: \"} is defined as the codomain of the matcher @{text "\ = {?\\<^sub>1 \ \\<^sub>1, \, ?\\<^sub>n \ \\<^sub>n}"} presented in canonical order @{text "(\\<^sub>1, \, \\<^sub>n)"}, corresponding to the @@ -279,14 +279,14 @@ Constant declarations @{text "c :: \"} may contain sort constraints for type variables in @{text "\"}. These are observed by - type-inference as expected, but \emph{ignored} by the core logic. + type-inference as expected, but \<^emph>\ignored\ by the core logic. This means the primitive logic is able to reason with instances of polymorphic constants that the user-level type-checker would reject due to violation of type class restrictions. \<^medskip> - An \emph{atomic term} is either a variable or constant. - The logical category \emph{term} is defined inductively over atomic + An \<^emph>\atomic term\ is either a variable or constant. + The logical category \<^emph>\term\ is defined inductively over atomic terms, with abstraction and application as follows: @{text "t = b | x\<^sub>\ | ?x\<^sub>\ | c\<^sub>\ | \\<^sub>\. t | t\<^sub>1 t\<^sub>2"}. Parsing and printing takes care of converting between an external representation with named bound @@ -303,7 +303,7 @@ \qquad \infer{@{text "t u :: \"}}{@{text "t :: \ \ \"} & @{text "u :: \"}} \] - A \emph{well-typed term} is a term that can be typed according to these rules. + A \<^emph>\well-typed term\ is a term that can be typed according to these rules. Typing information can be omitted: type-inference is able to reconstruct the most general type of a raw term, while assigning @@ -319,7 +319,7 @@ polymorphic constants occur routinely. \<^medskip> - The \emph{hidden polymorphism} of a term @{text "t :: \"} + The \<^emph>\hidden polymorphism\ of a term @{text "t :: \"} is the set of type variables occurring in @{text "t"}, but not in its type @{text "\"}. This means that the term implicitly depends on type arguments that are not accounted in the result type, i.e.\ @@ -328,7 +328,7 @@ pathological situation notoriously demands additional care. \<^medskip> - A \emph{term abbreviation} is a syntactic definition @{text + A \<^emph>\term abbreviation\ is a syntactic definition @{text "c\<^sub>\ \ t"} of a closed term @{text "t"} of type @{text "\"}, without any hidden polymorphism. A term abbreviation looks like a constant in the syntax, but is expanded before entering the logical @@ -475,8 +475,8 @@ section \Theorems \label{sec:thms}\ text \ - A \emph{proposition} is a well-typed term of type @{text "prop"}, a - \emph{theorem} is a proven proposition (depending on a context of + A \<^emph>\proposition\ is a well-typed term of type @{text "prop"}, a + \<^emph>\theorem\ is a proven proposition (depending on a context of hypotheses and the background theory). 
Primitive inferences include plain Natural Deduction rules for the primary connectives @{text "\"} and @{text "\"} of the framework. There is also a builtin @@ -493,7 +493,7 @@ derivability judgment @{text "A\<^sub>1, \, A\<^sub>n \ B"} is defined inductively by the primitive inferences given in \figref{fig:prim-rules}, with the global restriction that the - hypotheses must \emph{not} contain any schematic variables. The + hypotheses must \<^emph>\not\ contain any schematic variables. The builtin equality is conceptually axiomatized as shown in \figref{fig:pure-equality}, although the implementation works directly with derived inferences. @@ -597,7 +597,7 @@ the result belongs to a different proof context. \<^medskip> - An \emph{oracle} is a function that produces axioms on the + An \<^emph>\oracle\ is a function that produces axioms on the fly. Logically, this is an instance of the @{text "axiom"} rule (\figref{fig:prim-rules}), but there is an operational difference. The system always records oracle invocations within derivations of @@ -608,14 +608,14 @@ Later on, theories are usually developed in a strictly definitional fashion, by stating only certain equalities over new constants. - A \emph{simple definition} consists of a constant declaration @{text + A \<^emph>\simple definition\ consists of a constant declaration @{text "c :: \"} together with an axiom @{text "\ c \ t"}, where @{text "t :: \"} is a closed term without any hidden polymorphism. The RHS may depend on further defined constants, but not @{text "c"} itself. Definitions of functions may be presented as @{text "c \<^vec>x \ t"} instead of the puristic @{text "c \ \\<^vec>x. t"}. - An \emph{overloaded definition} consists of a collection of axioms + An \<^emph>\overloaded definition\ consists of a collection of axioms for the same constant, with zero or one equations @{text "c((\<^vec>\)\) \ t"} for each type constructor @{text "\"} (for distinct variables @{text "\<^vec>\"}). The RHS may mention @@ -717,7 +717,7 @@ cf.\ \secref{sec:context-theory}. \<^descr> @{ML Thm.transfer}~@{text "thy thm"} transfers the given - theorem to a \emph{larger} theory, see also \secref{sec:context}. + theorem to a \<^emph>\larger\ theory, see also \secref{sec:context}. This formal adjustment of the background context has no logical significance, but is occasionally required for formal reasons, e.g.\ when theorems that are imported from more basic theories are used in @@ -920,7 +920,7 @@ "\\<^sub>1, \, \\<^sub>n"} cover the propositions @{text "\"}, @{text "\"}, as well as the proof of @{text "\ \ \"}. - These \emph{sort hypothesis} of a theorem are passed monotonically + These \<^emph>\sort hypothesis\ of a theorem are passed monotonically through further derivations. They are redundant, as long as the statement of a theorem still contains the type variables that are accounted here. The logical significance of sort hypotheses is @@ -965,7 +965,7 @@ text \Thanks to the inference kernel managing sort hypothesis according to their logical significance, this example is merely an - instance of \emph{ex falso quodlibet consequitur} --- not a collapse + instance of \<^emph>\ex falso quodlibet consequitur\ --- not a collapse of the logical framework!\ @@ -975,9 +975,9 @@ The primitive inferences covered so far mostly serve foundational purposes. User-level reasoning usually works via object-level rules that are represented as theorems of Pure. 
Composition of rules - involves \emph{backchaining}, \emph{higher-order unification} modulo + involves \<^emph>\backchaining\, \<^emph>\higher-order unification\ modulo @{text "\\\"}-conversion of @{text "\"}-terms, and so-called - \emph{lifting} of rules into a context of @{text "\"} and @{text + \<^emph>\lifting\ of rules into a context of @{text "\"} and @{text "\"} connectives. Thus the full power of higher-order Natural Deduction in Isabelle/Pure becomes readily available. \ @@ -989,7 +989,7 @@ The idea of object-level rules is to model Natural Deduction inferences in the style of Gentzen @{cite "Gentzen:1935"}, but we allow arbitrary nesting similar to @{cite extensions91}. The most basic - rule format is that of a \emph{Horn Clause}: + rule format is that of a \<^emph>\Horn Clause\: \[ \infer{@{text "A"}}{@{text "A\<^sub>1"} & @{text "\"} & @{text "A\<^sub>n"}} \] @@ -1010,8 +1010,8 @@ Nesting of rules means that the positions of @{text "A\<^sub>i"} may again hold compound rules, not just atomic propositions. - Propositions of this format are called \emph{Hereditary Harrop - Formulae} in the literature @{cite "Miller:1991"}. Here we give an + Propositions of this format are called \<^emph>\Hereditary Harrop + Formulae\ in the literature @{cite "Miller:1991"}. Here we give an inductive characterization as follows: \<^medskip> @@ -1180,7 +1180,7 @@ logical framework. The proof term can be inspected by a separate proof-checker, for example. - According to the well-known \emph{Curry-Howard isomorphism}, a proof + According to the well-known \<^emph>\Curry-Howard isomorphism\, a proof can be viewed as a @{text "\"}-term. Following this idea, proofs in Isabelle are internally represented by a datatype similar to the one for terms described in \secref{sec:terms}. On top of these @@ -1193,9 +1193,9 @@ polymorphism and type classes are ignored. \<^medskip> - \emph{Proof abstractions} of the form @{text "\<^bold>\x :: \. prf"} + \<^emph>\Proof abstractions\ of the form @{text "\<^bold>\x :: \. prf"} or @{text "\<^bold>\p : A. prf"} correspond to introduction of @{text - "\"}/@{text "\"}, and \emph{proof applications} of the form @{text + "\"}/@{text "\"}, and \<^emph>\proof applications\ of the form @{text "p \ t"} or @{text "p \ q"} correspond to elimination of @{text "\"}/@{text "\"}. Actual types @{text "\"}, propositions @{text "A"}, and terms @{text "t"} might be suppressed and reconstructed @@ -1205,10 +1205,10 @@ Various atomic proofs indicate special situations within the proof construction as follows. - A \emph{bound proof variable} is a natural number @{text "b"} that + A \<^emph>\bound proof variable\ is a natural number @{text "b"} that acts as de-Bruijn index for proof term abstractions. - A \emph{minimal proof} ``@{text "?"}'' is a dummy proof term. This + A \<^emph>\minimal proof\ ``@{text "?"}'' is a dummy proof term. This indicates some unrecorded part of the proof. @{text "Hyp A"} refers to some pending hypothesis by giving its @@ -1216,26 +1216,26 @@ similar to loose bound variables or free variables within a term (\secref{sec:terms}). - An \emph{axiom} or \emph{oracle} @{text "a : A[\<^vec>\]"} refers + An \<^emph>\axiom\ or \<^emph>\oracle\ @{text "a : A[\<^vec>\]"} refers some postulated @{text "proof constant"}, which is subject to schematic polymorphism of theory content, and the particular type instantiation may be given explicitly. The vector of types @{text "\<^vec>\"} refers to the schematic type variables in the generic proposition @{text "A"} in canonical order. 
- A \emph{proof promise} @{text "a : A[\<^vec>\]"} is a placeholder + A \<^emph>\proof promise\ @{text "a : A[\<^vec>\]"} is a placeholder for some proof of polymorphic proposition @{text "A"}, with explicit type instantiation as given by the vector @{text "\<^vec>\"}, as above. Unlike axioms or oracles, proof promises may be - \emph{fulfilled} eventually, by substituting @{text "a"} by some + \<^emph>\fulfilled\ eventually, by substituting @{text "a"} by some particular proof @{text "q"} at the corresponding type instance. This acts like Hindley-Milner @{text "let"}-polymorphism: a generic local proof definition may get used at different type instances, and is replaced by the concrete instance eventually. - A \emph{named theorem} wraps up some concrete proof as a closed + A \<^emph>\named theorem\ wraps up some concrete proof as a closed formal entity, in the manner of constant definitions for proof - terms. The \emph{proof body} of such boxed theorems involves some + terms. The \<^emph>\proof body\ of such boxed theorems involves some digest about oracles and promises occurring in the original proof. This allows the inference kernel to manage this critical information without the full overhead of explicit proof terms. @@ -1247,15 +1247,15 @@ text \Fully explicit proof terms can be large, but most of this information is redundant and can be reconstructed from the context. Therefore, the Isabelle/Pure inference kernel records only - \emph{implicit} proof terms, by omitting all typing information in + \<^emph>\implicit\ proof terms, by omitting all typing information in terms, all term and type labels of proof abstractions, and some argument terms of applications @{text "p \ t"} (if possible). There are separate operations to reconstruct the full proof term - later on, using \emph{higher-order pattern unification} + later on, using \<^emph>\higher-order pattern unification\ @{cite "nipkow-patterns" and "Berghofer-Nipkow:2000:TPHOL"}. - The \emph{proof checker} expects a fully reconstructed proof term, + The \<^emph>\proof checker\ expects a fully reconstructed proof term, and can turn it into a theorem by replaying its primitive inferences within the kernel.\ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/ML.thy --- a/src/Doc/Implementation/ML.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/ML.thy Sun Oct 18 23:00:32 2015 +0200 @@ -12,11 +12,11 @@ environment. This covers a variety of aspects that are geared towards an efficient and robust platform for applications of formal logic with fully foundational proof construction --- according to - the well-known \emph{LCF principle}. There is specific + the well-known \<^emph>\LCF principle\. There is specific infrastructure with library modules to address the needs of this difficult task. For example, the raw parallel programming model of Poly/ML is presented as considerably more abstract concept of - \emph{futures}, which is then used to augment the inference + \<^emph>\futures\, which is then used to augment the inference kernel, Isar theory and proof interpreter, and PIDE document management. The main aspects of Isabelle/ML are introduced below. These @@ -26,14 +26,14 @@ history of changes.\footnote{See @{url "http://isabelle.in.tum.de/repos/isabelle"} for the full Mercurial history. 
There are symbolic tags to refer to official - Isabelle releases, as opposed to arbitrary \emph{tip} versions that + Isabelle releases, as opposed to arbitrary \<^emph>\tip\ versions that merely reflect snapshots that are never really up-to-date.}\ section \Style and orthography\ text \The sources of Isabelle/Isar are optimized for - \emph{readability} and \emph{maintainability}. The main purpose is + \<^emph>\readability\ and \<^emph>\maintainability\. The main purpose is to tell an informed reader what is really going on and how things really work. This is a non-trivial aim, but it is supported by a certain style of writing Isabelle/ML that has emerged from long @@ -42,7 +42,7 @@ @{url "http://caml.inria.fr/resources/doc/guides/guidelines.en.html"} which shares many of our means and ends.} - The main principle behind any coding style is \emph{consistency}. + The main principle behind any coding style is \<^emph>\consistency\. For a single author of a small program this merely means ``choose your style and stick to it''. A complex project like Isabelle, with long years of development and different contributors, requires more @@ -53,7 +53,7 @@ of modules and sub-systems, without deviating from some general principles how to write Isabelle/ML. - In a sense, good coding style is like an \emph{orthography} for the + In a sense, good coding style is like an \<^emph>\orthography\ for the sources: it helps to read quickly over the text and see through the main points, without getting distracted by accidental presentation of free-style code. @@ -90,7 +90,7 @@ with more text *)\} - As in regular typography, there is some extra space \emph{before} + As in regular typography, there is some extra space \<^emph>\before\ section headings that are adjacent to plain text, but not other headings as in the example above. @@ -106,7 +106,7 @@ text \Since ML is the primary medium to express the meaning of the source text, naming of ML entities requires special care. - \paragraph{Notation.} A name consists of 1--3 \emph{words} (rarely + \paragraph{Notation.} A name consists of 1--3 \<^emph>\words\ (rarely 4, but not more) that are separated by underscore. There are three variants concerning upper or lower case letters, which are used for certain ML categories as follows: @@ -122,7 +122,7 @@ For historical reasons, many capitalized names omit underscores, e.g.\ old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}. - Genuine mixed-case names are \emph{not} used, because clear division + Genuine mixed-case names are \<^emph>\not\ used, because clear division of words is essential for readability.\footnote{Camel-case was invented to workaround the lack of underscore in some early non-ASCII character sets. Later it became habitual in some language @@ -296,7 +296,7 @@ val pair = (a, b); val record = {foo = 1, bar = 2};\} - Lines are normally broken \emph{after} an infix operator or + Lines are normally broken \<^emph>\after\ an infix operator or punctuation character. For example: @{verbatim [display] @@ -320,7 +320,7 @@ curried function, or @{ML_text "g (a, b)"} for a tupled function. Note that the space between @{ML_text g} and the pair @{ML_text "(a, b)"} follows the important principle of - \emph{compositionality}: the layout of @{ML_text "g p"} does not + \<^emph>\compositionality\: the layout of @{ML_text "g p"} does not change when @{ML_text "p"} is refined to the concrete pair @{ML_text "(a, b)"}. 
@@ -359,9 +359,9 @@ The second form has many problems: it assumes a fixed-width font when viewing the sources, it uses more space on the line and thus makes it hard to observe its strict length limit (working against - \emph{readability}), it requires extra editing to adapt the layout + \<^emph>\readability\), it requires extra editing to adapt the layout to changes of the initial text (working against - \emph{maintainability}) etc. + \<^emph>\maintainability\) etc. \<^medskip> For similar reasons, any kind of two-dimensional or tabular @@ -557,7 +557,7 @@ Removing the above ML declaration from the source text will remove any trace of this definition, as expected. The Isabelle/ML toplevel environment is - managed in a \emph{stateless} way: in contrast to the raw ML toplevel, there + managed in a \<^emph>\stateless\ way: in contrast to the raw ML toplevel, there are no global side-effects involved here.\footnote{Such a stateless compilation environment is also a prerequisite for robust parallel compilation within independent nodes of the implicit theory development @@ -585,7 +585,7 @@ \<^medskip> Two further ML commands are useful in certain situations: - @{command_ref ML_val} and @{command_ref ML_command} are \emph{diagnostic} in + @{command_ref ML_val} and @{command_ref ML_command} are \<^emph>\diagnostic\ in the sense that there is no effect on the underlying environment, and can thus be used anywhere. The examples below produce long strings of digits by invoking @{ML factorial}: @{command ML_val} takes care of printing the ML @@ -649,7 +649,7 @@ subsection \Antiquotations \label{sec:ML-antiq}\ text \A very important consequence of embedding ML into Isar is the - concept of \emph{ML antiquotation}. The standard token language of + concept of \<^emph>\ML antiquotation\. The standard token language of ML is augmented by special syntactic entities of the following form: @{rail \ @@ -663,8 +663,8 @@ A regular antiquotation @{text "@{name args}"} processes its arguments by the usual means of the Isar source language, and produces corresponding ML source text, either as literal - \emph{inline} text (e.g.\ @{text "@{term t}"}) or abstract - \emph{value} (e.g. @{text "@{thm th}"}). This pre-compilation + \<^emph>\inline\ text (e.g.\ @{text "@{term t}"}) or abstract + \<^emph>\value\ (e.g. @{text "@{thm th}"}). This pre-compilation scheme allows to refer to formal entities in a robust manner, with proper static scoping and with some degree of logical checking of small portions of the code. @@ -725,13 +725,13 @@ section \Canonical argument order \label{sec:canonical-argument-order}\ text \Standard ML is a language in the tradition of @{text - "\"}-calculus and \emph{higher-order functional programming}, + "\"}-calculus and \<^emph>\higher-order functional programming\, similar to OCaml, Haskell, or Isabelle/Pure and HOL as logical languages. Getting acquainted with the native style of representing functions in that setting can save a lot of extra boiler-plate of redundant shuffling of arguments, auxiliary abstractions etc. - Functions are usually \emph{curried}: the idea of turning arguments + Functions are usually \<^emph>\curried\: the idea of turning arguments of type @{text "\\<^sub>i"} (for @{text "i \ {1, \ n}"}) into a result of type @{text "\"} is represented by the iterated function space @{text "\\<^sub>1 \ \ \ \\<^sub>n \ \"}. 
This is isomorphic to the well-known @@ -740,7 +740,7 @@ difference is even more significant in HOL, because the redundant tuple structure needs to be accommodated extraneous proof steps.} - Currying gives some flexibility due to \emph{partial application}. A + Currying gives some flexibility due to \<^emph>\partial application\. A function @{text "f: \\<^sub>1 \ \\<^sub>2 \ \"} can be applied to @{text "x: \\<^sub>1"} and the remaining @{text "(f x): \\<^sub>2 \ \"} passed to another function etc. How well this works in practice depends on the order of @@ -749,7 +749,7 @@ glue code. Thus we would get exponentially many opportunities to decorate the code with meaningless permutations of arguments. - This can be avoided by \emph{canonical argument order}, which + This can be avoided by \<^emph>\canonical argument order\, which observes certain standard patterns and minimizes adhoc permutations in their application. In Isabelle/ML, large portions of text can be written without auxiliary operations like @{text "swap: \ \ \ \ \ \ @@ -759,8 +759,8 @@ \<^medskip> The main idea is that arguments that vary less are moved further to the left than those that vary more. Two particularly - important categories of functions are \emph{selectors} and - \emph{updates}. + important categories of functions are \<^emph>\selectors\ and + \<^emph>\updates\. The subsequent scheme is based on a hypothetical set-like container of type @{text "\"} that manages elements of type @{text "\"}. Both @@ -799,11 +799,11 @@ text \Regular function application and infix notation works best for relatively deeply structured expressions, e.g.\ @{text "h (f x y + g - z)"}. The important special case of \emph{linear transformation} + z)"}. The important special case of \<^emph>\linear transformation\ applies a cascade of functions @{text "f\<^sub>n (\ (f\<^sub>1 x))"}. This becomes hard to read and maintain if the functions are themselves given as complex expressions. The notation can be significantly - improved by introducing \emph{forward} versions of application and + improved by introducing \<^emph>\forward\ versions of application and composition as follows: \<^medskip> @@ -1034,7 +1034,7 @@ \begin{warn} Regular Isabelle/ML code should output messages exclusively by the - official channels. Using raw I/O on \emph{stdout} or \emph{stderr} + official channels. Using raw I/O on \<^emph>\stdout\ or \<^emph>\stderr\ instead (e.g.\ via @{ML TextIO.output}) is apt to cause problems in the presence of parallel and asynchronous processing of Isabelle theories. Such raw output might be displayed by the front-end in @@ -1097,7 +1097,7 @@ \paragraph{Regular user errors.} These are meant to provide informative feedback about malformed input etc. - The \emph{error} function raises the corresponding @{ML ERROR} + The \<^emph>\error\ function raises the corresponding @{ML ERROR} exception, with a plain text message as argument. @{ML ERROR} exceptions can be handled internally, in order to be ignored, turned into other exceptions, or cascaded by appending messages. If the @@ -1110,7 +1110,7 @@ @{text "@{make_string}"} nor @{text "@{here}"}! Grammatical correctness of error messages can be improved by - \emph{omitting} final punctuation: messages are often concatenated + \<^emph>\omitting\ final punctuation: messages are often concatenated or put into a larger context (e.g.\ augmented with source position). Note that punctuation after formal entities (types, terms, theorems) is particularly prone to user confusion. 
@@ -1132,7 +1132,7 @@ exceptions locally, e.g.\ to organize internal failures robustly without overlapping with existing exceptions. Exceptions that are exposed in module signatures require extra care, though, and should - \emph{not} be introduced by default. Surprise by users of a module + \<^emph>\not\ be introduced by default. Surprise by users of a module can be often minimized by using plain user errors instead. \paragraph{Interrupts.} These indicate arbitrary system events: @@ -1179,8 +1179,8 @@ \<^descr> @{ML try}~@{text "f x"} makes the partiality of evaluating @{text "f x"} explicit via the option datatype. Interrupts are - \emph{not} handled here, i.e.\ this form serves as safe replacement - for the \emph{unsafe} version @{ML_text "(SOME"}~@{text "f + \<^emph>\not\ handled here, i.e.\ this form serves as safe replacement + for the \<^emph>\unsafe\ version @{ML_text "(SOME"}~@{text "f x"}~@{ML_text "handle _ => NONE)"} that is occasionally seen in books about SML97, but not in Isabelle/ML. @@ -1223,7 +1223,7 @@ section \Strings of symbols \label{sec:symbols}\ -text \A \emph{symbol} constitutes the smallest textual unit in +text \A \<^emph>\symbol\ constitutes the smallest textual unit in Isabelle/ML --- raw ML characters are normally not encountered at all. Isabelle strings consist of a sequence of symbols, represented as a packed string or an exploded list of strings. Each symbol is @@ -1306,7 +1306,7 @@ \<^descr> Type @{ML_type "Symbol.sym"} is a concrete datatype that represents the different kinds of symbols explicitly, with constructors @{ML "Symbol.Char"}, @{ML "Symbol.UTF8"}, - @{ML "Symbol.Sym"}, @{ML "Symbol.Ctrl"}, @{ML "Symbol.Raw"}, + @{ML "Symbol.Sym"}, @{ML "Symbol.Control"}, @{ML "Symbol.Raw"}, @{ML "Symbol.Malformed"}. \<^descr> @{ML "Symbol.decode"} converts the string representation of a @@ -1347,7 +1347,7 @@ @{index_ML_type char} \\ \end{mldecls} - \<^descr> Type @{ML_type char} is \emph{not} used. The smallest textual + \<^descr> Type @{ML_type char} is \<^emph>\not\ used. The smallest textual unit in Isabelle is represented as a ``symbol'' (see \secref{sec:symbols}). \ @@ -1375,7 +1375,7 @@ with @{ML YXML.parse_body} as key operation. Note that Isabelle/ML string literals may refer Isabelle symbols like - ``@{verbatim \}'' natively, \emph{without} escaping the backslash. This is a + ``@{verbatim \}'' natively, \<^emph>\without\ escaping the backslash. This is a consequence of Isabelle treating all source text as strings of symbols, instead of raw characters. \ @@ -1408,7 +1408,7 @@ \end{mldecls} \<^descr> Type @{ML_type int} represents regular mathematical integers, which - are \emph{unbounded}. Overflow is treated properly, but should never happen + are \<^emph>\unbounded\. Overflow is treated properly, but should never happen in practice.\footnote{The size limit for integer bit patterns in memory is 64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.} This works uniformly for all supported ML platforms (Poly/ML and SML/NJ). @@ -1517,7 +1517,7 @@ @{assert} (list2 = items); \ -text \The subsequent example demonstrates how to \emph{merge} two +text \The subsequent example demonstrates how to \<^emph>\merge\ two lists in a natural way.\ ML_val \ @@ -1588,7 +1588,7 @@ text \Due to ubiquitous parallelism in Isabelle/ML (see also \secref{sec:multi-threading}), the mutable reference cells of Standard ML are notorious for causing problems. 
In a highly - parallel system, both correctness \emph{and} performance are easily + parallel system, both correctness \<^emph>\and\ performance are easily degraded when using mutable data. The unwieldy name of @{ML Unsynchronized.ref} for the constructor @@ -1637,7 +1637,7 @@ @{cite "Sutter:2005"}.} Isabelle/Isar exploits the inherent structure of theories and proofs to - support \emph{implicit parallelism} to a large extent. LCF-style theorem + support \<^emph>\implicit parallelism\ to a large extent. LCF-style theorem proving provides almost ideal conditions for that, see also @{cite "Wenzel:2009"}. This means, significant parts of theory and proof checking is parallelized by default. In Isabelle2013, a maximum @@ -1777,10 +1777,10 @@ Isabelle environment. User code should not break this abstraction, but stay within the confines of concurrent Isabelle/ML. - A \emph{synchronized variable} is an explicit state component associated + A \<^emph>\synchronized variable\ is an explicit state component associated with mechanisms for locking and signaling. There are operations to await a condition, change the state, and signal the change to all other waiting - threads. Synchronized access to the state variable is \emph{not} re-entrant: + threads. Synchronized access to the state variable is \<^emph>\not\ re-entrant: direct or indirect nesting within the same thread will cause a deadlock!\ text %mlref \ @@ -1849,7 +1849,7 @@ arguments. The result is either an explicit value or an implicit exception. - \emph{Managed evaluation} in Isabelle/ML organizes expressions and + \<^emph>\Managed evaluation\ in Isabelle/ML organizes expressions and results to control certain physical side-conditions, to say more specifically when and how evaluation happens. For example, the Isabelle/ML library supports lazy evaluation with memoing, parallel @@ -1857,7 +1857,7 @@ evaluation with time limit etc. \<^medskip> - An \emph{unevaluated expression} is represented either as + An \<^emph>\unevaluated expression\ is represented either as unit abstraction @{verbatim "fn () => a"} of type @{verbatim "unit -> 'a"} or as regular function @{verbatim "fn a => b"} of type @{verbatim "'a -> 'b"}. Both forms @@ -1873,13 +1873,13 @@ applied to some argument. \<^medskip> - \emph{Reified results} make the disjoint sum of regular + \<^emph>\Reified results\ make the disjoint sum of regular values versions exceptional situations explicit as ML datatype: @{text "'a result = Res of 'a | Exn of exn"}. This is typically used for administrative purposes, to store the overall outcome of an evaluation process. - \emph{Parallel exceptions} aggregate reified results, such that + \<^emph>\Parallel exceptions\ aggregate reified results, such that multiple exceptions are digested as a collection in canonical form that identifies exceptions according to their original occurrence. This is particular important for parallel evaluation via futures @@ -1944,7 +1944,7 @@ programming interfaces that resemble the sequential versions. What remains is the application-specific problem to present - expressions with suitable \emph{granularity}: each list element + expressions with suitable \<^emph>\granularity\: each list element corresponds to one evaluation task. If the granularity is too coarse, the available CPUs are not saturated. If it is too fine-grained, CPU cycles are wasted due to the overhead of @@ -2005,7 +2005,7 @@ multi-threading, synchronous program exceptions and asynchronous interrupts. 
The first thread that invokes @{text force} on an unfinished lazy value - changes its state into a \emph{promise} of the eventual result and starts + changes its state into a \<^emph>\promise\ of the eventual result and starts evaluating it. Any other threads that @{text force} the same lazy value in the meantime need to wait for it to finish, by producing a regular result or program exception. If the evaluation attempt is interrupted, this event is @@ -2015,7 +2015,7 @@ This means a lazy value is completely evaluated at most once, in a thread-safe manner. There might be multiple interrupted evaluation attempts, and multiple receivers of intermediate interrupt events. Interrupts are - \emph{not} made persistent: later evaluation attempts start again from the + \<^emph>\not\ made persistent: later evaluation attempts start again from the original expression. \ @@ -2059,8 +2059,8 @@ above). Technically, a future is a single-assignment variable together with a - \emph{task} that serves administrative purposes, notably within the - \emph{task queue} where new futures are registered for eventual evaluation + \<^emph>\task\ that serves administrative purposes, notably within the + \<^emph>\task queue\ where new futures are registered for eventual evaluation and the worker threads retrieve their work. The pool of worker threads is limited, in correlation with the number of @@ -2072,7 +2072,7 @@ timeout. \<^medskip> - Each future task belongs to some \emph{task group}, which + Each future task belongs to some \<^emph>\task group\, which represents the hierarchic structure of related tasks, together with the exception status a that point. By default, the task group of a newly created future is a new sub-group of the presently running one, but it is also @@ -2082,17 +2082,17 @@ all its sub-groups. Thus interrupts are propagated down the group hierarchy. Regular program exceptions are treated likewise: failure of the evaluation of some future task affects its own group and all sub-groups. Given a - particular task group, its \emph{group status} cumulates all relevant + particular task group, its \<^emph>\group status\ cumulates all relevant exceptions according to its position within the group hierarchy. Interrupted tasks that lack regular result information, will pick up parallel exceptions from the cumulative group status. \<^medskip> - A \emph{passive future} or \emph{promise} is a future with slightly + A \<^emph>\passive future\ or \<^emph>\promise\ is a future with slightly different evaluation policies: there is only a single-assignment variable - and some expression to evaluate for the \emph{failed} case (e.g.\ to clean + and some expression to evaluate for the \<^emph>\failed\ case (e.g.\ to clean up resources when canceled). A regular result is produced by external means, - using a separate \emph{fulfill} operation. + using a separate \<^emph>\fulfill\ operation. Promises are managed in the same task queue, so regular futures may depend on them. 
This allows a form of reactive programming, where some promises are diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Prelim.thy --- a/src/Doc/Implementation/Prelim.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Prelim.thy Sun Oct 18 23:00:32 2015 +0200 @@ -27,12 +27,12 @@ principles: \<^item> Transfer: monotonicity of derivations admits results to be - transferred into a \emph{larger} context, i.e.\ @{text "\ \\<^sub>\ + transferred into a \<^emph>\larger\ context, i.e.\ @{text "\ \\<^sub>\ \"} implies @{text "\' \\<^sub>\\<^sub>' \"} for contexts @{text "\' \ \"} and @{text "\' \ \"}. \<^item> Export: discharge of hypotheses admits results to be exported - into a \emph{smaller} context, i.e.\ @{text "\' \\<^sub>\ \"} + into a \<^emph>\smaller\ context, i.e.\ @{text "\' \\<^sub>\ \"} implies @{text "\ \\<^sub>\ \ \ \"} where @{text "\' \ \"} and @{text "\ = \' - \"}. Note that @{text "\"} remains unchanged here, only the @{text "\"} part is affected. @@ -42,9 +42,9 @@ By modeling the main characteristics of the primitive @{text "\"} and @{text "\"} above, and abstracting over any particular logical content, we arrive at the fundamental notions of - \emph{theory context} and \emph{proof context} in Isabelle/Isar. - These implement a certain policy to manage arbitrary \emph{context - data}. There is a strongly-typed mechanism to declare new kinds of + \<^emph>\theory context\ and \<^emph>\proof context\ in Isabelle/Isar. + These implement a certain policy to manage arbitrary \<^emph>\context + data\. There is a strongly-typed mechanism to declare new kinds of data at compile time. The internal bootstrap process of Isabelle/Pure eventually reaches a @@ -73,7 +73,7 @@ subsection \Theory context \label{sec:context-theory}\ -text \A \emph{theory} is a data container with explicit name and +text \A \<^emph>\theory\ is a data container with explicit name and unique identifier. Theories are related by a (nominal) sub-theory relation, which corresponds to the dependency graph of the original construction; each theory is derived from a certain sub-graph of @@ -227,7 +227,7 @@ @{ML_antiquotation_def "context"} & : & @{text ML_antiquotation} \\ \end{matharray} - \<^descr> @{text "@{context}"} refers to \emph{the} context at + \<^descr> @{text "@{context}"} refers to \<^emph>\the\ context at compile-time --- as abstract value. Independently of (local) theory or proof mode, this always produces a meaningful result. @@ -294,7 +294,7 @@ \end{tabular} \<^medskip> - The @{text "empty"} value acts as initial default for \emph{any} + The @{text "empty"} value acts as initial default for \<^emph>\any\ theory that does not declare actual data content; @{text "extend"} is acts like a unitary version of @{text "merge"}. @@ -322,7 +322,7 @@ from the given background theory and should be somehow ``immediate''. Whenever a proof context is initialized, which happens frequently, the the system invokes the @{text "init"} - operation of \emph{all} theory data slots ever declared. This also + operation of \<^emph>\all\ theory data slots ever declared. This also means that one needs to be economic about the total number of proof data declarations in the system, i.e.\ each ML module should declare at most one, sometimes two data slots for its internal use. 
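
The following minimal sketch declares one such data slot; the structure name and its string-list content are invented for illustration, while the Theory_Data interface is the one described in this section.

  ML \<open>
    structure Frobs = Theory_Data
    (
      type T = string list;                  (*canonical representation*)
      val empty = [];                        (*initial value for any new theory*)
      val extend = I;                        (*re-initialization keeps the value*)
      fun merge (xs, ys) = Library.merge (op =) (xs, ys);
    );

    val declare_frob = Frobs.map o insert (op =);   (*string -> theory -> theory*)
    val get_frobs = Frobs.get;                      (*theory -> string list*)
  \<close>
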
@@ -456,7 +456,7 @@ subsection \Configuration options \label{sec:config-options}\ -text \A \emph{configuration option} is a named optional value of +text \A \<^emph>\configuration option\ is a named optional value of some basic type (Boolean, integer, string) that is stored in the context. It is a simple application of general context data (\secref{sec:context-data}) that is sufficiently common to justify @@ -604,11 +604,11 @@ subsection \Basic names \label{sec:basic-name}\ text \ - A \emph{basic name} essentially consists of a single Isabelle + A \<^emph>\basic name\ essentially consists of a single Isabelle identifier. There are conventions to mark separate classes of basic names, by attaching a suffix of underscores: one underscore means - \emph{internal name}, two underscores means \emph{Skolem name}, - three underscores means \emph{internal Skolem name}. + \<^emph>\internal name\, two underscores means \<^emph>\Skolem name\, + three underscores means \<^emph>\internal Skolem name\. For example, the basic name @{text "foo"} has the internal version @{text "foo_"}, with Skolem versions @{text "foo__"} and @{text @@ -622,7 +622,7 @@ \<^medskip> Manipulating binding scopes often requires on-the-fly - renamings. A \emph{name context} contains a collection of already + renamings. A \<^emph>\name context\ contains a collection of already used names. The @{text "declare"} operation adds names to the context. @@ -718,7 +718,7 @@ subsection \Indexed names \label{sec:indexname}\ text \ - An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic + An \<^emph>\indexed name\ (or @{text "indexname"}) is a pair of a basic name and a natural number. This representation allows efficient renaming by incrementing the second component only. The canonical way to rename two collections of indexnames apart from each other is @@ -765,10 +765,10 @@ subsection \Long names \label{sec:long-name}\ -text \A \emph{long name} consists of a sequence of non-empty name +text \A \<^emph>\long name\ consists of a sequence of non-empty name components. The packed representation uses a dot as separator, as - in ``@{text "A.b.c"}''. The last component is called \emph{base - name}, the remaining prefix is called \emph{qualifier} (which may be + in ``@{text "A.b.c"}''. The last component is called \<^emph>\base + name\, the remaining prefix is called \<^emph>\qualifier\ (which may be empty). The qualifier can be understood as the access path to the named entity while passing through some nested block-structure, although our free-form long names do not really enforce any strict diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Proof.thy --- a/src/Doc/Implementation/Proof.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Proof.thy Sun Oct 18 23:00:32 2015 +0200 @@ -11,7 +11,7 @@ is considered as ``free''. Logically, free variables act like outermost universal quantification at the sequent level: @{text "A\<^sub>1(x), \, A\<^sub>n(x) \ B(x)"} means that the result - holds \emph{for all} values of @{text "x"}. Free variables for + holds \<^emph>\for all\ values of @{text "x"}. Free variables for terms (not types) can be fully internalized into the logic: @{text "\ B(x)"} and @{text "\ \x. B(x)"} are interchangeable, provided that @{text "x"} does not occur elsewhere in the context. 
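
A configuration option in the sense described above can be declared and consulted as in the following minimal sketch at theory level; the option name demo_trace and the helper demo_msg are invented for this example.

  ML \<open>
    val demo_trace = Attrib.setup_config_bool @{binding demo_trace} (K false);

    (*diagnostic output that follows the option value of the given context*)
    fun demo_msg ctxt msg =
      if Config.get ctxt demo_trace then tracing msg else ();
  \<close>

The option can subsequently be changed in Isar via declare [[demo_trace = true]] or in ML via Config.put.
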
@@ -22,8 +22,8 @@ The Pure logic represents the idea of variables being either inside or outside the current scope by providing separate syntactic - categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\ - \emph{schematic variables} (e.g.\ @{text "?x"}). Incidently, a + categories for \<^emph>\fixed variables\ (e.g.\ @{text "x"}) vs.\ + \<^emph>\schematic variables\ (e.g.\ @{text "?x"}). Incidently, a universal result @{text "\ \x. B(x)"} has the HHF normal form @{text "\ B(?x)"}, which represents its generality without requiring an explicit quantifier. The same principle works for type variables: @@ -40,12 +40,12 @@ We allow a slightly less formalistic mode of operation: term variables @{text "x"} are fixed without specifying a type yet - (essentially \emph{all} potential occurrences of some instance + (essentially \<^emph>\all\ potential occurrences of some instance @{text "x\<^sub>\"} are fixed); the first occurrence of @{text "x"} within a specific term assigns its most general type, which is then maintained consistently in the context. The above example becomes @{text "\ = x: term, \: type, A(x\<^sub>\)"}, where type @{text - "\"} is fixed \emph{after} term @{text "x"}, and the constraint + "\"} is fixed \<^emph>\after\ term @{text "x"}, and the constraint @{text "x :: \"} is an implicit consequence of the occurrence of @{text "x\<^sub>\"} in the subsequent proposition. @@ -219,7 +219,7 @@ section \Assumptions \label{sec:assumptions}\ text \ - An \emph{assumption} is a proposition that it is postulated in the + An \<^emph>\assumption\ is a proposition that it is postulated in the current context. Local conclusions may use assumptions as additional facts, but this imposes implicit hypotheses that weaken the overall statement. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Syntax.thy --- a/src/Doc/Implementation/Syntax.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Syntax.thy Sun Oct 18 23:00:32 2015 +0200 @@ -8,10 +8,10 @@ text \Pure @{text "\"}-calculus as introduced in \chref{ch:logic} is an adequate foundation for logical languages --- in the tradition of - \emph{higher-order abstract syntax} --- but end-users require + \<^emph>\higher-order abstract syntax\ --- but end-users require additional means for reading and printing of terms and types. This - important add-on outside the logical core is called \emph{inner - syntax} in Isabelle jargon, as opposed to the \emph{outer syntax} of + important add-on outside the logical core is called \<^emph>\inner + syntax\ in Isabelle jargon, as opposed to the \<^emph>\outer syntax\ of the theory and proof language @{cite "isabelle-isar-ref"}. For example, according to @{cite church40} quantifiers are represented as @@ -26,13 +26,13 @@ the system.} \<^medskip> - The main inner syntax operations are \emph{read} for - parsing together with type-checking, and \emph{pretty} for formatted + The main inner syntax operations are \<^emph>\read\ for + parsing together with type-checking, and \<^emph>\pretty\ for formatted output. See also \secref{sec:read-print}. Furthermore, the input and output syntax layers are sub-divided into - separate phases for \emph{concrete syntax} versus \emph{abstract - syntax}, see also \secref{sec:parse-unparse} and + separate phases for \<^emph>\concrete syntax\ versus \<^emph>\abstract + syntax\, see also \secref{sec:parse-unparse} and \secref{sec:term-check}, respectively. 
This results in the following decomposition of the main operations: @@ -147,16 +147,16 @@ text \ Parsing and unparsing converts between actual source text and a certain - \emph{pre-term} format, where all bindings and scopes are already resolved + \<^emph>\pre-term\ format, where all bindings and scopes are already resolved faithfully. Thus the names of free variables or constants are determined in the sense of the logical context, but type information might be still - missing. Pre-terms support an explicit language of \emph{type constraints} - that may be augmented by user code to guide the later \emph{check} phase. + missing. Pre-terms support an explicit language of \<^emph>\type constraints\ + that may be augmented by user code to guide the later \<^emph>\check\ phase. Actual parsing is based on traditional lexical analysis and Earley parsing for arbitrary context-free grammars. The user can specify the grammar - declaratively via mixfix annotations. Moreover, there are \emph{syntax - translations} that can be augmented by the user, either declaratively via + declaratively via mixfix annotations. Moreover, there are \<^emph>\syntax + translations\ that can be augmented by the user, either declaratively via @{command translations} or programmatically via @{command parse_translation}, @{command print_translation} @{cite "isabelle-isar-ref"}. The final scope-resolution is performed by the system, @@ -204,10 +204,10 @@ fully-annotated terms in the sense of the logical core (\chref{ch:logic}). - The \emph{check} phase is meant to subsume a variety of mechanisms + The \<^emph>\check\ phase is meant to subsume a variety of mechanisms in the manner of ``type-inference'' or ``type-reconstruction'' or ``type-improvement'', not just type-checking in the narrow sense. - The \emph{uncheck} phase is roughly dual, it prunes type-information + The \<^emph>\uncheck\ phase is roughly dual, it prunes type-information before pretty printing. A typical add-on for the check/uncheck syntax layer is the @{command diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Implementation/Tactic.thy --- a/src/Doc/Implementation/Tactic.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Implementation/Tactic.thy Sun Oct 18 23:00:32 2015 +0200 @@ -93,14 +93,14 @@ maps a given goal state (represented as a theorem, cf.\ \secref{sec:tactical-goals}) to a lazy sequence of potential successor states. The underlying sequence implementation is lazy - both in head and tail, and is purely functional in \emph{not} + both in head and tail, and is purely functional in \<^emph>\not\ supporting memoing.\footnote{The lack of memoing and the strict nature of ML requires some care when working with low-level sequence operations, to avoid duplicate or premature evaluation of results. It also means that modified runtime behavior, such as timeout, is very hard to achieve for general tactics.} - An \emph{empty result sequence} means that the tactic has failed: in + An \<^emph>\empty result sequence\ means that the tactic has failed: in a compound tactic expression other tactics might be tried instead, or the whole refinement step might fail outright, producing a toplevel error message in the end. When implementing tactics from @@ -108,7 +108,7 @@ mapping regular error conditions to an empty result; only serious faults should emerge as exceptions. - By enumerating \emph{multiple results}, a tactic can easily express + By enumerating \<^emph>\multiple results\, a tactic can easily express the potential outcome of an internal search process. 
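
As a minimal sketch of this view of tactics, consider the following ML definitions; the names are invented, only the Seq operations and the type abbreviation tactic = thm -> thm Seq.seq belong to the library.

  ML \<open>
    (*succeed with exactly one result: the unchanged goal state*)
    val succeed_tac : tactic = Seq.single;

    (*fail outright: an empty result sequence*)
    val fail_tac : tactic = fn _ => Seq.empty;

    (*enumerate the results of two tactics, to support backtracking search;
      this approximates what the APPEND tactical provides*)
    fun both_tac (tac1: tactic) (tac2: tactic) : tactic =
      fn st => Seq.append (tac1 st) (tac2 st);
  \<close>
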
There are also combinators for building proof tools that involve search systematically, see also \secref{sec:tacticals}. @@ -120,7 +120,7 @@ must not change the main conclusion (apart from instantiating schematic goal variables). - Tactics with explicit \emph{subgoal addressing} are of the form + Tactics with explicit \<^emph>\subgoal addressing\ are of the form @{text "int \ tactic"} and may be applied to a particular subgoal (counting from 1). If the subgoal number is out of range, the tactic should fail with an empty result sequence, but must not raise @@ -229,14 +229,14 @@ subsection \Resolution and assumption tactics \label{sec:resolve-assume-tac}\ -text \\emph{Resolution} is the most basic mechanism for refining a +text \\<^emph>\Resolution\ is the most basic mechanism for refining a subgoal using a theorem as object-level rule. - \emph{Elim-resolution} is particularly suited for elimination rules: + \<^emph>\Elim-resolution\ is particularly suited for elimination rules: it resolves with a rule, proves its first premise by assumption, and finally deletes that assumption from any new subgoals. - \emph{Destruct-resolution} is like elim-resolution, but the given + \<^emph>\Destruct-resolution\ is like elim-resolution, but the given destruction rules are first turned into canonical elimination - format. \emph{Forward-resolution} is like destruct-resolution, but + format. \<^emph>\Forward-resolution\ is like destruct-resolution, but without deleting the selected assumption. The @{text "r/e/d/f"} naming convention is maintained for several different kinds of resolution rules and tactics. @@ -520,7 +520,7 @@ section \Tacticals \label{sec:tacticals}\ -text \A \emph{tactical} is a functional combinator for building up +text \A \<^emph>\tactical\ is a functional combinator for building up complex tactics from simpler ones. Common tacticals perform sequential composition, disjunctive choice, iteration, or goal addressing. Various search strategies may be expressed via @@ -572,7 +572,7 @@ \<^descr> @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the possible results of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}. Unlike - @{ML_op "ORELSE"} there is \emph{no commitment} to either tactic, so + @{ML_op "ORELSE"} there is \<^emph>\no commitment\ to either tactic, so @{ML_op "APPEND"} helps to avoid incompleteness during search, at the cost of potential inefficiencies. @@ -688,7 +688,7 @@ @{ML_type "int -> tactic"} can be used together with tacticals that act like ``subgoal quantifiers'': guided by success of the body tactic a certain range of subgoals is covered. Thus the body tactic - is applied to \emph{all} subgoals, \emph{some} subgoal etc. + is applied to \<^emph>\all\ subgoals, \<^emph>\some\ subgoal etc. Suppose that the goal state has @{text "n \ 0"} subgoals. Many of these tacticals address subgoal ranges counting downwards from diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Document_Preparation.thy --- a/src/Doc/Isar_Ref/Document_Preparation.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Document_Preparation.thy Sun Oct 18 23:00:32 2015 +0200 @@ -9,8 +9,8 @@ within that format. This allows to produce papers, books, theses etc.\ from Isabelle theory sources. 
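
For a small taste of such marked-up text, here is a sketch of a text block in a theory based on Main, using the short-form control-symbol antiquotations introduced by this change set; the sentence content itself is invented.

  text \<open>
    Here \<^emph>\<open>emphasized\<close> and \<^bold>\<open>bold\<close> phrases are produced by the short-form
    antiquotations, while @{term "x + y"} refers to a formal entity that is
    checked in the current context.
  \<close>
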
- {\LaTeX} output is generated while processing a \emph{session} in - batch mode, as explained in the \emph{The Isabelle System Manual} + {\LaTeX} output is generated while processing a \<^emph>\session\ in + batch mode, as explained in the \<^emph>\The Isabelle System Manual\ @{cite "isabelle-system"}. The main Isabelle tools to get started with document preparation are @{tool_ref mkroot} and @{tool_ref build}. @@ -67,7 +67,7 @@ All text passed to any of the above markup commands may refer to formal - entities via \emph{document antiquotations}, see also \secref{sec:antiq}. + entities via \<^emph>\document antiquotations\, see also \secref{sec:antiq}. These are interpreted in the present theory or proof context. \<^medskip> @@ -81,7 +81,6 @@ text \ \begin{matharray}{rcl} - @{command_def "print_antiquotations"}@{text "\<^sup>*"} & : & @{text "context \ "} \\ @{antiquotation_def "theory"} & : & @{text antiquotation} \\ @{antiquotation_def "thm"} & : & @{text antiquotation} \\ @{antiquotation_def "lemma"} & : & @{text antiquotation} \\ @@ -104,10 +103,13 @@ @{antiquotation_def ML_type} & : & @{text antiquotation} \\ @{antiquotation_def ML_structure} & : & @{text antiquotation} \\ @{antiquotation_def ML_functor} & : & @{text antiquotation} \\ + @{antiquotation_def emph} & : & @{text antiquotation} \\ + @{antiquotation_def bold} & : & @{text antiquotation} \\ @{antiquotation_def verbatim} & : & @{text antiquotation} \\ @{antiquotation_def "file"} & : & @{text antiquotation} \\ @{antiquotation_def "url"} & : & @{text antiquotation} \\ @{antiquotation_def "cite"} & : & @{text antiquotation} \\ + @{command_def "print_antiquotations"}@{text "\<^sup>*"} & : & @{text "context \ "} \\ \end{matharray} The overall content of an Isabelle/Isar theory may alternate between @@ -116,7 +118,7 @@ (\secref{sec:markup}) or document comments (\secref{sec:comments}). The argument of markup commands quotes informal text to be printed in the resulting document, but may again refer to formal entities - via \emph{document antiquotations}. + via \<^emph>\document antiquotations\. For example, embedding @{verbatim \@{term [show_types] "f x = a + x"}\} within a text block makes @@ -129,15 +131,25 @@ antiquotations are checked within the current theory or proof context. + \<^medskip> + Antiquotations are in general written @{verbatim "@{"}@{text "name + [options] arguments"}@{verbatim "}"}. The short form @{verbatim + \\\}@{verbatim "<^"}@{text name}@{verbatim ">"}@{text + "\argument_content\"} (without surrounding @{verbatim "@{"}@{text + "\"}@{verbatim "}"}) works for a single argument that is a cartouche. + + \begingroup + \def\isasymcontrolstart{\isatt{\isacharbackslash\isacharless\isacharcircum}} @{rail \ - @@{command print_antiquotations} ('!'?) + @{syntax_def antiquotation}: + '@{' antiquotation_body '}' | + '\' @{syntax_ref name} '>' @{syntax_ref cartouche} \} + \endgroup %% FIXME less monolithic presentation, move to individual sections!? @{rail \ - '@{' antiquotation '}' - ; - @{syntax_def antiquotation}: + @{syntax_def antiquotation_body}: @@{antiquotation theory} options @{syntax name} | @@{antiquotation thm} options styles @{syntax thmrefs} | @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? 
| @@ -163,6 +175,8 @@ @@{antiquotation ML_type} options @{syntax text} | @@{antiquotation ML_structure} options @{syntax text} | @@{antiquotation ML_functor} options @{syntax text} | + @@{antiquotation emph} options @{syntax text} | + @@{antiquotation bold} options @{syntax text} | @@{antiquotation verbatim} options @{syntax text} | @@{antiquotation "file"} options @{syntax name} | @@{antiquotation file_unchecked} options @{syntax name} | @@ -176,16 +190,14 @@ styles: '(' (style + ',') ')' ; style: (@{syntax name} +) + ; + @@{command print_antiquotations} ('!'?) \} - Note that the syntax of antiquotations may \emph{not} include source + Note that the syntax of antiquotations may \<^emph>\not\ include source comments @{verbatim "(*"}~@{text "\"}~@{verbatim "*)"} nor verbatim text @{verbatim "{*"}~@{text "\"}~@{verbatim "*}"}. - \<^descr> @{command "print_antiquotations"} prints all document antiquotations - that are defined in the current context; the ``@{text "!"}'' option - indicates extra verbosity. - \<^descr> @{text "@{theory A}"} prints the name @{text "A"}, which is guaranteed to refer to a valid ancestor theory in the current context. @@ -230,7 +242,7 @@ e.g.\ small pieces of terms that should not be parsed or type-checked yet. - \<^descr> @{text "@{goals}"} prints the current \emph{dynamic} goal + \<^descr> @{text "@{goals}"} prints the current \<^emph>\dynamic\ goal state. This is mainly for support of tactic-emulation scripts within Isar. Presentation of goal states does not conform to the idea of human-readable proof documents! @@ -257,6 +269,12 @@ check text @{text s} as ML value, infix operator, type, structure, and functor respectively. The source is printed verbatim. + \<^descr> @{text "@{emph s}"} prints document source recursively, with {\LaTeX} + markup @{verbatim \\emph\}@{text "\"}@{verbatim \}\}. + + \<^descr> @{text "@{bold s}"} prints document source recursively, with {\LaTeX} + markup @{verbatim \\textbf{\}@{text "\"}@{verbatim \}\}. + \<^descr> @{text "@{verbatim s}"} prints uninterpreted source text literally as ASCII characters, using some type-writer font style. @@ -283,13 +301,17 @@ @{antiquotation_option_def cite_macro}, or the configuration option @{attribute cite_macro} in the context. For example, @{text "@{cite [cite_macro = nocite] foobar}"} produces @{verbatim \\nocite{foobar}\}. + + \<^descr> @{command "print_antiquotations"} prints all document antiquotations + that are defined in the current context; the ``@{text "!"}'' option + indicates extra verbosity. \ subsection \Styled antiquotations\ text \The antiquotations @{text thm}, @{text prop} and @{text - term} admit an extra \emph{style} specification to modify the + term} admit an extra \<^emph>\style\ specification to modify the printed result. A style is specified by a name with a possibly empty number of arguments; multiple styles can be sequenced with commas. The following standard styles are available: @@ -473,7 +495,7 @@ below. \begingroup - \def\isasymnewline{\isatext{\tt\isacharbackslash}} + \def\isasymnewline{\isatt{\isacharbackslash\isacharless newline\isachargreater}} @{rail \ rule? + ';' ; diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Framework.thy --- a/src/Doc/Isar_Ref/Framework.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Framework.thy Sun Oct 18 23:00:32 2015 +0200 @@ -254,8 +254,8 @@ propositions of proof terms have been shown. 
The @{text "\"}-structure of proofs can be recorded as an optional feature of the Pure inference kernel @{cite "Berghofer-Nipkow:2000:TPHOL"}, but - the formal system can never depend on them due to \emph{proof - irrelevance}. + the formal system can never depend on them due to \<^emph>\proof + irrelevance\. On top of this most primitive layer of proofs, Pure implements a generic calculus for nested natural deduction rules, similar to @@ -333,7 +333,7 @@ that rule statements always observe the normal form where quantifiers are pulled in front of implications at each level of nesting. This means that any Pure proposition may be presented as a - \emph{Hereditary Harrop Formula} @{cite "Miller:1991"} which is of the + \<^emph>\Hereditary Harrop Formula\ @{cite "Miller:1991"} which is of the form @{text "\x\<^sub>1 \ x\<^sub>m. H\<^sub>1 \ \ H\<^sub>n \ A"} for @{text "m, n \ 0"}, and @{text "A"} atomic, and @{text "H\<^sub>1, \, H\<^sub>n"} being recursively of the same format. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Generic.thy --- a/src/Doc/Isar_Ref/Generic.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Generic.thy Sun Oct 18 23:00:32 2015 +0200 @@ -149,7 +149,7 @@ \} \<^descr> @{attribute tagged}~@{text "name value"} and @{attribute - untagged}~@{text name} add and remove \emph{tags} of some theorem. + untagged}~@{text name} add and remove \<^emph>\tags\ of some theorem. Tags may be any list of string pairs that serve as formal comment. The first string is considered the tag name, the second its value. Note that @{attribute untagged} removes any tags of the same name. @@ -395,7 +395,7 @@ lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops -text \The above attempt \emph{fails}, because @{term "0"} and @{term +text \The above attempt \<^emph>\fails\, because @{term "0"} and @{term "op +"} in the HOL library are declared as generic type class operations, without stating any algebraic laws yet. More specific types are required to get access to certain standard simplifications @@ -586,7 +586,7 @@ @{text [display] "?p = ?q \ (?q \ ?a = ?c) \ (\ ?q \ ?b = ?d) \ (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"} - A congruence rule can also \emph{prevent} simplification of some + A congruence rule can also \<^emph>\prevent\ simplification of some arguments. Here is an alternative congruence rule for conditional expressions that conforms to non-strict functional evaluation: @{text [display] "?p = ?q \ (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"} @@ -617,7 +617,7 @@ point. Also note that definitional packages like @{command "datatype"}, @{command "primrec"}, @{command "fun"} routinely declare Simplifier rules to the target context, while plain - @{command "definition"} is an exception in \emph{not} declaring + @{command "definition"} is an exception in \<^emph>\not\ declaring anything. \<^medskip> @@ -647,7 +647,7 @@ subsection \Ordered rewriting with permutative rules\ -text \A rewrite rule is \emph{permutative} if the left-hand side and +text \A rewrite rule is \<^emph>\permutative\ if the left-hand side and right-hand side are the equal up to renaming of variables. The most common permutative rule is commutativity: @{text "?x + ?y = ?y + ?x"}. Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) - @@ -656,7 +656,7 @@ special attention. Because ordinary rewriting loops given such rules, the Simplifier - employs a special strategy, called \emph{ordered rewriting}. + employs a special strategy, called \<^emph>\ordered rewriting\. 
Permutative rules are detected and only applied if the rewriting step decreases the redex wrt.\ a given term ordering. For example, commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then @@ -688,7 +688,7 @@ \<^item> To complete your set of rewrite rules, you must add not just associativity (A) and commutativity (C) but also a derived rule - \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}. + \<^emph>\left-commutativity\ (LC): @{text "f x (f y z) = f y (f x z)"}. Ordered rewriting with the combination of A, C, and LC sorts a term @@ -818,7 +818,7 @@ Any successful result needs to be a (possibly conditional) rewrite rule @{text "t \ u"} that is applicable to the current redex. The rule will be applied just as any ordinary rewrite rule. It is - expected to be already in \emph{internal form}, bypassing the + expected to be already in \<^emph>\internal form\, bypassing the automatic preprocessing of object-level equivalences. \begin{matharray}{rcl} @@ -1027,7 +1027,7 @@ \begin{warn} If a premise of a congruence rule cannot be proved, then the congruence is ignored. This should only happen if the rule is - \emph{conditional} --- that is, contains premises not of the form + \<^emph>\conditional\ --- that is, contains premises not of the form @{text "t = ?x"}. Otherwise it indicates that some congruence rule, or possibly the subgoaler or solver, is faulty. \end{warn} @@ -1054,7 +1054,7 @@ all over again. Each of the subgoals generated by the looper is attacked in turn, in reverse order. - A typical looper is \emph{case splitting}: the expansion of a + A typical looper is \<^emph>\case splitting\: the expansion of a conditional. Another possibility is to apply an elimination rule on the assumptions. More adventurous loopers could start an induction. @@ -1138,7 +1138,7 @@ Note that forward simplification restricts the Simplifier to its most basic operation of term rewriting; solver and looper tactics - (\secref{sec:simp-strategies}) are \emph{not} involved here. The + (\secref{sec:simp-strategies}) are \<^emph>\not\ involved here. The @{attribute simplified} attribute should be only rarely required under normal circumstances. \ @@ -1183,21 +1183,21 @@ interactive proof. But natural deduction does not easily lend itself to automation, and has a bias towards intuitionism. For certain proofs in classical logic, it can not be called natural. - The \emph{sequent calculus}, a generalization of natural deduction, + The \<^emph>\sequent calculus\, a generalization of natural deduction, is easier to automate. - A \textbf{sequent} has the form @{text "\ \ \"}, where @{text "\"} + A \<^bold>\sequent\ has the form @{text "\ \ \"}, where @{text "\"} and @{text "\"} are sets of formulae.\footnote{For first-order logic, sequents can equivalently be made from lists or multisets of formulae.} The sequent @{text "P\<^sub>1, \, P\<^sub>m \ Q\<^sub>1, \, Q\<^sub>n"} is - \textbf{valid} if @{text "P\<^sub>1 \ \ \ P\<^sub>m"} implies @{text "Q\<^sub>1 \ \ \ + \<^bold>\valid\ if @{text "P\<^sub>1 \ \ \ P\<^sub>m"} implies @{text "Q\<^sub>1 \ \ \ Q\<^sub>n"}. Thus @{text "P\<^sub>1, \, P\<^sub>m"} represent assumptions, each of which is true, while @{text "Q\<^sub>1, \, Q\<^sub>n"} represent alternative goals. A - sequent is \textbf{basic} if its left and right sides have a common + sequent is \<^bold>\basic\ if its left and right sides have a common formula, as in @{text "P, Q \ Q, R"}; basic sequents are trivially valid. 
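
For a first impression of the automated tools built on these ideas, a minimal sketch in a HOL theory context; the formula is a standard quantifier exercise and is discharged by the blast method discussed in this chapter.

  lemma "(\<exists>x. \<forall>y. P x y) \<longrightarrow> (\<forall>y. \<exists>x. P x y)"
    by blast
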
- Sequent rules are classified as \textbf{right} or \textbf{left}, + Sequent rules are classified as \<^bold>\right\ or \<^bold>\left\, indicating which side of the @{text "\"} symbol they operate on. Rules that operate on the right side are analogous to natural deduction's introduction rules, and left rules are analogous to @@ -1341,8 +1341,8 @@ text \The proof tools of the Classical Reasoner depend on collections of rules declared in the context, which are classified - as introduction, elimination or destruction and as \emph{safe} or - \emph{unsafe}. In general, safe rules can be attempted blindly, + as introduction, elimination or destruction and as \<^emph>\safe\ or + \<^emph>\unsafe\. In general, safe rules can be attempted blindly, while unsafe rules must be used with care. A safe rule must never reduce a provable goal to an unprovable set of subgoals. @@ -1393,9 +1393,9 @@ \<^descr> @{attribute intro}, @{attribute elim}, and @{attribute dest} declare introduction, elimination, and destruction rules, - respectively. By default, rules are considered as \emph{unsafe} + respectively. By default, rules are considered as \<^emph>\unsafe\ (i.e.\ not applied blindly without backtracking), while ``@{text - "!"}'' classifies as \emph{safe}. Rule declarations marked by + "!"}'' classifies as \<^emph>\safe\. Rule declarations marked by ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\ \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps of the @{method rule} method). The optional natural number @@ -1702,7 +1702,7 @@ The classical reasoning tools --- except @{method blast} --- allow to modify this basic proof strategy by applying two lists of - arbitrary \emph{wrapper tacticals} to it. The first wrapper list, + arbitrary \<^emph>\wrapper tacticals\ to it. The first wrapper list, which is considered to contain safe wrappers only, affects @{method safe_step} and all the tactics that call it. The second one, which may contain unsafe wrappers, affects the unsafe parts of @{method @@ -1723,7 +1723,7 @@ tactic. \<^descr> @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a - safe wrapper, such that it is tried \emph{before} each safe step of + safe wrapper, such that it is tried \<^emph>\before\ each safe step of the search. \<^descr> @{text "ctxt addSafter (name, tac)"} adds the given tactic as a @@ -1738,10 +1738,10 @@ \<^descr> @{text "ctxt addbefore (name, tac)"} adds the given tactic as an unsafe wrapper, such that it its result is concatenated - \emph{before} the result of each unsafe step. + \<^emph>\before\ the result of each unsafe step. \<^descr> @{text "ctxt addafter (name, tac)"} adds the given tactic as an - unsafe wrapper, such that it its result is concatenated \emph{after} + unsafe wrapper, such that it its result is concatenated \<^emph>\after\ the result of each unsafe step. \<^descr> @{text "ctxt delWrapper name"} deletes the unsafe wrapper with diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/HOL_Specific.thy --- a/src/Doc/Isar_Ref/HOL_Specific.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/HOL_Specific.thy Sun Oct 18 23:00:32 2015 +0200 @@ -30,7 +30,7 @@ logic is widely applicable in many areas of mathematics and computer science. In a sense, Higher-Order Logic is simpler than First-Order Logic, because there are fewer restrictions and special cases. 
Note - that HOL is \emph{weaker} than FOL with axioms for ZF set theory, + that HOL is \<^emph>\weaker\ than FOL with axioms for ZF set theory, which is traditionally considered the standard foundation of regular mathematics, but for most applications this does not matter. If you prefer ML to Lisp, you will probably prefer HOL to ZF. @@ -77,13 +77,13 @@ @{attribute_def (HOL) mono} & : & @{text attribute} \\ \end{matharray} - An \emph{inductive definition} specifies the least predicate or set + An \<^emph>\inductive definition\ specifies the least predicate or set @{text R} closed under given rules: applying a rule to elements of @{text R} yields a result within @{text R}. For example, a structural operational semantics is an inductive definition of an evaluation relation. - Dually, a \emph{coinductive definition} specifies the greatest + Dually, a \<^emph>\coinductive definition\ specifies the greatest predicate or set @{text R} that is consistent with given rules: every element of @{text R} can be seen as arising by applying a rule to elements of @{text R}. An important example is using @@ -134,7 +134,7 @@ in each occurrence within the given @{text "clauses"}. The optional @{keyword "monos"} declaration contains additional - \emph{monotonicity theorems}, which are required for each operator + \<^emph>\monotonicity theorems\, which are required for each operator applied to a recursive set in the introduction rules. \<^descr> @{command (HOL) "inductive_set"} and @{command (HOL) @@ -395,7 +395,7 @@ text \Since the value of an expression depends on the value of its variables, the functions @{const evala} and @{const evalb} take an - additional parameter, an \emph{environment} that maps variables to their + additional parameter, an \<^emph>\environment\ that maps variables to their values. \<^medskip> @@ -823,7 +823,7 @@ subsection \Basic concepts\ text \ - Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records + Isabelle/HOL supports both \<^emph>\fixed\ and \<^emph>\schematic\ records at the level of terms and types. The notation is as follows: \begin{center} @@ -847,7 +847,7 @@ @{text x} and @{text y} as before, but also possibly further fields as indicated by the ``@{text "\"}'' notation (which is actually part of the syntax). The improper field ``@{text "\"}'' of a record - scheme is called the \emph{more part}. Logically it is just a free + scheme is called the \<^emph>\more part\. Logically it is just a free variable, which is occasionally referred to as ``row variable'' in the literature. The more part of a record scheme may be instantiated by zero or more further components. For example, the @@ -935,7 +935,7 @@ @{text "c\<^sub>1 :: \\<^sub>1, \, c\<^sub>n :: \\<^sub>n"}. \<^medskip> - \textbf{Selectors} and \textbf{updates} are available for any + \<^bold>\Selectors\ and \<^bold>\updates\ are available for any field (including ``@{text more}''): \begin{matharray}{lll} @@ -954,7 +954,7 @@ proven within the logic for any two fields, but not as a general theorem. \<^medskip> - The \textbf{make} operation provides a cumulative record + The \<^bold>\make\ operation provides a cumulative record constructor function: \begin{matharray}{lll} @@ -964,7 +964,7 @@ \<^medskip> We now reconsider the case of non-root records, which are derived of some parent. In general, the latter may depend on another parent as - well, resulting in a list of \emph{ancestor records}. Appending the lists + well, resulting in a list of \<^emph>\ancestor records\. 
Appending the lists of fields of all ancestors results in a certain field prefix. The record package automatically takes care of this by lifting operations over this context of ancestor fields. Assuming that @{text "(\\<^sub>1, \, \\<^sub>m) t"} has @@ -1042,7 +1042,7 @@ problem. \<^enum> The derived record operations @{text "t.make"}, @{text "t.fields"}, - @{text "t.extend"}, @{text "t.truncate"} are \emph{not} treated + @{text "t.extend"}, @{text "t.truncate"} are \<^emph>\not\ treated automatically, but usually need to be expanded by hand, using the collective fact @{text "t.defs"}. \ @@ -1221,7 +1221,7 @@ the base name of the type constructor, an explicit prefix can be given alternatively. - The given term @{text "m"} is considered as \emph{mapper} for the + The given term @{text "m"} is considered as \<^emph>\mapper\ for the corresponding type constructor and must conform to the following type pattern: @@ -1847,8 +1847,8 @@ Alternatively a specific evaluator can be selected using square brackets; typical evaluators use the current set of code equations to normalize and include @{text simp} for fully symbolic evaluation - using the simplifier, @{text nbe} for \emph{normalization by - evaluation} and \emph{code} for code generation in SML. + using the simplifier, @{text nbe} for \<^emph>\normalization by + evaluation\ and \<^emph>\code\ for code generation in SML. \<^descr> @{command (HOL) "values"}~@{text t} enumerates a set comprehension by evaluation and prints its values up to the given @@ -2027,7 +2027,7 @@ \end{matharray} Coercive subtyping allows the user to omit explicit type - conversions, also called \emph{coercions}. Type inference will add + conversions, also called \<^emph>\coercions\. Type inference will add them as necessary when parsing a term. See @{cite "traytel-berghofer-nipkow-2011"} for details. @@ -2256,8 +2256,8 @@ @@{method (HOL) coherent} @{syntax thmrefs}? \} - \<^descr> @{method (HOL) coherent} solves problems of \emph{Coherent - Logic} @{cite "Bezem-Coquand:2005"}, which covers applications in + \<^descr> @{method (HOL) coherent} solves problems of \<^emph>\Coherent + Logic\ @{cite "Bezem-Coquand:2005"}, which covers applications in confluence theory, lattice theory and projective geometry. See @{file "~~/src/HOL/ex/Coherent.thy"} for some examples. \ @@ -2296,7 +2296,7 @@ datatype} package already takes care of this. These unstructured tactics feature both goal addressing and dynamic - instantiation. Note that named rule cases are \emph{not} provided + instantiation. Note that named rule cases are \<^emph>\not\ provided as would be by the proper @{method cases} and @{method induct} proof methods (see \secref{sec:cases-induct}). Unlike the @{method induct} method, @{method induct_tac} does not handle structured rule @@ -2339,7 +2339,7 @@ chapter \Executable code\ -text \For validation purposes, it is often useful to \emph{execute} +text \For validation purposes, it is often useful to \<^emph>\execute\ specifications. In principle, execution could be simulated by Isabelle's inference kernel, i.e. by a combination of resolution and simplification. Unfortunately, this approach is rather inefficient. A more efficient way @@ -2352,9 +2352,9 @@ functional programs (including overloading using type classes) targeting SML @{cite SML}, OCaml @{cite OCaml}, Haskell @{cite "haskell-revised-report"} and Scala @{cite "scala-overview-tech-report"}. 
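
As a minimal end-to-end sketch of this facility, consider the following commands in a theory based on Main; the constant, module and file names are invented for illustration.

  primrec sum_upto :: "nat \<Rightarrow> nat" where
    "sum_upto 0 = 0"
  | "sum_upto (Suc n) = Suc n + sum_upto n"

  value "sum_upto 4"

  export_code sum_upto in SML module_name Demo file "demo.ML"

Here value evaluates the term inside the system (yielding 10), while export_code selects the code equations of sum_upto, translates them into the intermediate functional view, and serializes the result to the SML target.
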
- Conceptually, code generation is split up in three steps: \emph{selection} - of code theorems, \emph{translation} into an abstract executable view and - \emph{serialization} to a specific \emph{target language}. Inductive + Conceptually, code generation is split up in three steps: \<^emph>\selection\ + of code theorems, \<^emph>\translation\ into an abstract executable view and + \<^emph>\serialization\ to a specific \<^emph>\target language\. Inductive specifications can be executed using the predicate compiler which operates within HOL. See @{cite "isabelle-codegen"} for an introduction. @@ -2470,7 +2470,7 @@ Constants may be specified by giving them literally, referring to all executable constants within a certain theory by giving @{text "name._"}, - or referring to \emph{all} executable constants currently available by + or referring to \<^emph>\all\ executable constants currently available by giving @{text "_"}. By default, exported identifiers are minimized per module. This can be @@ -2478,16 +2478,16 @@ By default, for each involved theory one corresponding name space module is generated. Alternatively, a module name may be specified after the - @{keyword "module_name"} keyword; then \emph{all} code is placed in this + @{keyword "module_name"} keyword; then \<^emph>\all\ code is placed in this module. - For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification - refers to a single file; for \emph{Haskell}, it refers to a whole + For \<^emph>\SML\, \<^emph>\OCaml\ and \<^emph>\Scala\ the file specification + refers to a single file; for \<^emph>\Haskell\, it refers to a whole directory, where code is generated in multiple files reflecting the module hierarchy. Omitting the file specification denotes standard output. Serializers take an optional list of arguments in parentheses. For - \emph{Haskell} a module name prefix may be given using the ``@{text + \<^emph>\Haskell\ a module name prefix may be given using the ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate datatype declaration. @@ -2556,7 +2556,7 @@ (constants, type constructors, classes, class relations, instances, module names) with target-specific hints how these symbols shall be named. These hints gain precedence over names for symbols with no hints at all. - Conflicting hints are subject to name disambiguation. \emph{Warning:} It + Conflicting hints are subject to name disambiguation. \<^emph>\Warning:\ It is at the discretion of the user to ensure that name prefixes of identifiers in compound statements like type classes or datatypes are still the same. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Inner_Syntax.thy --- a/src/Doc/Isar_Ref/Inner_Syntax.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Inner_Syntax.thy Sun Oct 18 23:00:32 2015 +0200 @@ -23,8 +23,8 @@ Further details of the syntax engine involves the classical distinction of lexical language versus context-free grammar (see - \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax - transformations} (see \secref{sec:syntax-transformations}). + \secref{sec:pure-syntax}), and various mechanisms for \<^emph>\syntax + transformations\ (see \secref{sec:syntax-transformations}). \ @@ -187,7 +187,7 @@ The @{text \}-contraction law asserts @{prop "(\x. f x) \ f"}, provided @{text x} is not free in @{text f}. 
It asserts - \emph{extensionality} of functions: @{prop "f \ g"} if @{prop "f x \ + \<^emph>\extensionality\ of functions: @{prop "f \ g"} if @{prop "f x \ g x"} for all @{text x}. Higher-order unification frequently puts terms into a fully @{text \}-expanded form. For example, if @{text F} has type @{text "(\ \ \) \ \"} then its expanded form is @{term @@ -212,11 +212,11 @@ \<^descr> @{attribute show_hyps} controls printing of implicit hypotheses of local facts. Normally, only those hypotheses are - displayed that are \emph{not} covered by the assumptions of the + displayed that are \<^emph>\not\ covered by the assumptions of the current context: this situation indicates a fault in some tool being used. - By enabling @{attribute show_hyps}, output of \emph{all} hypotheses + By enabling @{attribute show_hyps}, output of \<^emph>\all\ hypotheses can be enforced, which is occasionally useful for diagnostic purposes. @@ -244,7 +244,7 @@ @{index_ML Print_Mode.with_modes: "string list -> ('a -> 'b) -> 'a -> 'b"} \\ \end{mldecls} - The \emph{print mode} facility allows to modify various operations + The \<^emph>\print mode\ facility allows to modify various operations for printing. Commands like @{command typ}, @{command term}, @{command thm} (see \secref{sec:print-diag}) take additional print modes as optional argument. The underlying ML operations are as @@ -295,7 +295,7 @@ section \Mixfix annotations \label{sec:mixfix}\ -text \Mixfix annotations specify concrete \emph{inner syntax} of +text \Mixfix annotations specify concrete \<^emph>\inner syntax\ of Isabelle types and terms. Locally fixed parameters in toplevel theorem statements, locale and class specifications also admit mixfix annotations in a fairly uniform manner. A mixfix annotation @@ -406,7 +406,7 @@ \<^descr> @{verbatim "/"}@{text s} allows a line break. Here @{text s} stands for the string of spaces (zero or more) right after the - slash. These spaces are printed if the break is \emph{not} taken. + slash. These spaces are printed if the break is \<^emph>\not\ taken. The general idea of pretty printing with blocks and breaks is also @@ -448,7 +448,7 @@ subsection \Binders\ -text \A \emph{binder} is a variable-binding construct such as a +text \A \<^emph>\binder\ is a variable-binding construct such as a quantifier. The idea to formalize @{text "\x. b"} as @{text "All (\x. b)"} for @{text "All :: ('a \ bool) \ bool"} already goes back to @{cite church40}. Isabelle declarations of certain higher-order @@ -544,11 +544,11 @@ (\secref{sec:outer-lex}), but some details are different. There are two main categories of inner syntax tokens: - \<^enum> \emph{delimiters} --- the literal tokens occurring in + \<^enum> \<^emph>\delimiters\ --- the literal tokens occurring in productions of the given priority grammar (cf.\ \secref{sec:priority-grammar}); - \<^enum> \emph{named tokens} --- various categories of identifiers etc. + \<^enum> \<^emph>\named tokens\ --- various categories of identifiers etc. Delimiters override named tokens and may thus render certain @@ -591,17 +591,17 @@ subsection \Priority grammars \label{sec:priority-grammar}\ -text \A context-free grammar consists of a set of \emph{terminal - symbols}, a set of \emph{nonterminal symbols} and a set of - \emph{productions}. Productions have the form @{text "A = \"}, +text \A context-free grammar consists of a set of \<^emph>\terminal + symbols\, a set of \<^emph>\nonterminal symbols\ and a set of + \<^emph>\productions\. 
Productions have the form @{text "A = \"}, where @{text A} is a nonterminal and @{text \} is a string of terminals and nonterminals. One designated nonterminal is called - the \emph{root symbol}. The language defined by the grammar + the \<^emph>\root symbol\. The language defined by the grammar consists of all strings of terminals that can be derived from the root symbol by applying productions as rewrite rules. - The standard Isabelle parser for inner syntax uses a \emph{priority - grammar}. Each nonterminal is decorated by an integer priority: + The standard Isabelle parser for inner syntax uses a \<^emph>\priority + grammar\. Each nonterminal is decorated by an integer priority: @{text "A\<^sup>(\<^sup>p\<^sup>)"}. In a derivation, @{text "A\<^sup>(\<^sup>p\<^sup>)"} may be rewritten using a production @{text "A\<^sup>(\<^sup>q\<^sup>) = \"} only if @{text "p \ q"}. Any priority grammar can be translated into a normal context-free @@ -751,7 +751,7 @@ which are terms of type @{typ prop}. The syntax of such formulae of the meta-logic is carefully distinguished from usual conventions for object-logics. In particular, plain @{text "\"}-term notation is - \emph{not} recognized as @{syntax (inner) prop}. + \<^emph>\not\ recognized as @{syntax (inner) prop}. \<^descr> @{syntax_ref (inner) aprop} denotes atomic propositions, which are embedded into regular @{syntax (inner) prop} by means of an @@ -770,7 +770,7 @@ anything defined by the user. When specifying notation for logical entities, all logical types - (excluding @{typ prop}) are \emph{collapsed} to this single category + (excluding @{typ prop}) are \<^emph>\collapsed\ to this single category of @{syntax (inner) logic}. \<^descr> @{syntax_ref (inner) index} denotes an optional index term for @@ -877,15 +877,15 @@ @{text "\ => name"}. These names later become the heads of parse trees; they also guide the pretty printer. - Productions without such parse tree names are called \emph{copy - productions}. Their right-hand side must have exactly one + Productions without such parse tree names are called \<^emph>\copy + productions\. Their right-hand side must have exactly one nonterminal symbol (or named token). The parser does not create a new parse tree node for copy productions, but simply returns the parse tree of the right-hand symbol. If the right-hand side of a copy production consists of a single - nonterminal without any delimiters, then it is called a \emph{chain - production}. Chain productions act as abbreviations: conceptually, + nonterminal without any delimiters, then it is called a \<^emph>\chain + production\. Chain productions act as abbreviations: conceptually, they are removed from the grammar by adding new productions. Priority information attached to chain productions is ignored; only the dummy value @{text "-1"} is displayed. @@ -976,8 +976,8 @@ the user, or implicit position information by the system --- both need to be passed-through carefully by syntax transformations. - Pre-terms are further processed by the so-called \emph{check} and - \emph{uncheck} phases that are intertwined with type-inference (see + Pre-terms are further processed by the so-called \<^emph>\check\ and + \<^emph>\uncheck\ phases that are intertwined with type-inference (see also @{cite "isabelle-implementation"}). The latter allows to operate on higher-order abstract syntax with proper binding and type information already available. @@ -1054,7 +1054,7 @@ represent terms cannot distinguish constants and variables syntactically. 
Explicit indication of @{text "CONST c"} inside the term language is required, unless @{text "c"} is known as special - \emph{syntax constant} (see also @{command syntax}). It is also + \<^emph>\syntax constant\ (see also @{command syntax}). It is also possible to use @{command syntax} declarations (without mixfix annotation) to enforce that certain unqualified names are always treated as constant within the syntax machinery. @@ -1076,7 +1076,7 @@ Since syntax transformations do not know about this later name resolution, there can be surprises in boundary cases. - \emph{Authentic syntax names} for @{ML Ast.Constant} avoid this + \<^emph>\Authentic syntax names\ for @{ML Ast.Constant} avoid this problem: the fully-qualified constant name with a special prefix for its formal category (@{text "class"}, @{text "type"}, @{text "const"}, @{text "fixed"}) represents the information faithfully @@ -1212,7 +1212,7 @@ rule undergo parsing and parse AST translations \secref{sec:tr-funs}, in order to perform some fundamental normalization like @{text "\x y. b \ \x. \y. b"}, but other AST - translation rules are \emph{not} applied recursively here. + translation rules are \<^emph>\not\ applied recursively here. When processing AST patterns, the inner syntax lexer runs in a different mode that allows identifiers to start with underscore. @@ -1280,12 +1280,12 @@ Let @{text "t"} be the abstract syntax tree to be normalized and @{text "(lhs, rhs)"} some translation rule. A subtree @{text "u"} - of @{text "t"} is called \emph{redex} if it is an instance of @{text + of @{text "t"} is called \<^emph>\redex\ if it is an instance of @{text "lhs"}; in this case the pattern @{text "lhs"} is said to match the object @{text "u"}. A redex matched by @{text "lhs"} may be replaced by the corresponding instance of @{text "rhs"}, thus - \emph{rewriting} the AST @{text "t"}. Matching requires some notion - of \emph{place-holders} in rule patterns: @{ML Ast.Variable} serves + \<^emph>\rewriting\ the AST @{text "t"}. Matching requires some notion + of \<^emph>\place-holders\ in rule patterns: @{ML Ast.Variable} serves this purpose. More precisely, the matching of the object @{text "u"} against the @@ -1322,7 +1322,7 @@ \begin{warn} If syntax translation rules work incorrectly, the output of - @{command_ref print_syntax} with its \emph{rules} sections reveals the + @{command_ref print_syntax} with its \<^emph>\rules\ sections reveals the actual internal forms of AST pattern, without potentially confusing concrete syntax. Recall that AST constants appear as quoted strings and variables without quotes. @@ -1330,7 +1330,7 @@ \begin{warn} If @{attribute_ref eta_contract} is set to @{text "true"}, terms - will be @{text "\"}-contracted \emph{before} the AST rewriter sees + will be @{text "\"}-contracted \<^emph>\before\ the AST rewriter sees them. Thus some abstraction nodes needed for print rules to match may vanish. For example, @{text "Ball A (\x. P x)"} would contract to @{text "Ball A P"} and the standard print rule would fail to @@ -1435,7 +1435,7 @@ the form @{ML Const}~@{text "(c, \)"} or @{ML Const}~@{text "(c, \) $ x\<^sub>1 $ \ $ x\<^sub>n"}. Terms allow more sophisticated transformations than ASTs do, typically involving abstractions and bound - variables. \emph{Typed} print translations may even peek at the type + variables. \<^emph>\Typed\ print translations may even peek at the type @{text "\"} of the constant they are invoked on, although some information might have been suppressed for term output already. 
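
As a reminder of how the mixfix templates underlying this machinery appear in user-level source, a minimal sketch in a theory based on Main; the constant xor and its [+] notation are chosen for illustration only.

  definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"  (infixl "[+]" 60)
    where "a [+] b \<longleftrightarrow> (a \<and> \<not> b) \<or> (\<not> a \<and> b)"

The annotation (infixl "[+]" 60) contributes both a grammar production and a pretty-printing template at the given priority, as explained in the mixfix section of this chapter.
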
@@ -1575,7 +1575,7 @@ syntax priority of the argument slot is given by its nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"}. The argument @{text "t\<^sub>i"} that corresponds to the position of @{text "A\<^sup>(\<^sup>p\<^sup>)"} is printed recursively, and then put in - parentheses \emph{if} its priority @{text "p"} requires this. The + parentheses \<^emph>\if\ its priority @{text "p"} requires this. The resulting output is concatenated with the syntactic sugar according to the grammar production. @@ -1592,7 +1592,7 @@ AST variable @{text "x"} is printed as @{text "x"} outright. \<^medskip> - White space is \emph{not} inserted automatically. If + White space is \<^emph>\not\ inserted automatically. If blanks (or breaks) are required to separate tokens, they need to be specified in the mixfix declaration (\secref{sec:mixfix}). \ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Outer_Syntax.thy --- a/src/Doc/Isar_Ref/Outer_Syntax.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Outer_Syntax.thy Sun Oct 18 23:00:32 2015 +0200 @@ -6,10 +6,10 @@ text \ The rather generic framework of Isabelle/Isar syntax emerges from - three main syntactic categories: \emph{commands} of the top-level - Isar engine (covering theory and proof elements), \emph{methods} for + three main syntactic categories: \<^emph>\commands\ of the top-level + Isar engine (covering theory and proof elements), \<^emph>\methods\ for general goal refinements (analogous to traditional ``tactics''), and - \emph{attributes} for operations on facts (within a certain + \<^emph>\attributes\ for operations on facts (within a certain context). Subsequently we give a reference of basic syntactic entities underlying Isabelle/Isar syntax in a bottom-up manner. Concrete theory and proof language elements will be introduced later @@ -18,11 +18,11 @@ \<^medskip> In order to get started with writing well-formed Isabelle/Isar documents, the most important aspect to be noted is - the difference of \emph{inner} versus \emph{outer} syntax. Inner + the difference of \<^emph>\inner\ versus \<^emph>\outer\ syntax. Inner syntax is that of Isabelle types and terms of the logic, while outer syntax is that of Isabelle/Isar theory sources (specifications and proofs). As a general rule, inner syntax entities may occur only as - \emph{atomic entities} within outer syntax. For example, the string + \<^emph>\atomic entities\ within outer syntax. For example, the string @{verbatim \"x + y"\} and identifier @{verbatim z} are legal term specifications within a theory, while @{verbatim "x + y"} without quotes is not. @@ -69,13 +69,13 @@ text \The outer lexical syntax consists of three main categories of syntax tokens: - \<^enum> \emph{major keywords} --- the command names that are available + \<^enum> \<^emph>\major keywords\ --- the command names that are available in the present logic session; - \<^enum> \emph{minor keywords} --- additional literal tokens required + \<^enum> \<^emph>\minor keywords\ --- additional literal tokens required by the syntax of commands; - \<^enum> \emph{named tokens} --- various categories of identifiers etc. + \<^enum> \<^emph>\named tokens\ --- various categories of identifiers etc. Major keywords and minor keywords are guaranteed to be disjoint. @@ -163,7 +163,7 @@ "\"}~@{verbatim "*)"} and may be nested, although the user-interface might prevent this. Note that this form indicates source comments only, which are stripped after lexical analysis of the input. 
The - Isar syntax also provides proper \emph{document comments} that are + Isar syntax also provides proper \<^emph>\document comments\ that are considered as part of the text (see \secref{sec:comments}). Common mathematical symbols such as @{text \} are represented in @@ -188,8 +188,8 @@ subsection \Names\ text \Entity @{syntax name} usually refers to any name of types, - constants, theorems etc.\ that are to be \emph{declared} or - \emph{defined} (so qualified identifiers are excluded here). Quoted + constants, theorems etc.\ that are to be \<^emph>\declared\ or + \<^emph>\defined\ (so qualified identifiers are excluded here). Quoted strings provide an escape for non-identifier names or those ruled out by outer syntax keywords (e.g.\ quoted @{verbatim \"let"\}). Already existing objects are usually referenced by @{syntax @@ -538,8 +538,8 @@ variables, and type constraints. Criteria can be preceded by ``@{text "-"}'' to select theorems that - do \emph{not} match. Note that giving the empty list of criteria - yields \emph{all} currently known facts. An optional limit for the + do \<^emph>\not\ match. Note that giving the empty list of criteria + yields \<^emph>\all\ currently known facts. An optional limit for the number of printed facts may be given; the default is 40. By default, duplicates are removed from the search result. Use @{text with_dups} to display duplicates. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Preface.thy --- a/src/Doc/Isar_Ref/Preface.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Preface.thy Sun Oct 18 23:00:32 2015 +0200 @@ -3,14 +3,14 @@ begin text \ - The \emph{Isabelle} system essentially provides a generic + The \<^emph>\Isabelle\ system essentially provides a generic infrastructure for building deductive systems (programmed in Standard ML), with a special focus on interactive theorem proving in higher-order logics. Many years ago, even end-users would refer to certain ML functions (goal commands, tactics, tacticals etc.) to pursue their everyday theorem proving tasks. - In contrast \emph{Isar} provides an interpreted language environment + In contrast \<^emph>\Isar\ provides an interpreted language environment of its own, which has been specifically tailored for the needs of theory and proof development. Compared to raw ML, the Isabelle/Isar top-level provides a more robust and comfortable development @@ -18,7 +18,7 @@ transactions with unlimited undo etc. In its pioneering times, the Isabelle/Isar version of the - \emph{Proof~General} user interface @{cite proofgeneral and + \<^emph>\Proof~General\ user interface @{cite proofgeneral and "Aspinall:TACAS:2000"} has contributed to the success of for interactive theory and proof development in this advanced theorem proving environment, even though it was somewhat @@ -30,8 +30,8 @@ Apart from the technical advances over bare-bones ML programming, the main purpose of the Isar language is to provide a conceptually different view on machine-checked proofs - @{cite "Wenzel:1999:TPHOL" and "Wenzel-PhD"}. \emph{Isar} stands for - \emph{Intelligible semi-automated reasoning}. Drawing from both the + @{cite "Wenzel:1999:TPHOL" and "Wenzel-PhD"}. \<^emph>\Isar\ stands for + \<^emph>\Intelligible semi-automated reasoning\. Drawing from both the traditions of informal mathematical proof texts and high-level programming languages, Isar offers a versatile environment for structured formal proof documents. Thus properly written Isar @@ -61,8 +61,8 @@ Isabelle/HOL. 
\<^medskip> - Isar commands may be either \emph{proper} document - constructors, or \emph{improper commands}. Some proof methods and + Isar commands may be either \<^emph>\proper\ document + constructors, or \<^emph>\improper commands\. Some proof methods and attributes introduced later are classified as improper as well. Improper Isar language elements, which are marked by ``@{text "\<^sup>*"}'' in the subsequent chapters; they are often helpful diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Proof.thy --- a/src/Doc/Isar_Ref/Proof.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Proof.thy Sun Oct 18 23:00:32 2015 +0200 @@ -12,12 +12,12 @@ the following three different modes of operation: \<^descr> @{text "proof(prove)"} means that a new goal has just been - stated that is now to be \emph{proven}; the next command may refine + stated that is now to be \<^emph>\proven\; the next command may refine it by some proof method, and enter a sub-proof to establish the actual result. \<^descr> @{text "proof(state)"} is like a nested theory mode: the - context may be augmented by \emph{stating} additional assumptions, + context may be augmented by \<^emph>\stating\ additional assumptions, intermediate results etc. \<^descr> @{text "proof(chain)"} is intermediate between @{text @@ -73,7 +73,7 @@ While Isar is inherently block-structured, opening and closing blocks is mostly handled rather casually, with little explicit user-intervention. Any local goal statement automatically opens - \emph{two} internal blocks, which are closed again when concluding + \<^emph>\two\ internal blocks, which are closed again when concluding the sub-proof (by @{command "qed"} etc.). Sections of different context within a sub-proof may be switched via @{command "next"}, which is just a single block-close followed by block-open again. @@ -90,7 +90,7 @@ \<^descr> @{command "{"} and @{command "}"} explicitly open and close blocks. Any current facts pass through ``@{command "{"}'' unchanged, while ``@{command "}"}'' causes any result to be - \emph{exported} into the enclosing context. Thus fixed variables + \<^emph>\exported\ into the enclosing context. Thus fixed variables are generalized, assumptions discharged, and local definitions unfolded (cf.\ \secref{sec:proof-context}). There is no difference of @{command "assume"} and @{command "presume"} in this mode of @@ -117,7 +117,7 @@ in any way. A typical application of @{command "oops"} is to explain Isar proofs - \emph{within} the system itself, in conjunction with the document + \<^emph>\within\ the system itself, in conjunction with the document preparation tools of Isabelle described in \chref{ch:document-prep}. Thus partial or even wrong proof attempts can be discussed in a logically sound manner. Note that the Isabelle {\LaTeX} macros can @@ -141,7 +141,7 @@ The logical proof context consists of fixed variables and assumptions. The former closely correspond to Skolem constants, or meta-level universal quantification as provided by the Isabelle/Pure - logical framework. Introducing some \emph{arbitrary, but fixed} + logical framework. Introducing some \<^emph>\arbitrary, but fixed\ variable via ``@{command "fix"}~@{text x}'' results in a local value that may be used in the subsequent proof as any other variable or constant. Furthermore, any result @{text "\ \[x]"} exported from @@ -180,7 +180,7 @@ \} \<^descr> @{command "fix"}~@{text x} introduces a local variable @{text - x} that is \emph{arbitrary, but fixed.} + x} that is \<^emph>\arbitrary, but fixed\. 
\<^descr> @{command "assume"}~@{text "a: \"} and @{command "presume"}~@{text "a: \"} introduce a local fact @{text "\ \ \"} by @@ -225,8 +225,8 @@ Polymorphism of term bindings is handled in Hindley-Milner style, similar to ML. Type variables referring to local assumptions or - open goal statements are \emph{fixed}, while those of finished - results or bound by @{command "let"} may occur in \emph{arbitrary} + open goal statements are \<^emph>\fixed\, while those of finished + results or bound by @{command "let"} may occur in \<^emph>\arbitrary\ instances later. Even though actual polymorphism should be rarely used in practice, this mechanism is essential to achieve proper incremental type-inference, as the user proceeds to build up the @@ -257,7 +257,7 @@ others (such as @{command "assume"}, @{command "have"} etc.). - Some \emph{implicit} term abbreviations\index{term abbreviations} + Some \<^emph>\implicit\ term abbreviations\index{term abbreviations} for goals and facts are available as well. For any open goal, @{variable_ref thesis} refers to its object-level statement, abstracted over any meta-level parameters (if present). Likewise, @@ -288,9 +288,9 @@ chaining towards the next goal via @{command "then"} (and variants); @{command "from"} and @{command "with"} are composite forms involving @{command "note"}. The @{command "using"} elements - augments the collection of used facts \emph{after} a goal has been + augments the collection of used facts \<^emph>\after\ a goal has been stated. Note that the special theorem name @{fact_ref this} refers - to the most recently established facts, but only \emph{before} + to the most recently established facts, but only \<^emph>\before\ issuing a follow-up claim. @{rail \ @@ -528,7 +528,7 @@ in the sense that new threads of calculational reasoning are commenced for any new block (as opened by a local goal, for example). This means that, apart from being able to nest - calculations, there is no separate \emph{begin-calculation} command + calculations, there is no separate \<^emph>\begin-calculation\ command required. \<^medskip> @@ -620,13 +620,13 @@ methods: (@{syntax nameref} @{syntax args} | @{syntax method}) + (',' | ';' | '|') \} - Regular Isar proof methods do \emph{not} admit direct goal addressing, but + Regular Isar proof methods do \<^emph>\not\ admit direct goal addressing, but refer to the first subgoal or to all subgoals uniformly. Nonetheless, the subsequent mechanisms allow to imitate the effect of subgoal addressing that is known from ML tactics. \<^medskip> - Goal \emph{restriction} means the proof state is wrapped-up in a + Goal \<^emph>\restriction\ means the proof state is wrapped-up in a way that certain subgoals are exposed, and other subgoals are ``parked'' elsewhere. Thus a proof method has no other chance than to operate on the subgoals that are presently exposed. @@ -676,13 +676,13 @@ Structured proof composition in Isar admits proof methods to be invoked in two places only. - \<^enum> An \emph{initial} refinement step @{command_ref + \<^enum> An \<^emph>\initial\ refinement step @{command_ref "proof"}~@{text "m\<^sub>1"} reduces a newly stated goal to a number of sub-goals that are to be solved later. Facts are passed to @{text "m\<^sub>1"} for forward chaining, if so indicated by @{text "proof(chain)"} mode. - \<^enum> A \emph{terminal} conclusion step @{command_ref "qed"}~@{text + \<^enum> A \<^emph>\terminal\ conclusion step @{command_ref "qed"}~@{text "m\<^sub>2"} is intended to solve remaining goals. 
No facts are passed to @{text "m\<^sub>2"}. @@ -726,7 +726,7 @@ proof method @{text "m\<^sub>2"} and concludes the sub-proof by assumption. If the goal had been @{text "show"} (or @{text "thus"}), some pending sub-goal is solved as well by the rule resulting from the - result \emph{exported} into the enclosing goal context. Thus @{text + result \<^emph>\exported\ into the enclosing goal context. Thus @{text "qed"} may fail for two reasons: either @{text "m\<^sub>2"} fails, or the resulting rule does not fit to any pending goal\footnote{This includes any additional ``strong'' assumptions as introduced by @@ -735,8 +735,8 @@ @{command "have"}, or weakening the local context by replacing occurrences of @{command "assume"} by @{command "presume"}. - \<^descr> @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal - proof}\index{proof!terminal}; it abbreviates @{command + \<^descr> @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \<^emph>\terminal + proof\\index{proof!terminal}; it abbreviates @{command "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"}, but with backtracking across both methods. Debugging an unsuccessful @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} command can be done by expanding its @@ -744,15 +744,15 @@ @{text "apply"}~@{text "m\<^sub>1"}) is already sufficient to see the problem. - \<^descr> ``@{command ".."}'' is a \emph{standard - proof}\index{proof!standard}; it abbreviates @{command "by"}~@{text + \<^descr> ``@{command ".."}'' is a \<^emph>\standard + proof\\index{proof!standard}; it abbreviates @{command "by"}~@{text "standard"}. - \<^descr> ``@{command "."}'' is a \emph{trivial - proof}\index{proof!trivial}; it abbreviates @{command "by"}~@{text + \<^descr> ``@{command "."}'' is a \<^emph>\trivial + proof\\index{proof!trivial}; it abbreviates @{command "by"}~@{text "this"}. - \<^descr> @{command "sorry"} is a \emph{fake proof}\index{proof!fake} + \<^descr> @{command "sorry"} is a \<^emph>\fake proof\\index{proof!fake} pretending to solve the pending claim without further ado. This only works in interactive development, or if the @{attribute quick_and_dirty} is enabled. Facts emerging from fake @@ -765,7 +765,7 @@ \<^descr> @{method standard} refers to the default refinement step of some Isar language elements (notably @{command proof} and ``@{command ".."}''). - It is \emph{dynamically scoped}, so the behaviour depends on the + It is \<^emph>\dynamically scoped\, so the behaviour depends on the application environment. In Isabelle/Pure, @{method standard} performs elementary introduction~/ @@ -838,7 +838,7 @@ Note that command @{command_ref "proof"} without any method actually performs a single reduction step using the @{method_ref (Pure) rule} - method; thus a plain \emph{do-nothing} proof step would be ``@{command + method; thus a plain \<^emph>\do-nothing\ proof step would be ``@{command "proof"}~@{text "-"}'' rather than @{command "proof"} alone. \<^descr> @{method "goal_cases"}~@{text "a\<^sub>1 \ a\<^sub>n"} turns the current subgoals @@ -1069,7 +1069,7 @@ \<^descr> @{attribute case_names}~@{text "c\<^sub>1 \ c\<^sub>k"} declares names for the local contexts of premises of a theorem; @{text "c\<^sub>1, \, c\<^sub>k"} - refers to the \emph{prefix} of the list of premises. Each of the + refers to the \<^emph>\prefix\ of the list of premises. 
Each of the cases @{text "c\<^sub>i"} can be of the form @{text "c[h\<^sub>1 \ h\<^sub>n]"} where the @{text "h\<^sub>1 \ h\<^sub>n"} are the names of the hypotheses in case @{text "c\<^sub>i"} from left to right. @@ -1089,7 +1089,7 @@ theorem. An empty list of names may be given to skip positions, leaving the present parameters unchanged. - Note that the default usage of case rules does \emph{not} directly + Note that the default usage of case rules does \<^emph>\not\ directly expose parameters to the proof context. \<^descr> @{attribute consumes}~@{text n} declares the number of ``major @@ -1186,8 +1186,8 @@ \end{tabular} \<^medskip> - Several instantiations may be given, referring to the \emph{suffix} - of premises of the case rule; within each premise, the \emph{prefix} + Several instantiations may be given, referring to the \<^emph>\suffix\ + of premises of the case rule; within each premise, the \<^emph>\prefix\ of variables is instantiated. In most situations, only a single term needs to be specified; this refers to the first variable of the last premise (it is usually the same for all cases). The @{text @@ -1211,7 +1211,7 @@ Several instantiations may be given, each referring to some part of a mutual inductive definition or datatype --- only related partial induction rules may be used together, though. Any of the lists of - terms @{text "P, x, \"} refers to the \emph{suffix} of variables + terms @{text "P, x, \"} refers to the \<^emph>\suffix\ of variables present in the induction rule. This enables the writer to specify only induction variables, or both predicates and variables, for example. @@ -1272,7 +1272,7 @@ covered, while the conclusions consist of several alternatives being named after the individual destructor patterns. - The given instantiation refers to the \emph{suffix} of variables + The given instantiation refers to the \<^emph>\suffix\ of variables occurring in the rule's major premise, or conclusion if unavailable. An additional ``@{text "taking: t\<^sub>1 \ t\<^sub>n"}'' specification may be required in order to specify the bisimulation @@ -1454,7 +1454,7 @@ later results by discharging these assumptions again. Note that according to the parameter scopes within the elimination rule, - results \emph{must not} refer to hypothetical parameters; otherwise the + results \<^emph>\must not\ refer to hypothetical parameters; otherwise the export will fail! This restriction conforms to the usual manner of existential reasoning in Natural Deduction. @@ -1476,7 +1476,7 @@ \<^descr> @{command guess} is similar to @{command obtain}, but it derives the obtained context elements from the course of tactical reasoning in the proof. Thus it can considerably obscure the proof: it is classified as - \emph{improper}. + \<^emph>\improper\. A proof with @{command guess} starts with a fixed goal @{text thesis}. The subsequent refinement steps may turn this to anything of the form @{text diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Proof_Script.thy --- a/src/Doc/Isar_Ref/Proof_Script.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Proof_Script.thy Sun Oct 18 23:00:32 2015 +0200 @@ -6,8 +6,8 @@ text \ Interactive theorem proving is traditionally associated with ``proof - scripts'', but Isabelle/Isar is centered around structured \emph{proof - documents} instead (see also \chref{ch:proofs}). + scripts'', but Isabelle/Isar is centered around structured \<^emph>\proof + documents\ instead (see also \chref{ch:proofs}). 
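For example, a small structured proof document that uses the cases provided by the induct method (cf. the case names discussed above) may read as follows; the statement is a routine fact about natural numbers:

  lemma "n + 0 = (n::nat)"
  proof (induct n)   (* provides the cases "0" and "Suc" *)
    case 0
    show ?case by simp
  next
    case (Suc n)
    then show ?case by simp   (* "then" chains the induction hypothesis *)
  qed

Each case command activates one of the named cases of the induction rule, and ?case abbreviates the corresponding claim.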
Nonetheless, it is possible to emulate proof scripts by sequential refinements of a proof state in backwards mode, notably with the @{command @@ -51,7 +51,7 @@ given just as in tactic scripts. Facts are passed to @{text m} as indicated by the goal's - forward-chain mode, and are \emph{consumed} afterwards. Thus any + forward-chain mode, and are \<^emph>\consumed\ afterwards. Thus any further @{command "apply"} command would always work in a purely backward manner. @@ -111,8 +111,8 @@ Goal parameters may be specified separately, in order to allow referring to them in the proof body: ``@{command subgoal}~@{keyword "for"}~@{text "x - y z"}'' names a \emph{prefix}, and ``@{command subgoal}~@{keyword - "for"}~@{text "\ x y z"}'' names a \emph{suffix} of goal parameters. The + y z"}'' names a \<^emph>\prefix\, and ``@{command subgoal}~@{keyword + "for"}~@{text "\ x y z"}'' names a \<^emph>\suffix\ of goal parameters. The latter uses a literal @{verbatim "\"} symbol as notation. Parameter positions may be skipped via dummies (underscore). Unspecified names remain internal, and thus inaccessible in the proof text. @@ -266,7 +266,7 @@ \<^descr> @{method rename_tac}~@{text "x\<^sub>1 \ x\<^sub>n"} renames parameters of a goal according to the list @{text "x\<^sub>1, \, x\<^sub>n"}, which refers to the - \emph{suffix} of variables. + \<^emph>\suffix\ of variables. \<^descr> @{method rotate_tac}~@{text n} rotates the premises of a subgoal by @{text n} positions: from right to left if @{text n} is diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Quick_Reference.thy --- a/src/Doc/Isar_Ref/Quick_Reference.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Quick_Reference.thy Sun Oct 18 23:00:32 2015 +0200 @@ -112,7 +112,7 @@ text \ \begin{tabular}{ll} - \multicolumn{2}{l}{\textbf{Single steps (forward-chaining facts)}} \\[0.5ex] + \multicolumn{2}{l}{\<^bold>\Single steps (forward-chaining facts)\} \\[0.5ex] @{method assumption} & apply some assumption \\ @{method this} & apply current facts \\ @{method rule}~@{text a} & apply some rule \\ @@ -121,14 +121,14 @@ @{method cases}~@{text t} & case analysis (provides cases) \\ @{method induct}~@{text x} & proof by induction (provides cases) \\[2ex] - \multicolumn{2}{l}{\textbf{Repeated steps (inserting facts)}} \\[0.5ex] + \multicolumn{2}{l}{\<^bold>\Repeated steps (inserting facts)\} \\[0.5ex] @{method "-"} & no rules \\ @{method intro}~@{text a} & introduction rules \\ @{method intro_classes} & class introduction rules \\ @{method elim}~@{text a} & elimination rules \\ @{method unfold}~@{text a} & definitional rewrite rules \\[2ex] - \multicolumn{2}{l}{\textbf{Automated proof tools (inserting facts)}} \\[0.5ex] + \multicolumn{2}{l}{\<^bold>\Automated proof tools (inserting facts)\} \\[0.5ex] @{method iprover} & intuitionistic proof search \\ @{method blast}, @{method fast} & Classical Reasoner \\ @{method simp}, @{method simp_all} & Simplifier (+ Splitter) \\ @@ -142,7 +142,7 @@ text \ \begin{tabular}{ll} - \multicolumn{2}{l}{\textbf{Rules}} \\[0.5ex] + \multicolumn{2}{l}{\<^bold>\Rules\} \\[0.5ex] @{attribute OF}~@{text a} & rule resolved with facts (skipping ``@{text _}'') \\ @{attribute of}~@{text t} & rule instantiated with terms (skipping ``@{text _}'') \\ @{attribute "where"}~@{text "x = t"} & rule instantiated with terms, by variable name \\ @@ -151,7 +151,7 @@ @{attribute rule_format} & result put into standard rule format \\ @{attribute elim_format} & destruct rule turned into elimination rule format \\[1ex] - 
\multicolumn{2}{l}{\textbf{Declarations}} \\[0.5ex] + \multicolumn{2}{l}{\<^bold>\Declarations\} \\[0.5ex] @{attribute simp} & Simplifier rule \\ @{attribute intro}, @{attribute elim}, @{attribute dest} & Pure or Classical Reasoner rule \\ @{attribute iff} & Simplifier + Classical Reasoner rule \\ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Spec.thy --- a/src/Doc/Isar_Ref/Spec.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Spec.thy Sun Oct 18 23:00:32 2015 +0200 @@ -37,11 +37,11 @@ theorem} or @{command lemma} are merely a special case of that, defining a theorem from a given proposition and its proof. - The theory body may be sub-structured by means of \emph{local theory - targets}, such as @{command "locale"} and @{command "class"}. It is also + The theory body may be sub-structured by means of \<^emph>\local theory + targets\, such as @{command "locale"} and @{command "class"}. It is also possible to use @{command context}~@{keyword "begin"}~\dots~@{command end} - blocks to delimited a local theory context: a \emph{named context} to - augment a locale or class specification, or an \emph{unnamed context} to + blocks to delimited a local theory context: a \<^emph>\named context\ to + augment a locale or class specification, or an \<^emph>\unnamed context\ to refer to local parameters and assumptions that are discharged later. See \secref{sec:target} for more details. @@ -131,11 +131,11 @@ (fixed variables) and assumptions (hypotheses). Definitions and theorems depending on the context may be added incrementally later on. - \emph{Named contexts} refer to locales (cf.\ \secref{sec:locale}) or + \<^emph>\Named contexts\ refer to locales (cf.\ \secref{sec:locale}) or type classes (cf.\ \secref{sec:class}); the name ``@{text "-"}'' signifies the global theory context. - \emph{Unnamed contexts} may introduce additional parameters and + \<^emph>\Unnamed contexts\ may introduce additional parameters and assumptions, and results produced in the context are generalized accordingly. Such auxiliary contexts may be nested within other targets, like @{command "locale"}, @{command "class"}, @{command @@ -233,8 +233,8 @@ attributes that operate on the context without a theorem, as in @{text "[[show_types = false]]"} for example. - Expressions of this form may be defined as \emph{bundled - declarations} in the context, and included in other situations later + Expressions of this form may be defined as \<^emph>\bundled + declarations\ in the context, and included in other situations later on. Including declaration bundles augments a local context casually without logical dependencies, which is in contrast to locales and locale interpretation (\secref{sec:locale}). @@ -453,7 +453,7 @@ A locale may be opened with the purpose of appending to its list of declarations (cf.\ \secref{sec:target}). When opening a locale declarations from all dependencies are collected and are presented - as a local theory. In this process, which is called \emph{roundup}, + as a local theory. In this process, which is called \<^emph>\roundup\, redundant locale instances are omitted. A locale instance is redundant if it is subsumed by an instance encountered earlier. A more detailed description of this process is available elsewhere @@ -464,7 +464,7 @@ subsection \Locale expressions \label{sec:locale-expr}\ text \ - A \emph{locale expression} denotes a context composed of instances + A \<^emph>\locale expression\ denotes a context composed of instances of existing locales. 
The context consists of the declaration elements from the locale instances. Redundant locale instances are omitted according to roundup. @@ -665,8 +665,8 @@ Locales may be instantiated, and the resulting instantiated declarations added to the current context. This requires proof (of - the instantiated specification) and is called \emph{locale - interpretation}. Interpretation is possible in locales (@{command + the instantiated specification) and is called \<^emph>\locale + interpretation\. Interpretation is possible in locales (@{command "sublocale"}), global and local theories (@{command "interpretation"}) and also within proof bodies (@{command "interpret"}). @@ -723,7 +723,7 @@ context --- for example, because a constant of that name exists --- add it to the @{keyword "for"} clause. - The equations @{text eqns} yield \emph{rewrite morphisms}, which are + The equations @{text eqns} yield \<^emph>\rewrite morphisms\, which are unfolded during post-processing and are useful for interpreting concepts introduced through definitions. The equations must be proved. @@ -795,7 +795,7 @@ Since attributes are applied to interpreted theorems, interpretation may modify the context of common proof tools, e.g.\ the Simplifier or Classical Reasoner. As the behavior of such - tools is \emph{not} stable under interpretation morphisms, manual + tools is \<^emph>\not\ stable under interpretation morphisms, manual declarations might have to be added to the target context of the interpretation to revert such declarations. \end{warn} @@ -821,7 +821,7 @@ \<^enum> a unified view on arbitrary suitable local theories as interpretation target; - \<^enum> rewrite morphisms by means of \emph{rewrite definitions}. + \<^enum> rewrite morphisms by means of \<^emph>\rewrite definitions\. \begin{matharray}{rcl} @@ -857,12 +857,12 @@ (see \secref{sec:target}) are not admissible since they are non-permanent due to their very nature. - In additions to \emph{rewrite morphisms} specified by @{text eqns}, - also \emph{rewrite definitions} may be specified. Semantically, a + In additions to \<^emph>\rewrite morphisms\ specified by @{text eqns}, + also \<^emph>\rewrite definitions\ may be specified. Semantically, a rewrite definition \<^item> produces a corresponding definition in - the local theory's underlying target \emph{and} + the local theory's underlying target \<^emph>\and\ \<^item> augments the rewrite morphism with the equation stemming from the symmetric of the corresponding definition. @@ -895,7 +895,7 @@ @{method_def intro_classes} & : & @{text method} \\ \end{matharray} - A class is a particular locale with \emph{exactly one} type variable + A class is a particular locale with \<^emph>\exactly one\ type variable @{text \}. Beyond the underlying locale, a corresponding type class is established which is interpreted logically as axiomatic type class @{cite "Wenzel:1997:TPHOL"} whose logical content are the @@ -929,7 +929,7 @@ superclasses}. Any @{element "fixes"} in @{text body} are lifted to the global - theory level (\emph{class operations} @{text "f\<^sub>1, \, + theory level (\<^emph>\class operations\ @{text "f\<^sub>1, \, f\<^sub>n"} of class @{text c}), mapping the local type parameter @{text \} to a schematic type variable @{text "?\ :: c"}. @@ -1025,7 +1025,7 @@ text \The class relation together with the collection of type-constructor arities must obey the principle of - \emph{co-regularity} as defined below. + \<^emph>\co-regularity\ as defined below. 
\<^medskip> For the subsequent formulation of co-regularity we assume @@ -1269,7 +1269,7 @@ \} \<^descr> @{command "type_synonym"}~@{text "(\\<^sub>1, \, \\<^sub>n) t = \"} introduces a - \emph{type synonym} @{text "(\\<^sub>1, \, \\<^sub>n) t"} for the existing type @{text + \<^emph>\type synonym\ @{text "(\\<^sub>1, \, \\<^sub>n) t"} for the existing type @{text "\"}. Unlike the semantic type definitions in Isabelle/HOL, type synonyms are merely syntactic abbreviations without any logical significance. Internally, type synonyms are fully expanded. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/Isar_Ref/Synopsis.thy --- a/src/Doc/Isar_Ref/Synopsis.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/Isar_Ref/Synopsis.thy Sun Oct 18 23:00:32 2015 +0200 @@ -133,7 +133,7 @@ \<^item> Facts can be named after the main term within the proposition. - \<^item> Facts should \emph{not} be named after the command that + \<^item> Facts should \<^emph>\not\ be named after the command that introduced them (@{command "assume"}, @{command "have"}). This is misleading and hard to maintain. diff -r cd82b1023932 -r 6e789d198bbd src/Doc/JEdit/JEdit.thy --- a/src/Doc/JEdit/JEdit.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/JEdit/JEdit.thy Sun Oct 18 23:00:32 2015 +0200 @@ -9,11 +9,11 @@ section \Concepts and terminology\ text \ - Isabelle/jEdit is a Prover IDE that integrates \emph{parallel proof - checking} @{cite "Wenzel:2009" and "Wenzel:2013:ITP"} with - \emph{asynchronous user interaction} @{cite "Wenzel:2010" and + Isabelle/jEdit is a Prover IDE that integrates \<^emph>\parallel proof + checking\ @{cite "Wenzel:2009" and "Wenzel:2013:ITP"} with + \<^emph>\asynchronous user interaction\ @{cite "Wenzel:2010" and "Wenzel:2012:UITP-EPTCS" and "Wenzel:2014:ITP-PIDE" and "Wenzel:2014:UITP"}, - based on a document-oriented approach to \emph{continuous proof processing} + based on a document-oriented approach to \<^emph>\continuous proof processing\ @{cite "Wenzel:2011:CICM" and "Wenzel:2012"}. Many concepts and system components are fit together in order to make this work. The main building blocks are as follows. @@ -72,12 +72,12 @@ Isabelle/jEdit (\figref{fig:isabelle-jedit}) consists of some plugins for the jEdit text editor, while preserving its general look-and-feel as far as possible. The main plugin is called ``Isabelle'' and has its own menu - \emph{Plugins~/ Isabelle} with access to several panels (see also - \secref{sec:dockables}), as well as \emph{Plugins~/ Plugin Options~/ - Isabelle} (see also \secref{sec:options}). + \<^emph>\Plugins~/ Isabelle\ with access to several panels (see also + \secref{sec:dockables}), as well as \<^emph>\Plugins~/ Plugin Options~/ + Isabelle\ (see also \secref{sec:options}). The options allow to specify a logic session name --- the same selector is - accessible in the \emph{Theories} panel (\secref{sec:theories}). On + accessible in the \<^emph>\Theories\ panel (\secref{sec:theories}). On application startup, the selected logic session image is provided automatically by the Isabelle build tool @{cite "isabelle-system"}: if it is absent or outdated wrt.\ its sources, the build process updates it before @@ -109,13 +109,13 @@ subsection \Documentation\ text \ - The \emph{Documentation} panel of Isabelle/jEdit provides access to the + The \<^emph>\Documentation\ panel of Isabelle/jEdit provides access to the standard Isabelle documentation: PDF files are opened by regular desktop operations of the underlying platform. 
The section ``Original jEdit - Documentation'' contains the original \emph{User's Guide} of this + Documentation'' contains the original \<^emph>\User's Guide\ of this sophisticated text editor. The same is accessible via the @{verbatim Help} menu or @{verbatim F1} keyboard shortcut, using the built-in HTML viewer of - Java/Swing. The latter also includes \emph{Frequently Asked Questions} and + Java/Swing. The latter also includes \<^emph>\Frequently Asked Questions\ and documentation of individual plugins. Most of the information about generic jEdit is relevant for Isabelle/jEdit @@ -129,7 +129,7 @@ subsection \Plugins\ text \ - The \emph{Plugin Manager} of jEdit allows to augment editor functionality by + The \<^emph>\Plugin Manager\ of jEdit allows to augment editor functionality by JVM modules (jars) that are provided by the central plugin repository, which is accessible via various mirror sites. @@ -142,15 +142,15 @@ at a grand scale. \<^medskip> - The main \emph{Isabelle} plugin is an integral part of + The main \<^emph>\Isabelle\ plugin is an integral part of Isabelle/jEdit and needs to remain active at all times! A few additional plugins are bundled with Isabelle/jEdit for convenience or out of necessity, - notably \emph{Console} with its Isabelle/Scala sub-plugin - (\secref{sec:scala-console}) and \emph{SideKick} with some Isabelle-specific + notably \<^emph>\Console\ with its Isabelle/Scala sub-plugin + (\secref{sec:scala-console}) and \<^emph>\SideKick\ with some Isabelle-specific parsers for document tree structure (\secref{sec:sidekick}). The - \emph{Navigator} plugin is particularly important for hyperlinks within the + \<^emph>\Navigator\ plugin is particularly important for hyperlinks within the formal document-model (\secref{sec:tooltips-hyperlinks}). Further plugins - (e.g.\ \emph{ErrorList}, \emph{Code2HTML}) are included to saturate the + (e.g.\ \<^emph>\ErrorList\, \<^emph>\Code2HTML\) are included to saturate the dependencies of bundled plugins, but have no particular use in Isabelle/jEdit. \ @@ -161,8 +161,8 @@ text \Both jEdit and Isabelle have distinctive management of persistent options. - Regular jEdit options are accessible via the dialogs \emph{Utilities~/ - Global Options} or \emph{Plugins~/ Plugin Options}, with a second chance to + Regular jEdit options are accessible via the dialogs \<^emph>\Utilities~/ + Global Options\ or \<^emph>\Plugins~/ Plugin Options\, with a second chance to flip the two within the central options dialog. Changes are stored in @{file_unchecked "$ISABELLE_HOME_USER/jedit/properties"} and @{file_unchecked "$ISABELLE_HOME_USER/jedit/keymaps"}. @@ -173,11 +173,11 @@ coverage of sessions and command-line tools like @{tool build} or @{tool options}. - Those Isabelle options that are declared as \textbf{public} are configurable - in Isabelle/jEdit via \emph{Plugin Options~/ Isabelle~/ General}. Moreover, + Those Isabelle options that are declared as \<^bold>\public\ are configurable + in Isabelle/jEdit via \<^emph>\Plugin Options~/ Isabelle~/ General\. Moreover, there are various options for rendering of document content, which are - configurable via \emph{Plugin Options~/ Isabelle~/ Rendering}. Thus - \emph{Plugin Options~/ Isabelle} in jEdit provides a view on a subset of + configurable via \<^emph>\Plugin Options~/ Isabelle~/ Rendering\. Thus + \<^emph>\Plugin Options~/ Isabelle\ in jEdit provides a view on a subset of Isabelle system options. 
Note that some of these options affect general parameters that are relevant outside Isabelle/jEdit as well, e.g.\ @{system_option threads} or @{system_option parallel_proofs} for the @@ -200,8 +200,8 @@ text \Keyboard shortcuts used to be managed as jEdit properties in the past, but recent versions (2013) have a separate concept of - \emph{keymap} that is configurable via \emph{Global Options~/ - Shortcuts}. The @{verbatim imported} keymap is derived from the + \<^emph>\keymap\ that is configurable via \<^emph>\Global Options~/ + Shortcuts\. The @{verbatim imported} keymap is derived from the initial environment of properties that is available at the first start of the editor; afterwards the keymap file takes precedence. @@ -251,7 +251,7 @@ The @{verbatim "-m"} option specifies additional print modes for the prover process. Note that the system option @{system_option_ref jedit_print_mode} - allows to do the same persistently (e.g.\ via the \emph{Plugin Options} + allows to do the same persistently (e.g.\ via the \<^emph>\Plugin Options\ dialog of Isabelle/jEdit), without requiring command-line invocation. The @{verbatim "-J"} and @{verbatim "-j"} options allow to pass additional @@ -283,10 +283,10 @@ Isabelle/jEdit enables platform-specific look-and-feel by default as follows. - \<^descr>[Linux:] The platform-independent \emph{Nimbus} is used by + \<^descr>[Linux:] The platform-independent \<^emph>\Nimbus\ is used by default. - \emph{GTK+} also works under the side-condition that the overall GTK theme + \<^emph>\GTK+\ also works under the side-condition that the overall GTK theme is selected in a Swing-friendly way.\footnote{GTK support in Java/Swing was once marketed aggressively by Sun, but never quite finished. Today (2015) it is lagging behind further development of Swing and GTK. The graphics @@ -295,26 +295,26 @@ ``4K'' or ``UHD'' models), because the rendering by the external library is subject to global system settings for font scaling.} - \<^descr>[Windows:] Regular \emph{Windows} is used by default, but - \emph{Windows Classic} also works. + \<^descr>[Windows:] Regular \<^emph>\Windows\ is used by default, but + \<^emph>\Windows Classic\ also works. - \<^descr>[Mac OS X:] Regular \emph{Mac OS X} is used by default. + \<^descr>[Mac OS X:] Regular \<^emph>\Mac OS X\ is used by default. - The bundled \emph{MacOSX} plugin provides various functions that are + The bundled \<^emph>\MacOSX\ plugin provides various functions that are expected from applications on that particular platform: quit from menu or dock, preferences menu, drag-and-drop of text files on the application, full-screen mode for main editor windows. It is advisable to have the - \emph{MacOSX} plugin enabled all the time on that platform. + \<^emph>\MacOSX\ plugin enabled all the time on that platform. Users may experiment with different look-and-feels, but need to keep in mind that this extra variance of GUI functionality is unlikely to work in arbitrary combinations. The platform-independent - \emph{Nimbus} and \emph{Metal} should always work. The historic - \emph{CDE/Motif} should be ignored. + \<^emph>\Nimbus\ and \<^emph>\Metal\ should always work. The historic + \<^emph>\CDE/Motif\ should be ignored. - After changing the look-and-feel in \emph{Global Options~/ - Appearance}, it is advisable to restart Isabelle/jEdit in order to + After changing the look-and-feel in \<^emph>\Global Options~/ + Appearance\, it is advisable to restart Isabelle/jEdit in order to take full effect. 
\ @@ -334,40 +334,40 @@ HD'' category). Subsequently there are further hints to improve on that. \<^medskip> - The \textbf{operating-system platform} usually provides some + The \<^bold>\operating-system platform\ usually provides some configuration for global scaling of text fonts, e.g.\ $120\%$--$250\%$ on Windows. Changing that only has a partial effect on GUI rendering; satisfactory display quality requires further adjustments. \<^medskip> - The Isabelle/jEdit \textbf{application} and its plugins provide + The Isabelle/jEdit \<^bold>\application\ and its plugins provide various font properties that are summarized below. - \<^item> \emph{Global Options / Text Area / Text font}: the main text area + \<^item> \<^emph>\Global Options / Text Area / Text font\: the main text area font, which is also used as reference point for various derived font sizes, e.g.\ the Output panel (\secref{sec:output}). - \<^item> \emph{Global Options / Gutter / Gutter font}: the font for the gutter + \<^item> \<^emph>\Global Options / Gutter / Gutter font\: the font for the gutter area left of the main text area, e.g.\ relevant for display of line numbers (disabled by default). - \<^item> \emph{Global Options / Appearance / Button, menu and label font} as - well as \emph{List and text field font}: this specifies the primary and - secondary font for the old \emph{Metal} look-and-feel + \<^item> \<^emph>\Global Options / Appearance / Button, menu and label font\ as + well as \<^emph>\List and text field font\: this specifies the primary and + secondary font for the old \<^emph>\Metal\ look-and-feel (\secref{sec:look-and-feel}), which happens to scale better than newer ones - like \emph{Nimbus}. + like \<^emph>\Nimbus\. - \<^item> \emph{Plugin Options / Isabelle / General / Reset Font Size}: the main + \<^item> \<^emph>\Plugin Options / Isabelle / General / Reset Font Size\: the main text area font size for action @{action_ref "isabelle.reset-font-size"}, e.g.\ relevant for quick scaling like in major web browsers. - \<^item> \emph{Plugin Options / Console / General / Font}: the console window + \<^item> \<^emph>\Plugin Options / Console / General / Font\: the console window font, e.g.\ relevant for Isabelle/Scala command-line. - In \figref{fig:isabelle-jedit-hdpi} the \emph{Metal} look-and-feel is + In \figref{fig:isabelle-jedit-hdpi} the \<^emph>\Metal\ look-and-feel is configured with custom fonts at 30 pixels, and the main text area and - console at 36 pixels. Despite the old-fashioned appearance of \emph{Metal}, + console at 36 pixels. Despite the old-fashioned appearance of \<^emph>\Metal\, this leads to decent rendering quality on all platforms. \begin{figure}[htb] @@ -378,7 +378,7 @@ \label{fig:isabelle-jedit-hdpi} \end{figure} - On Linux, it is also possible to use \emph{GTK+} with a suitable theme and + On Linux, it is also possible to use \<^emph>\GTK+\ with a suitable theme and global font scaling. On Mac OS X, the default setup for ``Retina'' displays should work adequately with the native look-and-feel. \ @@ -387,30 +387,30 @@ section \Dockable windows \label{sec:dockables}\ text \ - In jEdit terminology, a \emph{view} is an editor window with one or more - \emph{text areas} that show the content of one or more \emph{buffers}. A - regular view may be surrounded by \emph{dockable windows} that show - additional information in arbitrary format, not just text; a \emph{plain - view} does not allow dockables. 
The \emph{dockable window manager} of jEdit - organizes these dockable windows, either as \emph{floating} windows, or - \emph{docked} panels within one of the four margins of the view. There may + In jEdit terminology, a \<^emph>\view\ is an editor window with one or more + \<^emph>\text areas\ that show the content of one or more \<^emph>\buffers\. A + regular view may be surrounded by \<^emph>\dockable windows\ that show + additional information in arbitrary format, not just text; a \<^emph>\plain + view\ does not allow dockables. The \<^emph>\dockable window manager\ of jEdit + organizes these dockable windows, either as \<^emph>\floating\ windows, or + \<^emph>\docked\ panels within one of the four margins of the view. There may be any number of floating instances of some dockable window, but at most one - docked instance; jEdit actions that address \emph{the} dockable window of a + docked instance; jEdit actions that address \<^emph>\the\ dockable window of a particular kind refer to the unique docked instance. Dockables are used routinely in jEdit for important functionality like - \emph{HyperSearch Results} or the \emph{File System Browser}. Plugins often + \<^emph>\HyperSearch Results\ or the \<^emph>\File System Browser\. Plugins often provide a central dockable to access their key functionality, which may be opened by the user on demand. The Isabelle/jEdit plugin takes this approach to the extreme: its plugin menu provides the entry-points to many panels that are managed as dockable windows. Some important panels are docked by - default, e.g.\ \emph{Documentation}, \emph{Output}, \emph{Query}, but the + default, e.g.\ \<^emph>\Documentation\, \<^emph>\Output\, \<^emph>\Query\, but the user can change this arrangement easily and persistently. Compared to plain jEdit, dockable window management in Isabelle/jEdit is slightly augmented according to the the following principles: - \<^item> Floating windows are dependent on the main window as \emph{dialog} in + \<^item> Floating windows are dependent on the main window as \<^emph>\dialog\ in the sense of Java/AWT/Swing. Dialog windows always stay on top of the view, which is particularly important in full-screen mode. The desktop environment of the underlying platform may impose further policies on such dependent @@ -419,24 +419,24 @@ \<^item> Keyboard focus of the main view vs.\ a dockable window is carefully managed according to the intended semantics, as a panel mainly for output or - input. For example, activating the \emph{Output} (\secref{sec:output}) panel + input. For example, activating the \<^emph>\Output\ (\secref{sec:output}) panel via the dockable window manager returns keyboard focus to the main text - area, but for \emph{Query} (\secref{sec:query}) the focus is given to the + area, but for \<^emph>\Query\ (\secref{sec:query}) the focus is given to the main input field of that panel. \<^item> Panels that provide their own text area for output have an additional - dockable menu item \emph{Detach}. This produces an independent copy of the - current output as a floating \emph{Info} window, which displays that content + dockable menu item \<^emph>\Detach\. This produces an independent copy of the + current output as a floating \<^emph>\Info\ window, which displays that content independently of ongoing changes of the PIDE document-model. Note that Isabelle/jEdit popup windows (\secref{sec:tooltips-hyperlinks}) provide a - similar \emph{Detach} operation as an icon. + similar \<^emph>\Detach\ operation as an icon. 
\ section \Isabelle symbols \label{sec:symbols}\ text \ - Isabelle sources consist of \emph{symbols} that extend plain ASCII to allow + Isabelle sources consist of \<^emph>\symbols\ that extend plain ASCII to allow infinitely many mathematical symbols within the formal sources. This works without depending on particular encodings and varying Unicode standards.\footnote{Raw Unicode characters within formal sources would @@ -464,14 +464,14 @@ \<^medskip> \paragraph{Encoding.} Technically, the Unicode view on Isabelle - symbols is an \emph{encoding} called @{verbatim "UTF-8-Isabelle"} in jEdit + symbols is an \<^emph>\encoding\ called @{verbatim "UTF-8-Isabelle"} in jEdit (not in the underlying JVM). It is provided by the Isabelle/jEdit plugin and enabled by default for all source files. Sometimes such defaults are reset accidentally, or malformed UTF-8 sequences in the text force jEdit to fall back on a different encoding like @{verbatim "ISO-8859-15"}. In that case, verbatim ``@{verbatim "\"}'' will be shown in the text buffer instead of its - Unicode rendering ``@{text "\"}''. The jEdit menu operation \emph{File~/ - Reload with Encoding~/ UTF-8-Isabelle} helps to resolve such problems (after + Unicode rendering ``@{text "\"}''. The jEdit menu operation \<^emph>\File~/ + Reload with Encoding~/ UTF-8-Isabelle\ helps to resolve such problems (after repairing malformed parts of the text). \<^medskip> @@ -496,8 +496,8 @@ \paragraph{Input methods.} In principle, Isabelle/jEdit could delegate the problem to produce Isabelle symbols in their Unicode rendering to the underlying operating system and its - \emph{input methods}. Regular jEdit also provides various ways to - work with \emph{abbreviations} to produce certain non-ASCII + \<^emph>\input methods\. Regular jEdit also provides various ways to + work with \<^emph>\abbreviations\ to produce certain non-ASCII characters. Since none of these standard input methods work satisfactorily for the mathematical characters required for Isabelle, various specific Isabelle/jEdit mechanisms are provided. @@ -505,10 +505,10 @@ This is a summary for practically relevant input methods for Isabelle symbols. - \<^enum> The \emph{Symbols} panel: some GUI buttons allow to insert + \<^enum> The \<^emph>\Symbols\ panel: some GUI buttons allow to insert certain symbols in the text buffer. There are also tooltips to reveal the official Isabelle representation with some additional - information about \emph{symbol abbreviations} (see below). + information about \<^emph>\symbol abbreviations\ (see below). \<^enum> Copy/paste from decoded source files: text that is rendered as Unicode already can be re-used to produce further text. This @@ -517,7 +517,7 @@ Isabelle symbols is used. \<^enum> Copy/paste from prover output within Isabelle/jEdit. The same - principles as for text buffers apply, but note that \emph{copy} in secondary + principles as for text buffers apply, but note that \<^emph>\copy\ in secondary Isabelle/jEdit windows works via the keyboard shortcuts @{verbatim "C+c"} or @{verbatim "C+INSERT"}, while jEdit menu actions always refer to the primary text area! 
@@ -535,7 +535,7 @@ \<^medskip> \begin{tabular}{lll} - \textbf{symbol} & \textbf{name with backslash} & \textbf{abbreviation} \\\hline + \<^bold>\symbol\ & \<^bold>\name with backslash\ & \<^bold>\abbreviation\ \\\hline @{text "\"} & @{verbatim "\\lambda"} & @{verbatim "%"} \\ @{text "\"} & @{verbatim "\\Rightarrow"} & @{verbatim "=>"} \\ @{text "\"} & @{verbatim "\\Longrightarrow"} & @{verbatim "==>"} \\[0.5ex] @@ -568,14 +568,14 @@ \paragraph{Control symbols.} There are some special control symbols to modify the display style of a single symbol (without nesting). Control symbols may be applied to a region of selected - text, either using the \emph{Symbols} panel or keyboard shortcuts or + text, either using the \<^emph>\Symbols\ panel or keyboard shortcuts or jEdit actions. These editor operations produce a separate control symbol for each symbol in the text, in order to make the whole text appear in a certain style. \<^medskip> \begin{tabular}{llll} - \textbf{style} & \textbf{symbol} & \textbf{shortcut} & \textbf{action} \\\hline + \<^bold>\style\ & \<^bold>\symbol\ & \<^bold>\shortcut\ & \<^bold>\action\ \\\hline superscript & @{verbatim "\<^sup>"} & @{verbatim "C+e UP"} & @{action_ref "isabelle.control-sup"} \\ subscript & @{verbatim "\<^sub>"} & @{verbatim "C+e DOWN"} & @{action_ref "isabelle.control-sub"} \\ bold face & @{verbatim "\<^bold>"} & @{verbatim "C+e RIGHT"} & @{action_ref "isabelle.control-bold"} \\ @@ -591,7 +591,7 @@ section \SideKick parsers \label{sec:sidekick}\ text \ - The \emph{SideKick} plugin provides some general services to display buffer + The \<^emph>\SideKick\ plugin provides some general services to display buffer structure in a tree view. Isabelle/jEdit provides SideKick parsers for its main mode for theory files, @@ -619,13 +619,13 @@ section \Scala console \label{sec:scala-console}\ text \ - The \emph{Console} plugin manages various shells (command interpreters), - e.g.\ \emph{BeanShell}, which is the official jEdit scripting language, and - the cross-platform \emph{System} shell. Thus the console provides similar + The \<^emph>\Console\ plugin manages various shells (command interpreters), + e.g.\ \<^emph>\BeanShell\, which is the official jEdit scripting language, and + the cross-platform \<^emph>\System\ shell. Thus the console provides similar functionality than the Emacs buffers @{verbatim "*scratch*"} and @{verbatim "*shell*"}. - Isabelle/jEdit extends the repertoire of the console by \emph{Scala}, which + Isabelle/jEdit extends the repertoire of the console by \<^emph>\Scala\, which is the regular Scala toplevel loop running inside the same JVM process as Isabelle/jEdit itself. This means the Scala command interpreter has access to the JVM name space and state of the running Prover IDE application. The @@ -647,7 +647,7 @@ text \ File specifications in jEdit follow various formats and conventions - according to \emph{Virtual File Systems}, which may be also provided by + according to \<^emph>\Virtual File Systems\, which may be also provided by additional plugins. This allows to access remote files via the @{verbatim "http:"} protocol prefix, for example. Isabelle/jEdit attempts to work with the file-system model of jEdit as far as possible. In particular, theory @@ -662,7 +662,7 @@ letters and network shares. 
The Java notation for files needs to be distinguished from the one of - Isabelle, which uses POSIX notation with forward slashes on \emph{all} + Isabelle, which uses POSIX notation with forward slashes on \<^emph>\all\ platforms.\footnote{Isabelle/ML on Windows uses Cygwin file-system access and Unix-style path notation.} Moreover, environment variables from the Isabelle process may be used freely, e.g.\ @{file @@ -683,7 +683,7 @@ Isabelle/jEdit imitates @{verbatim "$ISABELLE_HOME"} and @{verbatim "$ISABELLE_HOME_USER"} within the Java process environment, in order to allow easy access to these important places from the editor. The file - browser of jEdit also includes \emph{Favorites} for these two important + browser of jEdit also includes \<^emph>\Favorites\ for these two important locations. \<^medskip> @@ -718,15 +718,15 @@ subsection \Editor buffers and document nodes \label{sec:buffer-node}\ text \ - As a regular text editor, jEdit maintains a collection of \emph{buffers} to + As a regular text editor, jEdit maintains a collection of \<^emph>\buffers\ to store text files; each buffer may be associated with any number of visible - \emph{text areas}. Buffers are subject to an \emph{edit mode} that is + \<^emph>\text areas\. Buffers are subject to an \<^emph>\edit mode\ that is determined from the file name extension. The following modes are treated specifically in Isabelle/jEdit: \<^medskip> \begin{tabular}{lll} - \textbf{mode} & \textbf{file extension} & \textbf{content} \\\hline + \<^bold>\mode\ & \<^bold>\file extension\ & \<^bold>\content\ \\\hline @{verbatim "isabelle"} & @{verbatim ".thy"} & theory source \\ @{verbatim "isabelle-ml"} & @{verbatim ".ML"} & Isabelle/ML source \\ @{verbatim "sml"} & @{verbatim ".sml"} or @{verbatim ".sig"} & Standard ML source \\ @@ -734,15 +734,15 @@ \<^medskip> All jEdit buffers are automatically added to the PIDE document-model as - \emph{document nodes}. The overall document structure is defined by the + \<^emph>\document nodes\. The overall document structure is defined by the theory nodes in two dimensions: - \<^enum> via \textbf{theory imports} that are specified in the \emph{theory - header} using concrete syntax of the @{command_ref theory} command + \<^enum> via \<^bold>\theory imports\ that are specified in the \<^emph>\theory + header\ using concrete syntax of the @{command_ref theory} command @{cite "isabelle-isar-ref"}; - \<^enum> via \textbf{auxiliary files} that are loaded into a theory by special - \emph{load commands}, notably @{command_ref ML_file} and @{command_ref + \<^enum> via \<^bold>\auxiliary files\ that are loaded into a theory by special + \<^emph>\load commands\, notably @{command_ref ML_file} and @{command_ref SML_file} @{cite "isabelle-isar-ref"}. @@ -756,7 +756,7 @@ subsection \Theories \label{sec:theories}\ text \ - The \emph{Theories} panel (see also \figref{fig:theories}) provides an + The \<^emph>\Theories\ panel (see also \figref{fig:theories}) provides an overview of the status of continuous checking of theory nodes within the document model. Unlike batch sessions of @{tool build} @{cite "isabelle-system"}, theory nodes are identified by full path names; this allows @@ -780,17 +780,17 @@ @{system_option jedit_auto_load}. \<^medskip> - The visible \emph{perspective} of Isabelle/jEdit is defined by the + The visible \<^emph>\perspective\ of Isabelle/jEdit is defined by the collective view on theory buffers via open text areas. 
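Coming back to the two dimensions of theory imports and auxiliary files described above, a minimal theory node with one import and one load command might look like this (the theory and file names are hypothetical):

  theory Scratch
    imports Main
  begin

  ML_file "util.ML"   (* hypothetical auxiliary file, loaded as part of the theory *)

  end

The loaded file is then managed by the PIDE document-model as well, alongside the editor buffer of the theory itself.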
The perspective is taken as a hint for document processing: the prover ensures that those parts of a theory where the user is looking are checked, while other parts that are presently not required are ignored. The perspective is changed by opening or closing text area windows, or scrolling within a window. - The \emph{Theories} panel provides some further options to influence + The \<^emph>\Theories\ panel provides some further options to influence the process of continuous checking: it may be switched off globally to restrict the prover to superficial processing of command syntax. - It is also possible to indicate theory nodes as \emph{required} for + It is also possible to indicate theory nodes as \<^emph>\required\ for continuous checking: this means such nodes and all their imports are always processed independently of the visibility status (if continuous checking is enabled). Big theory libraries that are @@ -808,7 +808,7 @@ that is reported dynamically from the logical context. Thus the prover can provide additional markup to help the user to understand the meaning of formal text, and to produce more text with some add-on tools (e.g.\ - information messages with \emph{sendback} markup by automated provers or + information messages with \<^emph>\sendback\ markup by automated provers or disprovers in the background). \ @@ -870,7 +870,7 @@ section \Output \label{sec:output}\ text \ - Prover output consists of \emph{markup} and \emph{messages}. Both are + Prover output consists of \<^emph>\markup\ and \<^emph>\messages\. Both are directly attached to the corresponding positions in the original source text, and visualized in the text area, e.g.\ as text colours for free and bound variables, or as squiggly underlines for warnings, errors etc.\ (see @@ -900,14 +900,14 @@ the given window height. Mouse clicks on the overview area position the cursor approximately to the corresponding line of text in the buffer. - Another course-grained overview is provided by the \emph{Theories} + Another course-grained overview is provided by the \<^emph>\Theories\ panel, but without direct correspondence to text positions. A double-click on one of the theory entries with their status overview opens the corresponding text buffer, without changing the cursor position. \<^medskip> - In addition, the \emph{Output} panel displays prover + In addition, the \<^emph>\Output\ panel displays prover messages that correspond to a given command, within a separate window. @@ -916,7 +916,7 @@ (in canonical order according to the internal execution of the command). There are also control elements to modify the update policy of the output wrt.\ continued editor movements. This is particularly useful with several - independent instances of the \emph{Output} panel, which the Dockable Window + independent instances of the \<^emph>\Output\ panel, which the Dockable Window Manager of jEdit can handle conveniently. Former users of the old TTY interaction model (e.g.\ Proof~General) might @@ -929,7 +929,7 @@ situations by inspecting internal state of the prover.\footnote{In that sense, unstructured tactic scripts depend on continuous debugging with internal state inspection.} Consequently, some - special messages for \emph{tracing} or \emph{proof state} only + special messages for \<^emph>\tracing\ or \<^emph>\proof state\ only appear here, and are not attached to the original source. 
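As a trivial illustration of this point, stepping through an unstructured script such as the following shows the intermediate proof state in the Output panel, while the state messages themselves are not attached to the original source:

  lemma "A --> A"
    apply (rule impI)   (* proof state after this step: A ==> A *)
    apply assumption
    done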
\<^medskip> @@ -943,16 +943,16 @@ section \Query \label{sec:query}\ text \ - The \emph{Query} panel provides various GUI forms to request extra + The \<^emph>\Query\ panel provides various GUI forms to request extra information from the prover. In old times the user would have issued some diagnostic command like @{command find_theorems} and inspected its output, but this is now integrated into the Prover IDE. - A \emph{Query} window provides some input fields and buttons for a + A \<^emph>\Query\ window provides some input fields and buttons for a particular query command, with output in a dedicated text area. There are - various query modes: \emph{Find Theorems}, \emph{Find Constants}, - \emph{Print Context}, e.g.\ see \figref{fig:query}. As usual in jEdit, - multiple \emph{Query} windows may be active at the same time: any number of + various query modes: \<^emph>\Find Theorems\, \<^emph>\Find Constants\, + \<^emph>\Print Context\, e.g.\ see \figref{fig:query}. As usual in jEdit, + multiple \<^emph>\Query\ windows may be active at the same time: any number of floating instances, but at most one docked instance (which is used by default). @@ -970,21 +970,21 @@ \<^item> The spinning wheel provides feedback about the status of a pending query wrt.\ the evaluation of its context and its own operation. - \<^item> The \emph{Apply} button attaches a fresh query invocation to the + \<^item> The \<^emph>\Apply\ button attaches a fresh query invocation to the current context of the command where the cursor is pointing in the text. - \<^item> The \emph{Search} field allows to highlight query output according to + \<^item> The \<^emph>\Search\ field allows to highlight query output according to some regular expression, in the notation that is commonly used on the Java platform.\footnote{@{url "http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html"}} This may serve as an additional visual filter of the result. - \<^item> The \emph{Zoom} box controls the font size of the output area. + \<^item> The \<^emph>\Zoom\ box controls the font size of the output area. All query operations are asynchronous: there is no need to wait for the evaluation of the document for the query context, nor for the query - operation itself. Query output may be detached as independent \emph{Info} + operation itself. Query output may be detached as an independent \<^emph>\Info\ window, using a menu operation of the dockable window manager. The printed result usually provides sufficient clues about the original query, with some hyperlink to its context (via markup of its head line). @@ -994,8 +994,8 @@ subsection \Find theorems\ text \ - The \emph{Query} panel in \emph{Find Theorems} mode retrieves facts from the - theory or proof context matching all of given criteria in the \emph{Find} + The \<^emph>\Query\ panel in \<^emph>\Find Theorems\ mode retrieves facts from the + theory or proof context matching all of the given criteria in the \<^emph>\Find\ text field. A single criterium has the following syntax: @{rail \ @@ -1011,8 +1011,8 @@ subsection \Find constants\ text \ - The \emph{Query} panel in \emph{Find Constants} mode prints all constants - whose type meets all of the given criteria in the \emph{Find} text field. + The \<^emph>\Query\ panel in \<^emph>\Find Constants\ mode prints all constants + whose type meets all of the given criteria in the \<^emph>\Find\ text field.
A single criterium has the following syntax: @{rail \ @@ -1028,7 +1028,7 @@ subsection \Print context\ text \ - The \emph{Query} panel in \emph{Print Context} mode prints information from + The \<^emph>\Query\ panel in \<^emph>\Print Context\ mode prints information from the theory or proof context, or proof state. See also the Isar commands @{command_ref print_context}, @{command_ref print_cases}, @{command_ref print_term_bindings}, @{command_ref print_theorems}, @{command_ref @@ -1043,8 +1043,8 @@ information that can be explored further by using the @{verbatim CONTROL} modifier key on Linux and Windows, or @{verbatim COMMAND} on Mac OS X. Hovering with the mouse while the modifier is pressed reveals a - \emph{tooltip} (grey box over the text with a yellow popup) and/or a - \emph{hyperlink} (black rectangle over the text with change of mouse + \<^emph>\tooltip\ (grey box over the text with a yellow popup) and/or a + \<^emph>\hyperlink\ (black rectangle over the text with change of mouse pointer); see also \figref{fig:tooltip}. \begin{figure}[htb] @@ -1067,17 +1067,17 @@ \label{fig:nested-tooltips} \end{figure} - The tooltip popup window provides some controls to \emph{close} or - \emph{detach} the window, turning it into a separate \emph{Info} + The tooltip popup window provides some controls to \<^emph>\close\ or + \<^emph>\detach\ the window, turning it into a separate \<^emph>\Info\ window managed by jEdit. The @{verbatim ESCAPE} key closes - \emph{all} popups, which is particularly relevant when nested + \<^emph>\all\ popups, which is particularly relevant when nested tooltips are stacking up. \<^medskip> A black rectangle in the text indicates a hyperlink that may be followed by a mouse click (while the @{verbatim CONTROL} or @{verbatim COMMAND} modifier key is still pressed). Such jumps to other text locations - are recorded by the \emph{Navigator} plugin, which is bundled with + are recorded by the \<^emph>\Navigator\ plugin, which is bundled with Isabelle/jEdit and enabled by default, including navigation arrows in the main jEdit toolbar. @@ -1091,25 +1091,25 @@ section \Completion \label{sec:completion}\ text \ - Smart completion of partial input is the IDE functionality \emph{par - excellance}. Isabelle/jEdit combines several sources of information to + Smart completion of partial input is the IDE functionality \<^emph>\par + excellence\. Isabelle/jEdit combines several sources of information to achieve that. Despite its complexity, it should be possible to get some idea how completion works by experimentation, based on the overview of completion varieties in \secref{sec:completion-varieties}. The remaining subsections explain concepts around completion more systematically. \<^medskip> - \emph{Explicit completion} is triggered by the action @{action_ref + \<^emph>\Explicit completion\ is triggered by the action @{action_ref "isabelle.complete"}, which is bound to the keyboard shortcut @{verbatim "C+b"}, and thus overrides the jEdit default for @{action_ref "complete-word"}. - \emph{Implicit completion} hooks into the regular keyboard input stream of + \<^emph>\Implicit completion\ hooks into the regular keyboard input stream of the editor, with some event filtering and optional delays. \<^medskip> - Completion options may be configured in \emph{Plugin Options~/ - Isabelle~/ General~/ Completion}. These are explained in further detail + Completion options may be configured in \<^emph>\Plugin Options~/ + Isabelle~/ General~/ Completion\.
These are explained in further detail below, whenever relevant. There is also a summary of options in \secref{sec:completion-options}. @@ -1133,7 +1133,7 @@ kinds and purposes. The completion mechanism supports this by the following built-in templates: - \<^descr> @{verbatim "`"} (single ASCII back-quote) supports \emph{quotations} + \<^descr> @{verbatim "`"} (single ASCII back-quote) supports \<^emph>\quotations\ via text cartouches. There are three selections, which are always presented in the same order and do not depend on any context information. The default choice produces a template ``@{text "\\\"}'', where the box indicates the @@ -1184,7 +1184,7 @@ \<^medskip> \begin{tabular}{ll} - \textbf{completion entry} & \textbf{example} \\\hline + \<^bold>\completion entry\ & \<^bold>\example\ \\\hline literal symbol & @{verbatim "\"} \\ symbol name with backslash & @{verbatim "\\"}@{verbatim forall} \\ symbol abbreviation & @{verbatim "ALL"} or @{verbatim "!"} \\ @@ -1212,13 +1212,13 @@ text \ This is genuine semantic completion, using information from the prover, so - it requires some delay. A \emph{failed name-space lookup} produces an error + it requires some delay. A \<^emph>\failed name-space lookup\ produces an error message that is annotated with a list of alternative names that are legal. The list of results is truncated according to the system option @{system_option_ref completion_limit}. The completion mechanism takes this into account when collecting information on the prover side. - Already recognized names are \emph{not} completed further, but completion + Already recognized names are \<^emph>\not\ completed further, but completion may be extended by appending a suffix of underscores. This provokes a failed lookup, and another completion attempt while ignoring the underscores. For example, in a name space where @{verbatim "foo"} and @{verbatim "foobar"} @@ -1240,7 +1240,7 @@ source text, e.g.\ for the argument of a load command (\secref{sec:aux-files}), the completion mechanism explores the directory content and offers the result as completion popup. Relative path - specifications are understood wrt.\ the \emph{master directory} of the + specifications are understood wrt.\ the \<^emph>\master directory\ of the document node (\secref{sec:buffer-node}) of the enclosing editor buffer; this requires a proper theory, not an auxiliary file. @@ -1291,19 +1291,19 @@ document-model, the default context is given by the outer syntax of the editor mode (see also \secref{sec:buffer-node}). - The semantic \emph{language context} provides information about nested + The semantic \<^emph>\language context\ provides information about nested sub-languages of Isabelle: keywords are only completed for outer syntax, symbols or antiquotations for languages that support them. E.g.\ there is no symbol completion for ML source, but within ML strings, comments, antiquotations. - The prover may produce \emph{no completion} markup in exceptional + The prover may produce \<^emph>\no completion\ markup in exceptional situations, to tell that some language keywords should be excluded from further completion attempts. For example, @{verbatim ":"} within accepted Isar syntax looses its meaning as abbreviation for symbol @{text "\"}. \<^medskip> - The completion context is \emph{ignored} for built-in templates and + The completion context is \<^emph>\ignored\ for built-in templates and symbols in their explicit form ``@{verbatim "\"}''; see also \secref{sec:completion-varieties}. 
This allows to complete within broken input that escapes its normal semantic context, e.g.\ antiquotations or @@ -1322,7 +1322,7 @@ "isabelle.complete"} with keyboard shortcut @{verbatim "C+b"}. This overrides the shortcut for @{action_ref "complete-word"} in jEdit, but it is possible to restore the original jEdit keyboard mapping of @{action - "complete-word"} via \emph{Global Options~/ Shortcuts} and invent a + "complete-word"} via \<^emph>\Global Options~/ Shortcuts\ and invent a different one for @{action "isabelle.complete"}. \<^descr>[Explicit spell-checker completion] works via @{action_ref @@ -1358,7 +1358,7 @@ subsection \Completion popup \label{sec:completion-popup}\ text \ - A \emph{completion popup} is a minimally invasive GUI component over the + A \<^emph>\completion popup\ is a minimally invasive GUI component over the text area that offers a selection of completion items to be inserted into the text, e.g.\ by mouse clicks. Items are sorted dynamically, according to the frequency of selection, with persistent history. The popup may interpret @@ -1373,7 +1373,7 @@ \<^medskip> \begin{tabular}{ll} - \textbf{key} & \textbf{action} \\\hline + \<^bold>\key\ & \<^bold>\action\ \\\hline @{verbatim "ENTER"} & select completion (if @{system_option jedit_completion_select_enter}) \\ @{verbatim "TAB"} & select completion (if @{system_option jedit_completion_select_tab}) \\ @{verbatim "ESCAPE"} & dismiss popup \\ @@ -1397,7 +1397,7 @@ replace text immediately in certain situations and depending on certain options like @{system_option jedit_completion_immediate}. In any case, insertion works uniformly, by imitating normal jEdit text insertion, - depending on the state of the \emph{text selection}. Isabelle/jEdit tries to + depending on the state of the \<^emph>\text selection\. Isabelle/jEdit tries to accommodate the most common forms of advanced selections in jEdit, but not all combinations make sense. At least the following important cases are well-defined: @@ -1416,8 +1416,8 @@ Support for multiple selections is particularly useful for - \emph{HyperSearch}: clicking on one of the items in the \emph{HyperSearch - Results} window makes jEdit select all its occurrences in the corresponding + \<^emph>\HyperSearch\: clicking on one of the items in the \<^emph>\HyperSearch + Results\ window makes jEdit select all its occurrences in the corresponding line of text. Then explicit completion can be invoked via @{verbatim "C+b"}, e.g.\ to replace occurrences of @{verbatim "-->"} by @{text "\"}. @@ -1435,8 +1435,8 @@ text \ This is a summary of Isabelle/Scala system options that are relevant for - completion. They may be configured in \emph{Plugin Options~/ Isabelle~/ - General} as usual. + completion. They may be configured in \<^emph>\Plugin Options~/ Isabelle~/ + General\ as usual. \<^item> @{system_option_def completion_limit} specifies the maximum number of items for various semantic completion operations (name-space entries etc.) @@ -1491,7 +1491,7 @@ text \ Continuous document processing works asynchronously in the background. Visible document source that has been evaluated may get augmented by - additional results of \emph{asynchronous print functions}. The canonical + additional results of \<^emph>\asynchronous print functions\. The canonical example is proof state output, which is always enabled. More heavy-weight print functions may be applied, in order to prove or disprove parts of the formal text by other means. 
@@ -1500,11 +1500,11 @@ on outermost goal statements (e.g.\ @{command lemma}, @{command theorem}), independently of the state of the current proof attempt. They work implicitly without any arguments. Results are output as - \emph{information messages}, which are indicated in the text area by + \<^emph>\information messages\, which are indicated in the text area by blue squiggles and a blue information sign in the gutter (see \figref{fig:auto-tools}). The message content may be shown as for other output (see also \secref{sec:output}). Some tools - produce output with \emph{sendback} markup, which means that + produce output with \<^emph>\sendback\ markup, which means that clicking on certain parts of the output inserts that text into the source in the proper place. @@ -1519,8 +1519,8 @@ \<^medskip> The following Isabelle system options control the behavior of automatically tried tools (see also the jEdit dialog window - \emph{Plugin Options~/ Isabelle~/ General~/ Automatically tried - tools}): + \<^emph>\Plugin Options~/ Isabelle~/ General~/ Automatically tried + tools\): \<^item> @{system_option_ref auto_methods} controls automatic use of a combination of standard proof methods (@{method auto}, @{method @@ -1544,7 +1544,7 @@ @{command_ref quickcheck}, which tests for counterexamples using a series of assignments for free variables of a subgoal. - This tool is \emph{enabled} by default. It requires little + This tool is \<^emph>\enabled\ by default. It requires little overhead, but is a bit weaker than @{command nitpick}. \<^item> @{system_option_ref auto_sledgehammer} controls a significantly @@ -1560,7 +1560,7 @@ can be solved directly by an existing theorem. This also helps to detect duplicate lemmas. - This tool is \emph{enabled} by default. + This tool is \<^emph>\enabled\ by default. Invocation of automatically tried tools is subject to some global @@ -1589,7 +1589,7 @@ section \Sledgehammer \label{sec:sledgehammer}\ -text \The \emph{Sledgehammer} panel (\figref{fig:sledgehammer}) +text \The \<^emph>\Sledgehammer\ panel (\figref{fig:sledgehammer}) provides a view on some independent execution of the Isar command @{command_ref sledgehammer}, with process indicator (spinning wheel) and GUI elements for important Sledgehammer arguments and options. Any @@ -1605,14 +1605,14 @@ \label{fig:sledgehammer} \end{figure} - The \emph{Apply} button attaches a fresh invocation of @{command + The \<^emph>\Apply\ button attaches a fresh invocation of @{command sledgehammer} to the command where the cursor is pointing in the text --- this should be some pending proof problem. Further buttons - like \emph{Cancel} and \emph{Locate} help to manage the running + like \<^emph>\Cancel\ and \<^emph>\Locate\ help to manage the running process. Results appear incrementally in the output window of the panel. - Proposed proof snippets are marked-up as \emph{sendback}, which + Proposed proof snippets are marked-up as \<^emph>\sendback\, which means a single mouse click inserts the text into a suitable place of the original source. Some manual editing may be required nonetheless, say to remove earlier proof attempts.\ @@ -1643,7 +1643,7 @@ \end{figure} It is also possible to use text folding according to this structure, by - adjusting \emph{Utilities / Buffer Options / Folding mode} of jEdit. The + adjusting \<^emph>\Utilities / Buffer Options / Folding mode\ of jEdit. The default mode @{verbatim isabelle} uses the structure of formal definitions, statements, and proofs. 
The alternative mode @{verbatim sidekick} uses the document structure of the SideKick parser, as explained above.\ @@ -1659,7 +1659,7 @@ The document antiquotation @{text "@{cite}"} is described in @{cite "isabelle-isar-ref"}. Within the Prover IDE it provides semantic markup for tooltips, hyperlinks, and completion for Bib{\TeX} database entries. - Isabelle/jEdit does \emph{not} know about the actual Bib{\TeX} environment + Isabelle/jEdit does \<^emph>\not\ know about the actual Bib{\TeX} environment used in {\LaTeX} batch-mode, but it can take citations from those @{verbatim ".bib"} files that happen to be open in the editor; see \figref{fig:cite-completion}. @@ -1701,7 +1701,7 @@ influences from the parallel environment that are outside the scope of the current command transaction. - The \emph{Timing} panel provides an overview of cumulative command + The \<^emph>\Timing\ panel provides an overview of cumulative command timings for each document node. Commands with elapsed time below the given threshold are ignored in the grand total. Nodes are sorted according to their overall timing. For the document node @@ -1715,10 +1715,10 @@ modifier key as explained in \secref{sec:tooltips-hyperlinks}. Actual display of timing depends on the global option @{system_option_ref jedit_timing_threshold}, which can be configured in - \emph{Plugin Options~/ Isabelle~/ General}. + \<^emph>\Plugin Options~/ Isabelle~/ General\. \<^medskip> - The \emph{Monitor} panel visualizes various data collections about + The \<^emph>\Monitor\ panel visualizes various data collections about recent activity of the Isabelle/ML task farm and the underlying ML runtime system. The display is continuously updated according to @{system_option_ref editor_chart_delay}. Note that the painting of the chart takes considerable @@ -1731,22 +1731,22 @@ section \Low-level output\ text \Prover output is normally shown directly in the main text area - or secondary \emph{Output} panels, as explained in + or secondary \<^emph>\Output\ panels, as explained in \secref{sec:output}. Beyond this, it is occasionally useful to inspect low-level output channels via some of the following additional panels: - \<^item> \emph{Protocol} shows internal messages between the + \<^item> \<^emph>\Protocol\ shows internal messages between the Isabelle/Scala and Isabelle/ML side of the PIDE document editing protocol. Recording of messages starts with the first activation of the corresponding dockable window; earlier messages are lost. Actual display of protocol messages causes considerable slowdown, so - it is important to undock all \emph{Protocol} panels for production + it is important to undock all \<^emph>\Protocol\ panels for production work. - \<^item> \emph{Raw Output} shows chunks of text from the @{verbatim + \<^item> \<^emph>\Raw Output\ shows chunks of text from the @{verbatim stdout} and @{verbatim stderr} channels of the prover process. Recording of output starts with the first activation of the corresponding dockable window; earlier output is lost. @@ -1763,14 +1763,14 @@ within the document model (\secref{sec:output}). Unhandled Isabelle/ML exceptions are printed by the system via @{ML Output.error_message}. - \<^item> \emph{Syslog} shows system messages that might be relevant to diagnose + \<^item> \<^emph>\Syslog\ shows system messages that might be relevant to diagnose problems with the startup or shutdown phase of the prover process; this also includes raw output on @{verbatim stderr}. 
Isabelle/ML also provides an explicit @{ML Output.system_message} operation, which is occasionally useful for diagnostic purposes within the system infrastructure itself. A limited amount of syslog messages are buffered, independently of - the docking state of the \emph{Syslog} panel. This allows to + the docking state of the \<^emph>\Syslog\ panel. This allows to diagnose serious problems with Isabelle/PIDE process management, outside of the actual protocol layer. @@ -1782,71 +1782,71 @@ chapter \Known problems and workarounds \label{sec:problems}\ text \ - \<^item> \textbf{Problem:} Odd behavior of some diagnostic commands with + \<^item> \<^bold>\Problem:\ Odd behavior of some diagnostic commands with global side-effects, like writing a physical file. - \textbf{Workaround:} Copy/paste complete command text from + \<^bold>\Workaround:\ Copy/paste complete command text from elsewhere, or disable continuous checking temporarily. - \<^item> \textbf{Problem:} No direct support to remove document nodes from the + \<^item> \<^bold>\Problem:\ No direct support to remove document nodes from the collection of theories. - \textbf{Workaround:} Clear the buffer content of unused files and close - \emph{without} saving changes. + \<^bold>\Workaround:\ Clear the buffer content of unused files and close + \<^emph>\without\ saving changes. - \<^item> \textbf{Problem:} Keyboard shortcuts @{verbatim "C+PLUS"} and + \<^item> \<^bold>\Problem:\ Keyboard shortcuts @{verbatim "C+PLUS"} and @{verbatim "C+MINUS"} for adjusting the editor font size depend on platform details and national keyboards. - \textbf{Workaround:} Rebind keys via \emph{Global Options~/ - Shortcuts}. + \<^bold>\Workaround:\ Rebind keys via \<^emph>\Global Options~/ + Shortcuts\. - \<^item> \textbf{Problem:} The Mac OS X key sequence @{verbatim - "COMMAND+COMMA"} for application \emph{Preferences} is in conflict with the - jEdit default keyboard shortcut for \emph{Incremental Search Bar} (action + \<^item> \<^bold>\Problem:\ The Mac OS X key sequence @{verbatim + "COMMAND+COMMA"} for application \<^emph>\Preferences\ is in conflict with the + jEdit default keyboard shortcut for \<^emph>\Incremental Search Bar\ (action @{action_ref "quick-search"}). - \textbf{Workaround:} Rebind key via \emph{Global Options~/ - Shortcuts} according to national keyboard, e.g.\ @{verbatim + \<^bold>\Workaround:\ Rebind key via \<^emph>\Global Options~/ + Shortcuts\ according to national keyboard, e.g.\ @{verbatim "COMMAND+SLASH"} on English ones. - \<^item> \textbf{Problem:} Mac OS X system fonts sometimes lead to + \<^item> \<^bold>\Problem:\ Mac OS X system fonts sometimes lead to character drop-outs in the main text area. - \textbf{Workaround:} Use the default @{verbatim IsabelleText} font. + \<^bold>\Workaround:\ Use the default @{verbatim IsabelleText} font. (Do not install that font on the system.) - \<^item> \textbf{Problem:} Some Linux/X11 input methods such as IBus + \<^item> \<^bold>\Problem:\ Some Linux/X11 input methods such as IBus tend to disrupt key event handling of Java/AWT/Swing. - \textbf{Workaround:} Do not use X11 input methods. Note that environment + \<^bold>\Workaround:\ Do not use X11 input methods. Note that environment variable @{verbatim XMODIFIERS} is reset by default within Isabelle settings. - \<^item> \textbf{Problem:} Some Linux/X11 window managers that are + \<^item> \<^bold>\Problem:\ Some Linux/X11 window managers that are not ``re-parenting'' cause problems with additional windows opened by Java. 
This affects either historic or neo-minimalistic window managers like @{verbatim awesome} or @{verbatim xmonad}. - \textbf{Workaround:} Use a regular re-parenting X11 window manager. + \<^bold>\Workaround:\ Use a regular re-parenting X11 window manager. - \<^item> \textbf{Problem:} Various forks of Linux/X11 window managers and + \<^item> \<^bold>\Problem:\ Various forks of Linux/X11 window managers and desktop environments (like Gnome) disrupt the handling of menu popups and mouse positions of Java/AWT/Swing. - \textbf{Workaround:} Use mainstream versions of Linux desktops. + \<^bold>\Workaround:\ Use mainstream versions of Linux desktops. - \<^item> \textbf{Problem:} Native Windows look-and-feel with global font + \<^item> \<^bold>\Problem:\ Native Windows look-and-feel with global font scaling leads to bad GUI rendering of various tree views. - \textbf{Workaround:} Use \emph{Metal} look-and-feel and re-adjust its + \<^bold>\Workaround:\ Use \<^emph>\Metal\ look-and-feel and re-adjust its primary and secondary font as explained in \secref{sec:hdpi}. - \<^item> \textbf{Problem:} Full-screen mode via jEdit action @{action_ref + \<^item> \<^bold>\Problem:\ Full-screen mode via jEdit action @{action_ref "toggle-full-screen"} (default keyboard shortcut @{verbatim F11}) works on Windows, but not on Mac OS X or various Linux/X11 window managers. - \textbf{Workaround:} Use native full-screen control of the window + \<^bold>\Workaround:\ Use native full-screen control of the window manager (notably on Mac OS X). \ diff -r cd82b1023932 -r 6e789d198bbd src/Doc/System/Basics.thy --- a/src/Doc/System/Basics.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/System/Basics.thy Sun Oct 18 23:00:32 2015 +0200 @@ -6,27 +6,27 @@ text \This manual describes Isabelle together with related tools and user interfaces as seen from a system oriented view. See also the - \emph{Isabelle/Isar Reference Manual} @{cite "isabelle-isar-ref"} for + \<^emph>\Isabelle/Isar Reference Manual\ @{cite "isabelle-isar-ref"} for the actual Isabelle input language and related concepts, and - \emph{The Isabelle/Isar Implementation - Manual} @{cite "isabelle-implementation"} for the main concepts of the + \<^emph>\The Isabelle/Isar Implementation + Manual\ @{cite "isabelle-implementation"} for the main concepts of the underlying implementation in Isabelle/ML. \<^medskip> The Isabelle system environment provides the following basic infrastructure to integrate tools smoothly. - \<^enum> The \emph{Isabelle settings} mechanism provides process + \<^enum> The \<^emph>\Isabelle settings\ mechanism provides process environment variables to all Isabelle executables (including tools and user interfaces). - \<^enum> The raw \emph{Isabelle process} (@{executable_ref + \<^enum> The raw \<^emph>\Isabelle process\ (@{executable_ref "isabelle_process"}) runs logic sessions either interactively or in batch mode. In particular, this view abstracts over the invocation of the actual ML system to be used. Regular users rarely need to care about the low-level process. - \<^enum> The main \emph{Isabelle tool wrapper} (@{executable_ref + \<^enum> The main \<^emph>\Isabelle tool wrapper\ (@{executable_ref isabelle}) provides a generic startup environment Isabelle related utilities, user interfaces etc. Such tools automatically benefit from the settings mechanism. @@ -36,13 +36,13 @@ section \Isabelle settings \label{sec:settings}\ text \ - The Isabelle system heavily depends on the \emph{settings - mechanism}\indexbold{settings}. 
Essentially, this is a statically + The Isabelle system heavily depends on the \<^emph>\settings + mechanism\\indexbold{settings}. Essentially, this is a statically scoped collection of environment variables, such as @{setting ISABELLE_HOME}, @{setting ML_SYSTEM}, @{setting ML_HOME}. These - variables are \emph{not} intended to be set directly from the shell, + variables are \<^emph>\not\ intended to be set directly from the shell, though. Isabelle employs a somewhat more sophisticated scheme of - \emph{settings files} --- one for site-wide defaults, another for + \<^emph>\settings files\ --- one for site-wide defaults, another for additional user-specific modifications. With all configuration variables in clearly defined places, this scheme is more maintainable and user-friendly than global shell environment @@ -269,8 +269,8 @@ subsection \Additional components \label{sec:components}\ -text \Any directory may be registered as an explicit \emph{Isabelle - component}. The general layout conventions are that of the main +text \Any directory may be registered as an explicit \<^emph>\Isabelle + component\. The general layout conventions are that of the main Isabelle distribution itself, and the following two files (both optional) have a special meaning: diff -r cd82b1023932 -r 6e789d198bbd src/Doc/System/Presentation.thy --- a/src/Doc/System/Presentation.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/System/Presentation.thy Sun Oct 18 23:00:32 2015 +0200 @@ -7,7 +7,7 @@ text \Isabelle provides several ways to present the outcome of formal developments, including WWW-based browsable libraries or actual printable documents. Presentation is centered around the - concept of \emph{sessions} (\chref{ch:session}). The global session + concept of \<^emph>\sessions\ (\chref{ch:session}). The global session structure is that of a tree, with Isabelle Pure at its root, further object-logics derived (e.g.\ HOLCF from HOL, and HOL from Pure), and application sessions further on in the hierarchy. @@ -59,8 +59,8 @@ descendants. Besides the HTML file that is generated for every theory, Isabelle stores links to all theories of a session in an index file. As a second hierarchy, groups of sessions are organized - as \emph{chapters}, with a separate index. Note that the implicit - tree structure of the session build hierarchy is \emph{not} relevant + as \<^emph>\chapters\, with a separate index. Note that the implicit + tree structure of the session build hierarchy is \<^emph>\not\ relevant for the presentation. Isabelle also generates graph files that represent the theory diff -r cd82b1023932 -r 6e789d198bbd src/Doc/System/Sessions.thy --- a/src/Doc/System/Sessions.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Doc/System/Sessions.thy Sun Oct 18 23:00:32 2015 +0200 @@ -4,10 +4,10 @@ chapter \Isabelle sessions and build management \label{ch:session}\ -text \An Isabelle \emph{session} consists of a collection of related +text \An Isabelle \<^emph>\session\ consists of a collection of related theories that may be associated with formal documents - (\chref{ch:present}). There is also a notion of \emph{persistent - heap} image to capture the state of a session, similar to + (\chref{ch:present}). There is also a notion of \<^emph>\persistent + heap\ image to capture the state of a session, similar to object-code in compiled programming languages. Thus the concept of session resembles that of a ``project'' in common IDE environments, but the specific name emphasizes the connection to interactive @@ -35,7 +35,7 @@ user. 
The ROOT file format follows the lexical conventions of the - \emph{outer syntax} of Isabelle/Isar, see also + \<^emph>\outer syntax\ of Isabelle/Isar, see also @{cite "isabelle-isar-ref"}. This defines common forms like identifiers, names, quoted strings, verbatim text, nested comments etc. The grammar for @{syntax session_chapter} and @{syntax @@ -109,7 +109,7 @@ \<^descr> \isakeyword{options}~@{text "[x = a, y = b, z]"} defines separate options (\secref{sec:system-options}) that are used when - processing this session, but \emph{without} propagation to child + processing this session, but \<^emph>\without\ propagation to child sessions. Note that @{text "z"} abbreviates @{text "z = true"} for Boolean options. @@ -371,7 +371,7 @@ option @{system_option_ref threads}. \<^medskip> - Option @{verbatim "-s"} enables \emph{system mode}, which + Option @{verbatim "-s"} enables \<^emph>\system mode\, which means that resulting heap images and log files are stored in @{file_unchecked "$ISABELLE_HOME/heaps"} instead of the default location @{setting ISABELLE_OUTPUT} (which is normally in @{setting diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Decision_Procs/ferrante_rackoff_data.ML --- a/src/HOL/Decision_Procs/ferrante_rackoff_data.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Decision_Procs/ferrante_rackoff_data.ML Sun Oct 18 23:00:32 2015 +0200 @@ -114,7 +114,7 @@ || keyword npiN || keyword lin_denseN || keyword qeN || keyword atomsN || keyword simpsN; -val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; +val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); val terms = thms >> map Drule.dest_term; in diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Decision_Procs/langford_data.ML --- a/src/HOL/Decision_Procs/langford_data.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Decision_Procs/langford_data.ML Sun Oct 18 23:00:32 2015 +0200 @@ -84,7 +84,7 @@ fun keyword k = Scan.lift (Args.$$$ k -- Args.colon) >> K (); val any_keyword = keyword qeN || keyword gatherN || keyword atomsN; - val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; + val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); val terms = thms >> map Drule.dest_term; in diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Groebner_Basis.thy --- a/src/HOL/Groebner_Basis.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Groebner_Basis.thy Sun Oct 18 23:00:32 2015 +0200 @@ -41,7 +41,7 @@ val addN = "add" val delN = "del" val any_keyword = keyword addN || keyword delN - val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; + val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); in Scan.optional (keyword addN |-- thms) [] -- Scan.optional (keyword delN |-- thms) [] >> diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Library/Reflection.thy --- a/src/HOL/Library/Reflection.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Library/Reflection.thy Sun Oct 18 23:00:32 2015 +0200 @@ -22,7 +22,7 @@ val onlyN = "only"; val rulesN = "rules"; val any_keyword = keyword onlyN || keyword rulesN; - val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; + val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); val terms = thms >> map (Thm.term_of o Drule.dest_term); in thms -- Scan.optional (keyword rulesN |-- thms) [] -- diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Library/rewrite.ML --- a/src/HOL/Library/rewrite.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Library/rewrite.ML Sun Oct 18 23:00:32 2015 +0200 @@ -343,7 +343,7 @@ | 
append_default (For x :: (ps as In :: Term _:: _)) = For x :: Concl :: ps | append_default ps = ps - in Scan.repeat sep_atom >> (flat #> rev #> append_default) end + in Scan.repeats sep_atom >> (rev #> append_default) end fun context_lift (scan : 'a parser) f = fn (context : Context.generic, toks) => let diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Presburger.thy --- a/src/HOL/Presburger.thy Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Presburger.thy Sun Oct 18 23:00:32 2015 +0200 @@ -400,7 +400,7 @@ val delN = "del" val elimN = "elim" val any_keyword = keyword addN || keyword delN || simple_keyword elimN - val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; + val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm) in Scan.optional (simple_keyword elimN >> K false) true -- Scan.optional (keyword addN |-- thms) [] -- diff -r cd82b1023932 -r 6e789d198bbd src/HOL/SPARK/Tools/fdl_lexer.ML --- a/src/HOL/SPARK/Tools/fdl_lexer.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/SPARK/Tools/fdl_lexer.ML Sun Oct 18 23:00:32 2015 +0200 @@ -171,7 +171,7 @@ Scan.optional (Scan.one (is_tilde o symbol) >> single) []; val long_identifier = - identifier @@@ (Scan.repeat1 ($$$ "." @@@ identifier) >> flat); + identifier @@@ Scan.repeats1 ($$$ "." @@@ identifier); val whitespace = Scan.many (is_whitespace o symbol); val whitespace1 = Scan.many1 (is_whitespace o symbol); diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Tools/ATP/atp_proof.ML --- a/src/HOL/Tools/ATP/atp_proof.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Tools/ATP/atp_proof.ML Sun Oct 18 23:00:32 2015 +0200 @@ -320,8 +320,8 @@ (parse_inference_source >> snd || scan_general_id --| skip_term >> single) x and parse_dependencies x = - (Scan.repeat (Scan.option ($$ ",") |-- parse_dependency) - >> (flat #> filter_out (curry (op =) "theory"))) x + (Scan.repeats (Scan.option ($$ ",") |-- parse_dependency) + >> (filter_out (curry (op =) "theory"))) x and parse_file_source x = (Scan.this_string "file" |-- $$ "(" |-- scan_general_id -- Scan.option ($$ "," |-- scan_general_id diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Tools/ATP/atp_satallax.ML --- a/src/HOL/Tools/ATP/atp_satallax.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Tools/ATP/atp_satallax.ML Sun Oct 18 23:00:32 2015 +0200 @@ -405,7 +405,7 @@ #> (if local_name <> satallaxN then (Scan.error (!! (fn _ => raise UNRECOGNIZED_ATP_PROOF ()) - (Scan.finite Symbol.stopper (Scan.repeat1 (parse_line local_name problem) >> flat))) + (Scan.finite Symbol.stopper (Scan.repeats1 (parse_line local_name problem)))) #> fst) else (Scan.error (!! 
(fn _ => raise UNRECOGNIZED_ATP_PROOF ()) diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Tools/Nitpick/nitpick_commands.ML --- a/src/HOL/Tools/Nitpick/nitpick_commands.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_commands.ML Sun Oct 18 23:00:32 2015 +0200 @@ -294,11 +294,10 @@ val parse_key = Scan.repeat1 Parse.typ_group >> space_implode " " val parse_value = - Scan.repeat1 (Parse.minus >> single + Scan.repeats1 (Parse.minus >> single || Scan.repeat1 (Scan.unless Parse.minus (Parse.name || Parse.float_number)) || @{keyword ","} |-- Parse.number >> prefix "," >> single) - >> flat val parse_param = parse_key -- Scan.optional (@{keyword "="} |-- parse_value) [] val parse_params = Scan.optional (@{keyword "["} |-- Parse.list parse_param --| @{keyword "]"}) [] diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Tools/Qelim/cooper.ML --- a/src/HOL/Tools/Qelim/cooper.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Tools/Qelim/cooper.ML Sun Oct 18 23:00:32 2015 +0200 @@ -897,7 +897,7 @@ val constsN = "consts"; val any_keyword = keyword constsN -val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; +val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); val terms = thms >> map (Thm.term_of o Drule.dest_term); fun optional scan = Scan.optional scan []; diff -r cd82b1023932 -r 6e789d198bbd src/HOL/Tools/legacy_transfer.ML --- a/src/HOL/Tools/legacy_transfer.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/HOL/Tools/legacy_transfer.ML Sun Oct 18 23:00:32 2015 +0200 @@ -217,7 +217,7 @@ || keyword_colon congN || keyword_colon labelsN || keyword_colon leavingN || keyword_colon directionN; -val thms = Scan.repeat (Scan.unless any_keyword Attrib.multi_thm) >> flat; +val thms = Scan.repeats (Scan.unless any_keyword Attrib.multi_thm); val names = Scan.repeat (Scan.unless any_keyword (Scan.lift Args.name)) val mode = keyword_colon modeN |-- ((Scan.lift (Args.$$$ manualN) >> K false) diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/antiquote.ML --- a/src/Pure/General/antiquote.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/antiquote.ML Sun Oct 18 23:00:32 2015 +0200 @@ -6,12 +6,14 @@ signature ANTIQUOTE = sig - type antiq = Symbol_Pos.T list * {start: Position.T, stop: Position.T, range: Position.range} - datatype 'a antiquote = Text of 'a | Antiq of antiq + type control = {range: Position.range, name: string * Position.T, body: Symbol_Pos.T list} + type antiq = {start: Position.T, stop: Position.T, range: Position.range, body: Symbol_Pos.T list} + datatype 'a antiquote = Text of 'a | Control of control | Antiq of antiq type text_antiquote = Symbol_Pos.T list antiquote val range: text_antiquote list -> Position.range val split_lines: text_antiquote list -> text_antiquote list list val antiq_reports: 'a antiquote list -> Position.report list + val scan_control: Symbol_Pos.T list -> control * Symbol_Pos.T list val scan_antiq: Symbol_Pos.T list -> antiq * Symbol_Pos.T list val scan_antiquote: Symbol_Pos.T list -> text_antiquote * Symbol_Pos.T list val read': Position.T -> Symbol_Pos.T list -> text_antiquote list @@ -23,13 +25,15 @@ (* datatype antiquote *) -type antiq = Symbol_Pos.T list * {start: Position.T, stop: Position.T, range: Position.range}; -datatype 'a antiquote = Text of 'a | Antiq of antiq; +type control = {range: Position.range, name: string * Position.T, body: Symbol_Pos.T list}; +type antiq = {start: Position.T, stop: Position.T, range: Position.range, body: Symbol_Pos.T list}; +datatype 'a antiquote = Text of 'a | Control of control | 
Antiq of antiq; type text_antiquote = Symbol_Pos.T list antiquote; fun antiquote_range (Text ss) = Symbol_Pos.range ss - | antiquote_range (Antiq (_, {range, ...})) = range; + | antiquote_range (Control {range, ...}) = range + | antiquote_range (Antiq {range, ...}) = range; fun range ants = if null ants then Position.no_range @@ -55,12 +59,13 @@ (* reports *) fun antiq_reports ants = ants |> maps - (fn Antiq (_, {start, stop, range = (pos, _)}) => - [(start, Markup.antiquote), - (stop, Markup.antiquote), - (pos, Markup.antiquoted), - (pos, Markup.language_antiquotation)] - | _ => []); + (fn Text _ => [] + | Control {range = (pos, _), ...} => [(pos, Markup.antiquoted)] + | Antiq {start, stop, range = (pos, _), ...} => + [(start, Markup.antiquote), + (stop, Markup.antiquote), + (pos, Markup.antiquoted), + (pos, Markup.language_antiquotation)]); (* scan *) @@ -72,8 +77,10 @@ val err_prefix = "Antiquotation lexical error: "; val scan_txt = - Scan.repeat1 ($$$ "@" --| Scan.ahead (~$$ "{") || - Scan.many1 (fn (s, _) => s <> "@" andalso Symbol.not_eof s)) >> flat; + Scan.repeats1 + (Scan.many1 (fn (s, _) => not (Symbol.is_control s) andalso s <> "@" andalso Symbol.not_eof s) || + Scan.one (fn (s, _) => Symbol.is_control s) --| Scan.ahead (~$$ "\\") >> single || + $$$ "@" --| Scan.ahead (~$$ "{")); val scan_antiq_body = Scan.trace (Symbol_Pos.scan_string_qq err_prefix || Symbol_Pos.scan_string_bq err_prefix) >> #2 || @@ -82,17 +89,27 @@ in +val scan_control = + Scan.one (Symbol.is_control o Symbol_Pos.symbol) -- + Scan.trace (Symbol_Pos.scan_cartouche err_prefix) >> + (fn ((control, pos), (_, body)) => + let + val Symbol.Control name = Symbol.decode control; + val range = Symbol_Pos.range ((control, pos) :: body); + in {name = (name, pos), range = range, body = body} end); + val scan_antiq = Symbol_Pos.scan_pos -- ($$ "@" |-- $$ "{" |-- Symbol_Pos.scan_pos -- Symbol_Pos.!!! 
(fn () => err_prefix ^ "missing closing brace") - (Scan.repeat scan_antiq_body -- Symbol_Pos.scan_pos -- ($$ "}" |-- Symbol_Pos.scan_pos))) - >> (fn (pos1, (pos2, ((body, pos3), pos4))) => - (flat body, - {start = Position.set_range (pos1, pos2), - stop = Position.set_range (pos3, pos4), - range = Position.range pos1 pos4})); + (Scan.repeats scan_antiq_body -- Symbol_Pos.scan_pos -- ($$ "}" |-- Symbol_Pos.scan_pos))) >> + (fn (pos1, (pos2, ((body, pos3), pos4))) => + {start = Position.set_range (pos1, pos2), + stop = Position.set_range (pos3, pos4), + range = Position.range pos1 pos4, + body = body}); -val scan_antiquote = scan_antiq >> Antiq || scan_txt >> Text; +val scan_antiquote = + scan_control >> Control || scan_antiq >> Antiq || scan_txt >> Text; end; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/antiquote.scala --- a/src/Pure/General/antiquote.scala Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/antiquote.scala Sun Oct 18 23:00:32 2015 +0200 @@ -14,6 +14,7 @@ { sealed abstract class Antiquote case class Text(source: String) extends Antiquote + case class Control(source: String) extends Antiquote case class Antiq(source: String) extends Antiquote @@ -24,7 +25,12 @@ trait Parsers extends Scan.Parsers { private val txt: Parser[String] = - rep1("@" ~ guard(one(s => s != "{")) | many1(s => s != "@")) ^^ (x => x.mkString) + rep1(many1(s => !Symbol.is_control(s) && s != "@") | + one(Symbol.is_control) <~ guard(opt_term(one(s => !Symbol.is_open(s)))) | + "@" <~ guard(opt_term(one(s => s != "{")))) ^^ (x => x.mkString) + + val control: Parser[String] = + one(Symbol.is_control) ~ cartouche ^^ { case x ~ y => x + y } val antiq_other: Parser[String] = many1(s => s != "\"" && s != "`" && s != "}" && !Symbol.is_open(s) && !Symbol.is_close(s)) @@ -36,7 +42,7 @@ "@{" ~ rep(antiq_body) ~ "}" ^^ { case x ~ y ~ z => x + y.mkString + z } val antiquote: Parser[Antiquote] = - antiq ^^ (x => Antiq(x)) | txt ^^ (x => Text(x)) + control ^^ (x => Control(x)) | (antiq ^^ (x => Antiq(x)) | txt ^^ (x => Text(x))) } diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/scan.ML --- a/src/Pure/General/scan.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/scan.ML Sun Oct 18 23:00:32 2015 +0200 @@ -57,6 +57,8 @@ val option: ('a -> 'b * 'a) -> 'a -> 'b option * 'a val repeat: ('a -> 'b * 'a) -> 'a -> 'b list * 'a val repeat1: ('a -> 'b * 'a) -> 'a -> 'b list * 'a + val repeats: ('a -> 'b list * 'a) -> 'a -> 'b list * 'a + val repeats1: ('a -> 'b list * 'a) -> 'a -> 'b list * 'a val single: ('a -> 'b * 'a) -> 'a -> 'b list * 'a val bulk: ('a -> 'b * 'a) -> 'a -> 'b list * 'a val max: ('a * 'a -> bool) -> ('b -> 'a * 'b) -> ('b -> 'a * 'b) -> 'b -> 'a * 'b @@ -191,6 +193,9 @@ fun repeat1 scan = scan ::: repeat scan; +fun repeats scan = repeat scan >> flat; +fun repeats1 scan = repeat1 scan >> flat; + fun single scan = scan >> (fn x => [x]); fun bulk scan = scan -- repeat (permissive scan) >> (op ::); diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/symbol.ML --- a/src/Pure/General/symbol.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/symbol.ML Sun Oct 18 23:00:32 2015 +0200 @@ -15,6 +15,7 @@ val is_symbolic: symbol -> bool val is_symbolic_char: symbol -> bool val is_printable: symbol -> bool + val is_control: symbol -> bool val eof: symbol val is_eof: symbol -> bool val not_eof: symbol -> bool @@ -39,7 +40,7 @@ val decode_raw: symbol -> string val encode_raw: string -> string datatype sym = - Char of string | UTF8 of string | Sym of string | Ctrl of string | Raw of string | + Char 
of string | UTF8 of string | Sym of string | Control of string | Raw of string | Malformed of string | EOF val decode: symbol -> sym datatype kind = Letter | Digit | Quasi | Blank | Other @@ -108,6 +109,9 @@ if is_char s then ord space <= ord s andalso ord s <= ord "~" else is_utf8 s orelse raw_symbolic s; +fun is_control s = + String.isPrefix "\\<^" s andalso String.isSuffix ">" s; + (* input source control *) @@ -219,7 +223,7 @@ (* symbol variants *) datatype sym = - Char of string | UTF8 of string | Sym of string | Ctrl of string | Raw of string | + Char of string | UTF8 of string | Sym of string | Control of string | Raw of string | Malformed of string | EOF; fun decode s = @@ -228,7 +232,7 @@ else if is_utf8 s then UTF8 s else if is_raw s then Raw (decode_raw s) else if is_malformed s then Malformed s - else if String.isPrefix "\\<^" s then Ctrl (String.substring (s, 3, size s - 4)) + else if is_control s then Control (String.substring (s, 3, size s - 4)) else Sym (String.substring (s, 2, size s - 3)); diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/symbol.scala --- a/src/Pure/General/symbol.scala Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/symbol.scala Sun Oct 18 23:00:32 2015 +0200 @@ -521,7 +521,7 @@ /* control symbols */ def is_control(sym: Symbol): Boolean = - sym.startsWith("\\<^") || symbols.control_decoded.contains(sym) + (sym.startsWith("\\<^") && sym.endsWith(">")) || symbols.control_decoded.contains(sym) def is_controllable(sym: Symbol): Boolean = !is_blank(sym) && !is_control(sym) && !is_open(sym) && !is_close(sym) && !is_malformed(sym) diff -r cd82b1023932 -r 6e789d198bbd src/Pure/General/symbol_pos.ML --- a/src/Pure/General/symbol_pos.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/General/symbol_pos.ML Sun Oct 18 23:00:32 2015 +0200 @@ -118,11 +118,10 @@ fun scan_strs q err_prefix = Scan.ahead ($$ q) |-- !!! 
(fn () => err_prefix ^ "unclosed string literal") - ((scan_pos --| $$$ q) -- - ((Scan.repeat (scan_str q err_prefix) >> flat) -- ($$$ q |-- scan_pos))); + ((scan_pos --| $$$ q) -- (Scan.repeats (scan_str q err_prefix) -- ($$$ q |-- scan_pos))); fun recover_strs q = - $$$ q @@@ (Scan.repeat (Scan.permissive (scan_str q "")) >> flat); + $$$ q @@@ Scan.repeats (Scan.permissive (scan_str q "")); in @@ -202,7 +201,7 @@ Scan.lift ($$$ "*" --| Scan.ahead (~$$$ ")")) || Scan.lift (Scan.one (fn (s, _) => s <> "*" andalso Symbol.not_eof s)) >> single; -val scan_cmts = Scan.pass 0 (Scan.repeat scan_cmt >> flat); +val scan_cmts = Scan.pass 0 (Scan.repeats scan_cmt); in @@ -266,7 +265,7 @@ in -val scan_ident = letter ::: (Scan.repeat (letdigs1 || sub ::: letdigs1) >> flat); +val scan_ident = letter ::: Scan.repeats (letdigs1 || sub ::: letdigs1); end; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Isar/attrib.ML --- a/src/Pure/Isar/attrib.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Isar/attrib.ML Sun Oct 18 23:00:32 2015 +0200 @@ -281,7 +281,7 @@ val thm = gen_thm Facts.the_single; val multi_thm = gen_thm (K I); -val thms = Scan.repeat multi_thm >> flat; +val thms = Scan.repeats multi_thm; end; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Isar/method.ML --- a/src/Pure/Isar/method.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Isar/method.ML Sun Oct 18 23:00:32 2015 +0200 @@ -526,7 +526,7 @@ local fun thms ss = - Scan.repeat (Scan.unless (Scan.lift (Scan.first ss)) Attrib.multi_thm) >> flat; + Scan.repeats (Scan.unless (Scan.lift (Scan.first ss)) Attrib.multi_thm); fun app {init, attribute, pos = _} ths context = fold_map (Thm.apply_attribute attribute) ths (Context.map_proof init context); diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Isar/parse.ML --- a/src/Pure/Isar/parse.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Isar/parse.ML Sun Oct 18 23:00:32 2015 +0200 @@ -421,10 +421,7 @@ fun args blk x = Scan.optional (args1 blk) [] x and args1 blk x = - ((Scan.repeat1 - (Scan.repeat1 (argument blk) || - argsp "(" ")" || - argsp "[" "]")) >> flat) x + (Scan.repeats1 (Scan.repeat1 (argument blk) || argsp "(" ")" || argsp "[" "]")) x and argsp l r x = (token ($$$ l) ::: !!! (args true @@@ (token ($$$ r) >> single))) x; in (args, args1) end; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Isar/token.ML --- a/src/Pure/Isar/token.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Isar/token.ML Sun Oct 18 23:00:32 2015 +0200 @@ -96,6 +96,7 @@ val source_strict: Keyword.keywords -> Position.T -> (Symbol.symbol, 'a) Source.source -> (T, (Symbol_Pos.T, Position.T * (Symbol.symbol, 'a) Source.source) Source.source) Source.source + val read_cartouche: Symbol_Pos.T list -> T val explode: Keyword.keywords -> Position.T -> string -> T list val make: (int * int) * string -> Position.T -> T * Position.T type 'a parser = T list -> 'a * T list @@ -550,10 +551,10 @@ Scan.ahead ($$ "{" -- $$ "*") |-- !!! 
"unclosed verbatim text" ((Symbol_Pos.scan_pos --| $$ "{" --| $$ "*") -- - ((Scan.repeat scan_verb >> flat) -- ($$ "*" |-- $$ "}" |-- Symbol_Pos.scan_pos))); + (Scan.repeats scan_verb -- ($$ "*" |-- $$ "}" |-- Symbol_Pos.scan_pos))); val recover_verbatim = - $$$ "{" @@@ $$$ "*" @@@ (Scan.repeat scan_verb >> flat); + $$$ "{" @@@ $$$ "*" @@@ Scan.repeats scan_verb; (* scan cartouche *) @@ -633,6 +634,11 @@ fun source keywords pos src = Symbol_Pos.source pos src |> source' false keywords; fun source_strict keywords pos src = Symbol_Pos.source pos src |> source' true keywords; +fun read_cartouche syms = + (case Scan.read Symbol_Pos.stopper (scan_cartouche >> token_range Cartouche) syms of + SOME tok => tok + | NONE => error ("Single cartouche expected" ^ Position.here (#1 (Symbol_Pos.range syms)))); + end; @@ -668,7 +674,7 @@ type 'a context_parser = Context.generic * T list -> 'a * (Context.generic * T list); -(* read source *) +(* read antiquotation source *) fun read_no_commands keywords scan syms = Source.of_list syms diff -r cd82b1023932 -r 6e789d198bbd src/Pure/ML/ml_context.ML --- a/src/Pure/ML/ml_context.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/ML/ml_context.ML Sun Oct 18 23:00:32 2015 +0200 @@ -133,6 +133,7 @@ fun reset_env name = ML_Lex.tokenize ("structure " ^ name ^ " = struct end"); fun expanding (Antiquote.Text tok) = ML_Lex.is_cartouche tok + | expanding (Antiquote.Control _) = true | expanding (Antiquote.Antiq _) = true; fun eval_antiquotes (ants, pos) opt_context = @@ -150,13 +151,11 @@ let fun tokenize range = apply2 (ML_Lex.tokenize #> map (ML_Lex.set_range range)); - fun expand (Antiquote.Antiq (ss, {range, ...})) ctxt = - let - val keywords = Thy_Header.get_keywords' ctxt; - val (decl, ctxt') = - apply_antiquotation (Token.read_antiq keywords antiq (ss, #1 range)) ctxt; - in (decl #> tokenize range, ctxt') end - | expand (Antiquote.Text tok) ctxt = + fun expand_src range src ctxt = + let val (decl, ctxt') = apply_antiquotation src ctxt + in (decl #> tokenize range, ctxt') end; + + fun expand (Antiquote.Text tok) ctxt = if ML_Lex.is_cartouche tok then let val range = ML_Lex.range_of tok; @@ -169,7 +168,12 @@ ("Input.source true " ^ ML_Syntax.print_string text ^ " " ^ ML_Syntax.atomic (ML_Syntax.print_range range)) ctxt; in (decl #> tokenize range, ctxt') end - else (K ([], [tok]), ctxt); + else (K ([], [tok]), ctxt) + | expand (Antiquote.Control {name, range, body}) ctxt = + expand_src range (Token.src name [Token.read_cartouche body]) ctxt + | expand (Antiquote.Antiq {range, body, ...}) ctxt = + expand_src range + (Token.read_antiq (Thy_Header.get_keywords' ctxt) antiq (body, #1 range)) ctxt; val ctxt = (case opt_ctxt of diff -r cd82b1023932 -r 6e789d198bbd src/Pure/ML/ml_lex.ML --- a/src/Pure/ML/ml_lex.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/ML/ml_lex.ML Sun Oct 18 23:00:32 2015 +0200 @@ -198,7 +198,7 @@ val scan_ident = scan_alphanumeric || scan_symbolic; val scan_long_ident = - (Scan.repeat1 (scan_alphanumeric @@@ $$$ ".") >> flat) @@@ (scan_ident || $$$ "="); + Scan.repeats1 (scan_alphanumeric @@@ $$$ ".") @@@ (scan_ident || $$$ "="); val scan_type_var = $$$ "'" @@@ scan_letdigs; @@ -249,7 +249,7 @@ $$$ "\\" @@@ !!! "bad escape character in string" scan_escape; val scan_gap = $$$ "\\" @@@ scan_blanks1 @@@ $$$ "\\"; -val scan_gaps = Scan.repeat scan_gap >> flat; +val scan_gaps = Scan.repeats scan_gap; in @@ -262,10 +262,10 @@ val scan_string = Scan.ahead ($$ "\"") |-- !!! 
"unclosed string literal" - ($$$ "\"" @@@ (Scan.repeat (scan_gap || scan_str) >> flat) @@@ $$$ "\""); + ($$$ "\"" @@@ Scan.repeats (scan_gap || scan_str) @@@ $$$ "\""); val recover_string = - $$$ "\"" @@@ (Scan.repeat (scan_gap || Scan.permissive scan_str) >> flat); + $$$ "\"" @@@ Scan.repeats (scan_gap || Scan.permissive scan_str); end; @@ -293,6 +293,7 @@ val scan_sml = scan_ml >> Antiquote.Text; val scan_ml_antiq = + Antiquote.scan_control >> Antiquote.Control || Antiquote.scan_antiq >> Antiquote.Antiq || Symbol_Pos.scan_cartouche err_prefix >> (Antiquote.Text o token Cartouche) || scan_ml >> Antiquote.Text; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/ML/ml_lex.scala --- a/src/Pure/ML/ml_lex.scala Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/ML/ml_lex.scala Sun Oct 18 23:00:32 2015 +0200 @@ -52,6 +52,7 @@ val SPACE = Value("white space") val CARTOUCHE = Value("text cartouche") val COMMENT = Value("comment text") + val CONTROL = Value("control symbol antiquotation") val ANTIQ = Value("antiquotation") val ANTIQ_START = Value("antiquotation: start") val ANTIQ_STOP = Value("antiquotation: stop") @@ -211,12 +212,13 @@ val keyword = literal(lexicon) ^^ (x => Token(Kind.KEYWORD, x)) + val ml_control = control ^^ (x => Token(Kind.CONTROL, x)) val ml_antiq = antiq ^^ (x => Token(Kind.ANTIQ, x)) val bad = one(_ => true) ^^ (x => Token(Kind.ERROR, x)) - space | (recover_delimited | (ml_antiq | - (((word | (real | (int | (long_ident | (ident | type_var))))) ||| keyword) | bad))) + space | (recover_delimited | (ml_control | (ml_antiq | + (((word | (real | (int | (long_ident | (ident | type_var))))) ||| keyword) | bad)))) } diff -r cd82b1023932 -r 6e789d198bbd src/Pure/PIDE/xml.ML --- a/src/Pure/PIDE/xml.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/PIDE/xml.ML Sun Oct 18 23:00:32 2015 +0200 @@ -239,12 +239,12 @@ fun parse_content xs = (parse_optional_text @@@ - (Scan.repeat + Scan.repeats ((parse_element >> single || parse_cdata >> (single o Text) || parse_processing_instruction || parse_comment) - @@@ parse_optional_text) >> flat)) xs + @@@ parse_optional_text)) xs and parse_element xs = ($$ "<" |-- parse_name -- Scan.repeat (blanks |-- parse_att) --| blanks :-- diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Syntax/lexicon.ML --- a/src/Pure/Syntax/lexicon.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Syntax/lexicon.ML Sun Oct 18 23:00:32 2015 +0200 @@ -94,7 +94,7 @@ fun !!! msg = Symbol_Pos.!!! (fn () => err_prefix ^ msg); val scan_id = Symbol_Pos.scan_ident; -val scan_longid = scan_id @@@ (Scan.repeat1 ($$$ "." @@@ scan_id) >> flat); +val scan_longid = scan_id @@@ Scan.repeats1 ($$$ "." @@@ scan_id); val scan_tid = $$$ "'" @@@ scan_id; val scan_nat = Scan.many1 (Symbol.is_digit o Symbol_Pos.symbol); @@ -238,12 +238,12 @@ val scan_str = Scan.ahead ($$ "'" -- $$ "'") |-- !!! "unclosed string literal" - ($$$ "'" @@@ $$$ "'" @@@ (Scan.repeat scan_chr >> flat) @@@ $$$ "'" @@@ $$$ "'"); + ($$$ "'" @@@ $$$ "'" @@@ Scan.repeats scan_chr @@@ $$$ "'" @@@ $$$ "'"); val scan_str_body = Scan.ahead ($$ "'" |-- $$ "'") |-- !!! 
"unclosed string literal" - ($$ "'" |-- $$ "'" |-- (Scan.repeat scan_chr >> flat) --| $$ "'" --| $$ "'"); + ($$ "'" |-- $$ "'" |-- Scan.repeats scan_chr --| $$ "'" --| $$ "'"); fun implode_str ss = enclose "''" "''" (implode (map (fn "'" => "\\'" | s => s) ss)); val explode_str = explode_literal scan_str_body; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Thy/latex.ML --- a/src/Pure/Thy/latex.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Thy/latex.ML Sun Oct 18 23:00:32 2015 +0200 @@ -94,14 +94,14 @@ Symbol.Char s => output_chr s | Symbol.UTF8 s => s | Symbol.Sym s => if known_sym s then enclose "{\\isasym" "}" s else output_chrs sym - | Symbol.Ctrl s => if known_ctrl s then enclose "\\isactrl" " " s else output_chrs sym + | Symbol.Control s => if known_ctrl s then enclose "\\isactrl" " " s else output_chrs sym | Symbol.Raw s => s | Symbol.Malformed s => error (Symbol.malformed_msg s) | Symbol.EOF => error "Bad EOF symbol"); fun output_ctrl_sym sym = (case Symbol.decode sym of - Symbol.Ctrl s => enclose "\\isactrl" " " s + Symbol.Control s => enclose "\\isactrl" " " s | _ => sym); in @@ -112,9 +112,12 @@ val output_syms_antiq = (fn Antiquote.Text ss => output_symbols (map Symbol_Pos.symbol ss) - | Antiquote.Antiq (ss, _) => + | Antiquote.Control {name = (name, _), body, ...} => + "\\isaantiqcontrol{" ^ output_symbols (Symbol.explode name) ^ "}" ^ + output_symbols (map Symbol_Pos.symbol body) + | Antiquote.Antiq {body, ...} => enclose "%\n\\isaantiq\n" "{}%\n\\endisaantiq\n" - (output_symbols (map Symbol_Pos.symbol ss))); + (output_symbols (map Symbol_Pos.symbol body))); val output_ctrl_symbols = implode o map output_ctrl_sym; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Thy/term_style.ML --- a/src/Pure/Thy/term_style.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Thy/term_style.ML Sun Oct 18 23:00:32 2015 +0200 @@ -67,7 +67,7 @@ end); fun sub_symbols (d :: s :: ss) = - if Symbol.is_ascii_digit d andalso not (String.isPrefix ("\<^") s) + if Symbol.is_ascii_digit d andalso not (Symbol.is_control s) then d :: "\<^sub>" :: sub_symbols (s :: ss) else d :: s :: ss | sub_symbols cs = cs; diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Thy/thy_output.ML --- a/src/Pure/Thy/thy_output.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Thy/thy_output.ML Sun Oct 18 23:00:32 2015 +0200 @@ -161,8 +161,24 @@ (* eval antiquote *) +local + +fun eval_antiq state (opts, src) = + let + val preview_ctxt = fold option opts (Toplevel.presentation_context_of state); + val print_ctxt = Context_Position.set_visible false preview_ctxt; + + fun cmd ctxt = wrap ctxt (fn () => command src state ctxt) (); + val _ = cmd preview_ctxt; + val print_modes = space_explode "," (Config.get print_ctxt modes) @ Latex.modes; + in Print_Mode.with_modes print_modes (fn () => cmd print_ctxt) () end; + +in + fun eval_antiquote _ (Antiquote.Text ss) = Symbol_Pos.content ss - | eval_antiquote state (Antiquote.Antiq (ss, {range = (pos, _), ...})) = + | eval_antiquote state (Antiquote.Control {name, body, ...}) = + eval_antiq state ([], Token.src name [Token.read_cartouche body]) + | eval_antiquote state (Antiquote.Antiq {range = (pos, _), body, ...}) = let val keywords = (case try Toplevel.presentation_context_of state of @@ -170,15 +186,9 @@ | NONE => error ("Unknown context -- cannot expand document antiquotations" ^ Position.here pos)); - - val (opts, src) = Token.read_antiq keywords antiq (ss, pos); - fun cmd ctxt = wrap ctxt (fn () => command src state ctxt) (); + in eval_antiq state (Token.read_antiq keywords antiq (body, pos)) 
end; - val preview_ctxt = fold option opts (Toplevel.presentation_context_of state); - val print_ctxt = Context_Position.set_visible false preview_ctxt; - val _ = cmd preview_ctxt; - val print_modes = space_explode "," (Config.get print_ctxt modes) @ Latex.modes; - in Print_Mode.with_modes print_modes (fn () => cmd print_ctxt) () end; +end; (* output text *) @@ -231,7 +241,6 @@ | Markup_Env_Token of string * Input.source | Raw_Token of Input.source; - fun basic_token pred (Basic_Token tok) = pred tok | basic_token _ _ = false; @@ -596,6 +605,24 @@ (** concrete antiquotations **) +(* control style *) + +local + +fun control_antiquotation name s1 s2 = + antiquotation name (Scan.lift Args.cartouche_input) + (fn {state, ...} => enclose s1 s2 o output_text state {markdown = false}); + +in + +val _ = + Theory.setup + (control_antiquotation @{binding "emph"} "\\emph{" "}" #> + control_antiquotation @{binding "bold"} "\\textbf{" "}"); + +end; + + (* basic entities *) local diff -r cd82b1023932 -r 6e789d198bbd src/Pure/Tools/rail.ML --- a/src/Pure/Tools/rail.ML Sun Oct 18 17:25:13 2015 +0200 +++ b/src/Pure/Tools/rail.ML Sun Oct 18 23:00:32 2015 +0200 @@ -85,8 +85,8 @@ fun token k ss = [Token (Symbol_Pos.range ss, (k, Symbol_Pos.content ss))]; -fun antiq_token (antiq as (ss, {range, ...})) = - [Token (range, (Antiq antiq, Symbol_Pos.content ss))]; +fun antiq_token antiq = + [Token (#range antiq, (Antiq antiq, Symbol_Pos.content (#body antiq)))]; val scan_space = Scan.many1 (Symbol.is_blank o Symbol_Pos.symbol); @@ -104,7 +104,7 @@ [Token (Position.range pos1 pos2, (String, Symbol_Pos.content ss))]); val scan = - (Scan.repeat scan_token >> flat) --| + Scan.repeats scan_token --| Symbol_Pos.!!! (fn () => err_prefix ^ "bad input") (Scan.ahead (Scan.one Symbol_Pos.is_eof));
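Most of the mechanical churn in the scanner hunks above is a single refactoring: the idiom "Scan.repeat p >> flat" (and its repeat1 variant) is replaced by the combinators Scan.repeats and Scan.repeats1 throughout ml_lex.ML, xml.ML, lexicon.ML and rail.ML. A minimal Isabelle/ML sketch of the assumed equivalence follows; the real definitions belong to the Scan structure in Pure and may differ in detail.

  (*sketch only: the new combinators expressed via the old idiom they replace*)
  fun repeats scan = Scan.repeat scan >> flat;     (*zero or more repetitions, results concatenated*)
  fun repeats1 scan = Scan.repeat1 scan >> flat;   (*one or more repetitions, results concatenated*)

At a call site this turns, for example, "Scan.repeat scan_gap >> flat" into "Scan.repeats scan_gap", as in the scan_gaps change above.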
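The new local helper control_antiquotation in thy_output.ML is what wires the @{binding "emph"} and @{binding "bold"} setup above to their LaTeX wrappers \emph{...} and \textbf{...}. Purely as an illustration of that pattern (the binding "slanted" and its \textsl wrapper are hypothetical and not part of this changeset), a further text style would be registered inside the same local block like this:

  (*hypothetical example, not in this changeset; assumes the control_antiquotation helper above*)
  val _ =
    Theory.setup
      (control_antiquotation @{binding "slanted"} "\\textsl{" "}");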