# HG changeset patch # User haftmann # Date 1421325581 -3600 # Node ID 056945909f60d6fc502630fad9465b5c98ceda29 # Parent ead400fd6484637df0dcf1451762d0e4366cf152 modernized cartouches diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Adaptation.thy --- a/src/Doc/Codegen/Adaptation.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Adaptation.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,14 +2,14 @@ imports Setup begin -setup %invisible {* Code_Target.add_derived_target ("\", [("SML", I)]) - #> Code_Target.add_derived_target ("\", [("Haskell", I)]) *} +setup %invisible \Code_Target.add_derived_target ("\", [("SML", I)]) + #> Code_Target.add_derived_target ("\", [("Haskell", I)])\ -section {* Adaptation to target languages \label{sec:adaptation} *} +section \Adaptation to target languages \label{sec:adaptation}\ -subsection {* Adapting code generation *} +subsection \Adapting code generation\ -text {* +text \ The aspects of code generation introduced so far have two aspects in common: @@ -61,12 +61,12 @@ setup by importing particular library theories. In order to understand these, we provide some clues here; these however are not supposed to replace a careful study of the sources. -*} +\ -subsection {* The adaptation principle *} +subsection \The adaptation principle\ -text {* +text \ Figure \ref{fig:adaptation} illustrates what \qt{adaptation} is conceptually supposed to be: @@ -146,11 +146,11 @@ \noindent As figure \ref{fig:adaptation} illustrates, all these adaptation mechanisms have to act consistently; it is at the discretion of the user to take care for this. -*} +\ -subsection {* Common adaptation patterns *} +subsection \Common adaptation patterns\ -text {* +text \ The @{theory HOL} @{theory Main} theory already provides a code generator setup which should be suitable for most applications. Common extensions and modifications are available by certain @@ -200,14 +200,14 @@ arrays \emph{in SML only}. \end{description} -*} +\ -subsection {* Parametrising serialisation \label{sec:adaptation_mechanisms} *} +subsection \Parametrising serialisation \label{sec:adaptation_mechanisms}\ -text {* +text \ Consider the following function and its corresponding SML code: -*} +\ primrec %quote in_interval :: "nat \ nat \ nat \ bool" where "in_interval (k, l) n \ k \ n \ n \ l" @@ -219,11 +219,11 @@ | constant HOL.conj \ (SML) | constant Not \ (SML) (*>*) -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts in_interval (SML)} -*} +\ -text {* +text \ \noindent Though this is correct code, it is a little bit unsatisfactory: boolean values and operators are materialised as distinguished entities with have nothing to do with the SML-built-in @@ -232,7 +232,7 @@ which would perfectly terminate when the existing SML @{verbatim "bool"} would be used. To map the HOL @{typ bool} on SML @{verbatim "bool"}, we may use \qn{custom serialisations}: -*} +\ code_printing %quotett type_constructor bool \ (SML) "bool" @@ -240,7 +240,7 @@ | constant False \ (SML) "false" | constant HOL.conj \ (SML) "_ andalso _" -text {* +text \ \noindent The @{command_def code_printing} command takes a series of symbols (contants, type constructor, \ldots) together with target-specific custom serialisations. Each @@ -249,28 +249,28 @@ inserted whenever the type constructor would occur. Each ``@{verbatim "_"}'' in a serialisation expression is treated as a placeholder for the constant's or the type constructor's arguments. 
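As an aside on where such a mapping is heading: the adaptation above targets SML, but the analogous adaptation exists for Haskell as well. The following hand-written Haskell sketch shows roughly what the adapted function looks like once HOL's bool and conjunction are identified with the built-in Bool and (&&); the type alias Nat and the use of Integer and (<=) are illustrative assumptions only, since the real generated code keeps its own representation of natural numbers and their ordering unless those are adapted too.

    -- hand-written sketch, not literal generated output
    type Nat = Integer  -- stand-in for the generated nat representation

    inInterval :: (Nat, Nat) -> Nat -> Bool
    inInterval (k, l) n = k <= n && n <= l

For instance, inInterval (2, 5) 3 evaluates to True, with no trace of a separate boolean datatype left in the target code.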
-*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts in_interval (SML)} -*} +\ -text {* +text \ \noindent This still is not perfect: the parentheses around the \qt{andalso} expression are superfluous. Though the serialiser by no means attempts to imitate the rich Isabelle syntax framework, it provides some common idioms, notably associative infixes with precedences which may be used here: -*} +\ code_printing %quotett constant HOL.conj \ (SML) infixl 1 "andalso" -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts in_interval (SML)} -*} +\ -text {* +text \ \noindent The attentive reader may ask how we assert that no generated code will accidentally overwrite. For this reason the serialiser has an internal table of identifiers which have to be @@ -278,14 +278,14 @@ typically contains the keywords of the target language. It can be extended manually, thus avoiding accidental overwrites, using the @{command_def "code_reserved"} command: -*} +\ code_reserved %quote "\" bool true false andalso -text {* +text \ \noindent Next, we try to map HOL pairs to SML pairs, using the infix ``@{verbatim "*"}'' type constructor and parentheses: -*} +\ (*<*) code_printing %invisible type_constructor prod \ (SML) @@ -295,7 +295,7 @@ type_constructor prod \ (SML) infix 2 "*" | constant Pair \ (SML) "!((_),/ (_))" -text {* +text \ \noindent The initial bang ``@{verbatim "!"}'' tells the serialiser never to put parentheses around the whole expression (they are already present), while the parentheses around argument place @@ -314,28 +314,28 @@ in ``@{verbatim "fn '_ => _"}'' the first ``@{verbatim "_"}'' is a proper underscore while the second ``@{verbatim "_"}'' is a placeholder. -*} +\ -subsection {* @{text Haskell} serialisation *} +subsection \@{text Haskell} serialisation\ -text {* +text \ For convenience, the default @{text HOL} setup for @{text Haskell} maps the @{class equal} class to its counterpart in @{text Haskell}, giving custom serialisations for the class @{class equal} and its operation @{const [source] HOL.equal}. -*} +\ code_printing %quotett type_class equal \ (Haskell) "Eq" | constant HOL.equal \ (Haskell) infixl 4 "==" -text {* +text \ \noindent A problem now occurs whenever a type which is an instance of @{class equal} in @{text HOL} is mapped on a @{text Haskell}-built-in type which is also an instance of @{text Haskell} @{text Eq}: -*} +\ typedecl %quote bar @@ -351,35 +351,35 @@ (*>*) code_printing %quotett type_constructor bar \ (Haskell) "Integer" -text {* +text \ \noindent The code generator would produce an additional instance, which of course is rejected by the @{text Haskell} compiler. To suppress this additional instance: -*} +\ code_printing %quotett class_instance bar :: "HOL.equal" \ (Haskell) - -subsection {* Enhancing the target language context \label{sec:include} *} +subsection \Enhancing the target language context \label{sec:include}\ -text {* +text \ In rare cases it is necessary to \emph{enrich} the context of a target language; this can also be accomplished using the @{command "code_printing"} command: -*} +\ code_printing %quotett - code_module "Errno" \ (Haskell) {*errno i = error ("Error number: " ++ show i)*} + code_module "Errno" \ (Haskell) \errno i = error ("Error number: " ++ show i)\ code_reserved %quotett Haskell Errno -text {* +text \ \noindent Such named modules are then prepended to every generated code. Inspect such code in order to find out how this behaves with respect to a particular target language. 
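The included text in the code_module declaration above is ordinary Haskell. Spelled out as a standalone sketch (the module name Errno is taken from the declaration; the Integer argument type is an assumption for illustration), it amounts to:

    module Errno where

    errno :: Integer -> a
    errno i = error ("Error number: " ++ show i)

Because this text is prepended verbatim to the generated code, the name Errno must not be reused by generated identifiers, which is exactly what the code_reserved declaration above ensures.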
-*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Evaluation.thy --- a/src/Doc/Codegen/Evaluation.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Evaluation.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,9 +2,9 @@ imports Setup begin -section {* Evaluation \label{sec:evaluation} *} +section \Evaluation \label{sec:evaluation}\ -text {* +text \ Recalling \secref{sec:principle}, code generation turns a system of equations into a program with the \emph{same} equational semantics. As a consequence, this program can be used as a \emph{rewrite @@ -12,12 +12,12 @@ term @{term "t'"} yields the theorems @{prop "t \ t'"}. This application of code generation in the following is referred to as \emph{evaluation}. -*} +\ -subsection {* Evaluation techniques *} +subsection \Evaluation techniques\ -text {* +text \ The existing infrastructure provides a rich palette of evaluation techniques, each comprising different aspects: @@ -35,34 +35,34 @@ trustable. \end{description} -*} +\ -subsubsection {* The simplifier (@{text simp}) *} +subsubsection \The simplifier (@{text simp})\ -text {* +text \ The simplest way for evaluation is just using the simplifier with the original code equations of the underlying program. This gives fully symbolic evaluation and highest trustablity, with the usual performance of the simplifier. Note that for operations on abstract datatypes (cf.~\secref{sec:invariant}), the original theorems as given by the users are used, not the modified ones. -*} +\ -subsubsection {* Normalization by evaluation (@{text nbe}) *} +subsubsection \Normalization by evaluation (@{text nbe})\ -text {* +text \ Normalization by evaluation @{cite "Aehlig-Haftmann-Nipkow:2008:nbe"} provides a comparably fast partially symbolic evaluation which permits also normalization of functions and uninterpreted symbols; the stack of code to be trusted is considerable. -*} +\ -subsubsection {* Evaluation in ML (@{text code}) *} +subsubsection \Evaluation in ML (@{text code})\ -text {* +text \ Highest performance can be achieved by evaluation in ML, at the cost of being restricted to ground results and a layered stack of code to be trusted, including code generator configurations by the user. @@ -73,32 +73,32 @@ out there depends crucially on the correctness of the code generator setup; this is one of the reasons why you should not use adaptation (see \secref{sec:adaptation}) frivolously. -*} +\ -subsection {* Aspects of evaluation *} +subsection \Aspects of evaluation\ -text {* +text \ Each of the techniques can be combined with different aspects. The most important distinction is between dynamic and static evaluation. Dynamic evaluation takes the code generator configuration \qt{as it is} at the point where evaluation is issued. Best example is the @{command_def value} command which allows ad-hoc evaluation of terms: -*} +\ value %quote "42 / (12 :: rat)" -text {* +text \ \noindent @{command value} tries first to evaluate using ML, falling back to normalization by evaluation if this fails. A particular technique may be specified in square brackets, e.g. -*} +\ value %quote [nbe] "42 / (12 :: rat)" -text {* +text \ To employ dynamic evaluation in the document generation, there is also a @{text value} antiquotation with the same evaluation techniques as @{command value}. @@ -162,12 +162,12 @@ For property conversion (which coincides with conversion except for evaluation in ML), methods are provided which solve a given goal by evaluation. 
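Returning to the value examples above: the term 42 / (12 :: rat) normalises to the rational number 7/2. Purely to make the computed normal form concrete, the same arithmetic in plain Haskell (using the standard Rational type, which is of course not the code generated for HOL's rat) reads:

    main :: IO ()
    main = print (42 / 12 :: Rational)  -- prints 7 % 2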
-*} +\ -subsection {* Schematic overview *} +subsection \Schematic overview\ -text {* +text \ \newcommand{\ttsize}{\fontsize{5.8pt}{8pt}\selectfont} \fontsize{9pt}{12pt}\selectfont \begin{tabular}{ll||c|c|c} @@ -189,12 +189,12 @@ & \ttsize@{ML "Nbe.static_conv"} & \ttsize@{ML "Code_Evaluation.static_conv"} \end{tabular} -*} +\ -subsection {* Preprocessing HOL terms into evaluable shape *} +subsection \Preprocessing HOL terms into evaluable shape\ -text {* +text \ When integration decision procedures developed inside HOL into HOL itself, it is necessary to somehow get from the Isabelle/ML representation to the representation used by the decision procedure itself (``reification''). @@ -203,7 +203,7 @@ @{ML "Reification.conv"} and @{ML "Reification.tac"} An simplistic example: -*} +\ datatype %quote form_ord = T | F | Less nat nat | And form_ord form_ord | Or form_ord form_ord | Neg form_ord @@ -217,21 +217,21 @@ | "interp (Or f1 f2) vs \ interp f1 vs \ interp f2 vs" | "interp (Neg f) vs \ \ interp f vs" -text {* +text \ The datatype @{type form_ord} represents formulae whose semantics is given by @{const interp}. Note that values are represented by variable indices (@{typ nat}) whose concrete values are given in list @{term vs}. -*} +\ -ML (*<*) {* *} +ML (*<*) \\ schematic_lemma "thm": fixes x y z :: "'a::order" shows "x < y \ x < z \ ?P" ML_prf -(*>*) {* val thm = - Reification.conv @{context} @{thms interp.simps} @{cterm "x < y \ x < z"} *} (*<*) -by (tactic {* ALLGOALS (rtac thm) *}) +(*>*) \val thm = + Reification.conv @{context} @{thms interp.simps} @{cterm "x < y \ x < z"}\ (*<*) +by (tactic \ALLGOALS (rtac thm)\) (*>*) -text {* +text \ By virtue of @{fact interp.simps}, @{ML "Reification.conv"} provides a conversion which, for this concrete example, yields @{thm thm [no_vars]}. Note that the argument to @{const interp} does not contain any free variables and can thus be evaluated @@ -239,38 +239,38 @@ A less meager example can be found in the AFP, session @{text "Regular-Sets"}, theory @{text Regexp_Method}. -*} +\ -subsection {* Intimate connection between logic and system runtime *} +subsection \Intimate connection between logic and system runtime\ -text {* +text \ The toolbox of static evaluation conversions forms a reasonable base to interweave generated code and system tools. However in some situations more direct interaction is desirable. -*} +\ -subsubsection {* Static embedding of generated code into system runtime -- the @{text code} antiquotation \label{sec:code_antiq} *} +subsubsection \Static embedding of generated code into system runtime -- the @{text code} antiquotation \label{sec:code_antiq}\ -text {* +text \ The @{text code} antiquotation allows to include constants from generated code directly into ML system code, as in the following toy example: -*} +\ datatype %quote form = T | F | And form form | Or form form (*<*) -(*>*) ML %quotett {* +(*>*) ML %quotett \ fun eval_form @{code T} = true | eval_form @{code F} = false | eval_form (@{code And} (p, q)) = eval_form p andalso eval_form q | eval_form (@{code Or} (p, q)) = eval_form p orelse eval_form q; -*} +\ -text {* +text \ \noindent @{text code} takes as argument the name of a constant; after the whole ML is read, the necessary code is generated transparently and the corresponding constant names are inserted. @@ -281,24 +281,24 @@ For a less simplistic example, theory @{text Approximation} in the @{text Decision_Procs} session is a good reference. 
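For comparison, the ML evaluator above transcribed into Haskell; in the ML original the constructors T, F, And and Or are inserted by the @{code} antiquotation from generated code, whereas in this sketch the datatype is simply declared locally:

    data Form = T | F | And Form Form | Or Form Form

    evalForm :: Form -> Bool
    evalForm T = True
    evalForm F = False
    evalForm (And p q) = evalForm p && evalForm q
    evalForm (Or p q) = evalForm p || evalForm q

For example, evalForm (And T (Or F T)) yields True.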
-*} +\ -subsubsection {* Static embedding of generated code into system runtime -- @{text code_reflect} *} +subsubsection \Static embedding of generated code into system runtime -- @{text code_reflect}\ -text {* +text \ The @{text code} antiquoation is lightweight, but the generated code is only accessible while the ML section is processed. Sometimes this is not appropriate, especially if the generated code contains datatype declarations which are shared with other parts of the system. In these cases, @{command_def code_reflect} can be used: -*} +\ code_reflect %quote Sum_Type datatypes sum = Inl | Inr functions "Sum_Type.sum.projl" "Sum_Type.sum.projr" -text {* +text \ \noindent @{command_def code_reflect} takes a structure name and references to datatypes and functions; for these code is compiled into the named ML structure and the \emph{Eval} target is modified @@ -309,17 +309,17 @@ A typical example for @{command code_reflect} can be found in the @{theory Predicate} theory. -*} +\ -subsubsection {* Separate compilation -- @{text code_reflect} *} +subsubsection \Separate compilation -- @{text code_reflect}\ -text {* +text \ For technical reasons it is sometimes necessary to separate generation and compilation of code which is supposed to be used in the system runtime. For this @{command code_reflect} with an optional @{text "file"} argument can be used: -*} +\ code_reflect %quote Rat datatypes rat = Frct @@ -328,10 +328,10 @@ "(times :: rat \ rat \ rat)" "(divide :: rat \ rat \ rat)" file "$ISABELLE_TMP/examples/rat.ML" -text {* +text \ \noindent This merely generates the referenced code to the given file which can be included into the system runtime later on. -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Foundations.thy --- a/src/Doc/Codegen/Foundations.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Foundations.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,11 +2,11 @@ imports Introduction begin -section {* Code generation foundations \label{sec:foundations} *} +section \Code generation foundations \label{sec:foundations}\ -subsection {* Code generator architecture \label{sec:architecture} *} +subsection \Code generator architecture \label{sec:architecture}\ -text {* +text \ The code generator is actually a framework consisting of different components which can be customised individually. @@ -90,12 +90,12 @@ \noindent From these steps, only the last two are carried out outside the logic; by keeping this layer as thin as possible, the amount of code to trust is kept to a minimum. -*} +\ -subsection {* The pre- and postprocessor \label{sec:preproc} *} +subsection \The pre- and postprocessor \label{sec:preproc}\ -text {* +text \ Before selected function theorems are turned into abstract code, a chain of definitional transformation steps is carried out: \emph{preprocessing}. The preprocessor consists of two @@ -122,13 +122,13 @@ is is just expressed as @{term "x \ set xs"}. But for execution the intermediate set is not desirable. Hence the following specification: -*} +\ definition %quote member :: "'a list \ 'a \ bool" where [code_abbrev]: "member xs x \ x \ set xs" -text {* +text \ \noindent The \emph{@{attribute code_abbrev} attribute} declares its theorem a rewrite rule for the postprocessor and the symmetric of its theorem as rewrite rule for the preprocessor. Together, @@ -153,12 +153,12 @@ code_thms} (see \secref{sec:equations}) provides a convenient mechanism to inspect the impact of a preprocessor setup on code equations. 
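To see what the code_abbrev rewrite buys: after preprocessing, occurrences of x ∈ set xs become applications of List.member, whose code equations are plain recursion over the list and hence directly executable. Roughly, as a hand-written Haskell sketch rather than the literal generated output:

    member :: Eq a => [a] -> a -> Bool
    member [] _ = False
    member (y : ys) x = y == x || member ys x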
-*} +\ -subsection {* Understanding code equations \label{sec:equations} *} +subsection \Understanding code equations \label{sec:equations}\ -text {* +text \ As told in \secref{sec:principle}, the notion of code equations is vital to code generation. Indeed most problems which occur in practice can be resolved by an inspection of the underlying code @@ -166,7 +166,7 @@ It is possible to exchange the default code equations for constants by explicitly proving alternative ones: -*} +\ lemma %quote [code]: "dequeue (AQueue xs []) = @@ -176,18 +176,18 @@ (Some y, AQueue xs ys)" by (cases xs, simp_all) (cases "rev xs", simp_all) -text {* +text \ \noindent The annotation @{text "[code]"} is an @{text attribute} which states that the given theorems should be considered as code equations for a @{text fun} statement -- the corresponding constant is determined syntactically. The resulting code: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts dequeue (consts) dequeue (Haskell)} -*} +\ -text {* +text \ \noindent You may note that the equality test @{term "xs = []"} has been replaced by the predicate @{term "List.null xs"}. This is due to the default setup of the \qn{preprocessor}. @@ -206,24 +206,24 @@ The code equations after preprocessing are already are blueprint of the generated program and can be inspected using the @{command code_thms} command: -*} +\ code_thms %quote dequeue -text {* +text \ \noindent This prints a table with the code equations for @{const dequeue}, including \emph{all} code equations those equations depend on recursively. These dependencies themselves can be visualized using the @{command_def code_deps} command. -*} +\ -subsection {* Equality *} +subsection \Equality\ -text {* +text \ Implementation of equality deserves some attention. Here an example function involving polymorphic equality: -*} +\ primrec %quote collect_duplicates :: "'a list \ 'a list \ 'a list \ 'a list" where "collect_duplicates xs ys [] = xs" @@ -233,17 +233,17 @@ else collect_duplicates xs (z#ys) zs else collect_duplicates (z#xs) (z#ys) zs)" -text {* +text \ \noindent During preprocessing, the membership test is rewritten, resulting in @{const List.member}, which itself performs an explicit equality check, as can be seen in the corresponding @{text SML} code: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts collect_duplicates (SML)} -*} +\ -text {* +text \ \noindent Obviously, polymorphic equality is implemented the Haskell way using a type class. How is this achieved? HOL introduces an explicit class @{class equal} with a corresponding operation @{const @@ -252,15 +252,15 @@ through all dependent code equations. For datatypes, instances of @{class equal} are implicitly derived when possible. For other types, you may instantiate @{text equal} manually like any other type class. 
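For the Haskell target the same program simply carries an Eq constraint instead of an explicit dictionary parameter. A hand-written sketch of the shape one can expect, using elem where the generated code uses its own membership function:

    collectDuplicates :: Eq a => [a] -> [a] -> [a] -> [a]
    collectDuplicates xs ys [] = xs
    collectDuplicates xs ys (z : zs) =
      if z `elem` ys
        then if z `elem` xs
               then collectDuplicates xs ys zs
               else collectDuplicates xs (z : ys) zs
        else collectDuplicates (z : xs) (z : ys) zs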
-*} +\ -subsection {* Explicit partiality \label{sec:partiality} *} +subsection \Explicit partiality \label{sec:partiality}\ -text {* +text \ Partiality usually enters the game by partial patterns, as in the following example, again for amortised queues: -*} +\ definition %quote strict_dequeue :: "'a queue \ 'a \ 'a queue" where "strict_dequeue q = (case dequeue q @@ -272,19 +272,19 @@ (case rev xs of y # ys \ (y, AQueue [] ys))" by (simp_all add: strict_dequeue_def) (cases xs, simp_all split: list.split) -text {* +text \ \noindent In the corresponding code, there is no equation for the pattern @{term "AQueue [] []"}: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts strict_dequeue (consts) strict_dequeue (Haskell)} -*} +\ -text {* +text \ \noindent In some cases it is desirable to have this pseudo-\qt{partiality} more explicitly, e.g.~as follows: -*} +\ axiomatization %quote empty_queue :: 'a @@ -298,7 +298,7 @@ (y, AQueue xs ys)" by (simp_all add: strict_dequeue'_def split: list.splits) -text {* +text \ Observe that on the right hand side of the definition of @{const "strict_dequeue'"}, the unspecified constant @{const empty_queue} occurs. @@ -310,30 +310,30 @@ side. In order to categorise a constant into that category explicitly, use the @{attribute code} attribute with @{text abort}: -*} +\ declare %quote [[code abort: empty_queue]] -text {* +text \ \noindent Then the code generator will just insert an error or exception at the appropriate position: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts strict_dequeue' (consts) empty_queue strict_dequeue' (Haskell)} -*} +\ -text {* +text \ \noindent This feature however is rarely needed in practice. Note also that the HOL default setup already declares @{const undefined}, which is most likely to be used in such situations, as @{text "code abort"}. -*} +\ -subsection {* If something goes utterly wrong \label{sec:utterly_wrong} *} +subsection \If something goes utterly wrong \label{sec:utterly_wrong}\ -text {* +text \ Under certain circumstances, the code generator fails to produce code entirely. To debug these, the following hints may prove helpful: @@ -371,6 +371,6 @@ class signatures (\qt{wellsortedness error}). \end{description} -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Further.thy --- a/src/Doc/Codegen/Further.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Further.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,11 +2,11 @@ imports Setup "~~/src/Tools/Permanent_Interpretation" begin -section {* Further issues \label{sec:further} *} +section \Further issues \label{sec:further}\ -subsection {* Specialities of the @{text Scala} target language \label{sec:scala} *} +subsection \Specialities of the @{text Scala} target language \label{sec:scala}\ -text {* +text \ @{text Scala} deviates from languages of the ML family in a couple of aspects; those which affect code generation mainly have to do with @{text Scala}'s type system: @@ -43,7 +43,7 @@ Isabelle's type classes are mapped onto @{text Scala} implicits; in cases with diamonds in the subclass hierarchy this can lead to ambiguities in the generated code: -*} +\ class %quote class1 = fixes foo :: "'a \ 'a" @@ -52,62 +52,62 @@ class %quote class3 = class1 -text {* +text \ \noindent Here both @{class class2} and @{class class3} inherit from @{class class1}, forming the upper part of a diamond. 
-*} +\ definition %quote bar :: "'a :: {class2, class3} \ 'a" where "bar = foo" -text {* +text \ \noindent This yields the following code: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts bar (Scala)} -*} +\ -text {* +text \ \noindent This code is rejected by the @{text Scala} compiler: in the definition of @{text bar}, it is not clear from where to derive the implicit argument for @{text foo}. The solution to the problem is to close the diamond by a further class with inherits from both @{class class2} and @{class class3}: -*} +\ class %quote class4 = class2 + class3 -text {* +text \ \noindent Then the offending code equation can be restricted to @{class class4}: -*} +\ lemma %quote [code]: "(bar :: 'a::class4 \ 'a) = foo" by (simp only: bar_def) -text {* +text \ \noindent with the following code: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts bar (Scala)} -*} +\ -text {* +text \ \noindent which exposes no ambiguity. Since the preprocessor (cf.~\secref{sec:preproc}) propagates sort constraints through a system of code equations, it is usually not very difficult to identify the set of code equations which actually needs more restricted sort constraints. -*} +\ -subsection {* Modules namespace *} +subsection \Modules namespace\ -text {* +text \ When invoking the @{command export_code} command it is possible to leave out the @{keyword "module_name"} part; then code is distributed over different modules, where the module name space @@ -122,38 +122,38 @@ A solution is to declare module names explicitly. Let use assume the three cyclically dependent modules are named \emph{A}, \emph{B} and \emph{C}. Then, by stating -*} +\ code_identifier %quote code_module A \ (SML) ABC | code_module B \ (SML) ABC | code_module C \ (SML) ABC -text {* +text \ \noindent we explicitly map all those modules on \emph{ABC}, resulting in an ad-hoc merge of this three modules at serialisation time. -*} +\ -subsection {* Locales and interpretation *} +subsection \Locales and interpretation\ -text {* +text \ A technical issue comes to surface when generating code from specifications stemming from locale interpretation. Let us assume a locale specifying a power operation on arbitrary types: -*} +\ locale %quote power = fixes power :: "'a \ 'b \ 'b" assumes power_commute: "power x \ power y = power y \ power x" begin -text {* +text \ \noindent Inside that locale we can lift @{text power} to exponent lists by means of specification relative to that locale: -*} +\ primrec %quote powers :: "'a list \ 'b \ 'b" where "powers [] = id" @@ -175,7 +175,7 @@ end %quote -text {* +text \ After an interpretation of this locale (say, @{command_def interpretation} @{text "fun_power:"} @{term [source] "power (\n (f :: 'a \ 'a). f ^^ n)"}), one would expect to have a constant @{text @@ -189,43 +189,43 @@ achieved. First, a dedicated definition of the constant on which the local @{text "powers"} after interpretation is supposed to be mapped on: -*} +\ definition %quote funpows :: "nat list \ ('a \ 'a) \ 'a \ 'a" where [code del]: "funpows = power.powers (\n f. f ^^ n)" -text {* +text \ \noindent In general, the pattern is @{text "c = t"} where @{text c} is the name of the future constant and @{text t} the foundational term corresponding to the local constant after interpretation. The interpretation itself is enriched with an equation @{text "t = c"}: -*} +\ interpretation %quote fun_power: power "(\n (f :: 'a \ 'a). f ^^ n)" where "power.powers (\n f. 
f ^^ n) = funpows" by unfold_locales (simp_all add: fun_eq_iff funpow_mult mult.commute funpows_def) -text {* +text \ \noindent This additional equation is trivially proved by the definition itself. After this setup procedure, code generation can continue as usual: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts funpows (consts) Nat.funpow funpows (Haskell)} -*} (*<*) +\ (*<*) -(*>*) text {* +(*>*) text \ Fortunately, an even more succint approach is available using command @{command permanent_interpretation}. This is available by importing theory @{file "~~/src/Tools/Permanent_Interpretation.thy"}. Then the pattern above collapses to -*} (*<*) +\ (*<*) -setup {* Sign.add_path "funpows" *} +setup \Sign.add_path "funpows"\ hide_const (open) funpows (*>*) @@ -234,43 +234,43 @@ by unfold_locales (simp_all add: fun_eq_iff funpow_mult mult.commute) (*<*) -setup {* Sign.parent_path *} +setup \Sign.parent_path\ (*>*) -subsection {* Parallel computation *} +subsection \Parallel computation\ -text {* +text \ Theory @{text Parallel} in @{file "~~/src/HOL/Library"} contains operations to exploit parallelism inside the Isabelle/ML runtime engine. -*} +\ -subsection {* Imperative data structures *} +subsection \Imperative data structures\ -text {* +text \ If you consider imperative data structures as inevitable for a specific application, you should consider \emph{Imperative Functional Programming with Isabelle/HOL} @{cite "bulwahn-et-al:2008:imperative"}; the framework described there is available in session @{text Imperative_HOL}, together with a short primer document. -*} +\ -subsection {* ML system interfaces \label{sec:ml} *} +subsection \ML system interfaces \label{sec:ml}\ -text {* +text \ Since the code generator framework not only aims to provide a nice Isar interface but also to form a base for code-generation-based applications, here a short description of the most fundamental ML interfaces. -*} +\ -subsubsection {* Managing executable content *} +subsubsection \Managing executable content\ -text %mlref {* +text %mlref \ \begin{mldecls} @{index_ML Code.read_const: "theory -> string -> string"} \\ @{index_ML Code.add_eqn: "thm -> theory -> theory"} \\ @@ -322,12 +322,12 @@ if @{text const} is no constructor. \end{description} -*} +\ -subsubsection {* Data depending on the theory's executable content *} +subsubsection \Data depending on the theory's executable content\ -text {* +text \ Implementing code generator applications on top of the framework set out so far usually not only involves using those primitive interfaces but also storing code-dependent data and various other @@ -373,7 +373,7 @@ \item @{text change_yield} update with side result. \end{description} -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Inductive_Predicate.thy --- a/src/Doc/Codegen/Inductive_Predicate.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Inductive_Predicate.thy Thu Jan 15 13:39:41 2015 +0100 @@ -16,9 +16,9 @@ lexordp_def [unfolded lexord_def mem_Collect_eq split] (*>*) -section {* Inductive Predicates \label{sec:inductive} *} +section \Inductive Predicates \label{sec:inductive}\ -text {* +text \ The @{text "predicate compiler"} is an extension of the code generator which turns inductive specifications into equational ones, from which in turn executable code can be generated. 
The mechanisms of @@ -27,20 +27,20 @@ Consider the simple predicate @{const append} given by these two introduction rules: -*} +\ -text %quote {* +text %quote \ @{thm append.intros(1)[of ys]} \\ @{thm append.intros(2)[of xs ys zs x]} -*} +\ -text {* +text \ \noindent To invoke the compiler, simply use @{command_def "code_pred"}: -*} +\ code_pred %quote append . -text {* +text \ \noindent The @{command "code_pred"} command takes the name of the inductive predicate and then you put a period to discharge a trivial correctness proof. The compiler infers possible modes for the @@ -56,55 +56,55 @@ \item @{text "o \ o \ i \ bool"} \end{itemize} You can compute sets of predicates using @{command_def "values"}: -*} +\ values %quote "{zs. append [(1::nat),2,3] [4,5] zs}" -text {* \noindent outputs @{text "{[1, 2, 3, 4, 5]}"}, and *} +text \\noindent outputs @{text "{[1, 2, 3, 4, 5]}"}, and\ values %quote "{(xs, ys). append xs ys [(2::nat),3]}" -text {* \noindent outputs @{text "{([], [2, 3]), ([2], [3]), ([2, 3], [])}"}. *} +text \\noindent outputs @{text "{([], [2, 3]), ([2], [3]), ([2, 3], [])}"}.\ -text {* +text \ \noindent If you are only interested in the first elements of the set comprehension (with respect to a depth-first search on the introduction rules), you can pass an argument to @{command "values"} to specify the number of elements you want: -*} +\ values %quote 1 "{(xs, ys). append xs ys [(1::nat), 2, 3, 4]}" values %quote 3 "{(xs, ys). append xs ys [(1::nat), 2, 3, 4]}" -text {* +text \ \noindent The @{command "values"} command can only compute set comprehensions for which a mode has been inferred. The code equations for a predicate are made available as theorems with the suffix @{text "equation"}, and can be inspected with: -*} +\ thm %quote append.equation -text {* +text \ \noindent More advanced options are described in the following subsections. -*} +\ -subsection {* Alternative names for functions *} +subsection \Alternative names for functions\ -text {* +text \ By default, the functions generated from a predicate are named after the predicate with the mode mangled into the name (e.g., @{text "append_i_i_o"}). You can specify your own names as follows: -*} +\ code_pred %quote (modes: i \ i \ o \ bool as concat, o \ o \ i \ bool as split, i \ o \ i \ bool as suffix) append . -subsection {* Alternative introduction rules *} +subsection \Alternative introduction rules\ -text {* +text \ Sometimes the introduction rules of an predicate are not executable because they contain non-executable constants or specific modes could not be inferred. It is also possible that the introduction @@ -113,34 +113,34 @@ introduction rules for predicates with the attribute @{attribute "code_pred_intro"}. For example, the transitive closure is defined by: -*} +\ -text %quote {* +text %quote \ @{lemma [source] "r a b \ tranclp r a b" by (fact tranclp.intros(1))}\\ @{lemma [source] "tranclp r a b \ r b c \ tranclp r a c" by (fact tranclp.intros(2))} -*} +\ -text {* +text \ \noindent These rules do not suit well for executing the transitive closure with the mode @{text "(i \ o \ bool) \ i \ o \ bool"}, as the second rule will cause an infinite loop in the recursive call. 
This can be avoided using the following alternative rules which are declared to the predicate compiler by the attribute @{attribute "code_pred_intro"}: -*} +\ lemma %quote [code_pred_intro]: "r a b \ tranclp r a b" "r a b \ tranclp r b c \ tranclp r a c" by auto -text {* +text \ \noindent After declaring all alternative rules for the transitive closure, you invoke @{command "code_pred"} as usual. As you have declared alternative rules for the predicate, you are urged to prove that these introduction rules are complete, i.e., that you can derive an elimination rule for the alternative rules: -*} +\ code_pred %quote tranclp proof - @@ -148,20 +148,20 @@ from this converse_tranclpE [OF tranclp.prems] show thesis by metis qed -text {* +text \ \noindent Alternative rules can also be used for constants that have not been defined inductively. For example, the lexicographic order which is defined as: -*} +\ -text %quote {* +text %quote \ @{thm [display] lexordp_def [of r]} -*} +\ -text {* +text \ \noindent To make it executable, you can derive the following two rules and prove the elimination rule: -*} +\ lemma %quote [code_pred_intro]: "append xs (a # v) ys \ lexordp r xs ys" @@ -197,16 +197,16 @@ qed(*>*) -subsection {* Options for values *} +subsection \Options for values\ -text {* +text \ In the presence of higher-order predicates, multiple modes for some predicate could be inferred that are not disambiguated by the pattern of the set comprehension. To disambiguate the modes for the arguments of a predicate, you can state the modes explicitly in the @{command "values"} command. Consider the simple predicate @{term "succ"}: -*} +\ inductive %quote succ :: "nat \ nat \ bool" where "succ 0 (Suc 0)" @@ -214,7 +214,7 @@ code_pred %quote succ . -text {* +text \ \noindent For this, the predicate compiler can infer modes @{text "o \ o \ bool"}, @{text "i \ o \ bool"}, @{text "o \ i \ bool"} and @{text "i \ i \ bool"}. The invocation of @{command "values"} @@ -223,15 +223,15 @@ "o \ o \ bool"} is chosen. To choose another mode for the argument, you can declare the mode for the argument between the @{command "values"} and the number of elements. -*} +\ values %quote [mode: i \ o \ bool] 1 "{n. tranclp succ 10 n}" (*FIMXE does not terminate for n\1*) values %quote [mode: o \ i \ bool] 1 "{n. tranclp succ n 10}" -subsection {* Embedding into functional code within Isabelle/HOL *} +subsection \Embedding into functional code within Isabelle/HOL\ -text {* +text \ To embed the computation of an inductive predicate into functions that are defined in Isabelle/HOL, you have a number of options: @@ -258,18 +258,18 @@ raises a run-time error @{term "not_unique"}. \end{itemize} -*} +\ -subsection {* Further Examples *} +subsection \Further Examples\ -text {* +text \ Further examples for compiling inductive predicates can be found in @{file "~~/src/HOL/Predicate_Compile_Examples/Examples.thy"}. There are also some examples in the Archive of Formal Proofs, notably in the @{text "POPLmark-deBruijn"} and the @{text "FeatherweightJava"} sessions. -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Introduction.thy --- a/src/Doc/Codegen/Introduction.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Introduction.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,9 +2,9 @@ imports Setup begin -section {* Introduction *} +section \Introduction\ -text {* +text \ This tutorial introduces the code generator facilities of @{text "Isabelle/HOL"}. 
It allows to turn (a certain class of) HOL specifications into corresponding executable code in the programming @@ -14,12 +14,12 @@ To profit from this tutorial, some familiarity and experience with @{theory HOL} @{cite "isa-tutorial"} and its basic theories is assumed. -*} +\ -subsection {* Code generation principle: shallow embedding \label{sec:principle} *} +subsection \Code generation principle: shallow embedding \label{sec:principle}\ -text {* +text \ The key concept for understanding Isabelle's code generation is \emph{shallow embedding}: logical entities like constants, types and classes are identified with corresponding entities in the target @@ -29,12 +29,12 @@ system, then every rewrite step performed by the program can be simulated in the logic, which guarantees partial correctness @{cite "Haftmann-Nipkow:2010:code"}. -*} +\ -subsection {* A quick start with the Isabelle/HOL toolbox \label{sec:queue_example} *} +subsection \A quick start with the Isabelle/HOL toolbox \label{sec:queue_example}\ -text {* +text \ In a HOL theory, the @{command_def datatype} and @{command_def definition}/@{command_def primrec}/@{command_def fun} declarations form the core of a functional programming language. By default @@ -43,7 +43,7 @@ ado. For example, here a simple \qt{implementation} of amortised queues: -*} +\ datatype %quote 'a queue = AQueue "'a list" "'a list" @@ -63,18 +63,18 @@ "xs \ [] \ dequeue (AQueue xs []) = (case rev xs of y # ys \ (Some y, AQueue [] ys))" by (cases xs) (simp_all split: list.splits) (*>*) -text {* \noindent Then we can generate code e.g.~for @{text SML} as follows: *} +text \\noindent Then we can generate code e.g.~for @{text SML} as follows:\ export_code %quote empty dequeue enqueue in SML module_name Example file "$ISABELLE_TMP/examples/example.ML" -text {* \noindent resulting in the following code: *} +text \\noindent resulting in the following code:\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts empty enqueue dequeue (SML)} -*} +\ -text {* +text \ \noindent The @{command_def export_code} command takes a space-separated list of constants for which code shall be generated; anything else needed for those is added implicitly. Then follows a @@ -84,31 +84,31 @@ @{text SML}, @{text OCaml} and @{text Scala} it denotes a \emph{file}, for @{text Haskell} it denotes a \emph{directory} where a file named as the module name (with extension @{text ".hs"}) is written: -*} +\ export_code %quote empty dequeue enqueue in Haskell module_name Example file "$ISABELLE_TMP/examples/" -text {* +text \ \noindent This is the corresponding code: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts empty enqueue dequeue (Haskell)} -*} +\ -text {* +text \ \noindent For more details about @{command export_code} see \secref{sec:further}. -*} +\ -subsection {* Type classes *} +subsection \Type classes\ -text {* +text \ Code can also be generated from type classes in a Haskell-like manner. 
For illustration here an example from abstract algebra: -*} +\ class %quote semigroup = fixes mult :: "'a \ 'a \ 'a" (infixl "\" 70) @@ -146,51 +146,51 @@ end %quote -text {* +text \ \noindent We define the natural operation of the natural numbers on monoids: -*} +\ primrec %quote (in monoid) pow :: "nat \ 'a \ 'a" where "pow 0 a = \" | "pow (Suc n) a = a \ pow n a" -text {* +text \ \noindent This we use to define the discrete exponentiation function: -*} +\ definition %quote bexp :: "nat \ nat" where "bexp n = pow n (Suc (Suc 0))" -text {* +text \ \noindent The corresponding code in Haskell uses that language's native classes: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts bexp (Haskell)} -*} +\ -text {* +text \ \noindent This is a convenient place to show how explicit dictionary construction manifests in generated code -- the same example in @{text SML}: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts bexp (SML)} -*} +\ -text {* +text \ \noindent Note the parameters with trailing underscore (@{verbatim "A_"}), which are the dictionary parameters. -*} +\ -subsection {* How to continue from here *} +subsection \How to continue from here\ -text {* +text \ What you have seen so far should be already enough in a lot of cases. If you are content with this, you can quit reading here. @@ -236,7 +236,7 @@ \begin{center}\textit{Happy proving, happy hacking!}\end{center} \end{minipage}}}\end{center} -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Refinement.thy --- a/src/Doc/Codegen/Refinement.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Refinement.thy Thu Jan 15 13:39:41 2015 +0100 @@ -2,81 +2,81 @@ imports Setup begin -section {* Program and datatype refinement \label{sec:refinement} *} +section \Program and datatype refinement \label{sec:refinement}\ -text {* +text \ Code generation by shallow embedding (cf.~\secref{sec:principle}) allows to choose code equations and datatype constructors freely, given that some very basic syntactic properties are met; this flexibility opens up mechanisms for refinement which allow to extend the scope and quality of generated code dramatically. -*} +\ -subsection {* Program refinement *} +subsection \Program refinement\ -text {* +text \ Program refinement works by choosing appropriate code equations explicitly (cf.~\secref{sec:equations}); as example, we use Fibonacci numbers: -*} +\ fun %quote fib :: "nat \ nat" where "fib 0 = 0" | "fib (Suc 0) = Suc 0" | "fib (Suc (Suc n)) = fib n + fib (Suc n)" -text {* +text \ \noindent The runtime of the corresponding code grows exponential due to two recursive calls: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts fib (consts) fib (Haskell)} -*} +\ -text {* +text \ \noindent A more efficient implementation would use dynamic programming, e.g.~sharing of common intermediate results between recursive calls. 
This idea is expressed by an auxiliary operation which computes a Fibonacci number and its successor simultaneously: -*} +\ definition %quote fib_step :: "nat \ nat \ nat" where "fib_step n = (fib (Suc n), fib n)" -text {* +text \ \noindent This operation can be implemented by recursion using dynamic programming: -*} +\ lemma %quote [code]: "fib_step 0 = (Suc 0, 0)" "fib_step (Suc n) = (let (m, q) = fib_step n in (m + q, m))" by (simp_all add: fib_step_def) -text {* +text \ \noindent What remains is to implement @{const fib} by @{const fib_step} as follows: -*} +\ lemma %quote [code]: "fib 0 = 0" "fib (Suc n) = fst (fib_step n)" by (simp_all add: fib_step_def) -text {* +text \ \noindent The resulting code shows only linear growth of runtime: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts fib (consts) fib fib_step (Haskell)} -*} +\ -subsection {* Datatype refinement *} +subsection \Datatype refinement\ -text {* +text \ Selecting specific code equations \emph{and} datatype constructors leads to datatype refinement. As an example, we will develop an alternative representation of the queue example given in @@ -85,7 +85,7 @@ details, which may be cumbersome when proving theorems about it. Therefore, here is a simple, straightforward representation of queues: -*} +\ datatype %quote 'a queue = Queue "'a list" @@ -99,17 +99,17 @@ "dequeue (Queue []) = (None, Queue [])" | "dequeue (Queue (x # xs)) = (Some x, Queue xs)" -text {* +text \ \noindent This we can use directly for proving; for executing, we provide an alternative characterisation: -*} +\ definition %quote AQueue :: "'a list \ 'a list \ 'a queue" where "AQueue xs ys = Queue (ys @ rev xs)" code_datatype %quote AQueue -text {* +text \ \noindent Here we define a \qt{constructor} @{const "AQueue"} which is defined in terms of @{text "Queue"} and interprets its arguments according to what the \emph{content} of an amortised queue is supposed @@ -127,7 +127,7 @@ Equipped with this, we are able to prove the following equations for our primitive queue operations which \qt{implement} the simple queues in an amortised fashion: -*} +\ lemma %quote empty_AQueue [code]: "empty = AQueue [] []" @@ -144,12 +144,12 @@ "dequeue (AQueue xs (y # ys)) = (Some y, AQueue xs ys)" by (simp_all add: AQueue_def) -text {* +text \ \noindent It is good style, although no absolute requirement, to provide code equations for the original artefacts of the implemented type, if possible; in our case, these are the datatype constructor @{const Queue} and the case combinator @{const case_queue}: -*} +\ lemma %quote Queue_AQueue [code]: "Queue = AQueue []" @@ -159,15 +159,15 @@ "case_queue f (AQueue xs ys) = f (ys @ rev xs)" by (simp add: AQueue_def) -text {* +text \ \noindent The resulting code looks as expected: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts empty enqueue dequeue Queue case_queue (SML)} -*} +\ -text {* +text \ The same techniques can also be applied to types which are not specified as datatypes, e.g.~type @{typ int} is originally specified as quotient type by means of @{command_def typedef}, but for code @@ -176,12 +176,12 @@ This approach however fails if the representation of a type demands invariants; this issue is discussed in the next section. 
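Transcribed to Haskell, with Integer standing in for the unary nat of the generated code and assuming a non-negative argument, the refined equations for fib_step and fib amount to:

    -- fibStep n computes (fib (n + 1), fib n) in a single linear recursion
    fibStep :: Integer -> (Integer, Integer)
    fibStep 0 = (1, 0)
    fibStep n = let (m, q) = fibStep (n - 1) in (m + q, m)

    fib :: Integer -> Integer
    fib 0 = 0
    fib n = fst (fibStep (n - 1))

Each recursive step now shares the two previously computed Fibonacci numbers, which is exactly the linear-time behaviour claimed below.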
-*} +\ -subsection {* Datatype refinement involving invariants \label{sec:invariant} *} +subsection \Datatype refinement involving invariants \label{sec:invariant}\ -text {* +text \ Datatype representation involving invariants require a dedicated setup for the type and its primitive operations. As a running example, we implement a type @{text "'a dlist"} of list consisting @@ -191,31 +191,31 @@ type (in our example @{text "'a dlist"}) should be implemented. Here we choose @{text "'a list"}. Then a conversion from the concrete type to the abstract type must be specified, here: -*} +\ -text %quote {* +text %quote \ @{term_type Dlist} -*} +\ -text {* +text \ \noindent Next follows the specification of a suitable \emph{projection}, i.e.~a conversion from abstract to concrete type: -*} +\ -text %quote {* +text %quote \ @{term_type list_of_dlist} -*} +\ -text {* +text \ \noindent This projection must be specified such that the following \emph{abstract datatype certificate} can be proven: -*} +\ lemma %quote [code abstype]: "Dlist (list_of_dlist dxs) = dxs" by (fact Dlist_list_of_dlist) -text {* +text \ \noindent Note that so far the invariant on representations (@{term_type distinct}) has never been mentioned explicitly: the invariant is only referred to implicitly: all values in @@ -226,26 +226,26 @@ indirectly using the projection @{const list_of_dlist}. For the empty @{text "dlist"}, @{const Dlist.empty}, we finally want the code equation -*} +\ -text %quote {* +text %quote \ @{term "Dlist.empty = Dlist []"} -*} +\ -text {* +text \ \noindent This we have to prove indirectly as follows: -*} +\ lemma %quote [code]: "list_of_dlist Dlist.empty = []" by (fact list_of_dlist_empty) -text {* +text \ \noindent This equation logically encodes both the desired code equation and that the expression @{const Dlist} is applied to obeys the implicit invariant. Equations for insertion and removal are similar: -*} +\ lemma %quote [code]: "list_of_dlist (Dlist.insert x dxs) = List.insert x (list_of_dlist dxs)" @@ -255,15 +255,15 @@ "list_of_dlist (Dlist.remove x dxs) = remove1 x (list_of_dlist dxs)" by (fact list_of_dlist_remove) -text {* +text \ \noindent Then the corresponding code is as follows: -*} +\ -text %quotetypewriter {* +text %quotetypewriter \ @{code_stmts Dlist.empty Dlist.insert Dlist.remove list_of_dlist (Haskell)} -*} +\ -text {* +text \ See further @{cite "Haftmann-Kraus-Kuncar-Nipkow:2013:data_refinement"} for the meta theory of datatype refinement involving invariants. @@ -271,6 +271,6 @@ invariants are available in the library, theory @{theory Mapping} specifies key-value-mappings (type @{typ "('a, 'b) mapping"}); these can be implemented by red-black-trees (theory @{theory RBT}). -*} +\ end diff -r ead400fd6484 -r 056945909f60 src/Doc/Codegen/Setup.thy --- a/src/Doc/Codegen/Setup.thy Thu Jan 15 13:39:41 2015 +0100 +++ b/src/Doc/Codegen/Setup.thy Thu Jan 15 13:39:41 2015 +0100 @@ -14,7 +14,7 @@ ML_file "../antiquote_setup.ML" ML_file "../more_antiquote.ML" -setup {* +setup \ let val typ = Simple_Syntax.read_typ; in @@ -25,7 +25,7 @@ [("_constrain", typ "logic => type => logic", Mixfix ("_ \ _", [4, 0], 3)), ("_constrain", typ "prop' => type => prop'", Mixfix ("_ \ _", [4, 0], 3))] end -*} +\ declare [[default_code_width = 74]]
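Returning to the dlist refinement discussed above: in Haskell terms, the generated code boils down to a constructor wrapping a plain list, with the distinctness invariant maintained solely by the operations permitted to build values of the type. A hand-written approximation (the function names here are illustrative, not the identifiers the serialiser actually emits; insert mirrors List.insert and remove mirrors remove1):

    import Data.List (delete)

    newtype Dlist a = Dlist [a]  -- invariant: no duplicate elements

    listOfDlist :: Dlist a -> [a]
    listOfDlist (Dlist xs) = xs

    emptyDlist :: Dlist a
    emptyDlist = Dlist []

    insertDlist :: Eq a => a -> Dlist a -> Dlist a
    insertDlist x (Dlist xs) = Dlist (if x `elem` xs then xs else x : xs)

    removeDlist :: Eq a => a -> Dlist a -> Dlist a
    removeDlist x (Dlist xs) = Dlist (delete x xs)

The point of the abstract datatype certificate is precisely that client code can only construct such values through these operations, so the invariant never has to be checked at run time.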