merged
authorwenzelm
Sat, 17 Oct 2015 21:42:18 +0200
changeset 61465 79900ab5d50a
parent 61433 a4c0de1df3d8 (current diff)
parent 61464 d35ff80f27fb (diff)
child 61466 9a468c3a1fa1
merged
--- a/NEWS	Sat Oct 17 13:18:43 2015 +0200
+++ b/NEWS	Sat Oct 17 21:42:18 2015 +0200
@@ -57,14 +57,27 @@
 
 *** Document preparation ***
 
-* Isabelle control symbols for markup and formatting:
-
+* Commands 'paragraph' and 'subparagraph' provide additional section
+headings. Thus there are 6 levels of standard headings, as in HTML.
+
+* Text is structured in paragraphs and nested lists, using notation that
+is similar to Markdown. The control symbols for list items are as
+follows:
+
+  \<^item>  itemize
+  \<^enum>  enumerate
+  \<^descr>  description
+
+* Text may contain control symbols for markup and formatting as follows:
+
+  \<^noindent>   \noindent
   \<^smallskip>   \smallskip
   \<^medskip>   \medskip
   \<^bigskip>   \bigskip
 
-  \<^item>  \item  (itemize)
-  \<^enum>  \item  (enumeration)
+* Command 'text_raw' has been clarified: input text is processed as in
+'text' (with antiquotations and control symbols). The key difference is
+the lack of the surrounding isabelle markup environment in output.
 
 
 *** Isar ***
--- a/etc/symbols	Sat Oct 17 13:18:43 2015 +0200
+++ b/etc/symbols	Sat Oct 17 21:42:18 2015 +0200
@@ -352,15 +352,18 @@
 \<open>                 code: 0x002039  group: punctuation  font: IsabelleText  abbrev: <<
 \<close>                code: 0x00203a  group: punctuation  font: IsabelleText  abbrev: >>
 \<here>                 code: 0x002302  font: IsabelleText
+\<^noindent>            code: 0x0021e4  group: control  font: IsabelleText
+\<^smallskip>           code: 0x002508  group: control  font: IsabelleText
+\<^medskip>             code: 0x002509  group: control  font: IsabelleText
+\<^bigskip>             code: 0x002501  group: control  font: IsabelleText
+\<^item>                code: 0x0025aa  group: control  font: IsabelleText
+\<^enum>                code: 0x0025b8  group: control  font: IsabelleText
+\<^descr>               code: 0x0027a7  group: control  font: IsabelleText
+#\<^emph>                code: 0x002217  group: control  font: IsabelleText
+\<^bold>                code: 0x002759  group: control  font: IsabelleText
 \<^sub>                 code: 0x0021e9  group: control  font: IsabelleText
 \<^sup>                 code: 0x0021e7  group: control  font: IsabelleText
-\<^bold>                code: 0x002759  group: control  font: IsabelleText
 \<^bsub>                code: 0x0021d8  group: control_block  font: IsabelleText  abbrev: =_(
 \<^esub>                code: 0x0021d9  group: control_block  font: IsabelleText  abbrev: =_)
 \<^bsup>                code: 0x0021d7  group: control_block  font: IsabelleText  abbrev: =^(
 \<^esup>                code: 0x0021d6  group: control_block  font: IsabelleText  abbrev: =^)
-\<^smallskip>           code: 0x002508  group: control
-\<^medskip>             code: 0x002509  group: control
-\<^bigskip>             code: 0x002501  group: control
-\<^item>                code: 0x0025aa  group: control
-\<^enum>                code: 0x0025b8  group: control
--- a/lib/fonts/IsabelleText.sfd	Sat Oct 17 13:18:43 2015 +0200
+++ b/lib/fonts/IsabelleText.sfd	Sat Oct 17 21:42:18 2015 +0200
@@ -19,7 +19,7 @@
 OS2_WeightWidthSlopeOnly: 0
 OS2_UseTypoMetrics: 1
 CreationTime: 1050361371
-ModificationTime: 1444656719
+ModificationTime: 1444823673
 PfmFamily: 17
 TTFWeight: 400
 TTFWidth: 5
@@ -2241,11 +2241,11 @@
 DisplaySize: -96
 AntiAlias: 1
 FitToEm: 1
-WinInfo: 9432 18 16
+WinInfo: 10062 18 16
 BeginPrivate: 0
 EndPrivate
 TeXData: 1 0 0 631296 315648 210432 572416 -1048576 210432 783286 444596 497025 792723 393216 433062 380633 303038 157286 324010 404750 52429 2506097 1059062 262144
-BeginChars: 1114189 1389
+BeginChars: 1114189 1392
 
 StartChar: u10000
 Encoding: 65536 65536 0
@@ -17003,16 +17003,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 60 89 N 1 0 0 1 0 0 2
 EndChar
@@ -18983,14 +18983,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 54 83 N 1 0 0 1 0 0 2
 EndChar
@@ -19012,14 +19012,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 61 90 N 1 0 0 1 0 0 2
 EndChar
@@ -19986,17 +19986,17 @@
 LayerCount: 2
 Fore
 SplineSet
-353 1901 m 5
- 472 1901 l 5
- 484 1847 484 1847 534 1818.5 c 4
- 584 1790 584 1790 666 1790 c 4
- 749 1790 749 1790 797.5 1817.5 c 4
- 846 1845 846 1845 861 1901 c 5
- 980 1901 l 5
- 969 1782 969 1782 889.5 1720.5 c 4
- 810 1659 810 1659 666 1659 c 260
- 522 1659 522 1659 443 1720 c 4
- 364 1781 364 1781 353 1901 c 5
+353 1901 m 5,0,-1
+ 472 1901 l 5,1,2
+ 484 1847 484 1847 534 1818.5 c 4,3,4
+ 584 1790 584 1790 666 1790 c 4,5,6
+ 749 1790 749 1790 797.5 1817.5 c 4,7,8
+ 846 1845 846 1845 861 1901 c 5,9,-1
+ 980 1901 l 5,10,11
+ 969 1782 969 1782 889.5 1720.5 c 4,12,13
+ 810 1659 810 1659 666 1659 c 260,14,15
+ 522 1659 522 1659 443 1720 c 4,16,17
+ 364 1781 364 1781 353 1901 c 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -20008,17 +20008,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 74 103 N 1 0 0 1 0 0 2
 EndChar
@@ -20030,11 +20030,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1872 m 5
- 718 1872 l 5
- 718 1667 l 5
- 513 1667 l 5
- 513 1872 l 5
+513 1872 m 5,0,-1
+ 718 1872 l 5,1,-1
+ 718 1667 l 5,2,-1
+ 513 1667 l 5,3,-1
+ 513 1872 l 5,0,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -20046,21 +20046,21 @@
 LayerCount: 2
 Fore
 SplineSet
-700 0 m 5
- 756 -62 756 -62 782.5 -114.5 c 4
- 809 -167 809 -167 809 -215 c 4
- 809 -304 809 -304 749 -349.5 c 4
- 689 -395 689 -395 571 -395 c 4
- 526 -395 526 -395 482.5 -389 c 4
- 439 -383 439 -383 395 -371 c 5
- 395 -240 l 5
- 429 -257 429 -257 466.5 -264.5 c 4
- 504 -272 504 -272 551 -272 c 4
- 609 -272 609 -272 639.5 -248 c 4
- 670 -224 670 -224 670 -178 c 4
- 670 -148 670 -148 648 -104.5 c 4
- 626 -61 626 -61 582 0 c 5
- 700 0 l 5
+700 0 m 5,0,1
+ 756 -62 756 -62 782.5 -114.5 c 4,2,3
+ 809 -167 809 -167 809 -215 c 4,4,5
+ 809 -304 809 -304 749 -349.5 c 4,6,7
+ 689 -395 689 -395 571 -395 c 4,8,9
+ 526 -395 526 -395 482.5 -389 c 4,10,11
+ 439 -383 439 -383 395 -371 c 5,12,-1
+ 395 -240 l 5,13,14
+ 429 -257 429 -257 466.5 -264.5 c 4,15,16
+ 504 -272 504 -272 551 -272 c 4,17,18
+ 609 -272 609 -272 639.5 -248 c 4,19,20
+ 670 -224 670 -224 670 -178 c 4,21,22
+ 670 -148 670 -148 648 -104.5 c 4,23,24
+ 626 -61 626 -61 582 0 c 5,25,-1
+ 700 0 l 5,0,1
 EndSplineSet
 Refer: 54 83 N 1 0 0 1 0 0 2
 EndChar
@@ -20072,21 +20072,21 @@
 LayerCount: 2
 Fore
 SplineSet
-700 0 m 5
- 756 -62 756 -62 782.5 -114.5 c 4
- 809 -167 809 -167 809 -215 c 4
- 809 -304 809 -304 749 -349.5 c 4
- 689 -395 689 -395 571 -395 c 4
- 526 -395 526 -395 482.5 -389 c 4
- 439 -383 439 -383 395 -371 c 5
- 395 -240 l 5
- 429 -257 429 -257 466.5 -264.5 c 4
- 504 -272 504 -272 551 -272 c 4
- 609 -272 609 -272 639.5 -248 c 4
- 670 -224 670 -224 670 -178 c 4
- 670 -148 670 -148 648 -104.5 c 4
- 626 -61 626 -61 582 0 c 5
- 700 0 l 5
+700 0 m 5,0,1
+ 756 -62 756 -62 782.5 -114.5 c 4,2,3
+ 809 -167 809 -167 809 -215 c 4,4,5
+ 809 -304 809 -304 749 -349.5 c 4,6,7
+ 689 -395 689 -395 571 -395 c 4,8,9
+ 526 -395 526 -395 482.5 -389 c 4,10,11
+ 439 -383 439 -383 395 -371 c 5,12,-1
+ 395 -240 l 5,13,14
+ 429 -257 429 -257 466.5 -264.5 c 4,15,16
+ 504 -272 504 -272 551 -272 c 4,17,18
+ 609 -272 609 -272 639.5 -248 c 4,19,20
+ 670 -224 670 -224 670 -178 c 4,21,22
+ 670 -148 670 -148 648 -104.5 c 4,23,24
+ 626 -61 626 -61 582 0 c 5,25,-1
+ 700 0 l 5,0,1
 EndSplineSet
 Refer: 86 115 N 1 0 0 1 0 0 2
 EndChar
@@ -20098,11 +20098,11 @@
 LayerCount: 2
 Fore
 SplineSet
-762 1899 m 5
- 948 1899 l 5
- 719 1635 l 5
- 565 1635 l 5
- 762 1899 l 5
+762 1899 m 5,0,-1
+ 948 1899 l 5,1,-1
+ 719 1635 l 5,2,-1
+ 565 1635 l 5,3,-1
+ 762 1899 l 5,0,-1
 EndSplineSet
 Refer: 38 67 N 1 0 0 1 0 0 2
 EndChar
@@ -20124,14 +20124,14 @@
 LayerCount: 2
 Fore
 SplineSet
-612 1635 m 5
- 401 1901 l 5
- 541 1901 l 5
- 706 1723 l 5
- 872 1901 l 5
- 1012 1901 l 5
- 801 1635 l 5
- 612 1635 l 5
+612 1635 m 5,0,-1
+ 401 1901 l 5,1,-1
+ 541 1901 l 5,2,-1
+ 706 1723 l 5,3,-1
+ 872 1901 l 5,4,-1
+ 1012 1901 l 5,5,-1
+ 801 1635 l 5,6,-1
+ 612 1635 l 5,0,-1
 EndSplineSet
 Refer: 38 67 N 1 0 0 1 0 0 2
 EndChar
@@ -45112,11 +45112,11 @@
 LayerCount: 2
 Fore
 SplineSet
-561 1899 m 5
- 758 1635 l 5
- 604 1635 l 5
- 377 1899 l 5
- 561 1899 l 5
+561 1899 m 5,0,-1
+ 758 1635 l 5,1,-1
+ 604 1635 l 5,2,-1
+ 377 1899 l 5,3,-1
+ 561 1899 l 5,0,-1
 EndSplineSet
 Refer: 669 1045 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45129,16 +45129,16 @@
 LayerCount: 2
 Fore
 SplineSet
-375 1870 m 5
- 578 1870 l 5
- 578 1667 l 5
- 375 1667 l 5
- 375 1870 l 5
-767 1870 m 5
- 969 1870 l 5
- 969 1667 l 5
- 767 1667 l 5
- 767 1870 l 5
+375 1870 m 5,0,-1
+ 578 1870 l 5,1,-1
+ 578 1667 l 5,2,-1
+ 375 1667 l 5,3,-1
+ 375 1870 l 5,0,-1
+767 1870 m 5,4,-1
+ 969 1870 l 5,5,-1
+ 969 1667 l 5,6,-1
+ 767 1667 l 5,7,-1
+ 767 1870 l 5,4,-1
 EndSplineSet
 Refer: 669 1045 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45194,11 +45194,11 @@
 LayerCount: 2
 Fore
 SplineSet
-744 1899 m 5
- 930 1899 l 5
- 701 1635 l 5
- 547 1635 l 5
- 744 1899 l 5
+744 1899 m 5,0,-1
+ 930 1899 l 5,1,-1
+ 701 1635 l 5,2,-1
+ 547 1635 l 5,3,-1
+ 744 1899 l 5,0,-1
 EndSplineSet
 Refer: 667 1043 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45260,16 +45260,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 654 1030 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45404,11 +45404,11 @@
 LayerCount: 2
 Fore
 SplineSet
-699 1899 m 5
- 885 1899 l 5
- 656 1635 l 5
- 502 1635 l 5
- 699 1899 l 5
+699 1899 m 5,0,-1
+ 885 1899 l 5,1,-1
+ 656 1635 l 5,2,-1
+ 502 1635 l 5,3,-1
+ 699 1899 l 5,0,-1
 EndSplineSet
 Refer: 674 1050 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45421,11 +45421,11 @@
 LayerCount: 2
 Fore
 SplineSet
-561 1899 m 5
- 758 1635 l 5
- 604 1635 l 5
- 377 1899 l 5
- 561 1899 l 5
+561 1899 m 5,0,-1
+ 758 1635 l 5,1,-1
+ 604 1635 l 5,2,-1
+ 377 1899 l 5,3,-1
+ 561 1899 l 5,0,-1
 EndSplineSet
 Refer: 672 1048 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45438,17 +45438,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 683 1059 N 1 0 0 1 0 0 2
 Validated: 1
@@ -45663,17 +45663,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 672 1048 N 1 0 0 1 0 0 2
 Validated: 1
@@ -46418,17 +46418,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 704 1080 N 1 0 0 1 0 0 2
 Validated: 1
@@ -47245,17 +47245,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 715 1091 N 1 0 0 1 0 0 2
 Validated: 1
@@ -47718,21 +47718,21 @@
 LayerCount: 2
 Fore
 SplineSet
-627 0 m 5
- 683 -62 683 -62 709.5 -114.5 c 4
- 736 -167 736 -167 736 -215 c 4
- 736 -304 736 -304 676 -349.5 c 4
- 616 -395 616 -395 498 -395 c 4
- 453 -395 453 -395 409.5 -389 c 4
- 366 -383 366 -383 322 -371 c 5
- 322 -240 l 5
- 356 -257 356 -257 393.5 -264.5 c 4
- 431 -272 431 -272 478 -272 c 4
- 536 -272 536 -272 566.5 -248 c 4
- 597 -224 597 -224 597 -178 c 4
- 597 -148 597 -148 575 -104.5 c 4
- 553 -61 553 -61 509 0 c 5
- 627 0 l 5
+627 0 m 5,0,1
+ 683 -62 683 -62 709.5 -114.5 c 4,2,3
+ 736 -167 736 -167 736 -215 c 4,4,5
+ 736 -304 736 -304 676 -349.5 c 4,6,7
+ 616 -395 616 -395 498 -395 c 4,8,9
+ 453 -395 453 -395 409.5 -389 c 4,10,11
+ 366 -383 366 -383 322 -371 c 5,12,-1
+ 322 -240 l 5,13,14
+ 356 -257 356 -257 393.5 -264.5 c 4,15,16
+ 431 -272 431 -272 478 -272 c 4,17,18
+ 536 -272 536 -272 566.5 -248 c 4,19,20
+ 597 -224 597 -224 597 -178 c 4,21,22
+ 597 -148 597 -148 575 -104.5 c 4,23,24
+ 553 -61 553 -61 509 0 c 5,25,-1
+ 627 0 l 5,0,1
 EndSplineSet
 Refer: 671 1047 N 1 0 0 1 0 0 2
 Validated: 5
@@ -47745,21 +47745,21 @@
 LayerCount: 2
 Fore
 SplineSet
-633 0 m 5
- 689 -62 689 -62 715.5 -114.5 c 4
- 742 -167 742 -167 742 -215 c 4
- 742 -304 742 -304 682 -349.5 c 4
- 622 -395 622 -395 504 -395 c 4
- 459 -395 459 -395 415.5 -389 c 4
- 372 -383 372 -383 328 -371 c 5
- 328 -240 l 5
- 362 -257 362 -257 399.5 -264.5 c 4
- 437 -272 437 -272 484 -272 c 4
- 542 -272 542 -272 572.5 -248 c 4
- 603 -224 603 -224 603 -178 c 4
- 603 -148 603 -148 581 -104.5 c 4
- 559 -61 559 -61 515 0 c 5
- 633 0 l 5
+633 0 m 5,0,1
+ 689 -62 689 -62 715.5 -114.5 c 4,2,3
+ 742 -167 742 -167 742 -215 c 4,4,5
+ 742 -304 742 -304 682 -349.5 c 4,6,7
+ 622 -395 622 -395 504 -395 c 4,8,9
+ 459 -395 459 -395 415.5 -389 c 4,10,11
+ 372 -383 372 -383 328 -371 c 5,12,-1
+ 328 -240 l 5,13,14
+ 362 -257 362 -257 399.5 -264.5 c 4,15,16
+ 437 -272 437 -272 484 -272 c 4,17,18
+ 542 -272 542 -272 572.5 -248 c 4,19,20
+ 603 -224 603 -224 603 -178 c 4,21,22
+ 603 -148 603 -148 581 -104.5 c 4,23,24
+ 559 -61 559 -61 515 0 c 5,25,-1
+ 633 0 l 5,0,1
 EndSplineSet
 Refer: 703 1079 N 1 0 0 1 0 0 2
 Validated: 5
@@ -48006,21 +48006,21 @@
 LayerCount: 2
 Fore
 SplineSet
-800 0 m 5
- 856 -62 856 -62 882.5 -114.5 c 4
- 909 -167 909 -167 909 -215 c 4
- 909 -304 909 -304 849 -349.5 c 4
- 789 -395 789 -395 671 -395 c 4
- 626 -395 626 -395 582.5 -389 c 4
- 539 -383 539 -383 495 -371 c 5
- 495 -240 l 5
- 529 -257 529 -257 566.5 -264.5 c 4
- 604 -272 604 -272 651 -272 c 4
- 709 -272 709 -272 739.5 -248 c 4
- 770 -224 770 -224 770 -178 c 4
- 770 -148 770 -148 748 -104.5 c 4
- 726 -61 726 -61 682 0 c 5
- 800 0 l 5
+800 0 m 5,0,1
+ 856 -62 856 -62 882.5 -114.5 c 4,2,3
+ 909 -167 909 -167 909 -215 c 4,4,5
+ 909 -304 909 -304 849 -349.5 c 4,6,7
+ 789 -395 789 -395 671 -395 c 4,8,9
+ 626 -395 626 -395 582.5 -389 c 4,10,11
+ 539 -383 539 -383 495 -371 c 5,12,-1
+ 495 -240 l 5,13,14
+ 529 -257 529 -257 566.5 -264.5 c 4,15,16
+ 604 -272 604 -272 651 -272 c 4,17,18
+ 709 -272 709 -272 739.5 -248 c 4,19,20
+ 770 -224 770 -224 770 -178 c 4,21,22
+ 770 -148 770 -148 748 -104.5 c 4,23,24
+ 726 -61 726 -61 682 0 c 5,25,-1
+ 800 0 l 5,0,1
 EndSplineSet
 Refer: 681 1057 N 1 0 0 1 0 0 2
 Validated: 5
@@ -48033,21 +48033,21 @@
 LayerCount: 2
 Fore
 SplineSet
-804 0 m 5
- 860 -62 860 -62 886.5 -114.5 c 4
- 913 -167 913 -167 913 -215 c 4
- 913 -304 913 -304 853 -349.5 c 4
- 793 -395 793 -395 675 -395 c 4
- 630 -395 630 -395 586.5 -389 c 4
- 543 -383 543 -383 499 -371 c 5
- 499 -240 l 5
- 533 -257 533 -257 570.5 -264.5 c 4
- 608 -272 608 -272 655 -272 c 4
- 713 -272 713 -272 743.5 -248 c 4
- 774 -224 774 -224 774 -178 c 4
- 774 -148 774 -148 752 -104.5 c 4
- 730 -61 730 -61 686 0 c 5
- 804 0 l 5
+804 0 m 5,0,1
+ 860 -62 860 -62 886.5 -114.5 c 4,2,3
+ 913 -167 913 -167 913 -215 c 4,4,5
+ 913 -304 913 -304 853 -349.5 c 4,6,7
+ 793 -395 793 -395 675 -395 c 4,8,9
+ 630 -395 630 -395 586.5 -389 c 4,10,11
+ 543 -383 543 -383 499 -371 c 5,12,-1
+ 499 -240 l 5,13,14
+ 533 -257 533 -257 570.5 -264.5 c 4,15,16
+ 608 -272 608 -272 655 -272 c 4,17,18
+ 713 -272 713 -272 743.5 -248 c 4,19,20
+ 774 -224 774 -224 774 -178 c 4,21,22
+ 774 -148 774 -148 752 -104.5 c 4,23,24
+ 730 -61 730 -61 686 0 c 5,25,-1
+ 804 0 l 5,0,1
 EndSplineSet
 Refer: 713 1089 N 1 0 0 1 0 0 2
 Validated: 5
@@ -48375,17 +48375,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 670 1046 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48398,17 +48398,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 702 1078 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48680,17 +48680,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 664 1040 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48703,17 +48703,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 696 1072 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48726,16 +48726,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 664 1040 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48779,17 +48779,17 @@
 LayerCount: 2
 Fore
 SplineSet
-321 1901 m 5
- 440 1901 l 5
- 452 1847 452 1847 502 1818.5 c 4
- 552 1790 552 1790 634 1790 c 4
- 717 1790 717 1790 765.5 1817.5 c 4
- 814 1845 814 1845 829 1901 c 5
- 948 1901 l 5
- 937 1782 937 1782 857.5 1720.5 c 4
- 778 1659 778 1659 634 1659 c 260
- 490 1659 490 1659 411 1720 c 4
- 332 1781 332 1781 321 1901 c 5
+321 1901 m 5,0,-1
+ 440 1901 l 5,1,2
+ 452 1847 452 1847 502 1818.5 c 4,3,4
+ 552 1790 552 1790 634 1790 c 4,5,6
+ 717 1790 717 1790 765.5 1817.5 c 4,7,8
+ 814 1845 814 1845 829 1901 c 5,9,-1
+ 948 1901 l 5,10,11
+ 937 1782 937 1782 857.5 1720.5 c 4,12,13
+ 778 1659 778 1659 634 1659 c 260,14,15
+ 490 1659 490 1659 411 1720 c 4,16,17
+ 332 1781 332 1781 321 1901 c 5,0,-1
 EndSplineSet
 Refer: 669 1045 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48802,17 +48802,17 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1608 m 5
- 436 1608 l 5
- 447 1532 447 1532 495 1495 c 4
- 543 1458 543 1458 630 1458 c 4
- 715 1458 715 1458 763 1495 c 4
- 811 1532 811 1532 825 1608 c 5
- 944 1608 l 5
- 933 1465 933 1465 854 1393 c 4
- 775 1321 775 1321 630 1321 c 4
- 486 1321 486 1321 407 1393 c 4
- 328 1465 328 1465 317 1608 c 5
+317 1608 m 5,0,-1
+ 436 1608 l 5,1,2
+ 447 1532 447 1532 495 1495 c 4,3,4
+ 543 1458 543 1458 630 1458 c 4,5,6
+ 715 1458 715 1458 763 1495 c 4,7,8
+ 811 1532 811 1532 825 1608 c 5,9,-1
+ 944 1608 l 5,10,11
+ 933 1465 933 1465 854 1393 c 4,12,13
+ 775 1321 775 1321 630 1321 c 4,14,15
+ 486 1321 486 1321 407 1393 c 4,16,17
+ 328 1465 328 1465 317 1608 c 5,0,-1
 EndSplineSet
 Refer: 701 1077 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48893,16 +48893,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 828 1240 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48926,16 +48926,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 670 1046 N 1 0 0 1 0 0 2
 Validated: 1
@@ -48959,16 +48959,16 @@
 LayerCount: 2
 Fore
 SplineSet
-302 1870 m 5
- 505 1870 l 5
- 505 1667 l 5
- 302 1667 l 5
- 302 1870 l 5
-694 1870 m 5
- 896 1870 l 5
- 896 1667 l 5
- 694 1667 l 5
- 694 1870 l 5
+302 1870 m 5,0,-1
+ 505 1870 l 5,1,-1
+ 505 1667 l 5,2,-1
+ 302 1667 l 5,3,-1
+ 302 1870 l 5,0,-1
+694 1870 m 5,4,-1
+ 896 1870 l 5,5,-1
+ 896 1667 l 5,6,-1
+ 694 1667 l 5,7,-1
+ 694 1870 l 5,4,-1
 EndSplineSet
 Refer: 671 1047 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49060,11 +49060,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 672 1048 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49077,11 +49077,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 704 1080 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49094,16 +49094,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 672 1048 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49127,16 +49127,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 678 1054 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49224,16 +49224,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 844 1256 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49257,16 +49257,16 @@
 LayerCount: 2
 Fore
 SplineSet
-286 1870 m 5
- 489 1870 l 5
- 489 1667 l 5
- 286 1667 l 5
- 286 1870 l 5
-678 1870 m 5
- 880 1870 l 5
- 880 1667 l 5
- 678 1667 l 5
- 678 1870 l 5
+286 1870 m 5,0,-1
+ 489 1870 l 5,1,-1
+ 489 1667 l 5,2,-1
+ 286 1667 l 5,3,-1
+ 286 1870 l 5,0,-1
+678 1870 m 5,4,-1
+ 880 1870 l 5,5,-1
+ 880 1667 l 5,6,-1
+ 678 1667 l 5,7,-1
+ 678 1870 l 5,4,-1
 EndSplineSet
 Refer: 693 1069 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49290,11 +49290,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 683 1059 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49307,11 +49307,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 715 1091 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49324,16 +49324,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 683 1059 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49357,16 +49357,16 @@
 LayerCount: 2
 Fore
 SplineSet
-860 1899 m 5
- 1046 1899 l 5
- 817 1635 l 5
- 663 1635 l 5
- 860 1899 l 5
-541 1899 m 5
- 727 1899 l 5
- 498 1635 l 5
- 344 1635 l 5
- 541 1899 l 5
+860 1899 m 5,0,-1
+ 1046 1899 l 5,1,-1
+ 817 1635 l 5,2,-1
+ 663 1635 l 5,3,-1
+ 860 1899 l 5,0,-1
+541 1899 m 5,4,-1
+ 727 1899 l 5,5,-1
+ 498 1635 l 5,6,-1
+ 344 1635 l 5,7,-1
+ 541 1899 l 5,4,-1
 EndSplineSet
 Refer: 683 1059 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49379,16 +49379,16 @@
 LayerCount: 2
 Fore
 SplineSet
-535 1638 m 5
- 705 1638 l 5
- 481 1262 l 5
- 344 1262 l 5
- 535 1638 l 5
-868 1638 m 5
- 1047 1638 l 5
- 799 1262 l 5
- 664 1262 l 5
- 868 1638 l 5
+535 1638 m 5,0,-1
+ 705 1638 l 5,1,-1
+ 481 1262 l 5,2,-1
+ 344 1262 l 5,3,-1
+ 535 1638 l 5,0,-1
+868 1638 m 5,4,-1
+ 1047 1638 l 5,5,-1
+ 799 1262 l 5,6,-1
+ 664 1262 l 5,7,-1
+ 868 1638 l 5,4,-1
 EndSplineSet
 Refer: 715 1091 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49401,16 +49401,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 687 1063 N 1 0 0 1 0 0 2
 Validated: 1
@@ -49478,16 +49478,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1870 m 5
- 522 1870 l 5
- 522 1667 l 5
- 319 1667 l 5
- 319 1870 l 5
-711 1870 m 5
- 913 1870 l 5
- 913 1667 l 5
- 711 1667 l 5
- 711 1870 l 5
+319 1870 m 5,0,-1
+ 522 1870 l 5,1,-1
+ 522 1667 l 5,2,-1
+ 319 1667 l 5,3,-1
+ 319 1870 l 5,0,-1
+711 1870 m 5,4,-1
+ 913 1870 l 5,5,-1
+ 913 1667 l 5,6,-1
+ 711 1667 l 5,7,-1
+ 711 1870 l 5,4,-1
 EndSplineSet
 Refer: 691 1067 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51046,11 +51046,11 @@
 LayerCount: 2
 Fore
 SplineSet
-825 -200 m 5
- 975 -200 l 5
- 975 -350 l 5
- 825 -350 l 5
- 825 -200 l 5
+825 -200 m 5,0,-1
+ 975 -200 l 5,1,-1
+ 975 -350 l 5,2,-1
+ 825 -350 l 5,3,-1
+ 825 -200 l 5,0,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51063,16 +51063,16 @@
 LayerCount: 2
 Fore
 SplineSet
-500 1050 m 5
- 650 1050 l 5
- 650 900 l 5
- 500 900 l 5
- 500 1050 l 5
-250 1050 m 5
- 400 1050 l 5
- 400 900 l 5
- 250 900 l 5
- 250 1050 l 5
+500 1050 m 5,0,-1
+ 650 1050 l 5,1,-1
+ 650 900 l 5,2,-1
+ 500 900 l 5,3,-1
+ 500 1050 l 5,0,-1
+250 1050 m 5,4,-1
+ 400 1050 l 5,5,-1
+ 400 900 l 5,6,-1
+ 250 900 l 5,7,-1
+ 250 1050 l 5,4,-1
 EndSplineSet
 Refer: 957 1607 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51085,16 +51085,16 @@
 LayerCount: 2
 Fore
 SplineSet
-950 800 m 5
- 1100 800 l 5
- 1100 650 l 5
- 950 650 l 5
- 950 800 l 5
-700 800 m 5
- 850 800 l 5
- 850 650 l 5
- 700 650 l 5
- 700 800 l 5
+950 800 m 5,0,-1
+ 1100 800 l 5,1,-1
+ 1100 650 l 5,2,-1
+ 950 650 l 5,3,-1
+ 950 800 l 5,0,-1
+700 800 m 5,4,-1
+ 850 800 l 5,5,-1
+ 850 650 l 5,6,-1
+ 700 650 l 5,7,-1
+ 700 800 l 5,4,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51107,21 +51107,21 @@
 LayerCount: 2
 Fore
 SplineSet
-825 1050 m 5
- 975 1050 l 5
- 975 900 l 5
- 825 900 l 5
- 825 1050 l 5
-950 800 m 5
- 1100 800 l 5
- 1100 650 l 5
- 950 650 l 5
- 950 800 l 5
-700 800 m 5
- 850 800 l 5
- 850 650 l 5
- 700 650 l 5
- 700 800 l 5
+825 1050 m 5,0,-1
+ 975 1050 l 5,1,-1
+ 975 900 l 5,2,-1
+ 825 900 l 5,3,-1
+ 825 1050 l 5,0,-1
+950 800 m 5,4,-1
+ 1100 800 l 5,5,-1
+ 1100 650 l 5,6,-1
+ 950 650 l 5,7,-1
+ 950 800 l 5,4,-1
+700 800 m 5,8,-1
+ 850 800 l 5,9,-1
+ 850 650 l 5,10,-1
+ 700 650 l 5,11,-1
+ 700 800 l 5,8,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51134,11 +51134,11 @@
 LayerCount: 2
 Fore
 SplineSet
-775 175 m 5
- 925 175 l 5
- 925 25 l 5
- 775 25 l 5
- 775 175 l 5
+775 175 m 5,0,-1
+ 925 175 l 5,1,-1
+ 925 25 l 5,2,-1
+ 775 25 l 5,3,-1
+ 775 175 l 5,0,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51180,11 +51180,11 @@
 LayerCount: 2
 Fore
 SplineSet
-575 1200 m 5
- 725 1200 l 5
- 725 1050 l 5
- 575 1050 l 5
- 575 1200 l 5
+575 1200 m 5,0,-1
+ 725 1200 l 5,1,-1
+ 725 1050 l 5,2,-1
+ 575 1050 l 5,3,-1
+ 575 1200 l 5,0,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51223,11 +51223,11 @@
 LayerCount: 2
 Fore
 SplineSet
-325 1200 m 5
- 475 1200 l 5
- 475 1050 l 5
- 325 1050 l 5
- 325 1200 l 5
+325 1200 m 5,0,-1
+ 475 1200 l 5,1,-1
+ 475 1050 l 5,2,-1
+ 325 1050 l 5,3,-1
+ 325 1200 l 5,0,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51262,11 +51262,11 @@
 LayerCount: 2
 Fore
 SplineSet
-625 950 m 5
- 775 950 l 5
- 775 800 l 5
- 625 800 l 5
- 625 950 l 5
+625 950 m 5,0,-1
+ 775 950 l 5,1,-1
+ 775 800 l 5,2,-1
+ 625 800 l 5,3,-1
+ 625 950 l 5,0,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51327,21 +51327,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1375 1200 m 5
- 1525 1200 l 5
- 1525 1050 l 5
- 1375 1050 l 5
- 1375 1200 l 5
-1500 950 m 5
- 1650 950 l 5
- 1650 800 l 5
- 1500 800 l 5
- 1500 950 l 5
-1250 950 m 5
- 1400 950 l 5
- 1400 800 l 5
- 1250 800 l 5
- 1250 950 l 5
+1375 1200 m 5,0,-1
+ 1525 1200 l 5,1,-1
+ 1525 1050 l 5,2,-1
+ 1375 1050 l 5,3,-1
+ 1375 1200 l 5,0,-1
+1500 950 m 5,4,-1
+ 1650 950 l 5,5,-1
+ 1650 800 l 5,6,-1
+ 1500 800 l 5,7,-1
+ 1500 950 l 5,4,-1
+1250 950 m 5,8,-1
+ 1400 950 l 5,9,-1
+ 1400 800 l 5,10,-1
+ 1250 800 l 5,11,-1
+ 1250 950 l 5,8,-1
 EndSplineSet
 Refer: 942 1587 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51401,11 +51401,11 @@
 LayerCount: 2
 Fore
 SplineSet
-1275 950 m 5
- 1425 950 l 5
- 1425 800 l 5
- 1275 800 l 5
- 1275 950 l 5
+1275 950 m 5,0,-1
+ 1425 950 l 5,1,-1
+ 1425 800 l 5,2,-1
+ 1275 800 l 5,3,-1
+ 1275 950 l 5,0,-1
 EndSplineSet
 Refer: 944 1589 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51451,11 +51451,11 @@
 LayerCount: 2
 Fore
 SplineSet
-975 1000 m 5
- 1125 1000 l 5
- 1125 850 l 5
- 975 850 l 5
- 975 1000 l 5
+975 1000 m 5,0,-1
+ 1125 1000 l 5,1,-1
+ 1125 850 l 5,2,-1
+ 975 850 l 5,3,-1
+ 975 1000 l 5,0,-1
 EndSplineSet
 Refer: 946 1591 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51503,11 +51503,11 @@
 LayerCount: 2
 Fore
 SplineSet
-375 1350 m 5
- 525 1350 l 5
- 525 1200 l 5
- 375 1200 l 5
- 375 1350 l 5
+375 1350 m 5,0,-1
+ 525 1350 l 5,1,-1
+ 525 1200 l 5,2,-1
+ 375 1200 l 5,3,-1
+ 375 1350 l 5,0,-1
 EndSplineSet
 Refer: 948 1593 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51584,16 +51584,16 @@
 LayerCount: 2
 Fore
 SplineSet
-1100 1300 m 5
- 1250 1300 l 5
- 1250 1150 l 5
- 1100 1150 l 5
- 1100 1300 l 5
-850 1300 m 5
- 1000 1300 l 5
- 1000 1150 l 5
- 850 1150 l 5
- 850 1300 l 5
+1100 1300 m 5,0,-1
+ 1250 1300 l 5,1,-1
+ 1250 1150 l 5,2,-1
+ 1100 1150 l 5,3,-1
+ 1100 1300 l 5,0,-1
+850 1300 m 5,4,-1
+ 1000 1300 l 5,5,-1
+ 1000 1150 l 5,6,-1
+ 850 1150 l 5,7,-1
+ 850 1300 l 5,4,-1
 EndSplineSet
 Refer: 989 1647 N 1 0 0 1 0 0 2
 Validated: 1
@@ -51855,16 +51855,16 @@
 LayerCount: 2
 Fore
 SplineSet
-750 -350 m 5
- 900 -350 l 5
- 900 -500 l 5
- 750 -500 l 5
- 750 -350 l 5
-500 -350 m 5
- 650 -350 l 5
- 650 -500 l 5
- 500 -500 l 5
- 500 -350 l 5
+750 -350 m 5,0,-1
+ 900 -350 l 5,1,-1
+ 900 -500 l 5,2,-1
+ 750 -500 l 5,3,-1
+ 750 -350 l 5,0,-1
+500 -350 m 5,4,-1
+ 650 -350 l 5,5,-1
+ 650 -500 l 5,6,-1
+ 500 -500 l 5,7,-1
+ 500 -350 l 5,4,-1
 EndSplineSet
 Refer: 959 1609 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52664,16 +52664,16 @@
 LayerCount: 2
 Fore
 SplineSet
-825 800 m 5
- 975 800 l 5
- 975 650 l 5
- 825 650 l 5
- 825 800 l 5
-825 1050 m 5
- 975 1050 l 5
- 975 900 l 5
- 825 900 l 5
- 825 1050 l 5
+825 800 m 5,0,-1
+ 975 800 l 5,1,-1
+ 975 650 l 5,2,-1
+ 825 650 l 5,3,-1
+ 825 800 l 5,0,-1
+825 1050 m 5,4,-1
+ 975 1050 l 5,5,-1
+ 975 900 l 5,6,-1
+ 825 900 l 5,7,-1
+ 825 1050 l 5,4,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52686,16 +52686,16 @@
 LayerCount: 2
 Fore
 SplineSet
-825 -350 m 5
- 975 -350 l 5
- 975 -500 l 5
- 825 -500 l 5
- 825 -350 l 5
-825 -100 m 5
- 975 -100 l 5
- 975 -250 l 5
- 825 -250 l 5
- 825 -100 l 5
+825 -350 m 5,0,-1
+ 975 -350 l 5,1,-1
+ 975 -500 l 5,2,-1
+ 825 -500 l 5,3,-1
+ 825 -350 l 5,0,-1
+825 -100 m 5,4,-1
+ 975 -100 l 5,5,-1
+ 975 -250 l 5,6,-1
+ 825 -250 l 5,7,-1
+ 825 -100 l 5,4,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52708,34 +52708,34 @@
 LayerCount: 2
 Fore
 SplineSet
-997 -148 m 4
- 997 -108 997 -108 969 -80 c 4
- 941 -51 941 -51 900 -51 c 4
- 859 -51 859 -51 831 -79 c 4
- 803 -107 803 -107 803 -148 c 4
- 803 -190 803 -190 831 -218 c 4
- 859 -245 859 -245 900 -245 c 4
- 941 -245 941 -245 969 -217 c 4
- 997 -189 997 -189 997 -148 c 4
-1119 -148 m 4
- 1119 -240 1119 -240 1056 -304 c 4
- 992 -368 992 -368 900 -368 c 4
- 808 -368 808 -368 745 -304 c 4
- 681 -240 681 -240 681 -148 c 4
- 681 -56 681 -56 745 8 c 4
- 808 71 808 71 900 71 c 4
- 992 71 992 71 1056 8 c 4
- 1119 -56 1119 -56 1119 -148 c 4
-950 800 m 5
- 1100 800 l 5
- 1100 650 l 5
- 950 650 l 5
- 950 800 l 5
-700 800 m 5
- 850 800 l 5
- 850 650 l 5
- 700 650 l 5
- 700 800 l 5
+997 -148 m 4,0,1
+ 997 -108 997 -108 969 -80 c 4,2,3
+ 941 -51 941 -51 900 -51 c 4,4,5
+ 859 -51 859 -51 831 -79 c 4,6,7
+ 803 -107 803 -107 803 -148 c 4,8,9
+ 803 -190 803 -190 831 -218 c 4,10,11
+ 859 -245 859 -245 900 -245 c 4,12,13
+ 941 -245 941 -245 969 -217 c 4,14,15
+ 997 -189 997 -189 997 -148 c 4,0,1
+1119 -148 m 4,16,17
+ 1119 -240 1119 -240 1056 -304 c 4,18,19
+ 992 -368 992 -368 900 -368 c 4,20,21
+ 808 -368 808 -368 745 -304 c 4,22,23
+ 681 -240 681 -240 681 -148 c 4,24,25
+ 681 -56 681 -56 745 8 c 4,26,27
+ 808 71 808 71 900 71 c 4,28,29
+ 992 71 992 71 1056 8 c 4,30,31
+ 1119 -56 1119 -56 1119 -148 c 4,16,17
+950 800 m 5,32,-1
+ 1100 800 l 5,33,-1
+ 1100 650 l 5,34,-1
+ 950 650 l 5,35,-1
+ 950 800 l 5,32,-1
+700 800 m 5,36,-1
+ 850 800 l 5,37,-1
+ 850 650 l 5,38,-1
+ 700 650 l 5,39,-1
+ 700 800 l 5,36,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 5
@@ -52748,21 +52748,21 @@
 LayerCount: 2
 Fore
 SplineSet
-825 700 m 5
- 975 700 l 5
- 975 550 l 5
- 825 550 l 5
- 825 700 l 5
-950 950 m 5
- 1100 950 l 5
- 1100 800 l 5
- 950 800 l 5
- 950 950 l 5
-700 950 m 5
- 850 950 l 5
- 850 800 l 5
- 700 800 l 5
- 700 950 l 5
+825 700 m 5,0,-1
+ 975 700 l 5,1,-1
+ 975 550 l 5,2,-1
+ 825 550 l 5,3,-1
+ 825 700 l 5,0,-1
+950 950 m 5,4,-1
+ 1100 950 l 5,5,-1
+ 1100 800 l 5,6,-1
+ 950 800 l 5,7,-1
+ 950 950 l 5,4,-1
+700 950 m 5,8,-1
+ 850 950 l 5,9,-1
+ 850 800 l 5,10,-1
+ 700 800 l 5,11,-1
+ 700 950 l 5,8,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52775,21 +52775,21 @@
 LayerCount: 2
 Fore
 SplineSet
-825 -350 m 5
- 975 -350 l 5
- 975 -500 l 5
- 825 -500 l 5
- 825 -350 l 5
-950 -100 m 5
- 1100 -100 l 5
- 1100 -250 l 5
- 950 -250 l 5
- 950 -100 l 5
-700 -100 m 5
- 850 -100 l 5
- 850 -250 l 5
- 700 -250 l 5
- 700 -100 l 5
+825 -350 m 5,0,-1
+ 975 -350 l 5,1,-1
+ 975 -500 l 5,2,-1
+ 825 -500 l 5,3,-1
+ 825 -350 l 5,0,-1
+950 -100 m 5,4,-1
+ 1100 -100 l 5,5,-1
+ 1100 -250 l 5,6,-1
+ 950 -250 l 5,7,-1
+ 950 -100 l 5,4,-1
+700 -100 m 5,8,-1
+ 850 -100 l 5,9,-1
+ 850 -250 l 5,10,-1
+ 700 -250 l 5,11,-1
+ 700 -100 l 5,8,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52802,26 +52802,26 @@
 LayerCount: 2
 Fore
 SplineSet
-950 800 m 5
- 1100 800 l 5
- 1100 650 l 5
- 950 650 l 5
- 950 800 l 5
-950 1050 m 5
- 1100 1050 l 5
- 1100 900 l 5
- 950 900 l 5
- 950 1050 l 5
-700 800 m 5
- 850 800 l 5
- 850 650 l 5
- 700 650 l 5
- 700 800 l 5
-700 1050 m 5
- 850 1050 l 5
- 850 900 l 5
- 700 900 l 5
- 700 1050 l 5
+950 800 m 5,0,-1
+ 1100 800 l 5,1,-1
+ 1100 650 l 5,2,-1
+ 950 650 l 5,3,-1
+ 950 800 l 5,0,-1
+950 1050 m 5,4,-1
+ 1100 1050 l 5,5,-1
+ 1100 900 l 5,6,-1
+ 950 900 l 5,7,-1
+ 950 1050 l 5,4,-1
+700 800 m 5,8,-1
+ 850 800 l 5,9,-1
+ 850 650 l 5,10,-1
+ 700 650 l 5,11,-1
+ 700 800 l 5,8,-1
+700 1050 m 5,12,-1
+ 850 1050 l 5,13,-1
+ 850 900 l 5,14,-1
+ 700 900 l 5,15,-1
+ 700 1050 l 5,12,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52834,26 +52834,26 @@
 LayerCount: 2
 Fore
 SplineSet
-950 -350 m 5
- 1100 -350 l 5
- 1100 -500 l 5
- 950 -500 l 5
- 950 -350 l 5
-950 -100 m 5
- 1100 -100 l 5
- 1100 -250 l 5
- 950 -250 l 5
- 950 -100 l 5
-700 -350 m 5
- 850 -350 l 5
- 850 -500 l 5
- 700 -500 l 5
- 700 -350 l 5
-700 -100 m 5
- 850 -100 l 5
- 850 -250 l 5
- 700 -250 l 5
- 700 -100 l 5
+950 -350 m 5,0,-1
+ 1100 -350 l 5,1,-1
+ 1100 -500 l 5,2,-1
+ 950 -500 l 5,3,-1
+ 950 -350 l 5,0,-1
+950 -100 m 5,4,-1
+ 1100 -100 l 5,5,-1
+ 1100 -250 l 5,6,-1
+ 950 -250 l 5,7,-1
+ 950 -100 l 5,4,-1
+700 -350 m 5,8,-1
+ 850 -350 l 5,9,-1
+ 850 -500 l 5,10,-1
+ 700 -500 l 5,11,-1
+ 700 -350 l 5,8,-1
+700 -100 m 5,12,-1
+ 850 -100 l 5,13,-1
+ 850 -250 l 5,14,-1
+ 700 -250 l 5,15,-1
+ 700 -100 l 5,12,-1
 EndSplineSet
 Refer: 988 1646 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52877,16 +52877,16 @@
 LayerCount: 2
 Fore
 SplineSet
-575 1200 m 5
- 725 1200 l 5
- 725 1050 l 5
- 575 1050 l 5
- 575 1200 l 5
-575 1450 m 5
- 725 1450 l 5
- 725 1300 l 5
- 575 1300 l 5
- 575 1450 l 5
+575 1200 m 5,0,-1
+ 725 1200 l 5,1,-1
+ 725 1050 l 5,2,-1
+ 575 1050 l 5,3,-1
+ 575 1200 l 5,0,-1
+575 1450 m 5,4,-1
+ 725 1450 l 5,5,-1
+ 725 1300 l 5,6,-1
+ 575 1300 l 5,7,-1
+ 575 1450 l 5,4,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52899,16 +52899,16 @@
 LayerCount: 2
 Fore
 SplineSet
-900 175 m 5
- 1050 175 l 5
- 1050 25 l 5
- 900 25 l 5
- 900 175 l 5
-650 175 m 5
- 800 175 l 5
- 800 25 l 5
- 650 25 l 5
- 650 175 l 5
+900 175 m 5,0,-1
+ 1050 175 l 5,1,-1
+ 1050 25 l 5,2,-1
+ 900 25 l 5,3,-1
+ 900 175 l 5,0,-1
+650 175 m 5,4,-1
+ 800 175 l 5,5,-1
+ 800 25 l 5,6,-1
+ 650 25 l 5,7,-1
+ 650 175 l 5,4,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52921,16 +52921,16 @@
 LayerCount: 2
 Fore
 SplineSet
-775 75 m 5
- 925 75 l 5
- 925 -75 l 5
- 775 -75 l 5
- 775 75 l 5
-775 325 m 5
- 925 325 l 5
- 925 175 l 5
- 775 175 l 5
- 775 325 l 5
+775 75 m 5,0,-1
+ 925 75 l 5,1,-1
+ 925 -75 l 5,2,-1
+ 775 -75 l 5,3,-1
+ 775 75 l 5,0,-1
+775 325 m 5,4,-1
+ 925 325 l 5,5,-1
+ 925 175 l 5,6,-1
+ 775 175 l 5,7,-1
+ 775 325 l 5,4,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52943,21 +52943,21 @@
 LayerCount: 2
 Fore
 SplineSet
-575 1450 m 5
- 725 1450 l 5
- 725 1300 l 5
- 575 1300 l 5
- 575 1450 l 5
-700 1200 m 5
- 850 1200 l 5
- 850 1050 l 5
- 700 1050 l 5
- 700 1200 l 5
-450 1200 m 5
- 600 1200 l 5
- 600 1050 l 5
- 450 1050 l 5
- 450 1200 l 5
+575 1450 m 5,0,-1
+ 725 1450 l 5,1,-1
+ 725 1300 l 5,2,-1
+ 575 1300 l 5,3,-1
+ 575 1450 l 5,0,-1
+700 1200 m 5,4,-1
+ 850 1200 l 5,5,-1
+ 850 1050 l 5,6,-1
+ 700 1050 l 5,7,-1
+ 700 1200 l 5,4,-1
+450 1200 m 5,8,-1
+ 600 1200 l 5,9,-1
+ 600 1050 l 5,10,-1
+ 450 1050 l 5,11,-1
+ 450 1200 l 5,8,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52970,21 +52970,21 @@
 LayerCount: 2
 Fore
 SplineSet
-787 50 m 5
- 937 50 l 5
- 937 -100 l 5
- 787 -100 l 5
- 787 50 l 5
-912 300 m 5
- 1062 300 l 5
- 1062 150 l 5
- 912 150 l 5
- 912 300 l 5
-662 300 m 5
- 812 300 l 5
- 812 150 l 5
- 662 150 l 5
- 662 300 l 5
+787 50 m 5,0,-1
+ 937 50 l 5,1,-1
+ 937 -100 l 5,2,-1
+ 787 -100 l 5,3,-1
+ 787 50 l 5,0,-1
+912 300 m 5,4,-1
+ 1062 300 l 5,5,-1
+ 1062 150 l 5,6,-1
+ 912 150 l 5,7,-1
+ 912 300 l 5,4,-1
+662 300 m 5,8,-1
+ 812 300 l 5,9,-1
+ 812 150 l 5,10,-1
+ 662 150 l 5,11,-1
+ 662 300 l 5,8,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -52997,26 +52997,26 @@
 LayerCount: 2
 Fore
 SplineSet
-912 75 m 5
- 1062 75 l 5
- 1062 -75 l 5
- 912 -75 l 5
- 912 75 l 5
-912 325 m 5
- 1062 325 l 5
- 1062 175 l 5
- 912 175 l 5
- 912 325 l 5
-662 75 m 5
- 812 75 l 5
- 812 -75 l 5
- 662 -75 l 5
- 662 75 l 5
-662 325 m 5
- 812 325 l 5
- 812 175 l 5
- 662 175 l 5
- 662 325 l 5
+912 75 m 5,0,-1
+ 1062 75 l 5,1,-1
+ 1062 -75 l 5,2,-1
+ 912 -75 l 5,3,-1
+ 912 75 l 5,0,-1
+912 325 m 5,4,-1
+ 1062 325 l 5,5,-1
+ 1062 175 l 5,6,-1
+ 912 175 l 5,7,-1
+ 912 325 l 5,4,-1
+662 75 m 5,8,-1
+ 812 75 l 5,9,-1
+ 812 -75 l 5,10,-1
+ 662 -75 l 5,11,-1
+ 662 75 l 5,8,-1
+662 325 m 5,12,-1
+ 812 325 l 5,13,-1
+ 812 175 l 5,14,-1
+ 662 175 l 5,15,-1
+ 662 325 l 5,12,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53040,24 +53040,24 @@
 LayerCount: 2
 Fore
 SplineSet
-597 -148 m 4
- 597 -108 597 -108 569 -80 c 4
- 541 -51 541 -51 500 -51 c 4
- 459 -51 459 -51 431 -79 c 4
- 403 -107 403 -107 403 -148 c 4
- 403 -190 403 -190 431 -218 c 4
- 459 -245 459 -245 500 -245 c 4
- 541 -245 541 -245 569 -217 c 4
- 597 -189 597 -189 597 -148 c 4
-719 -148 m 4
- 719 -240 719 -240 656 -304 c 4
- 592 -368 592 -368 500 -368 c 4
- 408 -368 408 -368 345 -304 c 4
- 281 -240 281 -240 281 -148 c 4
- 281 -56 281 -56 345 8 c 4
- 408 71 408 71 500 71 c 4
- 592 71 592 71 656 8 c 4
- 719 -56 719 -56 719 -148 c 4
+597 -148 m 4,0,1
+ 597 -108 597 -108 569 -80 c 4,2,3
+ 541 -51 541 -51 500 -51 c 4,4,5
+ 459 -51 459 -51 431 -79 c 4,6,7
+ 403 -107 403 -107 403 -148 c 4,8,9
+ 403 -190 403 -190 431 -218 c 4,10,11
+ 459 -245 459 -245 500 -245 c 4,12,13
+ 541 -245 541 -245 569 -217 c 4,14,15
+ 597 -189 597 -189 597 -148 c 4,0,1
+719 -148 m 4,16,17
+ 719 -240 719 -240 656 -304 c 4,18,19
+ 592 -368 592 -368 500 -368 c 4,20,21
+ 408 -368 408 -368 345 -304 c 4,22,23
+ 281 -240 281 -240 281 -148 c 4,24,25
+ 281 -56 281 -56 345 8 c 4,26,27
+ 408 71 408 71 500 71 c 4,28,29
+ 592 71 592 71 656 8 c 4,30,31
+ 719 -56 719 -56 719 -148 c 4,16,17
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 5
@@ -53070,11 +53070,11 @@
 LayerCount: 2
 Fore
 SplineSet
-425 -200 m 5
- 575 -200 l 5
- 575 -350 l 5
- 425 -350 l 5
- 425 -200 l 5
+425 -200 m 5,0,-1
+ 575 -200 l 5,1,-1
+ 575 -350 l 5,2,-1
+ 425 -350 l 5,3,-1
+ 425 -200 l 5,0,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53087,11 +53087,11 @@
 LayerCount: 2
 Fore
 SplineSet
-425 -200 m 5
- 575 -200 l 5
- 575 -350 l 5
- 425 -350 l 5
- 425 -200 l 5
+425 -200 m 5,0,-1
+ 575 -200 l 5,1,-1
+ 575 -350 l 5,2,-1
+ 425 -350 l 5,3,-1
+ 425 -200 l 5,0,-1
 EndSplineSet
 Refer: 921 1557 N 1 0 0 1 -62 -250 2
 Refer: 938 1583 N 1 0 0 1 0 0 2
@@ -53105,16 +53105,16 @@
 LayerCount: 2
 Fore
 SplineSet
-500 1200 m 5
- 650 1200 l 5
- 650 1050 l 5
- 500 1050 l 5
- 500 1200 l 5
-250 1200 m 5
- 400 1200 l 5
- 400 1050 l 5
- 250 1050 l 5
- 250 1200 l 5
+500 1200 m 5,0,-1
+ 650 1200 l 5,1,-1
+ 650 1050 l 5,2,-1
+ 500 1050 l 5,3,-1
+ 500 1200 l 5,0,-1
+250 1200 m 5,4,-1
+ 400 1200 l 5,5,-1
+ 400 1050 l 5,6,-1
+ 250 1050 l 5,7,-1
+ 250 1200 l 5,4,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53127,16 +53127,16 @@
 LayerCount: 2
 Fore
 SplineSet
-549 -150 m 5
- 699 -150 l 5
- 699 -300 l 5
- 549 -300 l 5
- 549 -150 l 5
-299 -150 m 5
- 449 -150 l 5
- 449 -300 l 5
- 299 -300 l 5
- 299 -150 l 5
+549 -150 m 5,0,-1
+ 699 -150 l 5,1,-1
+ 699 -300 l 5,2,-1
+ 549 -300 l 5,3,-1
+ 549 -150 l 5,0,-1
+299 -150 m 5,4,-1
+ 449 -150 l 5,5,-1
+ 449 -300 l 5,6,-1
+ 299 -300 l 5,7,-1
+ 299 -150 l 5,4,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53149,21 +53149,21 @@
 LayerCount: 2
 Fore
 SplineSet
-375 1450 m 5
- 525 1450 l 5
- 525 1300 l 5
- 375 1300 l 5
- 375 1450 l 5
-500 1200 m 5
- 650 1200 l 5
- 650 1050 l 5
- 500 1050 l 5
- 500 1200 l 5
-250 1200 m 5
- 400 1200 l 5
- 400 1050 l 5
- 250 1050 l 5
- 250 1200 l 5
+375 1450 m 5,0,-1
+ 525 1450 l 5,1,-1
+ 525 1300 l 5,2,-1
+ 375 1300 l 5,3,-1
+ 375 1450 l 5,0,-1
+500 1200 m 5,4,-1
+ 650 1200 l 5,5,-1
+ 650 1050 l 5,6,-1
+ 500 1050 l 5,7,-1
+ 500 1200 l 5,4,-1
+250 1200 m 5,8,-1
+ 400 1200 l 5,9,-1
+ 400 1050 l 5,10,-1
+ 250 1050 l 5,11,-1
+ 250 1200 l 5,8,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53176,21 +53176,21 @@
 LayerCount: 2
 Fore
 SplineSet
-375 1150 m 5
- 525 1150 l 5
- 525 1000 l 5
- 375 1000 l 5
- 375 1150 l 5
-500 1400 m 5
- 650 1400 l 5
- 650 1250 l 5
- 500 1250 l 5
- 500 1400 l 5
-250 1400 m 5
- 400 1400 l 5
- 400 1250 l 5
- 250 1250 l 5
- 250 1400 l 5
+375 1150 m 5,0,-1
+ 525 1150 l 5,1,-1
+ 525 1000 l 5,2,-1
+ 375 1000 l 5,3,-1
+ 375 1150 l 5,0,-1
+500 1400 m 5,4,-1
+ 650 1400 l 5,5,-1
+ 650 1250 l 5,6,-1
+ 500 1250 l 5,7,-1
+ 500 1400 l 5,4,-1
+250 1400 m 5,8,-1
+ 400 1400 l 5,9,-1
+ 400 1250 l 5,10,-1
+ 250 1250 l 5,11,-1
+ 250 1400 l 5,8,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53203,26 +53203,26 @@
 LayerCount: 2
 Fore
 SplineSet
-500 1200 m 5
- 650 1200 l 5
- 650 1050 l 5
- 500 1050 l 5
- 500 1200 l 5
-500 1450 m 5
- 650 1450 l 5
- 650 1300 l 5
- 500 1300 l 5
- 500 1450 l 5
-250 1200 m 5
- 400 1200 l 5
- 400 1050 l 5
- 250 1050 l 5
- 250 1200 l 5
-250 1450 m 5
- 400 1450 l 5
- 400 1300 l 5
- 250 1300 l 5
- 250 1450 l 5
+500 1200 m 5,0,-1
+ 650 1200 l 5,1,-1
+ 650 1050 l 5,2,-1
+ 500 1050 l 5,3,-1
+ 500 1200 l 5,0,-1
+500 1450 m 5,4,-1
+ 650 1450 l 5,5,-1
+ 650 1300 l 5,6,-1
+ 500 1300 l 5,7,-1
+ 500 1450 l 5,4,-1
+250 1200 m 5,8,-1
+ 400 1200 l 5,9,-1
+ 400 1050 l 5,10,-1
+ 250 1050 l 5,11,-1
+ 250 1200 l 5,8,-1
+250 1450 m 5,12,-1
+ 400 1450 l 5,13,-1
+ 400 1300 l 5,14,-1
+ 250 1300 l 5,15,-1
+ 250 1450 l 5,12,-1
 EndSplineSet
 Refer: 938 1583 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53257,24 +53257,24 @@
 LayerCount: 2
 Fore
 SplineSet
-916 -280 m 4
- 916 -240 916 -240 888 -212 c 4
- 860 -183 860 -183 819 -183 c 4
- 778 -183 778 -183 750 -211 c 4
- 722 -239 722 -239 722 -280 c 4
- 722 -322 722 -322 750 -350 c 4
- 778 -377 778 -377 819 -377 c 4
- 860 -377 860 -377 888 -349 c 4
- 916 -321 916 -321 916 -280 c 4
-1038 -280 m 4
- 1038 -372 1038 -372 975 -436 c 4
- 911 -500 911 -500 819 -500 c 4
- 727 -500 727 -500 664 -436 c 4
- 600 -372 600 -372 600 -280 c 4
- 600 -188 600 -188 664 -124 c 4
- 727 -61 727 -61 819 -61 c 4
- 911 -61 911 -61 975 -124 c 4
- 1038 -188 1038 -188 1038 -280 c 4
+916 -280 m 4,0,1
+ 916 -240 916 -240 888 -212 c 4,2,3
+ 860 -183 860 -183 819 -183 c 4,4,5
+ 778 -183 778 -183 750 -211 c 4,6,7
+ 722 -239 722 -239 722 -280 c 4,8,9
+ 722 -322 722 -322 750 -350 c 4,10,11
+ 778 -377 778 -377 819 -377 c 4,12,13
+ 860 -377 860 -377 888 -349 c 4,14,15
+ 916 -321 916 -321 916 -280 c 4,0,1
+1038 -280 m 4,16,17
+ 1038 -372 1038 -372 975 -436 c 4,18,19
+ 911 -500 911 -500 819 -500 c 4,20,21
+ 727 -500 727 -500 664 -436 c 4,22,23
+ 600 -372 600 -372 600 -280 c 4,24,25
+ 600 -188 600 -188 664 -124 c 4,26,27
+ 727 -61 727 -61 819 -61 c 4,28,29
+ 911 -61 911 -61 975 -124 c 4,30,31
+ 1038 -188 1038 -188 1038 -280 c 4,16,17
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 5
@@ -53287,11 +53287,11 @@
 LayerCount: 2
 Fore
 SplineSet
-820 -290 m 5
- 970 -290 l 5
- 970 -440 l 5
- 820 -440 l 5
- 820 -290 l 5
+820 -290 m 5,0,-1
+ 970 -290 l 5,1,-1
+ 970 -440 l 5,2,-1
+ 820 -440 l 5,3,-1
+ 820 -290 l 5,0,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53315,16 +53315,16 @@
 LayerCount: 2
 Fore
 SplineSet
-180 190 m 5
- 330 190 l 5
- 330 40 l 5
- 180 40 l 5
- 180 190 l 5
-820 -290 m 5
- 970 -290 l 5
- 970 -440 l 5
- 820 -440 l 5
- 820 -290 l 5
+180 190 m 5,0,-1
+ 330 190 l 5,1,-1
+ 330 40 l 5,2,-1
+ 180 40 l 5,3,-1
+ 180 190 l 5,0,-1
+820 -290 m 5,4,-1
+ 970 -290 l 5,5,-1
+ 970 -440 l 5,6,-1
+ 820 -440 l 5,7,-1
+ 820 -290 l 5,4,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53337,16 +53337,16 @@
 LayerCount: 2
 Fore
 SplineSet
-750 950 m 5
- 900 950 l 5
- 900 800 l 5
- 750 800 l 5
- 750 950 l 5
-500 950 m 5
- 650 950 l 5
- 650 800 l 5
- 500 800 l 5
- 500 950 l 5
+750 950 m 5,0,-1
+ 900 950 l 5,1,-1
+ 900 800 l 5,2,-1
+ 750 800 l 5,3,-1
+ 750 950 l 5,0,-1
+500 950 m 5,4,-1
+ 650 950 l 5,5,-1
+ 650 800 l 5,6,-1
+ 500 800 l 5,7,-1
+ 500 950 l 5,4,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53359,21 +53359,21 @@
 LayerCount: 2
 Fore
 SplineSet
-625 1200 m 5
- 775 1200 l 5
- 775 1050 l 5
- 625 1050 l 5
- 625 1200 l 5
-750 950 m 5
- 900 950 l 5
- 900 800 l 5
- 750 800 l 5
- 750 950 l 5
-500 950 m 5
- 650 950 l 5
- 650 800 l 5
- 500 800 l 5
- 500 950 l 5
+625 1200 m 5,0,-1
+ 775 1200 l 5,1,-1
+ 775 1050 l 5,2,-1
+ 625 1050 l 5,3,-1
+ 625 1200 l 5,0,-1
+750 950 m 5,4,-1
+ 900 950 l 5,5,-1
+ 900 800 l 5,6,-1
+ 750 800 l 5,7,-1
+ 750 950 l 5,4,-1
+500 950 m 5,8,-1
+ 650 950 l 5,9,-1
+ 650 800 l 5,10,-1
+ 500 800 l 5,11,-1
+ 500 950 l 5,8,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53386,26 +53386,26 @@
 LayerCount: 2
 Fore
 SplineSet
-750 950 m 5
- 900 950 l 5
- 900 800 l 5
- 750 800 l 5
- 750 950 l 5
-750 1200 m 5
- 900 1200 l 5
- 900 1050 l 5
- 750 1050 l 5
- 750 1200 l 5
-500 950 m 5
- 650 950 l 5
- 650 800 l 5
- 500 800 l 5
- 500 950 l 5
-500 1200 m 5
- 650 1200 l 5
- 650 1050 l 5
- 500 1050 l 5
- 500 1200 l 5
+750 950 m 5,0,-1
+ 900 950 l 5,1,-1
+ 900 800 l 5,2,-1
+ 750 800 l 5,3,-1
+ 750 950 l 5,0,-1
+750 1200 m 5,4,-1
+ 900 1200 l 5,5,-1
+ 900 1050 l 5,6,-1
+ 750 1050 l 5,7,-1
+ 750 1200 l 5,4,-1
+500 950 m 5,8,-1
+ 650 950 l 5,9,-1
+ 650 800 l 5,10,-1
+ 500 800 l 5,11,-1
+ 500 950 l 5,8,-1
+500 1200 m 5,12,-1
+ 650 1200 l 5,13,-1
+ 650 1050 l 5,14,-1
+ 500 1050 l 5,15,-1
+ 500 1200 l 5,12,-1
 EndSplineSet
 Refer: 940 1585 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53418,16 +53418,16 @@
 LayerCount: 2
 Fore
 SplineSet
-1525 -200 m 5
- 1675 -200 l 5
- 1675 -350 l 5
- 1525 -350 l 5
- 1525 -200 l 5
-1375 950 m 5
- 1525 950 l 5
- 1525 800 l 5
- 1375 800 l 5
- 1375 950 l 5
+1525 -200 m 5,0,-1
+ 1675 -200 l 5,1,-1
+ 1675 -350 l 5,2,-1
+ 1525 -350 l 5,3,-1
+ 1525 -200 l 5,0,-1
+1375 950 m 5,4,-1
+ 1525 950 l 5,5,-1
+ 1525 800 l 5,6,-1
+ 1375 800 l 5,7,-1
+ 1375 950 l 5,4,-1
 EndSplineSet
 Refer: 942 1587 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53440,21 +53440,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1525 -350 m 5
- 1675 -350 l 5
- 1675 -500 l 5
- 1525 -500 l 5
- 1525 -350 l 5
-1650 -100 m 5
- 1800 -100 l 5
- 1800 -250 l 5
- 1650 -250 l 5
- 1650 -100 l 5
-1400 -100 m 5
- 1550 -100 l 5
- 1550 -250 l 5
- 1400 -250 l 5
- 1400 -100 l 5
+1525 -350 m 5,0,-1
+ 1675 -350 l 5,1,-1
+ 1675 -500 l 5,2,-1
+ 1525 -500 l 5,3,-1
+ 1525 -350 l 5,0,-1
+1650 -100 m 5,4,-1
+ 1800 -100 l 5,5,-1
+ 1800 -250 l 5,6,-1
+ 1650 -250 l 5,7,-1
+ 1650 -100 l 5,4,-1
+1400 -100 m 5,8,-1
+ 1550 -100 l 5,9,-1
+ 1550 -250 l 5,10,-1
+ 1400 -250 l 5,11,-1
+ 1400 -100 l 5,8,-1
 EndSplineSet
 Refer: 942 1587 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53467,36 +53467,36 @@
 LayerCount: 2
 Fore
 SplineSet
-1525 -350 m 5
- 1675 -350 l 5
- 1675 -500 l 5
- 1525 -500 l 5
- 1525 -350 l 5
-1650 -100 m 5
- 1800 -100 l 5
- 1800 -250 l 5
- 1650 -250 l 5
- 1650 -100 l 5
-1400 -100 m 5
- 1550 -100 l 5
- 1550 -250 l 5
- 1400 -250 l 5
- 1400 -100 l 5
-1375 1200 m 5
- 1525 1200 l 5
- 1525 1050 l 5
- 1375 1050 l 5
- 1375 1200 l 5
-1500 950 m 5
- 1650 950 l 5
- 1650 800 l 5
- 1500 800 l 5
- 1500 950 l 5
-1250 950 m 5
- 1400 950 l 5
- 1400 800 l 5
- 1250 800 l 5
- 1250 950 l 5
+1525 -350 m 5,0,-1
+ 1675 -350 l 5,1,-1
+ 1675 -500 l 5,2,-1
+ 1525 -500 l 5,3,-1
+ 1525 -350 l 5,0,-1
+1650 -100 m 5,4,-1
+ 1800 -100 l 5,5,-1
+ 1800 -250 l 5,6,-1
+ 1650 -250 l 5,7,-1
+ 1650 -100 l 5,4,-1
+1400 -100 m 5,8,-1
+ 1550 -100 l 5,9,-1
+ 1550 -250 l 5,10,-1
+ 1400 -250 l 5,11,-1
+ 1400 -100 l 5,8,-1
+1375 1200 m 5,12,-1
+ 1525 1200 l 5,13,-1
+ 1525 1050 l 5,14,-1
+ 1375 1050 l 5,15,-1
+ 1375 1200 l 5,12,-1
+1500 950 m 5,16,-1
+ 1650 950 l 5,17,-1
+ 1650 800 l 5,18,-1
+ 1500 800 l 5,19,-1
+ 1500 950 l 5,16,-1
+1250 950 m 5,20,-1
+ 1400 950 l 5,21,-1
+ 1400 800 l 5,22,-1
+ 1250 800 l 5,23,-1
+ 1250 950 l 5,20,-1
 EndSplineSet
 Refer: 942 1587 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53509,16 +53509,16 @@
 LayerCount: 2
 Fore
 SplineSet
-1650 -200 m 5
- 1800 -200 l 5
- 1800 -350 l 5
- 1650 -350 l 5
- 1650 -200 l 5
-1400 -200 m 5
- 1550 -200 l 5
- 1550 -350 l 5
- 1400 -350 l 5
- 1400 -200 l 5
+1650 -200 m 5,0,-1
+ 1800 -200 l 5,1,-1
+ 1800 -350 l 5,2,-1
+ 1650 -350 l 5,3,-1
+ 1650 -200 l 5,0,-1
+1400 -200 m 5,4,-1
+ 1550 -200 l 5,5,-1
+ 1550 -350 l 5,6,-1
+ 1400 -350 l 5,7,-1
+ 1400 -200 l 5,4,-1
 EndSplineSet
 Refer: 944 1589 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53531,21 +53531,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1275 1200 m 5
- 1425 1200 l 5
- 1425 1050 l 5
- 1275 1050 l 5
- 1275 1200 l 5
-1400 950 m 5
- 1550 950 l 5
- 1550 800 l 5
- 1400 800 l 5
- 1400 950 l 5
-1150 950 m 5
- 1300 950 l 5
- 1300 800 l 5
- 1150 800 l 5
- 1150 950 l 5
+1275 1200 m 5,0,-1
+ 1425 1200 l 5,1,-1
+ 1425 1050 l 5,2,-1
+ 1275 1050 l 5,3,-1
+ 1275 1200 l 5,0,-1
+1400 950 m 5,4,-1
+ 1550 950 l 5,5,-1
+ 1550 800 l 5,6,-1
+ 1400 800 l 5,7,-1
+ 1400 950 l 5,4,-1
+1150 950 m 5,8,-1
+ 1300 950 l 5,9,-1
+ 1300 800 l 5,10,-1
+ 1150 800 l 5,11,-1
+ 1150 950 l 5,8,-1
 EndSplineSet
 Refer: 944 1589 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53558,21 +53558,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1025 1250 m 5
- 1175 1250 l 5
- 1175 1100 l 5
- 1025 1100 l 5
- 1025 1250 l 5
-1150 1000 m 5
- 1300 1000 l 5
- 1300 850 l 5
- 1150 850 l 5
- 1150 1000 l 5
-900 1000 m 5
- 1050 1000 l 5
- 1050 850 l 5
- 900 850 l 5
- 900 1000 l 5
+1025 1250 m 5,0,-1
+ 1175 1250 l 5,1,-1
+ 1175 1100 l 5,2,-1
+ 1025 1100 l 5,3,-1
+ 1025 1250 l 5,0,-1
+1150 1000 m 5,4,-1
+ 1300 1000 l 5,5,-1
+ 1300 850 l 5,6,-1
+ 1150 850 l 5,7,-1
+ 1150 1000 l 5,4,-1
+900 1000 m 5,8,-1
+ 1050 1000 l 5,9,-1
+ 1050 850 l 5,10,-1
+ 900 850 l 5,11,-1
+ 900 1000 l 5,8,-1
 EndSplineSet
 Refer: 946 1591 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53585,21 +53585,21 @@
 LayerCount: 2
 Fore
 SplineSet
-425 1600 m 5
- 575 1600 l 5
- 575 1450 l 5
- 425 1450 l 5
- 425 1600 l 5
-550 1350 m 5
- 700 1350 l 5
- 700 1200 l 5
- 550 1200 l 5
- 550 1350 l 5
-300 1350 m 5
- 450 1350 l 5
- 450 1200 l 5
- 300 1200 l 5
- 300 1350 l 5
+425 1600 m 5,0,-1
+ 575 1600 l 5,1,-1
+ 575 1450 l 5,2,-1
+ 425 1450 l 5,3,-1
+ 425 1600 l 5,0,-1
+550 1350 m 5,4,-1
+ 700 1350 l 5,5,-1
+ 700 1200 l 5,6,-1
+ 550 1200 l 5,7,-1
+ 550 1350 l 5,4,-1
+300 1350 m 5,8,-1
+ 450 1350 l 5,9,-1
+ 450 1200 l 5,10,-1
+ 300 1200 l 5,11,-1
+ 300 1350 l 5,8,-1
 EndSplineSet
 Refer: 948 1593 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53655,11 +53655,11 @@
 LayerCount: 2
 Fore
 SplineSet
-875 -200 m 5
- 1025 -200 l 5
- 1025 -350 l 5
- 875 -350 l 5
- 875 -200 l 5
+875 -200 m 5,0,-1
+ 1025 -200 l 5,1,-1
+ 1025 -350 l 5,2,-1
+ 875 -350 l 5,3,-1
+ 875 -200 l 5,0,-1
 EndSplineSet
 Refer: 1032 1697 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53672,16 +53672,16 @@
 LayerCount: 2
 Fore
 SplineSet
-875 -200 m 5
- 1025 -200 l 5
- 1025 -350 l 5
- 875 -350 l 5
- 875 -200 l 5
-1525 1300 m 5
- 1675 1300 l 5
- 1675 1150 l 5
- 1525 1150 l 5
- 1525 1300 l 5
+875 -200 m 5,0,-1
+ 1025 -200 l 5,1,-1
+ 1025 -350 l 5,2,-1
+ 875 -350 l 5,3,-1
+ 875 -200 l 5,0,-1
+1525 1300 m 5,4,-1
+ 1675 1300 l 5,5,-1
+ 1675 1150 l 5,6,-1
+ 1525 1150 l 5,7,-1
+ 1525 1300 l 5,4,-1
 EndSplineSet
 Refer: 1032 1697 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53694,21 +53694,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1525 1550 m 5
- 1675 1550 l 5
- 1675 1400 l 5
- 1525 1400 l 5
- 1525 1550 l 5
-1650 1300 m 5
- 1800 1300 l 5
- 1800 1150 l 5
- 1650 1150 l 5
- 1650 1300 l 5
-1400 1300 m 5
- 1550 1300 l 5
- 1550 1150 l 5
- 1400 1150 l 5
- 1400 1300 l 5
+1525 1550 m 5,0,-1
+ 1675 1550 l 5,1,-1
+ 1675 1400 l 5,2,-1
+ 1525 1400 l 5,3,-1
+ 1525 1550 l 5,0,-1
+1650 1300 m 5,4,-1
+ 1800 1300 l 5,5,-1
+ 1800 1150 l 5,6,-1
+ 1650 1150 l 5,7,-1
+ 1650 1300 l 5,4,-1
+1400 1300 m 5,8,-1
+ 1550 1300 l 5,9,-1
+ 1550 1150 l 5,10,-1
+ 1400 1150 l 5,11,-1
+ 1400 1300 l 5,8,-1
 EndSplineSet
 Refer: 1032 1697 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53721,21 +53721,21 @@
 LayerCount: 2
 Fore
 SplineSet
-875 -450 m 5
- 1025 -450 l 5
- 1025 -600 l 5
- 875 -600 l 5
- 875 -450 l 5
-1000 -200 m 5
- 1150 -200 l 5
- 1150 -350 l 5
- 1000 -350 l 5
- 1000 -200 l 5
-750 -200 m 5
- 900 -200 l 5
- 900 -350 l 5
- 750 -350 l 5
- 750 -200 l 5
+875 -450 m 5,0,-1
+ 1025 -450 l 5,1,-1
+ 1025 -600 l 5,2,-1
+ 875 -600 l 5,3,-1
+ 875 -450 l 5,0,-1
+1000 -200 m 5,4,-1
+ 1150 -200 l 5,5,-1
+ 1150 -350 l 5,6,-1
+ 1000 -350 l 5,7,-1
+ 1000 -200 l 5,4,-1
+750 -200 m 5,8,-1
+ 900 -200 l 5,9,-1
+ 900 -350 l 5,10,-1
+ 750 -350 l 5,11,-1
+ 750 -200 l 5,8,-1
 EndSplineSet
 Refer: 1032 1697 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53748,26 +53748,26 @@
 LayerCount: 2
 Fore
 SplineSet
-1650 1300 m 5
- 1800 1300 l 5
- 1800 1150 l 5
- 1650 1150 l 5
- 1650 1300 l 5
-1650 1550 m 5
- 1800 1550 l 5
- 1800 1400 l 5
- 1650 1400 l 5
- 1650 1550 l 5
-1400 1300 m 5
- 1550 1300 l 5
- 1550 1150 l 5
- 1400 1150 l 5
- 1400 1300 l 5
-1400 1550 m 5
- 1550 1550 l 5
- 1550 1400 l 5
- 1400 1400 l 5
- 1400 1550 l 5
+1650 1300 m 5,0,-1
+ 1800 1300 l 5,1,-1
+ 1800 1150 l 5,2,-1
+ 1650 1150 l 5,3,-1
+ 1650 1300 l 5,0,-1
+1650 1550 m 5,4,-1
+ 1800 1550 l 5,5,-1
+ 1800 1400 l 5,6,-1
+ 1650 1400 l 5,7,-1
+ 1650 1550 l 5,4,-1
+1400 1300 m 5,8,-1
+ 1550 1300 l 5,9,-1
+ 1550 1150 l 5,10,-1
+ 1400 1150 l 5,11,-1
+ 1400 1300 l 5,8,-1
+1400 1550 m 5,12,-1
+ 1550 1550 l 5,13,-1
+ 1550 1400 l 5,14,-1
+ 1400 1400 l 5,15,-1
+ 1400 1550 l 5,12,-1
 EndSplineSet
 Refer: 1032 1697 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53780,11 +53780,11 @@
 LayerCount: 2
 Fore
 SplineSet
-975 1300 m 5
- 1125 1300 l 5
- 1125 1150 l 5
- 975 1150 l 5
- 975 1300 l 5
+975 1300 m 5,0,-1
+ 1125 1300 l 5,1,-1
+ 1125 1150 l 5,2,-1
+ 975 1150 l 5,3,-1
+ 975 1300 l 5,0,-1
 EndSplineSet
 Refer: 989 1647 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53797,21 +53797,21 @@
 LayerCount: 2
 Fore
 SplineSet
-975 1550 m 5
- 1125 1550 l 5
- 1125 1400 l 5
- 975 1400 l 5
- 975 1550 l 5
-1100 1300 m 5
- 1250 1300 l 5
- 1250 1150 l 5
- 1100 1150 l 5
- 1100 1300 l 5
-850 1300 m 5
- 1000 1300 l 5
- 1000 1150 l 5
- 850 1150 l 5
- 850 1300 l 5
+975 1550 m 5,0,-1
+ 1125 1550 l 5,1,-1
+ 1125 1400 l 5,2,-1
+ 975 1400 l 5,3,-1
+ 975 1550 l 5,0,-1
+1100 1300 m 5,4,-1
+ 1250 1300 l 5,5,-1
+ 1250 1150 l 5,6,-1
+ 1100 1150 l 5,7,-1
+ 1100 1300 l 5,4,-1
+850 1300 m 5,8,-1
+ 1000 1300 l 5,9,-1
+ 1000 1150 l 5,10,-1
+ 850 1150 l 5,11,-1
+ 850 1300 l 5,8,-1
 EndSplineSet
 Refer: 989 1647 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53904,24 +53904,24 @@
 LayerCount: 2
 Fore
 SplineSet
-1666 1120 m 4
- 1666 1160 1666 1160 1638 1188 c 4
- 1610 1217 1610 1217 1569 1217 c 4
- 1528 1217 1528 1217 1500 1189 c 4
- 1472 1161 1472 1161 1472 1120 c 4
- 1472 1078 1472 1078 1500 1050 c 4
- 1528 1023 1528 1023 1569 1023 c 4
- 1610 1023 1610 1023 1638 1051 c 4
- 1666 1079 1666 1079 1666 1120 c 4
-1788 1120 m 4
- 1788 1028 1788 1028 1725 964 c 4
- 1661 900 1661 900 1569 900 c 4
- 1477 900 1477 900 1414 964 c 4
- 1350 1028 1350 1028 1350 1120 c 4
- 1350 1212 1350 1212 1414 1276 c 4
- 1477 1339 1477 1339 1569 1339 c 4
- 1661 1339 1661 1339 1725 1276 c 4
- 1788 1212 1788 1212 1788 1120 c 4
+1666 1120 m 4,0,1
+ 1666 1160 1666 1160 1638 1188 c 4,2,3
+ 1610 1217 1610 1217 1569 1217 c 4,4,5
+ 1528 1217 1528 1217 1500 1189 c 4,6,7
+ 1472 1161 1472 1161 1472 1120 c 4,8,9
+ 1472 1078 1472 1078 1500 1050 c 4,10,11
+ 1528 1023 1528 1023 1569 1023 c 4,12,13
+ 1610 1023 1610 1023 1638 1051 c 4,14,15
+ 1666 1079 1666 1079 1666 1120 c 4,0,1
+1788 1120 m 4,16,17
+ 1788 1028 1788 1028 1725 964 c 4,18,19
+ 1661 900 1661 900 1569 900 c 4,20,21
+ 1477 900 1477 900 1414 964 c 4,22,23
+ 1350 1028 1350 1028 1350 1120 c 4,24,25
+ 1350 1212 1350 1212 1414 1276 c 4,26,27
+ 1477 1339 1477 1339 1569 1339 c 4,28,29
+ 1661 1339 1661 1339 1725 1276 c 4,30,31
+ 1788 1212 1788 1212 1788 1120 c 4,16,17
 EndSplineSet
 Refer: 1040 1705 N 1 0 0 1 0 0 2
 Validated: 5
@@ -53934,11 +53934,11 @@
 LayerCount: 2
 Fore
 SplineSet
-750 1500 m 5
- 900 1500 l 5
- 900 1350 l 5
- 750 1350 l 5
- 750 1500 l 5
+750 1500 m 5,0,-1
+ 900 1500 l 5,1,-1
+ 900 1350 l 5,2,-1
+ 750 1350 l 5,3,-1
+ 750 1500 l 5,0,-1
 EndSplineSet
 Refer: 953 1603 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53951,21 +53951,21 @@
 LayerCount: 2
 Fore
 SplineSet
-750 1750 m 5
- 900 1750 l 5
- 900 1600 l 5
- 750 1600 l 5
- 750 1750 l 5
-875 1500 m 5
- 1025 1500 l 5
- 1025 1350 l 5
- 875 1350 l 5
- 875 1500 l 5
-625 1500 m 5
- 775 1500 l 5
- 775 1350 l 5
- 625 1350 l 5
- 625 1500 l 5
+750 1750 m 5,0,-1
+ 900 1750 l 5,1,-1
+ 900 1600 l 5,2,-1
+ 750 1600 l 5,3,-1
+ 750 1750 l 5,0,-1
+875 1500 m 5,4,-1
+ 1025 1500 l 5,5,-1
+ 1025 1350 l 5,6,-1
+ 875 1350 l 5,7,-1
+ 875 1500 l 5,4,-1
+625 1500 m 5,8,-1
+ 775 1500 l 5,9,-1
+ 775 1350 l 5,10,-1
+ 625 1350 l 5,11,-1
+ 625 1500 l 5,8,-1
 EndSplineSet
 Refer: 953 1603 N 1 0 0 1 0 0 2
 Validated: 1
@@ -53978,21 +53978,21 @@
 LayerCount: 2
 Fore
 SplineSet
-725 -450 m 5
- 875 -450 l 5
- 875 -600 l 5
- 725 -600 l 5
- 725 -450 l 5
-850 -200 m 5
- 1000 -200 l 5
- 1000 -350 l 5
- 850 -350 l 5
- 850 -200 l 5
-600 -200 m 5
- 750 -200 l 5
- 750 -350 l 5
- 600 -350 l 5
- 600 -200 l 5
+725 -450 m 5,0,-1
+ 875 -450 l 5,1,-1
+ 875 -600 l 5,2,-1
+ 725 -600 l 5,3,-1
+ 725 -450 l 5,0,-1
+850 -200 m 5,4,-1
+ 1000 -200 l 5,5,-1
+ 1000 -350 l 5,6,-1
+ 850 -350 l 5,7,-1
+ 850 -200 l 5,4,-1
+600 -200 m 5,8,-1
+ 750 -200 l 5,9,-1
+ 750 -350 l 5,10,-1
+ 600 -350 l 5,11,-1
+ 600 -200 l 5,8,-1
 EndSplineSet
 Refer: 953 1603 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54005,11 +54005,11 @@
 LayerCount: 2
 Fore
 SplineSet
-1833 1685 m 29
- 970 1331 l 29
- 970 1481 l 29
- 1833 1835 l 29
- 1833 1685 l 29
+1833 1685 m 29,0,-1
+ 970 1331 l 29,1,-1
+ 970 1481 l 29,2,-1
+ 1833 1835 l 29,3,-1
+ 1833 1685 l 29,0,-1
 EndSplineSet
 Refer: 1040 1705 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54022,24 +54022,24 @@
 LayerCount: 2
 Fore
 SplineSet
-1666 1120 m 4
- 1666 1160 1666 1160 1638 1188 c 4
- 1610 1217 1610 1217 1569 1217 c 4
- 1528 1217 1528 1217 1500 1189 c 4
- 1472 1161 1472 1161 1472 1120 c 4
- 1472 1078 1472 1078 1500 1050 c 4
- 1528 1023 1528 1023 1569 1023 c 4
- 1610 1023 1610 1023 1638 1051 c 4
- 1666 1079 1666 1079 1666 1120 c 4
-1788 1120 m 4
- 1788 1028 1788 1028 1725 964 c 4
- 1661 900 1661 900 1569 900 c 4
- 1477 900 1477 900 1414 964 c 4
- 1350 1028 1350 1028 1350 1120 c 4
- 1350 1212 1350 1212 1414 1276 c 4
- 1477 1339 1477 1339 1569 1339 c 4
- 1661 1339 1661 1339 1725 1276 c 4
- 1788 1212 1788 1212 1788 1120 c 4
+1666 1120 m 4,0,1
+ 1666 1160 1666 1160 1638 1188 c 4,2,3
+ 1610 1217 1610 1217 1569 1217 c 4,4,5
+ 1528 1217 1528 1217 1500 1189 c 4,6,7
+ 1472 1161 1472 1161 1472 1120 c 4,8,9
+ 1472 1078 1472 1078 1500 1050 c 4,10,11
+ 1528 1023 1528 1023 1569 1023 c 4,12,13
+ 1610 1023 1610 1023 1638 1051 c 4,14,15
+ 1666 1079 1666 1079 1666 1120 c 4,0,1
+1788 1120 m 4,16,17
+ 1788 1028 1788 1028 1725 964 c 4,18,19
+ 1661 900 1661 900 1569 900 c 4,20,21
+ 1477 900 1477 900 1414 964 c 4,22,23
+ 1350 1028 1350 1028 1350 1120 c 4,24,25
+ 1350 1212 1350 1212 1414 1276 c 4,26,27
+ 1477 1339 1477 1339 1569 1339 c 4,28,29
+ 1661 1339 1661 1339 1725 1276 c 4,30,31
+ 1788 1212 1788 1212 1788 1120 c 4,16,17
 EndSplineSet
 Refer: 1046 1711 N 1 0 0 1 0 0 2
 Validated: 5
@@ -54052,16 +54052,16 @@
 LayerCount: 2
 Fore
 SplineSet
-1150 1850 m 5
- 1300 1850 l 5
- 1300 1700 l 5
- 1150 1700 l 5
- 1150 1850 l 5
-900 1850 m 5
- 1050 1850 l 5
- 1050 1700 l 5
- 900 1700 l 5
- 900 1850 l 5
+1150 1850 m 5,0,-1
+ 1300 1850 l 5,1,-1
+ 1300 1700 l 5,2,-1
+ 1150 1700 l 5,3,-1
+ 1150 1850 l 5,0,-1
+900 1850 m 5,4,-1
+ 1050 1850 l 5,5,-1
+ 1050 1700 l 5,6,-1
+ 900 1700 l 5,7,-1
+ 900 1850 l 5,4,-1
 EndSplineSet
 Refer: 1046 1711 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54074,16 +54074,16 @@
 LayerCount: 2
 Fore
 SplineSet
-850 -200 m 5
- 1000 -200 l 5
- 1000 -350 l 5
- 850 -350 l 5
- 850 -200 l 5
-600 -200 m 5
- 750 -200 l 5
- 750 -350 l 5
- 600 -350 l 5
- 600 -200 l 5
+850 -200 m 5,0,-1
+ 1000 -200 l 5,1,-1
+ 1000 -350 l 5,2,-1
+ 850 -350 l 5,3,-1
+ 850 -200 l 5,0,-1
+600 -200 m 5,4,-1
+ 750 -200 l 5,5,-1
+ 750 -350 l 5,6,-1
+ 600 -350 l 5,7,-1
+ 600 -200 l 5,4,-1
 EndSplineSet
 Refer: 1046 1711 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54096,16 +54096,16 @@
 LayerCount: 2
 Fore
 SplineSet
-725 -450 m 5
- 875 -450 l 5
- 875 -600 l 5
- 725 -600 l 5
- 725 -450 l 5
-725 -200 m 5
- 875 -200 l 5
- 875 -350 l 5
- 725 -350 l 5
- 725 -200 l 5
+725 -450 m 5,0,-1
+ 875 -450 l 5,1,-1
+ 875 -600 l 5,2,-1
+ 725 -600 l 5,3,-1
+ 725 -450 l 5,0,-1
+725 -200 m 5,4,-1
+ 875 -200 l 5,5,-1
+ 875 -350 l 5,6,-1
+ 725 -350 l 5,7,-1
+ 725 -200 l 5,4,-1
 EndSplineSet
 Refer: 1046 1711 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54118,21 +54118,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1025 2100 m 5
- 1175 2100 l 5
- 1175 1950 l 5
- 1025 1950 l 5
- 1025 2100 l 5
-1150 1850 m 5
- 1300 1850 l 5
- 1300 1700 l 5
- 1150 1700 l 5
- 1150 1850 l 5
-900 1850 m 5
- 1050 1850 l 5
- 1050 1700 l 5
- 900 1700 l 5
- 900 1850 l 5
+1025 2100 m 5,0,-1
+ 1175 2100 l 5,1,-1
+ 1175 1950 l 5,2,-1
+ 1025 1950 l 5,3,-1
+ 1025 2100 l 5,0,-1
+1150 1850 m 5,4,-1
+ 1300 1850 l 5,5,-1
+ 1300 1700 l 5,6,-1
+ 1150 1700 l 5,7,-1
+ 1150 1850 l 5,4,-1
+900 1850 m 5,8,-1
+ 1050 1850 l 5,9,-1
+ 1050 1700 l 5,10,-1
+ 900 1700 l 5,11,-1
+ 900 1850 l 5,8,-1
 EndSplineSet
 Refer: 1046 1711 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54156,11 +54156,11 @@
 LayerCount: 2
 Fore
 SplineSet
-1130 1950 m 5
- 1280 1950 l 5
- 1280 1800 l 5
- 1130 1800 l 5
- 1130 1950 l 5
+1130 1950 m 5,0,-1
+ 1280 1950 l 5,1,-1
+ 1280 1800 l 5,2,-1
+ 1130 1800 l 5,3,-1
+ 1130 1950 l 5,0,-1
 EndSplineSet
 Refer: 954 1604 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54173,21 +54173,21 @@
 LayerCount: 2
 Fore
 SplineSet
-1125 2100 m 5
- 1275 2100 l 5
- 1275 1950 l 5
- 1125 1950 l 5
- 1125 2100 l 5
-1250 1850 m 5
- 1400 1850 l 5
- 1400 1700 l 5
- 1250 1700 l 5
- 1250 1850 l 5
-1000 1850 m 5
- 1150 1850 l 5
- 1150 1700 l 5
- 1000 1700 l 5
- 1000 1850 l 5
+1125 2100 m 5,0,-1
+ 1275 2100 l 5,1,-1
+ 1275 1950 l 5,2,-1
+ 1125 1950 l 5,3,-1
+ 1125 2100 l 5,0,-1
+1250 1850 m 5,4,-1
+ 1400 1850 l 5,5,-1
+ 1400 1700 l 5,6,-1
+ 1250 1700 l 5,7,-1
+ 1250 1850 l 5,4,-1
+1000 1850 m 5,8,-1
+ 1150 1850 l 5,9,-1
+ 1150 1700 l 5,10,-1
+ 1000 1700 l 5,11,-1
+ 1000 1850 l 5,8,-1
 EndSplineSet
 Refer: 954 1604 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54200,21 +54200,21 @@
 LayerCount: 2
 Fore
 SplineSet
-675 -650 m 5
- 825 -650 l 5
- 825 -800 l 5
- 675 -800 l 5
- 675 -650 l 5
-800 -400 m 5
- 950 -400 l 5
- 950 -550 l 5
- 800 -550 l 5
- 800 -400 l 5
-550 -400 m 5
- 700 -400 l 5
- 700 -550 l 5
- 550 -550 l 5
- 550 -400 l 5
+675 -650 m 5,0,-1
+ 825 -650 l 5,1,-1
+ 825 -800 l 5,2,-1
+ 675 -800 l 5,3,-1
+ 675 -650 l 5,0,-1
+800 -400 m 5,4,-1
+ 950 -400 l 5,5,-1
+ 950 -550 l 5,6,-1
+ 800 -550 l 5,7,-1
+ 800 -400 l 5,4,-1
+550 -400 m 5,8,-1
+ 700 -400 l 5,9,-1
+ 700 -550 l 5,10,-1
+ 550 -550 l 5,11,-1
+ 550 -400 l 5,8,-1
 EndSplineSet
 Refer: 954 1604 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54227,16 +54227,16 @@
 LayerCount: 2
 Fore
 SplineSet
-700 -500 m 5
- 850 -500 l 5
- 850 -650 l 5
- 700 -650 l 5
- 700 -500 l 5
-575 950 m 5
- 725 950 l 5
- 725 800 l 5
- 575 800 l 5
- 575 950 l 5
+700 -500 m 5,0,-1
+ 850 -500 l 5,1,-1
+ 850 -650 l 5,2,-1
+ 700 -650 l 5,3,-1
+ 700 -500 l 5,0,-1
+575 950 m 5,4,-1
+ 725 950 l 5,5,-1
+ 725 800 l 5,6,-1
+ 575 800 l 5,7,-1
+ 575 950 l 5,4,-1
 EndSplineSet
 Refer: 1057 1722 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54288,29 +54288,29 @@
 LayerCount: 2
 Fore
 SplineSet
-866 -455 m 4
- 866 -415 866 -415 838 -387 c 4
- 810 -358 810 -358 769 -358 c 4
- 728 -358 728 -358 700 -386 c 4
- 672 -414 672 -414 672 -455 c 4
- 672 -497 672 -497 700 -525 c 4
- 728 -552 728 -552 769 -552 c 4
- 810 -552 810 -552 838 -524 c 4
- 866 -496 866 -496 866 -455 c 4
-988 -455 m 4
- 988 -547 988 -547 925 -611 c 4
- 861 -675 861 -675 769 -675 c 4
- 677 -675 677 -675 614 -611 c 4
- 550 -547 550 -547 550 -455 c 4
- 550 -363 550 -363 614 -299 c 4
- 677 -236 677 -236 769 -236 c 4
- 861 -236 861 -236 925 -299 c 4
- 988 -363 988 -363 988 -455 c 4
-575 950 m 5
- 725 950 l 5
- 725 800 l 5
- 575 800 l 5
- 575 950 l 5
+866 -455 m 4,0,1
+ 866 -415 866 -415 838 -387 c 4,2,3
+ 810 -358 810 -358 769 -358 c 4,4,5
+ 728 -358 728 -358 700 -386 c 4,6,7
+ 672 -414 672 -414 672 -455 c 4,8,9
+ 672 -497 672 -497 700 -525 c 4,10,11
+ 728 -552 728 -552 769 -552 c 4,12,13
+ 810 -552 810 -552 838 -524 c 4,14,15
+ 866 -496 866 -496 866 -455 c 4,0,1
+988 -455 m 4,16,17
+ 988 -547 988 -547 925 -611 c 4,18,19
+ 861 -675 861 -675 769 -675 c 4,20,21
+ 677 -675 677 -675 614 -611 c 4,22,23
+ 550 -547 550 -547 550 -455 c 4,24,25
+ 550 -363 550 -363 614 -299 c 4,26,27
+ 677 -236 677 -236 769 -236 c 4,28,29
+ 861 -236 861 -236 925 -299 c 4,30,31
+ 988 -363 988 -363 988 -455 c 4,16,17
+575 950 m 5,32,-1
+ 725 950 l 5,33,-1
+ 725 800 l 5,34,-1
+ 575 800 l 5,35,-1
+ 575 950 l 5,32,-1
 EndSplineSet
 Refer: 1057 1722 N 1 0 0 1 0 0 2
 Validated: 5
@@ -54323,21 +54323,21 @@
 LayerCount: 2
 Fore
 SplineSet
-625 1200 m 5
- 775 1200 l 5
- 775 1050 l 5
- 625 1050 l 5
- 625 1200 l 5
-750 950 m 5
- 900 950 l 5
- 900 800 l 5
- 750 800 l 5
- 750 950 l 5
-500 950 m 5
- 650 950 l 5
- 650 800 l 5
- 500 800 l 5
- 500 950 l 5
+625 1200 m 5,0,-1
+ 775 1200 l 5,1,-1
+ 775 1050 l 5,2,-1
+ 625 1050 l 5,3,-1
+ 625 1200 l 5,0,-1
+750 950 m 5,4,-1
+ 900 950 l 5,5,-1
+ 900 800 l 5,6,-1
+ 750 800 l 5,7,-1
+ 750 950 l 5,4,-1
+500 950 m 5,8,-1
+ 650 950 l 5,9,-1
+ 650 800 l 5,10,-1
+ 500 800 l 5,11,-1
+ 500 950 l 5,8,-1
 EndSplineSet
 Refer: 1057 1722 N 1 0 0 1 0 0 2
 Validated: 1
@@ -54401,26 +54401,26 @@
 LayerCount: 2
 Fore
 SplineSet
-575 1200 m 5
- 725 1200 l 5
- 725 1050 l 5
- 575 1050 l 5
- 575 1200 l 5
-787 50 m 5
- 937 50 l 5
- 937 -100 l 5
- 787 -100 l 5
- 787 50 l 5
-912 300 m 5
- 1062 300 l 5
- 1062 150 l 5
- 912 150 l 5
- 912 300 l 5
-662 300 m 5
- 812 300 l 5
- 812 150 l 5
- 662 150 l 5
- 662 300 l 5
+575 1200 m 5,0,-1
+ 725 1200 l 5,1,-1
+ 725 1050 l 5,2,-1
+ 575 1050 l 5,3,-1
+ 575 1200 l 5,0,-1
+787 50 m 5,4,-1
+ 937 50 l 5,5,-1
+ 937 -100 l 5,6,-1
+ 787 -100 l 5,7,-1
+ 787 50 l 5,4,-1
+912 300 m 5,8,-1
+ 1062 300 l 5,9,-1
+ 1062 150 l 5,10,-1
+ 912 150 l 5,11,-1
+ 912 300 l 5,8,-1
+662 300 m 5,12,-1
+ 812 300 l 5,13,-1
+ 812 150 l 5,14,-1
+ 662 150 l 5,15,-1
+ 662 300 l 5,12,-1
 EndSplineSet
 Refer: 936 1581 N 1 0 0 1 0 0 2
 Validated: 1
@@ -55169,11 +55169,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -55185,11 +55185,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 68 97 N 1 0 0 1 0 0 2
 EndChar
@@ -55201,17 +55201,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -55223,17 +55223,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 68 97 N 1 0 0 1 0 0 2
 EndChar
@@ -55265,14 +55265,14 @@
 LayerCount: 2
 Fore
 SplineSet
-648 1908 m 5
- 837 1908 l 5
- 1048 1642 l 5
- 908 1642 l 5
- 742 1820 l 5
- 577 1642 l 5
- 437 1642 l 5
- 648 1908 l 5
+648 1908 m 5,0,-1
+ 837 1908 l 5,1,-1
+ 1048 1642 l 5,2,-1
+ 908 1642 l 5,3,-1
+ 742 1820 l 5,4,-1
+ 577 1642 l 5,5,-1
+ 437 1642 l 5,6,-1
+ 648 1908 l 5,0,-1
 EndSplineSet
 Refer: 38 67 N 1 0 0 1 0 0 2
 EndChar
@@ -55294,11 +55294,11 @@
 LayerCount: 2
 Fore
 SplineSet
-588 1872 m 5
- 793 1872 l 5
- 793 1667 l 5
- 588 1667 l 5
- 588 1872 l 5
+588 1872 m 5,0,-1
+ 793 1872 l 5,1,-1
+ 793 1667 l 5,2,-1
+ 588 1667 l 5,3,-1
+ 588 1872 l 5,0,-1
 EndSplineSet
 Refer: 38 67 N 1 0 0 1 0 0 2
 EndChar
@@ -55310,11 +55310,11 @@
 LayerCount: 2
 Fore
 SplineSet
-588 1552 m 5
- 793 1552 l 5
- 793 1348 l 5
- 588 1348 l 5
- 588 1552 l 5
+588 1552 m 5,0,-1
+ 793 1552 l 5,1,-1
+ 793 1348 l 5,2,-1
+ 588 1348 l 5,3,-1
+ 588 1552 l 5,0,-1
 EndSplineSet
 Refer: 70 99 N 1 0 0 1 0 0 2
 EndChar
@@ -55326,14 +55326,14 @@
 LayerCount: 2
 Fore
 SplineSet
-444 1629 m 5
- 233 1895 l 5
- 373 1895 l 5
- 538 1717 l 5
- 704 1895 l 5
- 844 1895 l 5
- 633 1629 l 5
- 444 1629 l 5
+444 1629 m 5,0,-1
+ 233 1895 l 5,1,-1
+ 373 1895 l 5,2,-1
+ 538 1717 l 5,3,-1
+ 704 1895 l 5,4,-1
+ 844 1895 l 5,5,-1
+ 633 1629 l 5,6,-1
+ 444 1629 l 5,0,-1
 EndSplineSet
 Refer: 39 68 N 1 0 0 1 0 0 2
 EndChar
@@ -55345,11 +55345,11 @@
 LayerCount: 2
 Fore
 SplineSet
-1114 1557 m 5
- 1312 1557 l 5
- 1199 1181 l 5
- 1045 1181 l 5
- 1114 1557 l 5
+1114 1557 m 5,0,-1
+ 1312 1557 l 5,1,-1
+ 1199 1181 l 5,2,-1
+ 1045 1181 l 5,3,-1
+ 1114 1557 l 5,0,-1
 EndSplineSet
 Refer: 71 100 N 1 0 0 1 0 0 2
 EndChar
@@ -55370,11 +55370,11 @@
 LayerCount: 2
 Fore
 SplineSet
-335 1840 m 5
- 933 1840 l 5
- 933 1692 l 5
- 335 1692 l 5
- 335 1840 l 5
+335 1840 m 5,0,-1
+ 933 1840 l 5,1,-1
+ 933 1692 l 5,2,-1
+ 335 1692 l 5,3,-1
+ 335 1840 l 5,0,-1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -55386,11 +55386,11 @@
 LayerCount: 2
 Fore
 SplineSet
-352 1526 m 5
- 950 1526 l 5
- 950 1378 l 5
- 352 1378 l 5
- 352 1526 l 5
+352 1526 m 5,0,-1
+ 950 1526 l 5,1,-1
+ 950 1378 l 5,2,-1
+ 352 1378 l 5,3,-1
+ 352 1526 l 5,0,-1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -55402,17 +55402,17 @@
 LayerCount: 2
 Fore
 SplineSet
-321 1901 m 5
- 440 1901 l 5
- 452 1847 452 1847 502 1818.5 c 4
- 552 1790 552 1790 634 1790 c 4
- 717 1790 717 1790 765.5 1817.5 c 4
- 814 1845 814 1845 829 1901 c 5
- 948 1901 l 5
- 937 1782 937 1782 857.5 1720.5 c 4
- 778 1659 778 1659 634 1659 c 260
- 490 1659 490 1659 411 1720 c 4
- 332 1781 332 1781 321 1901 c 5
+321 1901 m 5,0,-1
+ 440 1901 l 5,1,2
+ 452 1847 452 1847 502 1818.5 c 4,3,4
+ 552 1790 552 1790 634 1790 c 4,5,6
+ 717 1790 717 1790 765.5 1817.5 c 4,7,8
+ 814 1845 814 1845 829 1901 c 5,9,-1
+ 948 1901 l 5,10,11
+ 937 1782 937 1782 857.5 1720.5 c 4,12,13
+ 778 1659 778 1659 634 1659 c 260,14,15
+ 490 1659 490 1659 411 1720 c 4,16,17
+ 332 1781 332 1781 321 1901 c 5,0,-1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -55424,17 +55424,17 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1608 m 5
- 436 1608 l 5
- 447 1532 447 1532 495 1495 c 4
- 543 1458 543 1458 630 1458 c 4
- 715 1458 715 1458 763 1495 c 4
- 811 1532 811 1532 825 1608 c 5
- 944 1608 l 5
- 933 1465 933 1465 854 1393 c 4
- 775 1321 775 1321 630 1321 c 4
- 486 1321 486 1321 407 1393 c 4
- 328 1465 328 1465 317 1608 c 5
+317 1608 m 5,0,-1
+ 436 1608 l 5,1,2
+ 447 1532 447 1532 495 1495 c 4,3,4
+ 543 1458 543 1458 630 1458 c 4,5,6
+ 715 1458 715 1458 763 1495 c 4,7,8
+ 811 1532 811 1532 825 1608 c 5,9,-1
+ 944 1608 l 5,10,11
+ 933 1465 933 1465 854 1393 c 4,12,13
+ 775 1321 775 1321 630 1321 c 4,14,15
+ 486 1321 486 1321 407 1393 c 4,16,17
+ 328 1465 328 1465 317 1608 c 5,0,-1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -55446,11 +55446,11 @@
 LayerCount: 2
 Fore
 SplineSet
-531 1872 m 5
- 736 1872 l 5
- 736 1667 l 5
- 531 1667 l 5
- 531 1872 l 5
+531 1872 m 5,0,-1
+ 736 1872 l 5,1,-1
+ 736 1667 l 5,2,-1
+ 531 1667 l 5,3,-1
+ 531 1872 l 5,0,-1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -55462,11 +55462,11 @@
 LayerCount: 2
 Fore
 SplineSet
-527 1552 m 5
- 732 1552 l 5
- 732 1348 l 5
- 527 1348 l 5
- 527 1552 l 5
+527 1552 m 5,0,-1
+ 732 1552 l 5,1,-1
+ 732 1348 l 5,2,-1
+ 527 1348 l 5,3,-1
+ 527 1552 l 5,0,-1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -55498,14 +55498,14 @@
 LayerCount: 2
 Fore
 SplineSet
-558 1629 m 5
- 347 1895 l 5
- 487 1895 l 5
- 652 1717 l 5
- 818 1895 l 5
- 958 1895 l 5
- 747 1629 l 5
- 558 1629 l 5
+558 1629 m 5,0,-1
+ 347 1895 l 5,1,-1
+ 487 1895 l 5,2,-1
+ 652 1717 l 5,3,-1
+ 818 1895 l 5,4,-1
+ 958 1895 l 5,5,-1
+ 747 1629 l 5,6,-1
+ 558 1629 l 5,0,-1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -55527,14 +55527,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1901 m 5
- 711 1901 l 5
- 922 1635 l 5
- 782 1635 l 5
- 616 1813 l 5
- 451 1635 l 5
- 311 1635 l 5
- 522 1901 l 5
+522 1901 m 5,0,-1
+ 711 1901 l 5,1,-1
+ 922 1635 l 5,2,-1
+ 782 1635 l 5,3,-1
+ 616 1813 l 5,4,-1
+ 451 1635 l 5,5,-1
+ 311 1635 l 5,6,-1
+ 522 1901 l 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -55556,11 +55556,11 @@
 LayerCount: 2
 Fore
 SplineSet
-563 1872 m 5
- 768 1872 l 5
- 768 1667 l 5
- 563 1667 l 5
- 563 1872 l 5
+563 1872 m 5,0,-1
+ 768 1872 l 5,1,-1
+ 768 1667 l 5,2,-1
+ 563 1667 l 5,3,-1
+ 563 1872 l 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -55572,11 +55572,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1552 m 5
- 718 1552 l 5
- 718 1348 l 5
- 513 1348 l 5
- 513 1552 l 5
+513 1552 m 5,0,-1
+ 718 1552 l 5,1,-1
+ 718 1348 l 5,2,-1
+ 513 1348 l 5,3,-1
+ 513 1552 l 5,0,-1
 EndSplineSet
 Refer: 74 103 N 1 0 0 1 0 0 2
 EndChar
@@ -55588,11 +55588,11 @@
 LayerCount: 2
 Fore
 SplineSet
-630 -228 m 5
- 869 -228 l 5
- 682 -573 l 5
- 536 -573 l 5
- 630 -228 l 5
+630 -228 m 5,0,-1
+ 869 -228 l 5,1,-1
+ 682 -573 l 5,2,-1
+ 536 -573 l 5,3,-1
+ 630 -228 l 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -55604,11 +55604,11 @@
 LayerCount: 2
 Fore
 SplineSet
-725 1269 m 5
- 486 1269 l 5
- 673 1614 l 5
- 819 1614 l 5
- 725 1269 l 5
+725 1269 m 5,0,-1
+ 486 1269 l 5,1,-1
+ 673 1614 l 5,2,-1
+ 819 1614 l 5,3,-1
+ 725 1269 l 5,0,-1
 EndSplineSet
 Refer: 74 103 N 1 0 0 1 0 0 2
 EndChar
@@ -55620,14 +55620,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1901 m 5
- 711 1901 l 5
- 922 1635 l 5
- 782 1635 l 5
- 616 1813 l 5
- 451 1635 l 5
- 311 1635 l 5
- 522 1901 l 5
+522 1901 m 5,0,-1
+ 711 1901 l 5,1,-1
+ 922 1635 l 5,2,-1
+ 782 1635 l 5,3,-1
+ 616 1813 l 5,4,-1
+ 451 1635 l 5,5,-1
+ 311 1635 l 5,6,-1
+ 522 1901 l 5,0,-1
 EndSplineSet
 Refer: 43 72 N 1 0 0 1 0 0 2
 EndChar
@@ -55639,14 +55639,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1901 m 5
- 711 1901 l 5
- 922 1635 l 5
- 782 1635 l 5
- 616 1813 l 5
- 451 1635 l 5
- 311 1635 l 5
- 522 1901 l 5
+522 1901 m 5,0,-1
+ 711 1901 l 5,1,-1
+ 922 1635 l 5,2,-1
+ 782 1635 l 5,3,-1
+ 616 1813 l 5,4,-1
+ 451 1635 l 5,5,-1
+ 311 1635 l 5,6,-1
+ 522 1901 l 5,0,-1
 EndSplineSet
 Refer: 75 104 N 1 0 0 1 0 0 2
 EndChar
@@ -55729,29 +55729,29 @@
 LayerCount: 2
 Fore
 SplineSet
-612 1710 m 5
- 555 1743 l 6
- 530 1757 530 1757 514.5 1762.5 c 4
- 499 1768 499 1768 487 1768 c 4
- 452 1768 452 1768 432 1743 c 4
- 412 1718 412 1718 412 1673 c 6
- 412 1667 l 5
- 287 1667 l 5
- 287 1768 287 1768 338.5 1827 c 4
- 390 1886 390 1886 475 1886 c 4
- 511 1886 511 1886 541.5 1878 c 4
- 572 1870 572 1870 621 1843 c 5
- 678 1813 l 5
- 700 1800 700 1800 717.5 1794 c 4
- 735 1788 735 1788 750 1788 c 4
- 781 1788 781 1788 801 1813.5 c 4
- 821 1839 821 1839 821 1880 c 6
- 821 1886 l 5
- 946 1886 l 5
- 944 1786 944 1786 893 1726.5 c 4
- 842 1667 842 1667 758 1667 c 4
- 724 1667 724 1667 694 1675 c 4
- 664 1683 664 1683 612 1710 c 5
+612 1710 m 5,0,-1
+ 555 1743 l 6,1,2
+ 530 1757 530 1757 514.5 1762.5 c 4,3,4
+ 499 1768 499 1768 487 1768 c 4,5,6
+ 452 1768 452 1768 432 1743 c 4,7,8
+ 412 1718 412 1718 412 1673 c 6,9,-1
+ 412 1667 l 5,10,-1
+ 287 1667 l 5,11,12
+ 287 1768 287 1768 338.5 1827 c 4,13,14
+ 390 1886 390 1886 475 1886 c 4,15,16
+ 511 1886 511 1886 541.5 1878 c 4,17,18
+ 572 1870 572 1870 621 1843 c 5,19,-1
+ 678 1813 l 5,20,21
+ 700 1800 700 1800 717.5 1794 c 4,22,23
+ 735 1788 735 1788 750 1788 c 4,24,25
+ 781 1788 781 1788 801 1813.5 c 4,26,27
+ 821 1839 821 1839 821 1880 c 6,28,-1
+ 821 1886 l 5,29,-1
+ 946 1886 l 5,30,31
+ 944 1786 944 1786 893 1726.5 c 4,32,33
+ 842 1667 842 1667 758 1667 c 4,34,35
+ 724 1667 724 1667 694 1675 c 4,36,37
+ 664 1683 664 1683 612 1710 c 5,0,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -55773,11 +55773,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -55789,11 +55789,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 214 305 N 1 0 0 1 0 0 2
 EndChar
@@ -55805,17 +55805,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -55827,17 +55827,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 214 305 N 1 0 0 1 0 0 2
 EndChar
@@ -55951,14 +55951,14 @@
 LayerCount: 2
 Fore
 SplineSet
-570 1901 m 5
- 759 1901 l 5
- 970 1635 l 5
- 830 1635 l 5
- 664 1813 l 5
- 499 1635 l 5
- 359 1635 l 5
- 570 1901 l 5
+570 1901 m 5,0,-1
+ 759 1901 l 5,1,-1
+ 970 1635 l 5,2,-1
+ 830 1635 l 5,3,-1
+ 664 1813 l 5,4,-1
+ 499 1635 l 5,5,-1
+ 359 1635 l 5,6,-1
+ 570 1901 l 5,0,-1
 EndSplineSet
 Refer: 45 74 N 1 0 0 1 0 0 2
 EndChar
@@ -55970,19 +55970,19 @@
 LayerCount: 2
 Fore
 SplineSet
-600 -20 m 6
- 600 977 l 5
- 283 977 l 5
- 283 1120 l 5
- 784 1120 l 5
- 784 -20 l 6
- 784 -215 784 -215 694.5 -320.5 c 4
- 605 -426 605 -426 440 -426 c 6
- 186 -426 l 5
- 186 -270 l 5
- 420 -270 l 6
- 510 -270 510 -270 555 -207.5 c 4
- 600 -145 600 -145 600 -20 c 6
+600 -20 m 6,0,-1
+ 600 977 l 5,1,-1
+ 283 977 l 5,2,-1
+ 283 1120 l 5,3,-1
+ 784 1120 l 5,4,-1
+ 784 -20 l 6,5,6
+ 784 -215 784 -215 694.5 -320.5 c 4,7,8
+ 605 -426 605 -426 440 -426 c 6,9,-1
+ 186 -426 l 5,10,-1
+ 186 -270 l 5,11,-1
+ 420 -270 l 6,12,13
+ 510 -270 510 -270 555 -207.5 c 4,14,15
+ 600 -145 600 -145 600 -20 c 6,0,-1
 EndSplineSet
 Refer: 215 710 N 1 0 0 1 0 0 2
 EndChar
@@ -55994,11 +55994,11 @@
 LayerCount: 2
 Fore
 SplineSet
-608 -199 m 5
- 847 -199 l 5
- 660 -544 l 5
- 514 -544 l 5
- 608 -199 l 5
+608 -199 m 5,0,-1
+ 847 -199 l 5,1,-1
+ 660 -544 l 5,2,-1
+ 514 -544 l 5,3,-1
+ 608 -199 l 5,0,-1
 EndSplineSet
 Refer: 46 75 N 1 0 0 1 0 0 2
 EndChar
@@ -56010,11 +56010,11 @@
 LayerCount: 2
 Fore
 SplineSet
-646 -199 m 5
- 885 -199 l 5
- 698 -544 l 5
- 552 -544 l 5
- 646 -199 l 5
+646 -199 m 5,0,-1
+ 885 -199 l 5,1,-1
+ 698 -544 l 5,2,-1
+ 552 -544 l 5,3,-1
+ 646 -199 l 5,0,-1
 EndSplineSet
 Refer: 78 107 N 1 0 0 1 0 0 2
 EndChar
@@ -56049,11 +56049,11 @@
 LayerCount: 2
 Fore
 SplineSet
-397 1900 m 5
- 583 1900 l 5
- 354 1636 l 5
- 200 1636 l 5
- 397 1900 l 5
+397 1900 m 5,0,-1
+ 583 1900 l 5,1,-1
+ 354 1636 l 5,2,-1
+ 200 1636 l 5,3,-1
+ 397 1900 l 5,0,-1
 EndSplineSet
 Refer: 47 76 N 1 0 0 1 0 0 2
 EndChar
@@ -56065,11 +56065,11 @@
 LayerCount: 2
 Fore
 SplineSet
-577 1900 m 5
- 763 1900 l 5
- 534 1636 l 5
- 380 1636 l 5
- 577 1900 l 5
+577 1900 m 5,0,-1
+ 763 1900 l 5,1,-1
+ 534 1636 l 5,2,-1
+ 380 1636 l 5,3,-1
+ 577 1900 l 5,0,-1
 EndSplineSet
 Refer: 79 108 N 1 0 0 1 0 0 2
 EndChar
@@ -56081,11 +56081,11 @@
 LayerCount: 2
 Fore
 SplineSet
-604 -199 m 5
- 843 -199 l 5
- 656 -544 l 5
- 510 -544 l 5
- 604 -199 l 5
+604 -199 m 5,0,-1
+ 843 -199 l 5,1,-1
+ 656 -544 l 5,2,-1
+ 510 -544 l 5,3,-1
+ 604 -199 l 5,0,-1
 EndSplineSet
 Refer: 47 76 N 1 0 0 1 0 0 2
 EndChar
@@ -56097,11 +56097,11 @@
 LayerCount: 2
 Fore
 SplineSet
-497 -199 m 5
- 736 -199 l 5
- 549 -544 l 5
- 403 -544 l 5
- 497 -199 l 5
+497 -199 m 5,0,-1
+ 736 -199 l 5,1,-1
+ 549 -544 l 5,2,-1
+ 403 -544 l 5,3,-1
+ 497 -199 l 5,0,-1
 EndSplineSet
 Refer: 79 108 N 1 0 0 1 0 0 2
 EndChar
@@ -56113,11 +56113,11 @@
 LayerCount: 2
 Fore
 SplineSet
-718 1491 m 5
- 916 1491 l 5
- 803 1115 l 5
- 649 1115 l 5
- 718 1491 l 5
+718 1491 m 5,0,-1
+ 916 1491 l 5,1,-1
+ 803 1115 l 5,2,-1
+ 649 1115 l 5,3,-1
+ 718 1491 l 5,0,-1
 EndSplineSet
 Refer: 47 76 N 1 0 0 1 0 0 2
 EndChar
@@ -56129,11 +56129,11 @@
 LayerCount: 2
 Fore
 SplineSet
-960 1567 m 5
- 1158 1567 l 5
- 1045 1191 l 5
- 891 1191 l 5
- 960 1567 l 5
+960 1567 m 5,0,-1
+ 1158 1567 l 5,1,-1
+ 1045 1191 l 5,2,-1
+ 891 1191 l 5,3,-1
+ 960 1567 l 5,0,-1
 EndSplineSet
 Refer: 79 108 N 1 0 0 1 0 0 2
 EndChar
@@ -56165,11 +56165,11 @@
 LayerCount: 2
 Fore
 SplineSet
-705 1899 m 5
- 891 1899 l 5
- 662 1635 l 5
- 508 1635 l 5
- 705 1899 l 5
+705 1899 m 5,0,-1
+ 891 1899 l 5,1,-1
+ 662 1635 l 5,2,-1
+ 508 1635 l 5,3,-1
+ 705 1899 l 5,0,-1
 EndSplineSet
 Refer: 49 78 N 1 0 0 1 0 0 2
 EndChar
@@ -56191,11 +56191,11 @@
 LayerCount: 2
 Fore
 SplineSet
-544 -199 m 5
- 783 -199 l 5
- 596 -544 l 5
- 450 -544 l 5
- 544 -199 l 5
+544 -199 m 5,0,-1
+ 783 -199 l 5,1,-1
+ 596 -544 l 5,2,-1
+ 450 -544 l 5,3,-1
+ 544 -199 l 5,0,-1
 EndSplineSet
 Refer: 49 78 N 1 0 0 1 0 0 2
 EndChar
@@ -56207,11 +56207,11 @@
 LayerCount: 2
 Fore
 SplineSet
-550 -199 m 5
- 789 -199 l 5
- 602 -544 l 5
- 456 -544 l 5
- 550 -199 l 5
+550 -199 m 5,0,-1
+ 789 -199 l 5,1,-1
+ 602 -544 l 5,2,-1
+ 456 -544 l 5,3,-1
+ 550 -199 l 5,0,-1
 EndSplineSet
 Refer: 81 110 N 1 0 0 1 0 0 2
 EndChar
@@ -56223,14 +56223,14 @@
 LayerCount: 2
 Fore
 SplineSet
-564 1635 m 5
- 353 1901 l 5
- 493 1901 l 5
- 658 1723 l 5
- 824 1901 l 5
- 964 1901 l 5
- 753 1635 l 5
- 564 1635 l 5
+564 1635 m 5,0,-1
+ 353 1901 l 5,1,-1
+ 493 1901 l 5,2,-1
+ 658 1723 l 5,3,-1
+ 824 1901 l 5,4,-1
+ 964 1901 l 5,5,-1
+ 753 1635 l 5,6,-1
+ 564 1635 l 5,0,-1
 EndSplineSet
 Refer: 49 78 N 1 0 0 1 0 0 2
 EndChar
@@ -56327,11 +56327,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -56343,11 +56343,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -56359,17 +56359,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -56381,17 +56381,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -56403,16 +56403,16 @@
 LayerCount: 2
 Fore
 SplineSet
-860 1899 m 5
- 1046 1899 l 5
- 817 1635 l 5
- 663 1635 l 5
- 860 1899 l 5
-541 1899 m 5
- 727 1899 l 5
- 498 1635 l 5
- 344 1635 l 5
- 541 1899 l 5
+860 1899 m 5,0,-1
+ 1046 1899 l 5,1,-1
+ 817 1635 l 5,2,-1
+ 663 1635 l 5,3,-1
+ 860 1899 l 5,0,-1
+541 1899 m 5,4,-1
+ 727 1899 l 5,5,-1
+ 498 1635 l 5,6,-1
+ 344 1635 l 5,7,-1
+ 541 1899 l 5,4,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -56424,16 +56424,16 @@
 LayerCount: 2
 Fore
 SplineSet
-535 1638 m 5
- 705 1638 l 5
- 481 1262 l 5
- 344 1262 l 5
- 535 1638 l 5
-868 1638 m 5
- 1047 1638 l 5
- 799 1262 l 5
- 664 1262 l 5
- 868 1638 l 5
+535 1638 m 5,0,-1
+ 705 1638 l 5,1,-1
+ 481 1262 l 5,2,-1
+ 344 1262 l 5,3,-1
+ 535 1638 l 5,0,-1
+868 1638 m 5,4,-1
+ 1047 1638 l 5,5,-1
+ 799 1262 l 5,6,-1
+ 664 1262 l 5,7,-1
+ 868 1638 l 5,4,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -56445,11 +56445,11 @@
 LayerCount: 2
 Fore
 SplineSet
-597 1899 m 5
- 783 1899 l 5
- 554 1635 l 5
- 400 1635 l 5
- 597 1899 l 5
+597 1899 m 5,0,-1
+ 783 1899 l 5,1,-1
+ 554 1635 l 5,2,-1
+ 400 1635 l 5,3,-1
+ 597 1899 l 5,0,-1
 EndSplineSet
 Refer: 53 82 N 1 0 0 1 0 0 2
 EndChar
@@ -56471,11 +56471,11 @@
 LayerCount: 2
 Fore
 SplineSet
-616 -199 m 5
- 855 -199 l 5
- 668 -544 l 5
- 522 -544 l 5
- 616 -199 l 5
+616 -199 m 5,0,-1
+ 855 -199 l 5,1,-1
+ 668 -544 l 5,2,-1
+ 522 -544 l 5,3,-1
+ 616 -199 l 5,0,-1
 EndSplineSet
 Refer: 53 82 N 1 0 0 1 0 0 2
 EndChar
@@ -56487,11 +56487,11 @@
 LayerCount: 2
 Fore
 SplineSet
-382 -199 m 5
- 621 -199 l 5
- 434 -544 l 5
- 288 -544 l 5
- 382 -199 l 5
+382 -199 m 5,0,-1
+ 621 -199 l 5,1,-1
+ 434 -544 l 5,2,-1
+ 288 -544 l 5,3,-1
+ 382 -199 l 5,0,-1
 EndSplineSet
 Refer: 85 114 N 1 0 0 1 0 0 2
 EndChar
@@ -56503,14 +56503,14 @@
 LayerCount: 2
 Fore
 SplineSet
-462 1629 m 5
- 251 1895 l 5
- 391 1895 l 5
- 556 1717 l 5
- 722 1895 l 5
- 862 1895 l 5
- 651 1629 l 5
- 462 1629 l 5
+462 1629 m 5,0,-1
+ 251 1895 l 5,1,-1
+ 391 1895 l 5,2,-1
+ 556 1717 l 5,3,-1
+ 722 1895 l 5,4,-1
+ 862 1895 l 5,5,-1
+ 651 1629 l 5,6,-1
+ 462 1629 l 5,0,-1
 EndSplineSet
 Refer: 53 82 N 1 0 0 1 0 0 2
 EndChar
@@ -56532,11 +56532,11 @@
 LayerCount: 2
 Fore
 SplineSet
-697 1899 m 5
- 883 1899 l 5
- 654 1635 l 5
- 500 1635 l 5
- 697 1899 l 5
+697 1899 m 5,0,-1
+ 883 1899 l 5,1,-1
+ 654 1635 l 5,2,-1
+ 500 1635 l 5,3,-1
+ 697 1899 l 5,0,-1
 EndSplineSet
 Refer: 54 83 N 1 0 0 1 0 0 2
 EndChar
@@ -56558,14 +56558,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1901 m 5
- 711 1901 l 5
- 922 1635 l 5
- 782 1635 l 5
- 616 1813 l 5
- 451 1635 l 5
- 311 1635 l 5
- 522 1901 l 5
+522 1901 m 5,0,-1
+ 711 1901 l 5,1,-1
+ 922 1635 l 5,2,-1
+ 782 1635 l 5,3,-1
+ 616 1813 l 5,4,-1
+ 451 1635 l 5,5,-1
+ 311 1635 l 5,6,-1
+ 522 1901 l 5,0,-1
 EndSplineSet
 Refer: 54 83 N 1 0 0 1 0 0 2
 EndChar
@@ -56587,21 +56587,21 @@
 LayerCount: 2
 Fore
 SplineSet
-700 0 m 5
- 756 -62 756 -62 782.5 -114.5 c 4
- 809 -167 809 -167 809 -215 c 4
- 809 -304 809 -304 749 -349.5 c 4
- 689 -395 689 -395 571 -395 c 4
- 526 -395 526 -395 482.5 -389 c 4
- 439 -383 439 -383 395 -371 c 5
- 395 -240 l 5
- 429 -257 429 -257 466.5 -264.5 c 4
- 504 -272 504 -272 551 -272 c 4
- 609 -272 609 -272 639.5 -248 c 4
- 670 -224 670 -224 670 -178 c 4
- 670 -148 670 -148 648 -104.5 c 4
- 626 -61 626 -61 582 0 c 5
- 700 0 l 5
+700 0 m 5,0,1
+ 756 -62 756 -62 782.5 -114.5 c 4,2,3
+ 809 -167 809 -167 809 -215 c 4,4,5
+ 809 -304 809 -304 749 -349.5 c 4,6,7
+ 689 -395 689 -395 571 -395 c 4,8,9
+ 526 -395 526 -395 482.5 -389 c 4,10,11
+ 439 -383 439 -383 395 -371 c 5,12,-1
+ 395 -240 l 5,13,14
+ 429 -257 429 -257 466.5 -264.5 c 4,15,16
+ 504 -272 504 -272 551 -272 c 4,17,18
+ 609 -272 609 -272 639.5 -248 c 4,19,20
+ 670 -224 670 -224 670 -178 c 4,21,22
+ 670 -148 670 -148 648 -104.5 c 4,23,24
+ 626 -61 626 -61 582 0 c 5,25,-1
+ 700 0 l 5,0,1
 EndSplineSet
 Refer: 55 84 N 1 0 0 1 0 0 2
 EndChar
@@ -56613,21 +56613,21 @@
 LayerCount: 2
 Fore
 SplineSet
-821 0 m 5
- 877 -62 877 -62 903.5 -114.5 c 4
- 930 -167 930 -167 930 -215 c 4
- 930 -304 930 -304 870 -349.5 c 4
- 810 -395 810 -395 692 -395 c 4
- 647 -395 647 -395 603.5 -389 c 4
- 560 -383 560 -383 516 -371 c 5
- 516 -240 l 5
- 550 -257 550 -257 587.5 -264.5 c 4
- 625 -272 625 -272 672 -272 c 4
- 730 -272 730 -272 760.5 -248 c 4
- 791 -224 791 -224 791 -178 c 4
- 791 -148 791 -148 769 -104.5 c 4
- 747 -61 747 -61 703 0 c 5
- 821 0 l 5
+821 0 m 5,0,1
+ 877 -62 877 -62 903.5 -114.5 c 4,2,3
+ 930 -167 930 -167 930 -215 c 4,4,5
+ 930 -304 930 -304 870 -349.5 c 4,6,7
+ 810 -395 810 -395 692 -395 c 4,8,9
+ 647 -395 647 -395 603.5 -389 c 4,10,11
+ 560 -383 560 -383 516 -371 c 5,12,-1
+ 516 -240 l 5,13,14
+ 550 -257 550 -257 587.5 -264.5 c 4,15,16
+ 625 -272 625 -272 672 -272 c 4,17,18
+ 730 -272 730 -272 760.5 -248 c 4,19,20
+ 791 -224 791 -224 791 -178 c 4,21,22
+ 791 -148 791 -148 769 -104.5 c 4,23,24
+ 747 -61 747 -61 703 0 c 5,25,-1
+ 821 0 l 5,0,1
 EndSplineSet
 Refer: 87 116 N 1 0 0 1 0 0 2
 EndChar
@@ -56639,14 +56639,14 @@
 LayerCount: 2
 Fore
 SplineSet
-528 1635 m 5
- 317 1901 l 5
- 457 1901 l 5
- 622 1723 l 5
- 788 1901 l 5
- 928 1901 l 5
- 717 1635 l 5
- 528 1635 l 5
+528 1635 m 5,0,-1
+ 317 1901 l 5,1,-1
+ 457 1901 l 5,2,-1
+ 622 1723 l 5,3,-1
+ 788 1901 l 5,4,-1
+ 928 1901 l 5,5,-1
+ 717 1635 l 5,6,-1
+ 528 1635 l 5,0,-1
 EndSplineSet
 Refer: 55 84 N 1 0 0 1 0 0 2
 EndChar
@@ -56658,11 +56658,11 @@
 LayerCount: 2
 Fore
 SplineSet
-820 1662 m 5
- 1018 1662 l 5
- 905 1286 l 5
- 751 1286 l 5
- 820 1662 l 5
+820 1662 m 5,0,-1
+ 1018 1662 l 5,1,-1
+ 905 1286 l 5,2,-1
+ 751 1286 l 5,3,-1
+ 820 1662 l 5,0,-1
 EndSplineSet
 Refer: 87 116 N 1 0 0 1 0 0 2
 EndChar
@@ -56738,29 +56738,29 @@
 LayerCount: 2
 Fore
 SplineSet
-612 1710 m 5
- 555 1743 l 6
- 530 1757 530 1757 514.5 1762.5 c 4
- 499 1768 499 1768 487 1768 c 4
- 452 1768 452 1768 432 1743 c 4
- 412 1718 412 1718 412 1673 c 6
- 412 1667 l 5
- 287 1667 l 5
- 287 1768 287 1768 338.5 1827 c 4
- 390 1886 390 1886 475 1886 c 4
- 511 1886 511 1886 541.5 1878 c 4
- 572 1870 572 1870 621 1843 c 5
- 678 1813 l 5
- 700 1800 700 1800 717.5 1794 c 4
- 735 1788 735 1788 750 1788 c 4
- 781 1788 781 1788 801 1813.5 c 4
- 821 1839 821 1839 821 1880 c 6
- 821 1886 l 5
- 946 1886 l 5
- 944 1786 944 1786 893 1726.5 c 4
- 842 1667 842 1667 758 1667 c 4
- 724 1667 724 1667 694 1675 c 4
- 664 1683 664 1683 612 1710 c 5
+612 1710 m 5,0,-1
+ 555 1743 l 6,1,2
+ 530 1757 530 1757 514.5 1762.5 c 4,3,4
+ 499 1768 499 1768 487 1768 c 4,5,6
+ 452 1768 452 1768 432 1743 c 4,7,8
+ 412 1718 412 1718 412 1673 c 6,9,-1
+ 412 1667 l 5,10,-1
+ 287 1667 l 5,11,12
+ 287 1768 287 1768 338.5 1827 c 4,13,14
+ 390 1886 390 1886 475 1886 c 4,15,16
+ 511 1886 511 1886 541.5 1878 c 4,17,18
+ 572 1870 572 1870 621 1843 c 5,19,-1
+ 678 1813 l 5,20,21
+ 700 1800 700 1800 717.5 1794 c 4,22,23
+ 735 1788 735 1788 750 1788 c 4,24,25
+ 781 1788 781 1788 801 1813.5 c 4,26,27
+ 821 1839 821 1839 821 1880 c 6,28,-1
+ 821 1886 l 5,29,-1
+ 946 1886 l 5,30,31
+ 944 1786 944 1786 893 1726.5 c 4,32,33
+ 842 1667 842 1667 758 1667 c 4,34,35
+ 724 1667 724 1667 694 1675 c 4,36,37
+ 664 1683 664 1683 612 1710 c 5,0,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -56782,11 +56782,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -56798,11 +56798,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 0 0 2
 EndChar
@@ -56814,17 +56814,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1901 m 5
- 422 1901 l 5
- 434 1847 434 1847 484 1818.5 c 4
- 534 1790 534 1790 616 1790 c 4
- 699 1790 699 1790 747.5 1817.5 c 4
- 796 1845 796 1845 811 1901 c 5
- 930 1901 l 5
- 919 1782 919 1782 839.5 1720.5 c 4
- 760 1659 760 1659 616 1659 c 260
- 472 1659 472 1659 393 1720 c 4
- 314 1781 314 1781 303 1901 c 5
+303 1901 m 5,0,-1
+ 422 1901 l 5,1,2
+ 434 1847 434 1847 484 1818.5 c 4,3,4
+ 534 1790 534 1790 616 1790 c 4,5,6
+ 699 1790 699 1790 747.5 1817.5 c 4,7,8
+ 796 1845 796 1845 811 1901 c 5,9,-1
+ 930 1901 l 5,10,11
+ 919 1782 919 1782 839.5 1720.5 c 4,12,13
+ 760 1659 760 1659 616 1659 c 260,14,15
+ 472 1659 472 1659 393 1720 c 4,16,17
+ 314 1781 314 1781 303 1901 c 5,0,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -56836,17 +56836,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1608 m 5
- 422 1608 l 5
- 433 1532 433 1532 481 1495 c 4
- 529 1458 529 1458 616 1458 c 4
- 701 1458 701 1458 749 1495 c 4
- 797 1532 797 1532 811 1608 c 5
- 930 1608 l 5
- 919 1465 919 1465 840 1393 c 4
- 761 1321 761 1321 616 1321 c 4
- 472 1321 472 1321 393 1393 c 4
- 314 1465 314 1465 303 1608 c 5
+303 1608 m 5,0,-1
+ 422 1608 l 5,1,2
+ 433 1532 433 1532 481 1495 c 4,3,4
+ 529 1458 529 1458 616 1458 c 4,5,6
+ 701 1458 701 1458 749 1495 c 4,7,8
+ 797 1532 797 1532 811 1608 c 5,9,-1
+ 930 1608 l 5,10,11
+ 919 1465 919 1465 840 1393 c 4,12,13
+ 761 1321 761 1321 616 1321 c 4,14,15
+ 472 1321 472 1321 393 1393 c 4,16,17
+ 314 1465 314 1465 303 1608 c 5,0,-1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 0 0 2
 EndChar
@@ -56878,16 +56878,16 @@
 LayerCount: 2
 Fore
 SplineSet
-860 1899 m 5
- 1046 1899 l 5
- 817 1635 l 5
- 663 1635 l 5
- 860 1899 l 5
-541 1899 m 5
- 727 1899 l 5
- 498 1635 l 5
- 344 1635 l 5
- 541 1899 l 5
+860 1899 m 5,0,-1
+ 1046 1899 l 5,1,-1
+ 817 1635 l 5,2,-1
+ 663 1635 l 5,3,-1
+ 860 1899 l 5,0,-1
+541 1899 m 5,4,-1
+ 727 1899 l 5,5,-1
+ 498 1635 l 5,6,-1
+ 344 1635 l 5,7,-1
+ 541 1899 l 5,4,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -56899,16 +56899,16 @@
 LayerCount: 2
 Fore
 SplineSet
-535 1638 m 5
- 705 1638 l 5
- 481 1262 l 5
- 344 1262 l 5
- 535 1638 l 5
-868 1638 m 5
- 1047 1638 l 5
- 799 1262 l 5
- 664 1262 l 5
- 868 1638 l 5
+535 1638 m 5,0,-1
+ 705 1638 l 5,1,-1
+ 481 1262 l 5,2,-1
+ 344 1262 l 5,3,-1
+ 535 1638 l 5,0,-1
+868 1638 m 5,4,-1
+ 1047 1638 l 5,5,-1
+ 799 1262 l 5,6,-1
+ 664 1262 l 5,7,-1
+ 868 1638 l 5,4,-1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 0 0 2
 EndChar
@@ -56940,14 +56940,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1908 m 5
- 711 1908 l 5
- 922 1642 l 5
- 782 1642 l 5
- 616 1820 l 5
- 451 1642 l 5
- 311 1642 l 5
- 522 1908 l 5
+522 1908 m 5,0,-1
+ 711 1908 l 5,1,-1
+ 922 1642 l 5,2,-1
+ 782 1642 l 5,3,-1
+ 616 1820 l 5,4,-1
+ 451 1642 l 5,5,-1
+ 311 1642 l 5,6,-1
+ 522 1908 l 5,0,-1
 EndSplineSet
 Refer: 58 87 N 1 0 0 1 0 0 2
 EndChar
@@ -56969,14 +56969,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1908 m 5
- 711 1908 l 5
- 922 1642 l 5
- 782 1642 l 5
- 616 1820 l 5
- 451 1642 l 5
- 311 1642 l 5
- 522 1908 l 5
+522 1908 m 5,0,-1
+ 711 1908 l 5,1,-1
+ 922 1642 l 5,2,-1
+ 782 1642 l 5,3,-1
+ 616 1820 l 5,4,-1
+ 451 1642 l 5,5,-1
+ 311 1642 l 5,6,-1
+ 522 1908 l 5,0,-1
 EndSplineSet
 Refer: 60 89 N 1 0 0 1 0 0 2
 EndChar
@@ -56998,11 +56998,11 @@
 LayerCount: 2
 Fore
 SplineSet
-699 1899 m 5
- 885 1899 l 5
- 656 1635 l 5
- 502 1635 l 5
- 699 1899 l 5
+699 1899 m 5,0,-1
+ 885 1899 l 5,1,-1
+ 656 1635 l 5,2,-1
+ 502 1635 l 5,3,-1
+ 699 1899 l 5,0,-1
 EndSplineSet
 Refer: 61 90 N 1 0 0 1 0 0 2
 EndChar
@@ -57024,11 +57024,11 @@
 LayerCount: 2
 Fore
 SplineSet
-563 1872 m 5
- 768 1872 l 5
- 768 1667 l 5
- 563 1667 l 5
- 563 1872 l 5
+563 1872 m 5,0,-1
+ 768 1872 l 5,1,-1
+ 768 1667 l 5,2,-1
+ 563 1667 l 5,3,-1
+ 563 1872 l 5,0,-1
 EndSplineSet
 Refer: 61 90 N 1 0 0 1 0 0 2
 EndChar
@@ -57040,11 +57040,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1552 m 5
- 718 1552 l 5
- 718 1348 l 5
- 513 1348 l 5
- 513 1552 l 5
+513 1552 m 5,0,-1
+ 718 1552 l 5,1,-1
+ 718 1348 l 5,2,-1
+ 513 1348 l 5,3,-1
+ 513 1552 l 5,0,-1
 EndSplineSet
 Refer: 93 122 N 1 0 0 1 0 0 2
 EndChar
@@ -58053,21 +58053,21 @@
 LayerCount: 2
 Fore
 SplineSet
-796 1292 m 5
- 796 1411 l 5
- 857 1366 857 1366 901 1344.5 c 4
- 945 1323 945 1323 976 1323 c 4
- 1022 1323 1022 1323 1045 1350.5 c 4
- 1068 1378 1068 1378 1068 1432 c 4
- 1068 1464 1068 1464 1060.5 1495 c 4
- 1053 1526 1053 1526 1038 1557 c 5
- 1171 1557 l 5
- 1181 1519 1181 1519 1186 1485 c 4
- 1191 1451 1191 1451 1191 1421 c 4
- 1191 1299 1191 1299 1147.5 1241.5 c 4
- 1104 1184 1104 1184 1011 1184 c 4
- 963 1184 963 1184 910.5 1210.5 c 4
- 858 1237 858 1237 796 1292 c 5
+796 1292 m 5,0,-1
+ 796 1411 l 5,1,2
+ 857 1366 857 1366 901 1344.5 c 4,3,4
+ 945 1323 945 1323 976 1323 c 4,5,6
+ 1022 1323 1022 1323 1045 1350.5 c 4,7,8
+ 1068 1378 1068 1378 1068 1432 c 4,9,10
+ 1068 1464 1068 1464 1060.5 1495 c 4,11,12
+ 1053 1526 1053 1526 1038 1557 c 5,13,-1
+ 1171 1557 l 5,14,15
+ 1181 1519 1181 1519 1186 1485 c 4,16,17
+ 1191 1451 1191 1451 1191 1421 c 4,18,19
+ 1191 1299 1191 1299 1147.5 1241.5 c 4,20,21
+ 1104 1184 1104 1184 1011 1184 c 4,22,23
+ 963 1184 963 1184 910.5 1210.5 c 4,24,25
+ 858 1237 858 1237 796 1292 c 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 -111 0 2
 EndChar
@@ -58079,21 +58079,21 @@
 LayerCount: 2
 Fore
 SplineSet
-807 872 m 5
- 807 991 l 5
- 868 946 868 946 912 924.5 c 4
- 956 903 956 903 987 903 c 4
- 1033 903 1033 903 1056 930.5 c 4
- 1079 958 1079 958 1079 1012 c 4
- 1079 1044 1079 1044 1071.5 1075 c 4
- 1064 1106 1064 1106 1049 1137 c 5
- 1182 1137 l 5
- 1192 1099 1192 1099 1197 1065 c 4
- 1202 1031 1202 1031 1202 1001 c 4
- 1202 879 1202 879 1158.5 821.5 c 4
- 1115 764 1115 764 1022 764 c 4
- 974 764 974 764 921.5 790.5 c 4
- 869 817 869 817 807 872 c 5
+807 872 m 5,0,-1
+ 807 991 l 5,1,2
+ 868 946 868 946 912 924.5 c 4,3,4
+ 956 903 956 903 987 903 c 4,5,6
+ 1033 903 1033 903 1056 930.5 c 4,7,8
+ 1079 958 1079 958 1079 1012 c 4,9,10
+ 1079 1044 1079 1044 1071.5 1075 c 4,11,12
+ 1064 1106 1064 1106 1049 1137 c 5,13,-1
+ 1182 1137 l 5,14,15
+ 1192 1099 1192 1099 1197 1065 c 4,16,17
+ 1202 1031 1202 1031 1202 1001 c 4,18,19
+ 1202 879 1202 879 1158.5 821.5 c 4,20,21
+ 1115 764 1115 764 1022 764 c 4,22,23
+ 974 764 974 764 921.5 790.5 c 4,24,25
+ 869 817 869 817 807 872 c 5,0,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 -105 0 2
 EndChar
@@ -58525,21 +58525,21 @@
 LayerCount: 2
 Fore
 SplineSet
-829 1296 m 5
- 829 1415 l 5
- 890 1370 890 1370 934 1348.5 c 4
- 978 1327 978 1327 1009 1327 c 4
- 1055 1327 1055 1327 1078 1354.5 c 4
- 1101 1382 1101 1382 1101 1436 c 4
- 1101 1468 1101 1468 1093.5 1499 c 4
- 1086 1530 1086 1530 1071 1561 c 5
- 1204 1561 l 5
- 1214 1523 1214 1523 1219 1489 c 4
- 1224 1455 1224 1455 1224 1425 c 4
- 1224 1303 1224 1303 1180.5 1245.5 c 4
- 1137 1188 1137 1188 1044 1188 c 4
- 996 1188 996 1188 943.5 1214.5 c 4
- 891 1241 891 1241 829 1296 c 5
+829 1296 m 5,0,-1
+ 829 1415 l 5,1,2
+ 890 1370 890 1370 934 1348.5 c 4,3,4
+ 978 1327 978 1327 1009 1327 c 4,5,6
+ 1055 1327 1055 1327 1078 1354.5 c 4,7,8
+ 1101 1382 1101 1382 1101 1436 c 4,9,10
+ 1101 1468 1101 1468 1093.5 1499 c 4,11,12
+ 1086 1530 1086 1530 1071 1561 c 5,13,-1
+ 1204 1561 l 5,14,15
+ 1214 1523 1214 1523 1219 1489 c 4,16,17
+ 1224 1455 1224 1455 1224 1425 c 4,18,19
+ 1224 1303 1224 1303 1180.5 1245.5 c 4,20,21
+ 1137 1188 1137 1188 1044 1188 c 4,22,23
+ 996 1188 996 1188 943.5 1214.5 c 4,24,25
+ 891 1241 891 1241 829 1296 c 5,0,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 -138 0 2
 EndChar
@@ -58551,21 +58551,21 @@
 LayerCount: 2
 Fore
 SplineSet
-799 872 m 5
- 799 991 l 5
- 860 946 860 946 904 924.5 c 4
- 948 903 948 903 979 903 c 4
- 1025 903 1025 903 1048 930.5 c 4
- 1071 958 1071 958 1071 1012 c 4
- 1071 1044 1071 1044 1063.5 1075 c 4
- 1056 1106 1056 1106 1041 1137 c 5
- 1174 1137 l 5
- 1184 1099 1184 1099 1189 1065 c 4
- 1194 1031 1194 1031 1194 1001 c 4
- 1194 879 1194 879 1150.5 821.5 c 4
- 1107 764 1107 764 1014 764 c 4
- 966 764 966 764 913.5 790.5 c 4
- 861 817 861 817 799 872 c 5
+799 872 m 5,0,-1
+ 799 991 l 5,1,2
+ 860 946 860 946 904 924.5 c 4,3,4
+ 948 903 948 903 979 903 c 4,5,6
+ 1025 903 1025 903 1048 930.5 c 4,7,8
+ 1071 958 1071 958 1071 1012 c 4,9,10
+ 1071 1044 1071 1044 1063.5 1075 c 4,11,12
+ 1056 1106 1056 1106 1041 1137 c 5,13,-1
+ 1174 1137 l 5,14,15
+ 1184 1099 1184 1099 1189 1065 c 4,16,17
+ 1194 1031 1194 1031 1194 1001 c 4,18,19
+ 1194 879 1194 879 1150.5 821.5 c 4,20,21
+ 1107 764 1107 764 1014 764 c 4,22,23
+ 966 764 966 764 913.5 790.5 c 4,24,25
+ 861 817 861 817 799 872 c 5,0,-1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 -156 0 2
 EndChar
@@ -59112,14 +59112,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -59141,14 +59141,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -59170,14 +59170,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -59199,14 +59199,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -59228,16 +59228,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+319 1746 m 5,0,-1
+ 522 1746 l 5,1,-1
+ 522 1543 l 5,2,-1
+ 319 1543 l 5,3,-1
+ 319 1746 l 5,0,-1
+711 1746 m 5,4,-1
+ 913 1746 l 5,5,-1
+ 913 1543 l 5,6,-1
+ 711 1543 l 5,7,-1
+ 711 1746 l 5,4,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -59259,21 +59259,21 @@
 LayerCount: 2
 Fore
 SplineSet
-672 2041 m 5
- 858 2041 l 5
- 629 1777 l 5
- 475 1777 l 5
- 672 2041 l 5
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+672 2041 m 5,0,-1
+ 858 2041 l 5,1,-1
+ 629 1777 l 5,2,-1
+ 475 1777 l 5,3,-1
+ 672 2041 l 5,0,-1
+319 1746 m 5,4,-1
+ 522 1746 l 5,5,-1
+ 522 1543 l 5,6,-1
+ 319 1543 l 5,7,-1
+ 319 1746 l 5,4,-1
+711 1746 m 5,8,-1
+ 913 1746 l 5,9,-1
+ 913 1543 l 5,10,-1
+ 711 1543 l 5,11,-1
+ 711 1746 l 5,8,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -59294,24 +59294,24 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1777 m 5
- 311 2043 l 5
- 451 2043 l 5
- 616 1865 l 5
- 782 2043 l 5
- 922 2043 l 5
- 711 1777 l 5
- 522 1777 l 5
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+522 1777 m 5,0,-1
+ 311 2043 l 5,1,-1
+ 451 2043 l 5,2,-1
+ 616 1865 l 5,3,-1
+ 782 2043 l 5,4,-1
+ 922 2043 l 5,5,-1
+ 711 1777 l 5,6,-1
+ 522 1777 l 5,0,-1
+319 1746 m 5,7,-1
+ 522 1746 l 5,8,-1
+ 522 1543 l 5,9,-1
+ 319 1543 l 5,10,-1
+ 319 1746 l 5,7,-1
+711 1746 m 5,11,-1
+ 913 1746 l 5,12,-1
+ 913 1543 l 5,13,-1
+ 711 1543 l 5,14,-1
+ 711 1746 l 5,11,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -59332,21 +59332,21 @@
 LayerCount: 2
 Fore
 SplineSet
-561 2041 m 5
- 758 1777 l 5
- 604 1777 l 5
- 377 2041 l 5
- 561 2041 l 5
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+561 2041 m 5,0,-1
+ 758 1777 l 5,1,-1
+ 604 1777 l 5,2,-1
+ 377 2041 l 5,3,-1
+ 561 2041 l 5,0,-1
+319 1746 m 5,4,-1
+ 522 1746 l 5,5,-1
+ 522 1543 l 5,6,-1
+ 319 1543 l 5,7,-1
+ 319 1746 l 5,4,-1
+711 1746 m 5,8,-1
+ 913 1746 l 5,9,-1
+ 913 1543 l 5,10,-1
+ 711 1543 l 5,11,-1
+ 711 1746 l 5,8,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -59400,16 +59400,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+319 1746 m 5,0,-1
+ 522 1746 l 5,1,-1
+ 522 1543 l 5,2,-1
+ 319 1543 l 5,3,-1
+ 319 1746 l 5,0,-1
+711 1746 m 5,4,-1
+ 913 1746 l 5,5,-1
+ 913 1543 l 5,6,-1
+ 711 1543 l 5,7,-1
+ 711 1746 l 5,4,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -59431,11 +59431,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1748 m 5
- 718 1748 l 5
- 718 1543 l 5
- 513 1543 l 5
- 513 1748 l 5
+513 1748 m 5,0,-1
+ 718 1748 l 5,1,-1
+ 718 1543 l 5,2,-1
+ 513 1543 l 5,3,-1
+ 513 1748 l 5,0,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -59456,11 +59456,11 @@
 LayerCount: 2
 Fore
 SplineSet
-487 1840 m 5
- 1085 1840 l 5
- 1085 1692 l 5
- 487 1692 l 5
- 487 1840 l 5
+487 1840 m 5,0,-1
+ 1085 1840 l 5,1,-1
+ 1085 1692 l 5,2,-1
+ 487 1692 l 5,3,-1
+ 487 1840 l 5,0,-1
 EndSplineSet
 Refer: 144 198 N 1 0 0 1 0 0 2
 EndChar
@@ -59472,11 +59472,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 160 230 N 1 0 0 1 0 0 2
 EndChar
@@ -59488,14 +59488,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -59517,14 +59517,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 46 75 N 1 0 0 1 0 0 2
 EndChar
@@ -59536,14 +59536,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 78 107 N 1 0 0 1 0 0 2
 EndChar
@@ -59575,11 +59575,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 1299 490 N 1 0 0 1 0 0 2
 EndChar
@@ -59591,11 +59591,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 1300 491 N 1 0 0 1 0 0 2
 EndChar
@@ -59607,14 +59607,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 1259 439 N 1 0 0 1 0 0 2
 EndChar
@@ -59626,29 +59626,29 @@
 LayerCount: 2
 Fore
 SplineSet
-609 476 m 5
- 715 476 715 476 843 420 c 4
- 941 378 941 378 1027 269 c 4
- 1108 165 1108 165 1108 27 c 4
- 1108 -194 1108 -194 956 -315 c 4
- 804 -436 804 -436 524 -436 c 4
- 430 -436 430 -436 331 -418 c 4
- 231 -399 231 -399 125 -362 c 5
- 125 -167 l 5
- 209 -216 209 -216 309 -241 c 4
- 409 -266 409 -266 518 -266 c 4
- 708 -266 708 -266 807 -191 c 4
- 907 -116 907 -116 907 27 c 4
- 907 159 907 159 815 234 c 5
- 722 308 722 308 557 308 c 6
- 383 308 l 5
- 383 474 l 5
- 813 973 l 5
- 146 973 l 5
- 146 1120 l 5
- 1020 1120 l 5
- 1020 952 l 5
- 609 476 l 5
+609 476 m 5,0,1
+ 715 476 715 476 843 420 c 4,2,3
+ 941 378 941 378 1027 269 c 4,4,5
+ 1108 165 1108 165 1108 27 c 4,6,7
+ 1108 -194 1108 -194 956 -315 c 4,8,9
+ 804 -436 804 -436 524 -436 c 4,10,11
+ 430 -436 430 -436 331 -418 c 4,12,13
+ 231 -399 231 -399 125 -362 c 5,14,-1
+ 125 -167 l 5,15,16
+ 209 -216 209 -216 309 -241 c 4,17,18
+ 409 -266 409 -266 518 -266 c 4,19,20
+ 708 -266 708 -266 807 -191 c 4,21,22
+ 907 -116 907 -116 907 27 c 4,23,24
+ 907 159 907 159 815 234 c 5,25,26
+ 722 308 722 308 557 308 c 6,27,-1
+ 383 308 l 5,28,-1
+ 383 474 l 5,29,-1
+ 813 973 l 5,30,-1
+ 146 973 l 5,31,-1
+ 146 1120 l 5,32,-1
+ 1020 1120 l 5,33,-1
+ 1020 952 l 5,34,-1
+ 609 476 l 5,0,1
 EndSplineSet
 Refer: 224 711 N 1 0 0 1 0 0 2
 EndChar
@@ -59660,19 +59660,19 @@
 LayerCount: 2
 Fore
 SplineSet
-600 -20 m 6
- 600 977 l 5
- 283 977 l 5
- 283 1120 l 5
- 784 1120 l 5
- 784 -20 l 6
- 784 -215 784 -215 694.5 -320.5 c 4
- 605 -426 605 -426 440 -426 c 6
- 186 -426 l 5
- 186 -270 l 5
- 420 -270 l 6
- 510 -270 510 -270 555 -207.5 c 4
- 600 -145 600 -145 600 -20 c 6
+600 -20 m 6,0,-1
+ 600 977 l 5,1,-1
+ 283 977 l 5,2,-1
+ 283 1120 l 5,3,-1
+ 784 1120 l 5,4,-1
+ 784 -20 l 6,5,6
+ 784 -215 784 -215 694.5 -320.5 c 4,7,8
+ 605 -426 605 -426 440 -426 c 6,9,-1
+ 186 -426 l 5,10,-1
+ 186 -270 l 5,11,-1
+ 420 -270 l 6,12,13
+ 510 -270 510 -270 555 -207.5 c 4,14,15
+ 600 -145 600 -145 600 -20 c 6,0,-1
 EndSplineSet
 Refer: 224 711 N 1 0 0 1 35 -5 2
 EndChar
@@ -59684,11 +59684,11 @@
 LayerCount: 2
 Fore
 SplineSet
-762 1899 m 5
- 948 1899 l 5
- 719 1635 l 5
- 565 1635 l 5
- 762 1899 l 5
+762 1899 m 5,0,-1
+ 948 1899 l 5,1,-1
+ 719 1635 l 5,2,-1
+ 565 1635 l 5,3,-1
+ 762 1899 l 5,0,-1
 EndSplineSet
 Refer: 42 71 N 1 0 0 1 0 0 2
 EndChar
@@ -59743,11 +59743,11 @@
 LayerCount: 2
 Fore
 SplineSet
-561 1899 m 5
- 758 1635 l 5
- 604 1635 l 5
- 377 1899 l 5
- 561 1899 l 5
+561 1899 m 5,0,-1
+ 758 1635 l 5,1,-1
+ 604 1635 l 5,2,-1
+ 377 1899 l 5,3,-1
+ 561 1899 l 5,0,-1
 EndSplineSet
 Refer: 49 78 N 1 0 0 1 0 0 2
 EndChar
@@ -59769,11 +59769,11 @@
 LayerCount: 2
 Fore
 SplineSet
-912 1899 m 5
- 1098 1899 l 5
- 869 1635 l 5
- 715 1635 l 5
- 912 1899 l 5
+912 1899 m 5,0,-1
+ 1098 1899 l 5,1,-1
+ 869 1635 l 5,2,-1
+ 715 1635 l 5,3,-1
+ 912 1899 l 5,0,-1
 EndSplineSet
 Refer: 144 198 N 1 0 0 1 0 0 2
 EndChar
@@ -59795,11 +59795,11 @@
 LayerCount: 2
 Fore
 SplineSet
-672 1899 m 5
- 858 1899 l 5
- 629 1635 l 5
- 475 1635 l 5
- 672 1899 l 5
+672 1899 m 5,0,-1
+ 858 1899 l 5,1,-1
+ 629 1635 l 5,2,-1
+ 475 1635 l 5,3,-1
+ 672 1899 l 5,0,-1
 EndSplineSet
 Refer: 145 216 N 1 0 0 1 0 0 2
 EndChar
@@ -59821,16 +59821,16 @@
 LayerCount: 2
 Fore
 SplineSet
-373 1899 m 5
- 570 1635 l 5
- 416 1635 l 5
- 187 1899 l 5
- 373 1899 l 5
-692 1899 m 5
- 889 1635 l 5
- 735 1635 l 5
- 506 1899 l 5
- 692 1899 l 5
+373 1899 m 5,0,-1
+ 570 1635 l 5,1,-1
+ 416 1635 l 5,2,-1
+ 187 1899 l 5,3,-1
+ 373 1899 l 5,0,-1
+692 1899 m 5,4,-1
+ 889 1635 l 5,5,-1
+ 735 1635 l 5,6,-1
+ 506 1899 l 5,7,-1
+ 692 1899 l 5,4,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -59842,16 +59842,16 @@
 LayerCount: 2
 Fore
 SplineSet
-388 1638 m 5
- 593 1262 l 5
- 458 1262 l 5
- 210 1638 l 5
- 388 1638 l 5
-722 1638 m 5
- 912 1262 l 5
- 775 1262 l 5
- 552 1638 l 5
- 722 1638 l 5
+388 1638 m 5,0,-1
+ 593 1262 l 5,1,-1
+ 458 1262 l 5,2,-1
+ 210 1638 l 5,3,-1
+ 388 1638 l 5,0,-1
+722 1638 m 5,4,-1
+ 912 1262 l 5,5,-1
+ 775 1262 l 5,6,-1
+ 552 1638 l 5,7,-1
+ 722 1638 l 5,4,-1
 EndSplineSet
 Refer: 68 97 N 1 0 0 1 0 0 2
 EndChar
@@ -59863,17 +59863,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1659 m 5
- 314 1779 314 1779 393 1840 c 4
- 472 1901 472 1901 616 1901 c 4
- 760 1901 760 1901 839.5 1839.5 c 4
- 919 1778 919 1778 930 1659 c 5
- 811 1659 l 5
- 796 1715 796 1715 747.5 1742.5 c 4
- 699 1770 699 1770 616 1770 c 4
- 534 1770 534 1770 484 1741.5 c 4
- 434 1713 434 1713 422 1659 c 5
- 303 1659 l 5
+303 1659 m 5,0,1
+ 314 1779 314 1779 393 1840 c 4,2,3
+ 472 1901 472 1901 616 1901 c 4,4,5
+ 760 1901 760 1901 839.5 1839.5 c 4,6,7
+ 919 1778 919 1778 930 1659 c 5,8,-1
+ 811 1659 l 5,9,10
+ 796 1715 796 1715 747.5 1742.5 c 4,11,12
+ 699 1770 699 1770 616 1770 c 4,13,14
+ 534 1770 534 1770 484 1741.5 c 4,15,16
+ 434 1713 434 1713 422 1659 c 5,17,-1
+ 303 1659 l 5,0,1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -59885,17 +59885,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1321 m 5
- 314 1464 314 1464 393 1536 c 4
- 472 1608 472 1608 616 1608 c 4
- 761 1608 761 1608 840 1536 c 4
- 919 1464 919 1464 930 1321 c 5
- 811 1321 l 5
- 797 1397 797 1397 749 1434 c 4
- 701 1471 701 1471 616 1471 c 4
- 529 1471 529 1471 481 1434 c 4
- 433 1397 433 1397 422 1321 c 5
- 303 1321 l 5
+303 1321 m 5,0,1
+ 314 1464 314 1464 393 1536 c 4,2,3
+ 472 1608 472 1608 616 1608 c 4,4,5
+ 761 1608 761 1608 840 1536 c 4,6,7
+ 919 1464 919 1464 930 1321 c 5,8,-1
+ 811 1321 l 5,9,10
+ 797 1397 797 1397 749 1434 c 4,11,12
+ 701 1471 701 1471 616 1471 c 4,13,14
+ 529 1471 529 1471 481 1434 c 4,15,16
+ 433 1397 433 1397 422 1321 c 5,17,-1
+ 303 1321 l 5,0,1
 EndSplineSet
 Refer: 68 97 N 1 0 0 1 0 0 2
 EndChar
@@ -59907,16 +59907,16 @@
 LayerCount: 2
 Fore
 SplineSet
-373 1899 m 5
- 570 1635 l 5
- 416 1635 l 5
- 187 1899 l 5
- 373 1899 l 5
-692 1899 m 5
- 889 1635 l 5
- 735 1635 l 5
- 506 1899 l 5
- 692 1899 l 5
+373 1899 m 5,0,-1
+ 570 1635 l 5,1,-1
+ 416 1635 l 5,2,-1
+ 187 1899 l 5,3,-1
+ 373 1899 l 5,0,-1
+692 1899 m 5,4,-1
+ 889 1635 l 5,5,-1
+ 735 1635 l 5,6,-1
+ 506 1899 l 5,7,-1
+ 692 1899 l 5,4,-1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -59928,16 +59928,16 @@
 LayerCount: 2
 Fore
 SplineSet
-402 1638 m 5
- 607 1262 l 5
- 472 1262 l 5
- 224 1638 l 5
- 402 1638 l 5
-736 1638 m 5
- 926 1262 l 5
- 789 1262 l 5
- 566 1638 l 5
- 736 1638 l 5
+402 1638 m 5,0,-1
+ 607 1262 l 5,1,-1
+ 472 1262 l 5,2,-1
+ 224 1638 l 5,3,-1
+ 402 1638 l 5,0,-1
+736 1638 m 5,4,-1
+ 926 1262 l 5,5,-1
+ 789 1262 l 5,6,-1
+ 566 1638 l 5,7,-1
+ 736 1638 l 5,4,-1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -59949,17 +59949,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1659 m 5
- 314 1779 314 1779 393 1840 c 4
- 472 1901 472 1901 616 1901 c 4
- 760 1901 760 1901 839.5 1839.5 c 4
- 919 1778 919 1778 930 1659 c 5
- 811 1659 l 5
- 796 1715 796 1715 747.5 1742.5 c 4
- 699 1770 699 1770 616 1770 c 4
- 534 1770 534 1770 484 1741.5 c 4
- 434 1713 434 1713 422 1659 c 5
- 303 1659 l 5
+303 1659 m 5,0,1
+ 314 1779 314 1779 393 1840 c 4,2,3
+ 472 1901 472 1901 616 1901 c 4,4,5
+ 760 1901 760 1901 839.5 1839.5 c 4,6,7
+ 919 1778 919 1778 930 1659 c 5,8,-1
+ 811 1659 l 5,9,10
+ 796 1715 796 1715 747.5 1742.5 c 4,11,12
+ 699 1770 699 1770 616 1770 c 4,13,14
+ 534 1770 534 1770 484 1741.5 c 4,15,16
+ 434 1713 434 1713 422 1659 c 5,17,-1
+ 303 1659 l 5,0,1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -59971,17 +59971,17 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1321 m 5
- 328 1464 328 1464 407 1536 c 4
- 486 1608 486 1608 630 1608 c 4
- 775 1608 775 1608 854 1536 c 4
- 933 1464 933 1464 944 1321 c 5
- 825 1321 l 5
- 811 1397 811 1397 763 1434 c 4
- 715 1471 715 1471 630 1471 c 4
- 543 1471 543 1471 495 1434 c 4
- 447 1397 447 1397 436 1321 c 5
- 317 1321 l 5
+317 1321 m 5,0,1
+ 328 1464 328 1464 407 1536 c 4,2,3
+ 486 1608 486 1608 630 1608 c 4,4,5
+ 775 1608 775 1608 854 1536 c 4,6,7
+ 933 1464 933 1464 944 1321 c 5,8,-1
+ 825 1321 l 5,9,10
+ 811 1397 811 1397 763 1434 c 4,11,12
+ 715 1471 715 1471 630 1471 c 4,13,14
+ 543 1471 543 1471 495 1434 c 4,15,16
+ 447 1397 447 1397 436 1321 c 5,17,-1
+ 317 1321 l 5,0,1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -59993,16 +59993,16 @@
 LayerCount: 2
 Fore
 SplineSet
-373 1899 m 5
- 570 1635 l 5
- 416 1635 l 5
- 187 1899 l 5
- 373 1899 l 5
-692 1899 m 5
- 889 1635 l 5
- 735 1635 l 5
- 506 1899 l 5
- 692 1899 l 5
+373 1899 m 5,0,-1
+ 570 1635 l 5,1,-1
+ 416 1635 l 5,2,-1
+ 187 1899 l 5,3,-1
+ 373 1899 l 5,0,-1
+692 1899 m 5,4,-1
+ 889 1635 l 5,5,-1
+ 735 1635 l 5,6,-1
+ 506 1899 l 5,7,-1
+ 692 1899 l 5,4,-1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -60014,16 +60014,16 @@
 LayerCount: 2
 Fore
 SplineSet
-388 1638 m 5
- 593 1262 l 5
- 458 1262 l 5
- 210 1638 l 5
- 388 1638 l 5
-722 1638 m 5
- 912 1262 l 5
- 775 1262 l 5
- 552 1638 l 5
- 722 1638 l 5
+388 1638 m 5,0,-1
+ 593 1262 l 5,1,-1
+ 458 1262 l 5,2,-1
+ 210 1638 l 5,3,-1
+ 388 1638 l 5,0,-1
+722 1638 m 5,4,-1
+ 912 1262 l 5,5,-1
+ 775 1262 l 5,6,-1
+ 552 1638 l 5,7,-1
+ 722 1638 l 5,4,-1
 EndSplineSet
 Refer: 214 305 N 1 0 0 1 0 0 2
 EndChar
@@ -60035,17 +60035,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1659 m 5
- 314 1779 314 1779 393 1840 c 4
- 472 1901 472 1901 616 1901 c 4
- 760 1901 760 1901 839.5 1839.5 c 4
- 919 1778 919 1778 930 1659 c 5
- 811 1659 l 5
- 796 1715 796 1715 747.5 1742.5 c 4
- 699 1770 699 1770 616 1770 c 4
- 534 1770 534 1770 484 1741.5 c 4
- 434 1713 434 1713 422 1659 c 5
- 303 1659 l 5
+303 1659 m 5,0,1
+ 314 1779 314 1779 393 1840 c 4,2,3
+ 472 1901 472 1901 616 1901 c 4,4,5
+ 760 1901 760 1901 839.5 1839.5 c 4,6,7
+ 919 1778 919 1778 930 1659 c 5,8,-1
+ 811 1659 l 5,9,10
+ 796 1715 796 1715 747.5 1742.5 c 4,11,12
+ 699 1770 699 1770 616 1770 c 4,13,14
+ 534 1770 534 1770 484 1741.5 c 4,15,16
+ 434 1713 434 1713 422 1659 c 5,17,-1
+ 303 1659 l 5,0,1
 EndSplineSet
 Refer: 44 73 N 1 0 0 1 0 0 2
 EndChar
@@ -60057,17 +60057,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1321 m 5
- 314 1464 314 1464 393 1536 c 4
- 472 1608 472 1608 616 1608 c 4
- 761 1608 761 1608 840 1536 c 4
- 919 1464 919 1464 930 1321 c 5
- 811 1321 l 5
- 797 1397 797 1397 749 1434 c 4
- 701 1471 701 1471 616 1471 c 4
- 529 1471 529 1471 481 1434 c 4
- 433 1397 433 1397 422 1321 c 5
- 303 1321 l 5
+303 1321 m 5,0,1
+ 314 1464 314 1464 393 1536 c 4,2,3
+ 472 1608 472 1608 616 1608 c 4,4,5
+ 761 1608 761 1608 840 1536 c 4,6,7
+ 919 1464 919 1464 930 1321 c 5,8,-1
+ 811 1321 l 5,9,10
+ 797 1397 797 1397 749 1434 c 4,11,12
+ 701 1471 701 1471 616 1471 c 4,13,14
+ 529 1471 529 1471 481 1434 c 4,15,16
+ 433 1397 433 1397 422 1321 c 5,17,-1
+ 303 1321 l 5,0,1
 EndSplineSet
 Refer: 214 305 N 1 0 0 1 0 0 2
 EndChar
@@ -60079,16 +60079,16 @@
 LayerCount: 2
 Fore
 SplineSet
-373 1899 m 5
- 570 1635 l 5
- 416 1635 l 5
- 187 1899 l 5
- 373 1899 l 5
-692 1899 m 5
- 889 1635 l 5
- 735 1635 l 5
- 506 1899 l 5
- 692 1899 l 5
+373 1899 m 5,0,-1
+ 570 1635 l 5,1,-1
+ 416 1635 l 5,2,-1
+ 187 1899 l 5,3,-1
+ 373 1899 l 5,0,-1
+692 1899 m 5,4,-1
+ 889 1635 l 5,5,-1
+ 735 1635 l 5,6,-1
+ 506 1899 l 5,7,-1
+ 692 1899 l 5,4,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60100,16 +60100,16 @@
 LayerCount: 2
 Fore
 SplineSet
-388 1638 m 5
- 593 1262 l 5
- 458 1262 l 5
- 210 1638 l 5
- 388 1638 l 5
-722 1638 m 5
- 912 1262 l 5
- 775 1262 l 5
- 552 1638 l 5
- 722 1638 l 5
+388 1638 m 5,0,-1
+ 593 1262 l 5,1,-1
+ 458 1262 l 5,2,-1
+ 210 1638 l 5,3,-1
+ 388 1638 l 5,0,-1
+722 1638 m 5,4,-1
+ 912 1262 l 5,5,-1
+ 775 1262 l 5,6,-1
+ 552 1638 l 5,7,-1
+ 722 1638 l 5,4,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -60121,17 +60121,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1659 m 5
- 314 1779 314 1779 393 1840 c 4
- 472 1901 472 1901 616 1901 c 4
- 760 1901 760 1901 839.5 1839.5 c 4
- 919 1778 919 1778 930 1659 c 5
- 811 1659 l 5
- 796 1715 796 1715 747.5 1742.5 c 4
- 699 1770 699 1770 616 1770 c 4
- 534 1770 534 1770 484 1741.5 c 4
- 434 1713 434 1713 422 1659 c 5
- 303 1659 l 5
+303 1659 m 5,0,1
+ 314 1779 314 1779 393 1840 c 4,2,3
+ 472 1901 472 1901 616 1901 c 4,4,5
+ 760 1901 760 1901 839.5 1839.5 c 4,6,7
+ 919 1778 919 1778 930 1659 c 5,8,-1
+ 811 1659 l 5,9,10
+ 796 1715 796 1715 747.5 1742.5 c 4,11,12
+ 699 1770 699 1770 616 1770 c 4,13,14
+ 534 1770 534 1770 484 1741.5 c 4,15,16
+ 434 1713 434 1713 422 1659 c 5,17,-1
+ 303 1659 l 5,0,1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60143,17 +60143,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1321 m 5
- 314 1464 314 1464 393 1536 c 4
- 472 1608 472 1608 616 1608 c 4
- 761 1608 761 1608 840 1536 c 4
- 919 1464 919 1464 930 1321 c 5
- 811 1321 l 5
- 797 1397 797 1397 749 1434 c 4
- 701 1471 701 1471 616 1471 c 4
- 529 1471 529 1471 481 1434 c 4
- 433 1397 433 1397 422 1321 c 5
- 303 1321 l 5
+303 1321 m 5,0,1
+ 314 1464 314 1464 393 1536 c 4,2,3
+ 472 1608 472 1608 616 1608 c 4,4,5
+ 761 1608 761 1608 840 1536 c 4,6,7
+ 919 1464 919 1464 930 1321 c 5,8,-1
+ 811 1321 l 5,9,10
+ 797 1397 797 1397 749 1434 c 4,11,12
+ 701 1471 701 1471 616 1471 c 4,13,14
+ 529 1471 529 1471 481 1434 c 4,15,16
+ 433 1397 433 1397 422 1321 c 5,17,-1
+ 303 1321 l 5,0,1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -60165,16 +60165,16 @@
 LayerCount: 2
 Fore
 SplineSet
-323 1899 m 5
- 520 1635 l 5
- 366 1635 l 5
- 137 1899 l 5
- 323 1899 l 5
-642 1899 m 5
- 839 1635 l 5
- 685 1635 l 5
- 456 1899 l 5
- 642 1899 l 5
+323 1899 m 5,0,-1
+ 520 1635 l 5,1,-1
+ 366 1635 l 5,2,-1
+ 137 1899 l 5,3,-1
+ 323 1899 l 5,0,-1
+642 1899 m 5,4,-1
+ 839 1635 l 5,5,-1
+ 685 1635 l 5,6,-1
+ 456 1899 l 5,7,-1
+ 642 1899 l 5,4,-1
 EndSplineSet
 Refer: 53 82 N 1 0 0 1 0 0 2
 EndChar
@@ -60186,16 +60186,16 @@
 LayerCount: 2
 Fore
 SplineSet
-538 1638 m 5
- 743 1262 l 5
- 608 1262 l 5
- 360 1638 l 5
- 538 1638 l 5
-872 1638 m 5
- 1062 1262 l 5
- 925 1262 l 5
- 702 1638 l 5
- 872 1638 l 5
+538 1638 m 5,0,-1
+ 743 1262 l 5,1,-1
+ 608 1262 l 5,2,-1
+ 360 1638 l 5,3,-1
+ 538 1638 l 5,0,-1
+872 1638 m 5,4,-1
+ 1062 1262 l 5,5,-1
+ 925 1262 l 5,6,-1
+ 702 1638 l 5,7,-1
+ 872 1638 l 5,4,-1
 EndSplineSet
 Refer: 85 114 N 1 0 0 1 0 0 2
 EndChar
@@ -60207,17 +60207,17 @@
 LayerCount: 2
 Fore
 SplineSet
-253 1659 m 5
- 264 1779 264 1779 343 1840 c 4
- 422 1901 422 1901 566 1901 c 4
- 710 1901 710 1901 789.5 1839.5 c 4
- 869 1778 869 1778 880 1659 c 5
- 761 1659 l 5
- 746 1715 746 1715 697.5 1742.5 c 4
- 649 1770 649 1770 566 1770 c 4
- 484 1770 484 1770 434 1741.5 c 4
- 384 1713 384 1713 372 1659 c 5
- 253 1659 l 5
+253 1659 m 5,0,1
+ 264 1779 264 1779 343 1840 c 4,2,3
+ 422 1901 422 1901 566 1901 c 4,4,5
+ 710 1901 710 1901 789.5 1839.5 c 4,6,7
+ 869 1778 869 1778 880 1659 c 5,8,-1
+ 761 1659 l 5,9,10
+ 746 1715 746 1715 697.5 1742.5 c 4,11,12
+ 649 1770 649 1770 566 1770 c 4,13,14
+ 484 1770 484 1770 434 1741.5 c 4,15,16
+ 384 1713 384 1713 372 1659 c 5,17,-1
+ 253 1659 l 5,0,1
 EndSplineSet
 Refer: 53 82 N 1 0 0 1 0 0 2
 EndChar
@@ -60229,17 +60229,17 @@
 LayerCount: 2
 Fore
 SplineSet
-453 1321 m 5
- 464 1464 464 1464 543 1536 c 4
- 622 1608 622 1608 766 1608 c 4
- 911 1608 911 1608 990 1536 c 4
- 1069 1464 1069 1464 1080 1321 c 5
- 961 1321 l 5
- 947 1397 947 1397 899 1434 c 4
- 851 1471 851 1471 766 1471 c 4
- 679 1471 679 1471 631 1434 c 4
- 583 1397 583 1397 572 1321 c 5
- 453 1321 l 5
+453 1321 m 5,0,1
+ 464 1464 464 1464 543 1536 c 4,2,3
+ 622 1608 622 1608 766 1608 c 4,4,5
+ 911 1608 911 1608 990 1536 c 4,6,7
+ 1069 1464 1069 1464 1080 1321 c 5,8,-1
+ 961 1321 l 5,9,10
+ 947 1397 947 1397 899 1434 c 4,11,12
+ 851 1471 851 1471 766 1471 c 4,13,14
+ 679 1471 679 1471 631 1434 c 4,15,16
+ 583 1397 583 1397 572 1321 c 5,17,-1
+ 453 1321 l 5,0,1
 EndSplineSet
 Refer: 85 114 N 1 0 0 1 0 0 2
 EndChar
@@ -60251,16 +60251,16 @@
 LayerCount: 2
 Fore
 SplineSet
-373 1899 m 5
- 570 1635 l 5
- 416 1635 l 5
- 187 1899 l 5
- 373 1899 l 5
-692 1899 m 5
- 889 1635 l 5
- 735 1635 l 5
- 506 1899 l 5
- 692 1899 l 5
+373 1899 m 5,0,-1
+ 570 1635 l 5,1,-1
+ 416 1635 l 5,2,-1
+ 187 1899 l 5,3,-1
+ 373 1899 l 5,0,-1
+692 1899 m 5,4,-1
+ 889 1635 l 5,5,-1
+ 735 1635 l 5,6,-1
+ 506 1899 l 5,7,-1
+ 692 1899 l 5,4,-1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -60272,16 +60272,16 @@
 LayerCount: 2
 Fore
 SplineSet
-388 1638 m 5
- 593 1262 l 5
- 458 1262 l 5
- 210 1638 l 5
- 388 1638 l 5
-722 1638 m 5
- 912 1262 l 5
- 775 1262 l 5
- 552 1638 l 5
- 722 1638 l 5
+388 1638 m 5,0,-1
+ 593 1262 l 5,1,-1
+ 458 1262 l 5,2,-1
+ 210 1638 l 5,3,-1
+ 388 1638 l 5,0,-1
+722 1638 m 5,4,-1
+ 912 1262 l 5,5,-1
+ 775 1262 l 5,6,-1
+ 552 1638 l 5,7,-1
+ 722 1638 l 5,4,-1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 0 0 2
 EndChar
@@ -60293,17 +60293,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1659 m 5
- 314 1779 314 1779 393 1840 c 4
- 472 1901 472 1901 616 1901 c 4
- 760 1901 760 1901 839.5 1839.5 c 4
- 919 1778 919 1778 930 1659 c 5
- 811 1659 l 5
- 796 1715 796 1715 747.5 1742.5 c 4
- 699 1770 699 1770 616 1770 c 4
- 534 1770 534 1770 484 1741.5 c 4
- 434 1713 434 1713 422 1659 c 5
- 303 1659 l 5
+303 1659 m 5,0,1
+ 314 1779 314 1779 393 1840 c 4,2,3
+ 472 1901 472 1901 616 1901 c 4,4,5
+ 760 1901 760 1901 839.5 1839.5 c 4,6,7
+ 919 1778 919 1778 930 1659 c 5,8,-1
+ 811 1659 l 5,9,10
+ 796 1715 796 1715 747.5 1742.5 c 4,11,12
+ 699 1770 699 1770 616 1770 c 4,13,14
+ 534 1770 534 1770 484 1741.5 c 4,15,16
+ 434 1713 434 1713 422 1659 c 5,17,-1
+ 303 1659 l 5,0,1
 EndSplineSet
 Refer: 56 85 N 1 0 0 1 0 0 2
 EndChar
@@ -60315,17 +60315,17 @@
 LayerCount: 2
 Fore
 SplineSet
-303 1321 m 5
- 314 1464 314 1464 393 1536 c 4
- 472 1608 472 1608 616 1608 c 4
- 761 1608 761 1608 840 1536 c 4
- 919 1464 919 1464 930 1321 c 5
- 811 1321 l 5
- 797 1397 797 1397 749 1434 c 4
- 701 1471 701 1471 616 1471 c 4
- 529 1471 529 1471 481 1434 c 4
- 433 1397 433 1397 422 1321 c 5
- 303 1321 l 5
+303 1321 m 5,0,1
+ 314 1464 314 1464 393 1536 c 4,2,3
+ 472 1608 472 1608 616 1608 c 4,4,5
+ 761 1608 761 1608 840 1536 c 4,6,7
+ 919 1464 919 1464 930 1321 c 5,8,-1
+ 811 1321 l 5,9,10
+ 797 1397 797 1397 749 1434 c 4,11,12
+ 701 1471 701 1471 616 1471 c 4,13,14
+ 529 1471 529 1471 481 1434 c 4,15,16
+ 433 1397 433 1397 422 1321 c 5,17,-1
+ 303 1321 l 5,0,1
 EndSplineSet
 Refer: 88 117 N 1 0 0 1 0 0 2
 EndChar
@@ -60337,11 +60337,11 @@
 LayerCount: 2
 Fore
 SplineSet
-502 -197 m 5
- 741 -197 l 5
- 554 -542 l 5
- 408 -542 l 5
- 502 -197 l 5
+502 -197 m 5,0,-1
+ 741 -197 l 5,1,-1
+ 554 -542 l 5,2,-1
+ 408 -542 l 5,3,-1
+ 502 -197 l 5,0,-1
 EndSplineSet
 Refer: 54 83 N 1 0 0 1 0 0 2
 EndChar
@@ -60353,11 +60353,11 @@
 LayerCount: 2
 Fore
 SplineSet
-502 -197 m 5
- 741 -197 l 5
- 554 -542 l 5
- 408 -542 l 5
- 502 -197 l 5
+502 -197 m 5,0,-1
+ 741 -197 l 5,1,-1
+ 554 -542 l 5,2,-1
+ 408 -542 l 5,3,-1
+ 502 -197 l 5,0,-1
 EndSplineSet
 Refer: 86 115 N 1 0 0 1 0 0 2
 EndChar
@@ -60369,11 +60369,11 @@
 LayerCount: 2
 Fore
 SplineSet
-502 -197 m 5
- 741 -197 l 5
- 554 -542 l 5
- 408 -542 l 5
- 502 -197 l 5
+502 -197 m 5,0,-1
+ 741 -197 l 5,1,-1
+ 554 -542 l 5,2,-1
+ 408 -542 l 5,3,-1
+ 502 -197 l 5,0,-1
 EndSplineSet
 Refer: 55 84 N 1 0 0 1 0 0 2
 EndChar
@@ -60385,11 +60385,11 @@
 LayerCount: 2
 Fore
 SplineSet
-591 -197 m 5
- 830 -197 l 5
- 643 -542 l 5
- 497 -542 l 5
- 591 -197 l 5
+591 -197 m 5,0,-1
+ 830 -197 l 5,1,-1
+ 643 -542 l 5,2,-1
+ 497 -542 l 5,3,-1
+ 591 -197 l 5,0,-1
 EndSplineSet
 Refer: 87 116 N 1 0 0 1 0 0 2
 EndChar
@@ -60501,14 +60501,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 43 72 N 1 0 0 1 0 0 2
 EndChar
@@ -60520,14 +60520,14 @@
 LayerCount: 2
 Fore
 SplineSet
-522 1635 m 5
- 311 1901 l 5
- 451 1901 l 5
- 616 1723 l 5
- 782 1901 l 5
- 922 1901 l 5
- 711 1635 l 5
- 522 1635 l 5
+522 1635 m 5,0,-1
+ 311 1901 l 5,1,-1
+ 451 1901 l 5,2,-1
+ 616 1723 l 5,3,-1
+ 782 1901 l 5,4,-1
+ 922 1901 l 5,5,-1
+ 711 1635 l 5,6,-1
+ 522 1635 l 5,0,-1
 EndSplineSet
 Refer: 75 104 N 1 0 0 1 0 0 2
 EndChar
@@ -60674,11 +60674,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1872 m 5
- 718 1872 l 5
- 718 1667 l 5
- 513 1667 l 5
- 513 1872 l 5
+513 1872 m 5,0,-1
+ 718 1872 l 5,1,-1
+ 718 1667 l 5,2,-1
+ 513 1667 l 5,3,-1
+ 513 1872 l 5,0,-1
 EndSplineSet
 Refer: 36 65 N 1 0 0 1 0 0 2
 EndChar
@@ -60690,11 +60690,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1552 m 5
- 718 1552 l 5
- 718 1348 l 5
- 513 1348 l 5
- 513 1552 l 5
+513 1552 m 5,0,-1
+ 718 1552 l 5,1,-1
+ 718 1348 l 5,2,-1
+ 513 1348 l 5,3,-1
+ 513 1552 l 5,0,-1
 EndSplineSet
 Refer: 68 97 N 1 0 0 1 0 0 2
 EndChar
@@ -60706,21 +60706,21 @@
 LayerCount: 2
 Fore
 SplineSet
-750 0 m 5
- 806 -62 806 -62 832.5 -114.5 c 4
- 859 -167 859 -167 859 -215 c 4
- 859 -304 859 -304 799 -349.5 c 4
- 739 -395 739 -395 621 -395 c 4
- 576 -395 576 -395 532.5 -389 c 4
- 489 -383 489 -383 445 -371 c 5
- 445 -240 l 5
- 479 -257 479 -257 516.5 -264.5 c 4
- 554 -272 554 -272 601 -272 c 4
- 659 -272 659 -272 689.5 -248 c 4
- 720 -224 720 -224 720 -178 c 4
- 720 -148 720 -148 698 -104.5 c 4
- 676 -61 676 -61 632 0 c 5
- 750 0 l 5
+750 0 m 5,0,1
+ 806 -62 806 -62 832.5 -114.5 c 4,2,3
+ 859 -167 859 -167 859 -215 c 4,4,5
+ 859 -304 859 -304 799 -349.5 c 4,6,7
+ 739 -395 739 -395 621 -395 c 4,8,9
+ 576 -395 576 -395 532.5 -389 c 4,10,11
+ 489 -383 489 -383 445 -371 c 5,12,-1
+ 445 -240 l 5,13,14
+ 479 -257 479 -257 516.5 -264.5 c 4,15,16
+ 554 -272 554 -272 601 -272 c 4,17,18
+ 659 -272 659 -272 689.5 -248 c 4,19,20
+ 720 -224 720 -224 720 -178 c 4,21,22
+ 720 -148 720 -148 698 -104.5 c 4,23,24
+ 676 -61 676 -61 632 0 c 5,25,-1
+ 750 0 l 5,0,1
 EndSplineSet
 Refer: 40 69 N 1 0 0 1 0 0 2
 EndChar
@@ -60732,21 +60732,21 @@
 LayerCount: 2
 Fore
 SplineSet
-750 0 m 5
- 806 -62 806 -62 832.5 -114.5 c 4
- 859 -167 859 -167 859 -215 c 4
- 859 -304 859 -304 799 -349.5 c 4
- 739 -395 739 -395 621 -395 c 4
- 576 -395 576 -395 532.5 -389 c 4
- 489 -383 489 -383 445 -371 c 5
- 445 -240 l 5
- 479 -257 479 -257 516.5 -264.5 c 4
- 554 -272 554 -272 601 -272 c 4
- 659 -272 659 -272 689.5 -248 c 4
- 720 -224 720 -224 720 -178 c 4
- 720 -148 720 -148 698 -104.5 c 4
- 676 -61 676 -61 632 0 c 5
- 750 0 l 5
+750 0 m 5,0,1
+ 806 -62 806 -62 832.5 -114.5 c 4,2,3
+ 859 -167 859 -167 859 -215 c 4,4,5
+ 859 -304 859 -304 799 -349.5 c 4,6,7
+ 739 -395 739 -395 621 -395 c 4,8,9
+ 576 -395 576 -395 532.5 -389 c 4,10,11
+ 489 -383 489 -383 445 -371 c 5,12,-1
+ 445 -240 l 5,13,14
+ 479 -257 479 -257 516.5 -264.5 c 4,15,16
+ 554 -272 554 -272 601 -272 c 4,17,18
+ 659 -272 659 -272 689.5 -248 c 4,19,20
+ 720 -224 720 -224 720 -178 c 4,21,22
+ 720 -148 720 -148 698 -104.5 c 4,23,24
+ 676 -61 676 -61 632 0 c 5,25,-1
+ 750 0 l 5,0,1
 EndSplineSet
 Refer: 72 101 N 1 0 0 1 0 0 2
 EndChar
@@ -60758,16 +60758,16 @@
 LayerCount: 2
 Fore
 SplineSet
-319 1746 m 5
- 522 1746 l 5
- 522 1543 l 5
- 319 1543 l 5
- 319 1746 l 5
-711 1746 m 5
- 913 1746 l 5
- 913 1543 l 5
- 711 1543 l 5
- 711 1746 l 5
+319 1746 m 5,0,-1
+ 522 1746 l 5,1,-1
+ 522 1543 l 5,2,-1
+ 319 1543 l 5,3,-1
+ 319 1746 l 5,0,-1
+711 1746 m 5,4,-1
+ 913 1746 l 5,5,-1
+ 913 1543 l 5,6,-1
+ 711 1543 l 5,7,-1
+ 711 1746 l 5,4,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60789,29 +60789,29 @@
 LayerCount: 2
 Fore
 SplineSet
-612 1582 m 5
- 555 1615 l 6
- 530 1629 530 1629 514.5 1634.5 c 4
- 499 1640 499 1640 487 1640 c 4
- 452 1640 452 1640 432 1615 c 4
- 412 1590 412 1590 412 1545 c 6
- 412 1539 l 5
- 287 1539 l 5
- 287 1640 287 1640 338.5 1699 c 4
- 390 1758 390 1758 475 1758 c 4
- 511 1758 511 1758 541.5 1750 c 4
- 572 1742 572 1742 621 1715 c 5
- 678 1685 l 5
- 700 1672 700 1672 717.5 1666 c 4
- 735 1660 735 1660 750 1660 c 4
- 781 1660 781 1660 801 1685.5 c 4
- 821 1711 821 1711 821 1752 c 6
- 821 1758 l 5
- 946 1758 l 5
- 944 1658 944 1658 893 1598.5 c 4
- 842 1539 842 1539 758 1539 c 4
- 724 1539 724 1539 694 1547 c 4
- 664 1555 664 1555 612 1582 c 5
+612 1582 m 5,0,-1
+ 555 1615 l 6,1,2
+ 530 1629 530 1629 514.5 1634.5 c 4,3,4
+ 499 1640 499 1640 487 1640 c 4,5,6
+ 452 1640 452 1640 432 1615 c 4,7,8
+ 412 1590 412 1590 412 1545 c 6,9,-1
+ 412 1539 l 5,10,-1
+ 287 1539 l 5,11,12
+ 287 1640 287 1640 338.5 1699 c 4,13,14
+ 390 1758 390 1758 475 1758 c 4,15,16
+ 511 1758 511 1758 541.5 1750 c 4,17,18
+ 572 1742 572 1742 621 1715 c 5,19,-1
+ 678 1685 l 5,20,21
+ 700 1672 700 1672 717.5 1666 c 4,22,23
+ 735 1660 735 1660 750 1660 c 4,24,25
+ 781 1660 781 1660 801 1685.5 c 4,26,27
+ 821 1711 821 1711 821 1752 c 6,28,-1
+ 821 1758 l 5,29,-1
+ 946 1758 l 5,30,31
+ 944 1658 944 1658 893 1598.5 c 4,32,33
+ 842 1539 842 1539 758 1539 c 4,34,35
+ 724 1539 724 1539 694 1547 c 4,36,37
+ 664 1555 664 1555 612 1582 c 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60833,11 +60833,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1872 m 5
- 718 1872 l 5
- 718 1667 l 5
- 513 1667 l 5
- 513 1872 l 5
+513 1872 m 5,0,-1
+ 718 1872 l 5,1,-1
+ 718 1667 l 5,2,-1
+ 513 1667 l 5,3,-1
+ 513 1872 l 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60849,11 +60849,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1552 m 5
- 718 1552 l 5
- 718 1348 l 5
- 513 1348 l 5
- 513 1552 l 5
+513 1552 m 5,0,-1
+ 718 1552 l 5,1,-1
+ 718 1348 l 5,2,-1
+ 513 1348 l 5,3,-1
+ 513 1552 l 5,0,-1
 EndSplineSet
 Refer: 82 111 N 1 0 0 1 0 0 2
 EndChar
@@ -60865,11 +60865,11 @@
 LayerCount: 2
 Fore
 SplineSet
-513 1748 m 5
- 718 1748 l 5
- 718 1543 l 5
- 513 1543 l 5
- 513 1748 l 5
+513 1748 m 5,0,-1
+ 718 1748 l 5,1,-1
+ 718 1543 l 5,2,-1
+ 513 1543 l 5,3,-1
+ 513 1748 l 5,0,-1
 EndSplineSet
 Refer: 50 79 N 1 0 0 1 0 0 2
 EndChar
@@ -60891,11 +60891,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1840 m 5
- 915 1840 l 5
- 915 1692 l 5
- 317 1692 l 5
- 317 1840 l 5
+317 1840 m 5,0,-1
+ 915 1840 l 5,1,-1
+ 915 1692 l 5,2,-1
+ 317 1692 l 5,3,-1
+ 317 1840 l 5,0,-1
 EndSplineSet
 Refer: 60 89 N 1 0 0 1 0 0 2
 EndChar
@@ -60907,11 +60907,11 @@
 LayerCount: 2
 Fore
 SplineSet
-317 1526 m 5
- 915 1526 l 5
- 915 1378 l 5
- 317 1378 l 5
- 317 1526 l 5
+317 1526 m 5,0,-1
+ 915 1526 l 5,1,-1
+ 915 1378 l 5,2,-1
+ 317 1378 l 5,3,-1
+ 317 1526 l 5,0,-1
 EndSplineSet
 Refer: 92 121 N 1 0 0 1 0 0 2
 EndChar
@@ -61716,5 +61716,73 @@
  219 135 l 1,0,-1
 EndSplineSet
 EndChar
+
+StartChar: uni27A7
+Encoding: 10151 10151 1389
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+613 530 m 1,0,-1
+ 336 530 l 1,1,-1
+ 336 980 l 1,2,-1
+ 613 980 l 1,3,-1
+ 613 1248 l 1,4,-1
+ 897 755 l 1,5,-1
+ 613 262 l 1,6,-1
+ 613 530 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni21E4
+Encoding: 8676 8676 1390
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+230 602 m 1,0,-1
+ 521 893 l 1,1,-1
+ 611 803 l 1,2,-1
+ 451 643 l 1,3,-1
+ 1167 643 l 1,4,-1
+ 1167 479 l 1,5,-1
+ 451 479 l 1,6,-1
+ 611 319 l 1,7,-1
+ 521 229 l 1,8,-1
+ 230 520 l 1,9,-1
+ 230 229 l 1,10,-1
+ 66 229 l 1,11,-1
+ 66 893 l 1,12,-1
+ 230 893 l 1,13,-1
+ 230 602 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni21E5
+Encoding: 8677 8677 1391
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+1003 602 m 1,0,-1
+ 1003 893 l 1,1,-1
+ 1167 893 l 1,2,-1
+ 1167 229 l 1,3,-1
+ 1003 229 l 1,4,-1
+ 1003 520 l 1,5,-1
+ 712 229 l 1,6,-1
+ 622 319 l 1,7,-1
+ 782 479 l 1,8,-1
+ 66 479 l 1,9,-1
+ 66 643 l 1,10,-1
+ 782 643 l 1,11,-1
+ 622 803 l 1,12,-1
+ 712 893 l 1,13,-1
+ 1003 602 l 1,0,-1
+EndSplineSet
+EndChar
 EndChars
 EndSplineFont
Binary file lib/fonts/IsabelleText.ttf has changed
--- a/lib/fonts/IsabelleTextBold.sfd	Sat Oct 17 13:18:43 2015 +0200
+++ b/lib/fonts/IsabelleTextBold.sfd	Sat Oct 17 21:42:18 2015 +0200
@@ -20,7 +20,7 @@
 OS2_WeightWidthSlopeOnly: 0
 OS2_UseTypoMetrics: 1
 CreationTime: 1050374980
-ModificationTime: 1444656748
+ModificationTime: 1444823743
 PfmFamily: 17
 TTFWeight: 700
 TTFWidth: 5
@@ -1678,10 +1678,10 @@
 DisplaySize: -96
 AntiAlias: 1
 FitToEm: 1
-WinInfo: 9450 21 15
+WinInfo: 8568 21 15
 BeginPrivate: 0
 EndPrivate
-BeginChars: 1114115 1381
+BeginChars: 1114115 1384
 
 StartChar: .notdef
 Encoding: 1114112 -1 0
@@ -67953,5 +67953,73 @@
  219 135 l 1,0,-1
 EndSplineSet
 EndChar
+
+StartChar: uni27A7
+Encoding: 10151 10151 1381
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+613 530 m 1,0,-1
+ 336 530 l 1,1,-1
+ 336 980 l 1,2,-1
+ 613 980 l 1,3,-1
+ 613 1248 l 1,4,-1
+ 897 755 l 1,5,-1
+ 613 262 l 1,6,-1
+ 613 530 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni21E4
+Encoding: 8676 8676 1382
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+260 662 m 1,0,-1
+ 521 923 l 1,1,-1
+ 641 803 l 1,2,-1
+ 511 673 l 1,3,-1
+ 1167 673 l 1,4,-1
+ 1167 449 l 1,5,-1
+ 511 449 l 1,6,-1
+ 641 319 l 1,7,-1
+ 521 199 l 1,8,-1
+ 260 460 l 1,9,-1
+ 260 199 l 1,10,-1
+ 66 199 l 1,11,-1
+ 66 923 l 1,12,-1
+ 260 923 l 1,13,-1
+ 260 662 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni21E5
+Encoding: 8677 8677 1383
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+973 662 m 1,0,-1
+ 973 923 l 1,1,-1
+ 1167 923 l 1,2,-1
+ 1167 199 l 1,3,-1
+ 973 199 l 1,4,-1
+ 973 460 l 1,5,-1
+ 712 199 l 1,6,-1
+ 592 319 l 1,7,-1
+ 722 449 l 1,8,-1
+ 66 449 l 1,9,-1
+ 66 673 l 1,10,-1
+ 722 673 l 1,11,-1
+ 592 803 l 1,12,-1
+ 712 923 l 1,13,-1
+ 973 662 l 1,0,-1
+EndSplineSet
+EndChar
 EndChars
 EndSplineFont
Binary file lib/fonts/IsabelleTextBold.ttf has changed
--- a/lib/texinputs/isabelle.sty	Sat Oct 17 13:18:43 2015 +0200
+++ b/lib/texinputs/isabelle.sty	Sat Oct 17 21:42:18 2015 +0200
@@ -39,11 +39,10 @@
 \DeclareRobustCommand{\isactrlesup}{\egroup\egroup\endmath\egroup}
 \newcommand{\isactrlbold}[1]{{\bfseries\upshape\boldmath#1}}
 
+\def\isactrlnoindent{\noindent}
 \def\isactrlsmallskip{\smallskip}
 \def\isactrlmedskip{\medskip}
 \def\isactrlbigskip{\bigskip}
-\def\isactrlitem{\item}
-\def\isactrlenum{\item}
 
 \newenvironment{isaantiq}{{\isacharat\isacharbraceleft}}{{\isacharbraceright}}
 
@@ -134,6 +133,8 @@
 \newcommand{\isamarkupsection}[1]{\section{#1}}
 \newcommand{\isamarkupsubsection}[1]{\subsection{#1}}
 \newcommand{\isamarkupsubsubsection}[1]{\subsubsection{#1}}
+\newcommand{\isamarkupparagraph}[1]{\paragraph{#1}}
+\newcommand{\isamarkupsubparagraph}[1]{\subparagraph{#1}}
 
 \newif\ifisamarkup
 \newcommand{\isabeginpar}{\par\ifisamarkup\relax\else\medskip\fi}
--- a/src/Doc/Classes/Classes.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Classes/Classes.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -19,25 +19,25 @@
 
   \begin{quote}
 
-  \noindent@{text "class eq where"} \\
+  \<^noindent>@{text "class eq where"} \\
   \hspace*{2ex}@{text "eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
 
-  \medskip\noindent@{text "instance nat :: eq where"} \\
+  \<^medskip>\<^noindent>@{text "instance nat :: eq where"} \\
   \hspace*{2ex}@{text "eq 0 0 = True"} \\
   \hspace*{2ex}@{text "eq 0 _ = False"} \\
   \hspace*{2ex}@{text "eq _ 0 = False"} \\
   \hspace*{2ex}@{text "eq (Suc n) (Suc m) = eq n m"}
 
-  \medskip\noindent@{text "instance (\<alpha>::eq, \<beta>::eq) pair :: eq where"} \\
+  \<^medskip>\<^noindent>@{text "instance (\<alpha>::eq, \<beta>::eq) pair :: eq where"} \\
   \hspace*{2ex}@{text "eq (x1, y1) (x2, y2) = eq x1 x2 \<and> eq y1 y2"}
 
-  \medskip\noindent@{text "class ord extends eq where"} \\
+  \<^medskip>\<^noindent>@{text "class ord extends eq where"} \\
   \hspace*{2ex}@{text "less_eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
   \hspace*{2ex}@{text "less :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
 
   \end{quote}
 
-  \noindent Type variables are annotated with (finitely many) classes;
+  \<^noindent> Type variables are annotated with (finitely many) classes;
   these annotations are assertions that a particular polymorphic type
   provides definitions for overloaded functions.
 
@@ -56,7 +56,7 @@
 
   \begin{quote}
 
-  \noindent@{text "class eq where"} \\
+  \<^noindent>@{text "class eq where"} \\
   \hspace*{2ex}@{text "eq :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
   @{text "satisfying"} \\
   \hspace*{2ex}@{text "refl: eq x x"} \\
@@ -65,7 +65,7 @@
 
   \end{quote}
 
-  \noindent From a theoretical point of view, type classes are
+  \<^noindent> From a theoretical point of view, type classes are
   lightweight modules; Haskell type classes may be emulated by SML
   functors @{cite classes_modules}.  Isabelle/Isar offers a discipline
   of type classes which brings all those aspects together:
@@ -80,7 +80,7 @@
       locales @{cite "kammueller-locales"}.
   \end{enumerate}
 
-  \noindent Isar type classes also directly support code generation in
+  \<^noindent> Isar type classes also directly support code generation in
   a Haskell like fashion. Internally, they are mapped to more
   primitive Isabelle concepts @{cite "Haftmann-Wenzel:2006:classes"}.
 
@@ -106,7 +106,7 @@
   assumes assoc: "(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
 
 text \<open>
-  \noindent This @{command class} specification consists of two parts:
+  \<^noindent> This @{command class} specification consists of two parts:
   the \qn{operational} part names the class parameter (@{element
   "fixes"}), the \qn{logical} part specifies properties on them
   (@{element "assumes"}).  The local @{element "fixes"} and @{element
@@ -141,7 +141,7 @@
 end %quote
 
 text \<open>
-  \noindent @{command instantiation} defines class parameters at a
+  \<^noindent> @{command instantiation} defines class parameters at a
   particular instance using common specification tools (here,
   @{command definition}).  The concluding @{command instance} opens a
   proof that the given parameters actually conform to the class
@@ -155,7 +155,7 @@
   semigroup} automatically, i.e.\ any general results are immediately
   available on concrete instances.
 
-  \medskip Another instance of @{class semigroup} yields the natural
+  \<^medskip> Another instance of @{class semigroup} yields the natural
   numbers:
 \<close>
 
@@ -175,7 +175,7 @@
 end %quote
 
 text \<open>
-  \noindent Note the occurrence of the name @{text mult_nat} in the
+  \<^noindent> Note the occurrence of the name @{text mult_nat} in the
   primrec declaration; by default, the local name of a class operation
   @{text f} to be instantiated on type constructor @{text \<kappa>} is
   mangled as @{text f_\<kappa>}.  In case of uncertainty, these names may be
@@ -205,7 +205,7 @@
 end %quote
 
 text \<open>
-  \noindent Associativity of product semigroups is established using
+  \<^noindent> Associativity of product semigroups is established using
   the definition of @{text "(\<otimes>)"} on products and the hypothetical
   associativity of the type components; these hypotheses are
   legitimate due to the @{class semigroup} constraints imposed on the
@@ -227,7 +227,7 @@
   assumes neutl: "\<one> \<otimes> x = x"
 
 text \<open>
-  \noindent Again, we prove some instances, by providing suitable
+  \<^noindent> Again, we prove some instances, by providing suitable
   parameter definitions and proofs for the additional specifications.
   Observe that instantiations for types with the same arity may be
   simultaneous:
@@ -269,7 +269,7 @@
 end %quote
 
 text \<open>
-  \noindent Fully-fledged monoids are modelled by another subclass,
+  \<^noindent> Fully-fledged monoids are modelled by another subclass,
   which does not add new parameters but tightens the specification:
 \<close>
 
@@ -303,7 +303,7 @@
 end %quote
 
 text \<open>
-  \noindent To finish our small algebra example, we add a @{text
+  \<^noindent> To finish our small algebra example, we add a @{text
   group} class with a corresponding instance:
 \<close>
 
@@ -343,19 +343,19 @@
   assumes idem: "f (f x) = f x"
 
 text \<open>
-  \noindent essentially introduces the locale
+  \<^noindent> essentially introduces the locale
 \<close> (*<*)setup %invisible \<open>Sign.add_path "foo"\<close>
 (*>*)
 locale %quote idem =
   fixes f :: "\<alpha> \<Rightarrow> \<alpha>"
   assumes idem: "f (f x) = f x"
 
-text \<open>\noindent together with corresponding constant(s):\<close>
+text \<open>\<^noindent> together with corresponding constant(s):\<close>
 
 consts %quote f :: "\<alpha> \<Rightarrow> \<alpha>"
 
 text \<open>
-  \noindent The connection to the type system is done by means of a
+  \<^noindent> The connection to the type system is done by means of a
   primitive type class @{text "idem"}, together with a corresponding
   interpretation:
 \<close>
@@ -365,7 +365,7 @@
 (*<*)sorry(*>*)
 
 text \<open>
-  \noindent This gives you the full power of the Isabelle module system;
+  \<^noindent> This gives you the full power of the Isabelle module system;
   conclusions in locale @{text idem} are implicitly propagated
   to class @{text idem}.
 \<close> (*<*)setup %invisible \<open>Sign.parent_path\<close>
@@ -391,7 +391,7 @@
 qed
 
 text \<open>
-  \noindent Here the \qt{@{keyword "in"} @{class group}} target
+  \<^noindent> Here the \qt{@{keyword "in"} @{class group}} target
   specification indicates that the result is recorded within that
   context for later use.  This local theorem is also lifted to the
   global one @{fact "group.left_cancel:"} @{prop [source] "\<And>x y z ::
@@ -413,13 +413,13 @@
   | "pow_nat (Suc n) x = x \<otimes> pow_nat n x"
 
 text \<open>
-  \noindent If the locale @{text group} is also a class, this local
+  \<^noindent> If the locale @{text group} is also a class, this local
   definition is propagated onto a global definition of @{term [source]
   "pow_nat :: nat \<Rightarrow> \<alpha>::monoid \<Rightarrow> \<alpha>::monoid"} with corresponding theorems
 
   @{thm pow_nat.simps [no_vars]}.
 
-  \noindent As you can see from this example, for local definitions
+  \<^noindent> As you can see from this example, for local definitions
   you may use any specification tool which works together with
   locales, such as Krauss's recursive function package
   @{cite krauss2006}.
@@ -446,7 +446,7 @@
   proof qed auto
 
 text \<open>
-  \noindent This enables us to apply facts on monoids
+  \<^noindent> This enables us to apply facts on monoids
   to lists, e.g. @{thm list_monoid.neutl [no_vars]}.
 
   When using this interpretation pattern, it may also
@@ -470,7 +470,7 @@
 qed intro_locales
 
 text \<open>
-  \noindent This pattern is also helpful to reuse abstract
+  \<^noindent> This pattern is also helpful to reuse abstract
   specifications on the \emph{same} type.  For example, think of a
   class @{text preorder}; for type @{typ nat}, there are at least two
   possible instances: the natural order or the order induced by the
@@ -542,7 +542,7 @@
     else (pow_nat (nat (- k)) x)\<div>)"
 
 text \<open>
-  \noindent yields the global definition of @{term [source] "pow_int ::
+  \<^noindent> yields the global definition of @{term [source] "pow_int ::
   int \<Rightarrow> \<alpha>::group \<Rightarrow> \<alpha>::group"} with the corresponding theorem @{thm
   pow_int_def [no_vars]}.
 \<close>
@@ -566,7 +566,7 @@
 term %quote "x \<otimes> y" -- \<open>example 3\<close>
 
 text \<open>
-  \noindent Here in example 1, the term refers to the local class
+  \<^noindent> Here in example 1, the term refers to the local class
   operation @{text "mult [\<alpha>]"}, whereas in example 2 the type
   constraint enforces the global class operation @{text "mult [nat]"}.
   In the global context in example 3, the reference is to the
@@ -592,14 +592,14 @@
   "example = pow_int 10 (-2)"
 
 text \<open>
-  \noindent This maps to Haskell as follows:
+  \<^noindent> This maps to Haskell as follows:
 \<close>
 text %quotetypewriter \<open>
   @{code_stmts example (Haskell)}
 \<close>
 
 text \<open>
-  \noindent The code in SML has explicit dictionary passing:
+  \<^noindent> The code in SML has explicit dictionary passing:
 \<close>
 text %quotetypewriter \<open>
   @{code_stmts example (SML)}
@@ -607,7 +607,7 @@
 
 
 text \<open>
-  \noindent In Scala, implicits are used as dictionaries:
+  \<^noindent> In Scala, implicits are used as dictionaries:
 \<close>
 text %quotetypewriter \<open>
   @{code_stmts example (Scala)}
--- a/src/Doc/Implementation/Eq.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Eq.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -98,29 +98,25 @@
   @{index_ML fold_goals_tac: "Proof.context -> thm list -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
+  \<^descr> @{ML rewrite_rule}~@{text "ctxt rules thm"} rewrites the whole
   theorem by the given rules.
 
-  \item @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
+  \<^descr> @{ML rewrite_goals_rule}~@{text "ctxt rules thm"} rewrites the
   outer premises of the given theorem.  Interpreting the same as a
   goal state (\secref{sec:tactical-goals}) it means to rewrite all
   subgoals (in the same manner as @{ML rewrite_goals_tac}).
 
-  \item @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
+  \<^descr> @{ML rewrite_goal_tac}~@{text "ctxt rules i"} rewrites subgoal
   @{text "i"} by the given rewrite rules.
 
-  \item @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
+  \<^descr> @{ML rewrite_goals_tac}~@{text "ctxt rules"} rewrites all subgoals
   by the given rewrite rules.
 
-  \item @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
+  \<^descr> @{ML fold_goals_tac}~@{text "ctxt rules"} essentially uses @{ML
   rewrite_goals_tac} with the symmetric form of each member of @{text
   "rules"}, re-ordered to fold longer expression first.  This supports
   to idea to fold primitive definitions that appear in expended form
   in the proof state.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Implementation/Integration.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Integration.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -44,27 +44,23 @@
   @{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Toplevel.state} represents Isar toplevel
+  \<^descr> Type @{ML_type Toplevel.state} represents Isar toplevel
   states, which are normally manipulated through the concept of
   toplevel transitions only (\secref{sec:toplevel-transition}).
 
-  \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
+  \<^descr> @{ML Toplevel.UNDEF} is raised for undefined toplevel
   operations.  Many operations work only partially for certain cases,
   since @{ML_type Toplevel.state} is a sum type.
 
-  \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
+  \<^descr> @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
   toplevel state.
 
-  \item @{ML Toplevel.theory_of}~@{text "state"} selects the
+  \<^descr> @{ML Toplevel.theory_of}~@{text "state"} selects the
   background theory of @{text "state"}, it raises @{ML Toplevel.UNDEF}
   for an empty toplevel state.
 
-  \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
+  \<^descr> @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
   state if available, otherwise it raises an error.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -72,15 +68,11 @@
   @{ML_antiquotation_def "Isar.state"} & : & @{text ML_antiquotation} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{text "@{Isar.state}"} refers to Isar toplevel state at that
+  \<^descr> @{text "@{Isar.state}"} refers to Isar toplevel state at that
   point --- as abstract value.
 
   This only works for diagnostic ML commands, such as @{command
   ML_val} or @{command ML_command}.
-
-  \end{description}
 \<close>
 
 
@@ -121,33 +113,29 @@
   Toplevel.transition -> Toplevel.transition"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
+  \<^descr> @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
   function.
 
-  \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
+  \<^descr> @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
   transformer.
 
-  \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
+  \<^descr> @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
   goal function, which turns a theory into a proof state.  The theory
   may be changed before entering the proof; the generic Isar goal
   setup includes an @{verbatim after_qed} argument that specifies how to
   apply the proven result to the enclosing context, when the proof
   is finished.
 
-  \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
+  \<^descr> @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
   proof command, with a singleton result.
 
-  \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
+  \<^descr> @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
   command, with zero or more result states (represented as a lazy
   list).
 
-  \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
+  \<^descr> @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
   proof command, that returns the resulting theory, after applying the
   resulting facts to the target context.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The file @{"file" "~~/src/HOL/ex/Commands.thy"} shows some example
@@ -175,13 +163,11 @@
   @{index_ML Thy_Info.register_thy: "theory -> unit"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
+  \<^descr> @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
   up-to-date wrt.\ the external file store; outdated ancestors are reloaded on
   demand.
 
-  \item @{ML use_thys} is similar to @{ML use_thy}, but handles several
+  \<^descr> @{ML use_thys} is similar to @{ML use_thy}, but handles several
   theories simultaneously. Thus it acts like processing the import header of a
   theory, without performing the merge of the result. By loading a whole
   sub-graph of theories, the intrinsic parallelism can be exploited by the
@@ -189,18 +175,16 @@
 
   This variant is used by default in @{tool build} @{cite "isabelle-system"}.
 
-  \item @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
+  \<^descr> @{ML Thy_Info.get_theory}~@{text A} retrieves the theory value
   presently associated with name @{text A}. Note that the result might be
   outdated wrt.\ the file-system content.
 
-  \item @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
+  \<^descr> @{ML Thy_Info.remove_thy}~@{text A} deletes theory @{text A} and all
   descendants from the theory database.
 
-  \item @{ML Thy_Info.register_thy}~@{text "text thy"} registers an existing
+  \<^descr> @{ML Thy_Info.register_thy}~@{text "text thy"} registers an existing
   theory value with the theory loader database and updates source version
   information according to the file store.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Implementation/Isar.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Isar.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -8,8 +8,6 @@
   @{cite \<open>\S2\<close> "isabelle-isar-ref"}) consists of three main categories of
   language elements:
 
-  \begin{enumerate}
-
   \<^enum> Proof \emph{commands} define the primary language of
   transactions of the underlying Isar/VM interpreter.  Typical
   examples are @{command "fix"}, @{command "assume"}, @{command
@@ -34,8 +32,6 @@
 
   Typical examples are @{attribute intro} (which affects the context),
   and @{attribute symmetric} (which affects the theorem).
-
-  \end{enumerate}
 \<close>
 
 
@@ -79,9 +75,7 @@
   (term * term list) list list -> Proof.context -> Proof.state"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Proof.state} represents Isar proof states.
+  \<^descr> Type @{ML_type Proof.state} represents Isar proof states.
   This is a block-structured configuration with proof context,
   linguistic mode, and optional goal.  The latter consists of goal
   context, goal facts (``@{text "using"}''), and tactical goal state
@@ -91,7 +85,7 @@
   refinement of some parts of the tactical goal --- how exactly is
   defined by the proof method that is applied in that situation.
 
-  \item @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
+  \<^descr> @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
   Proof.assert_backward} are partial identity functions that fail
   unless a certain linguistic mode is active, namely ``@{text
   "proof(state)"}'', ``@{text "proof(chain)"}'', ``@{text
@@ -101,24 +95,24 @@
   It is advisable study the implementations of existing proof commands
   for suitable modes to be asserted.
 
-  \item @{ML Proof.simple_goal}~@{text "state"} returns the structured
+  \<^descr> @{ML Proof.simple_goal}~@{text "state"} returns the structured
   Isar goal (if available) in the form seen by ``simple'' methods
   (like @{method simp} or @{method blast}).  The Isar goal facts are
   already inserted as premises into the subgoals, which are presented
   individually as in @{ML Proof.goal}.
 
-  \item @{ML Proof.goal}~@{text "state"} returns the structured Isar
+  \<^descr> @{ML Proof.goal}~@{text "state"} returns the structured Isar
   goal (if available) in the form seen by regular methods (like
   @{method rule}).  The auxiliary internal encoding of Pure
   conjunctions is split into individual subgoals as usual.
 
-  \item @{ML Proof.raw_goal}~@{text "state"} returns the structured
+  \<^descr> @{ML Proof.raw_goal}~@{text "state"} returns the structured
   Isar goal (if available) in the raw internal form seen by ``raw''
   methods (like @{method induct}).  This form is rarely appropriate
   for diagnostic tools; @{ML Proof.simple_goal} or @{ML Proof.goal}
   should be used in most situations.
 
-  \item @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
+  \<^descr> @{ML Proof.theorem}~@{text "before_qed after_qed statement ctxt"}
   initializes a toplevel Isar proof state within a given context.
 
   The optional @{text "before_qed"} method is applied at the end of
@@ -138,8 +132,6 @@
   Isar source language.  The original nested list structure over terms
   is turned into one over theorems when @{text "after_qed"} is
   invoked.
-
-  \end{description}
 \<close>
 
 
@@ -148,16 +140,12 @@
   @{ML_antiquotation_def "Isar.goal"} & : & @{text ML_antiquotation} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{text "@{Isar.goal}"} refers to the regular goal state (if
+  \<^descr> @{text "@{Isar.goal}"} refers to the regular goal state (if
   available) of the current proof state managed by the Isar toplevel
   --- as abstract value.
 
   This only works for diagnostic ML commands, such as @{command
   ML_val} or @{command ML_command}.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example peeks at a certain goal configuration.\<close>
@@ -189,8 +177,6 @@
   tactics need to hold for methods accordingly, with the following
   additions.
 
-  \begin{itemize}
-
   \<^item> Goal addressing is further limited either to operate
   uniformly on \emph{all} subgoals, or specifically on the
   \emph{first} subgoal.
@@ -211,7 +197,6 @@
   is no sensible use of facts outside the goal state, facts should be
   inserted into the subgoals that are addressed by the method.
 
-  \end{itemize}
 
   \<^medskip>
   Syntactically, the language of proof methods appears as
@@ -265,8 +250,6 @@
   Empirically, any Isar proof method can be categorized as
   follows.
 
-  \begin{enumerate}
-
   \<^enum> \emph{Special method with cases} with named context additions
   associated with the follow-up goal state.
 
@@ -294,7 +277,6 @@
 
   Example: @{method "rule_tac"}.
 
-  \end{enumerate}
 
   When implementing proof methods, it is advisable to study existing
   implementations carefully and imitate the typical ``boiler plate''
@@ -318,39 +300,35 @@
   string -> theory -> theory"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Proof.method} represents proof methods as
+  \<^descr> Type @{ML_type Proof.method} represents proof methods as
   abstract type.
 
-  \item @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
+  \<^descr> @{ML METHOD_CASES}~@{text "(fn facts => cases_tactic)"} wraps
   @{text cases_tactic} depending on goal facts as proof method with
   cases; the goal context is passed via method syntax.
 
-  \item @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
+  \<^descr> @{ML METHOD}~@{text "(fn facts => tactic)"} wraps @{text
   tactic} depending on goal facts as regular proof method; the goal
   context is passed via method syntax.
 
-  \item @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
+  \<^descr> @{ML SIMPLE_METHOD}~@{text "tactic"} wraps a tactic that
   addresses all subgoals uniformly as simple proof method.  Goal facts
   are already inserted into all subgoals before @{text "tactic"} is
   applied.
 
-  \item @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
+  \<^descr> @{ML SIMPLE_METHOD'}~@{text "tactic"} wraps a tactic that
   addresses a specific subgoal as simple proof method that operates on
   subgoal 1.  Goal facts are inserted into the subgoal then the @{text
   "tactic"} is applied.
 
-  \item @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
+  \<^descr> @{ML Method.insert_tac}~@{text "facts i"} inserts @{text
   "facts"} into subgoal @{text "i"}.  This is convenient to reproduce
   part of the @{ML SIMPLE_METHOD} or @{ML SIMPLE_METHOD'} wrapping
   within regular @{ML METHOD}, for example.
 
-  \item @{ML Method.setup}~@{text "name parser description"} provides
+  \<^descr> @{ML Method.setup}~@{text "name parser description"} provides
   the functionality of the Isar command @{command method_setup} as ML
   function.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>See also @{command method_setup} in
@@ -546,23 +524,19 @@
   string -> theory -> theory"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type attribute} represents attributes as concrete
+  \<^descr> Type @{ML_type attribute} represents attributes as concrete
   type alias.
 
-  \item @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
+  \<^descr> @{ML Thm.rule_attribute}~@{text "(fn context => rule)"} wraps
   a context-dependent rule (mapping on @{ML_type thm}) as attribute.
 
-  \item @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
+  \<^descr> @{ML Thm.declaration_attribute}~@{text "(fn thm => decl)"}
   wraps a theorem-dependent declaration (mapping on @{ML_type
   Context.generic}) as attribute.
 
-  \item @{ML Attrib.setup}~@{text "name parser description"} provides
+  \<^descr> @{ML Attrib.setup}~@{text "name parser description"} provides
   the functionality of the Isar command @{command attribute_setup} as
   ML function.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -574,16 +548,12 @@
   @@{ML_antiquotation attributes} attributes
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{attributes [\<dots>]}"} embeds attribute source
+  \<^descr> @{text "@{attributes [\<dots>]}"} embeds attribute source
   representation into the ML text, which is particularly useful with
   declarations like @{ML Local_Theory.note}.  Attribute names are
   internalized at compile time, but the source is unevaluated.  This
   means attributes with formal arguments (types, terms, theorems) may
   be subject to odd effects of dynamic scoping!
-
-  \end{description}
 \<close>
 
 text %mlex \<open>See also @{command attribute_setup} in
--- a/src/Doc/Implementation/Local_Theory.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Local_Theory.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -103,9 +103,7 @@
     local_theory -> (string * thm list) * local_theory"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type local_theory} represents local theories.
+  \<^descr> Type @{ML_type local_theory} represents local theories.
   Although this is merely an alias for @{ML_type Proof.context}, it is
   semantically a subtype of the same: a @{ML_type local_theory} holds
   target information as special context data.  Subtyping means that
@@ -113,7 +111,7 @@
   with operations on expecting a regular @{text "ctxt:"}~@{ML_type
   Proof.context}.
 
-  \item @{ML Named_Target.init}~@{text "before_exit name thy"}
+  \<^descr> @{ML Named_Target.init}~@{text "before_exit name thy"}
   initializes a local theory derived from the given background theory.
   An empty name refers to a \emph{global theory} context, and a
   non-empty name refers to a @{command locale} or @{command class}
@@ -121,7 +119,7 @@
   useful for experimentation --- normally the Isar toplevel already
   takes care to initialize the local theory context.
 
-  \item @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
+  \<^descr> @{ML Local_Theory.define}~@{text "((b, mx), (a, rhs))
   lthy"} defines a local entity according to the specification that is
   given relatively to the current @{text "lthy"} context.  In
   particular the term of the RHS may refer to earlier local entities
@@ -141,7 +139,7 @@
   declarations such as @{attribute simp}, while non-trivial rules like
   @{attribute simplified} are better avoided.
 
-  \item @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
+  \<^descr> @{ML Local_Theory.note}~@{text "(a, ths) lthy"} is
   analogous to @{ML Local_Theory.define}, but defines facts instead of
   terms.  There is also a slightly more general variant @{ML
   Local_Theory.notes} that defines several facts (with attribute
@@ -149,8 +147,6 @@
 
   This is essentially the internal version of the @{command lemmas}
   command, or @{command declare} if an empty name binding is given.
-
-  \end{description}
 \<close>
 
 
--- a/src/Doc/Implementation/Logic.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Logic.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -138,55 +138,51 @@
   @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
   \end{mldecls}
 
-  \begin{description}
+  \<^descr> Type @{ML_type class} represents type classes.
 
-  \item Type @{ML_type class} represents type classes.
-
-  \item Type @{ML_type sort} represents sorts, i.e.\ finite
+  \<^descr> Type @{ML_type sort} represents sorts, i.e.\ finite
   intersections of classes.  The empty list @{ML "[]: sort"} refers to
   the empty class intersection, i.e.\ the ``full sort''.
 
-  \item Type @{ML_type arity} represents type arities.  A triple
+  \<^descr> Type @{ML_type arity} represents type arities.  A triple
   @{text "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> ::
   (\<^vec>s)s"} as described above.
 
-  \item Type @{ML_type typ} represents types; this is a datatype with
+  \<^descr> Type @{ML_type typ} represents types; this is a datatype with
   constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
 
-  \item @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
+  \<^descr> @{ML Term.map_atyps}~@{text "f \<tau>"} applies the mapping @{text
   "f"} to all atomic types (@{ML TFree}, @{ML TVar}) occurring in
   @{text "\<tau>"}.
 
-  \item @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
+  \<^descr> @{ML Term.fold_atyps}~@{text "f \<tau>"} iterates the operation
   @{text "f"} over all occurrences of atomic types (@{ML TFree}, @{ML
   TVar}) in @{text "\<tau>"}; the type structure is traversed from left to
   right.
 
-  \item @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
+  \<^descr> @{ML Sign.subsort}~@{text "thy (s\<^sub>1, s\<^sub>2)"}
   tests the subsort relation @{text "s\<^sub>1 \<subseteq> s\<^sub>2"}.
 
-  \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
+  \<^descr> @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
   @{text "\<tau>"} is of sort @{text "s"}.
 
-  \item @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
+  \<^descr> @{ML Sign.add_type}~@{text "ctxt (\<kappa>, k, mx)"} declares a
   new type constructors @{text "\<kappa>"} with @{text "k"} arguments and
   optional mixfix syntax.
 
-  \item @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
+  \<^descr> @{ML Sign.add_type_abbrev}~@{text "ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)"}
   defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"}.
 
-  \item @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
+  \<^descr> @{ML Sign.primitive_class}~@{text "(c, [c\<^sub>1, \<dots>,
   c\<^sub>n])"} declares a new class @{text "c"}, together with class
   relations @{text "c \<subseteq> c\<^sub>i"}, for @{text "i = 1, \<dots>, n"}.
 
-  \item @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
+  \<^descr> @{ML Sign.primitive_classrel}~@{text "(c\<^sub>1,
   c\<^sub>2)"} declares the class relation @{text "c\<^sub>1 \<subseteq>
   c\<^sub>2"}.
 
-  \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
+  \<^descr> @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
   the arity @{text "\<kappa> :: (\<^vec>s)s"}.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -211,28 +207,24 @@
   @@{ML_antiquotation typ} type
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{class c}"} inlines the internalized class @{text
+  \<^descr> @{text "@{class c}"} inlines the internalized class @{text
   "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{sort s}"} inlines the internalized sort @{text "s"}
+  \<^descr> @{text "@{sort s}"} inlines the internalized sort @{text "s"}
   --- as @{ML_type "string list"} literal.
 
-  \item @{text "@{type_name c}"} inlines the internalized type
+  \<^descr> @{text "@{type_name c}"} inlines the internalized type
   constructor @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{type_abbrev c}"} inlines the internalized type
+  \<^descr> @{text "@{type_abbrev c}"} inlines the internalized type
   abbreviation @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{nonterminal c}"} inlines the internalized syntactic
+  \<^descr> @{text "@{nonterminal c}"} inlines the internalized syntactic
   type~/ grammar nonterminal @{text "c"} --- as @{ML_type string}
   literal.
 
-  \item @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
+  \<^descr> @{text "@{typ \<tau>}"} inlines the internalized type @{text "\<tau>"}
   --- as constructor term for datatype @{ML_type typ}.
-
-  \end{description}
 \<close>
 
 
@@ -383,65 +375,61 @@
   @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type term} represents de-Bruijn terms, with comments
+  \<^descr> Type @{ML_type term} represents de-Bruijn terms, with comments
   in abstractions, and explicitly named free variables and constants;
   this is a datatype with constructors @{index_ML Bound}, @{index_ML
   Free}, @{index_ML Var}, @{index_ML Const}, @{index_ML Abs},
   @{index_ML_op "$"}.
 
-  \item @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
+  \<^descr> @{text "t"}~@{ML_text aconv}~@{text "u"} checks @{text
   "\<alpha>"}-equivalence of two terms.  This is the basic equality relation
   on type @{ML_type term}; raw datatype equality should only be used
   for operations related to parsing or printing!
 
-  \item @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
+  \<^descr> @{ML Term.map_types}~@{text "f t"} applies the mapping @{text
   "f"} to all types occurring in @{text "t"}.
 
-  \item @{ML Term.fold_types}~@{text "f t"} iterates the operation
+  \<^descr> @{ML Term.fold_types}~@{text "f t"} iterates the operation
   @{text "f"} over all occurrences of types in @{text "t"}; the term
   structure is traversed from left to right.
 
-  \item @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
+  \<^descr> @{ML Term.map_aterms}~@{text "f t"} applies the mapping @{text
   "f"} to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
   Const}) occurring in @{text "t"}.
 
-  \item @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
+  \<^descr> @{ML Term.fold_aterms}~@{text "f t"} iterates the operation
   @{text "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML
   Free}, @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
   traversed from left to right.
 
-  \item @{ML fastype_of}~@{text "t"} determines the type of a
+  \<^descr> @{ML fastype_of}~@{text "t"} determines the type of a
   well-typed term.  This operation is relatively slow, despite the
   omission of any sanity checks.
 
-  \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
+  \<^descr> @{ML lambda}~@{text "a b"} produces an abstraction @{text
   "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
   body @{text "b"} are replaced by bound variables.
 
-  \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
+  \<^descr> @{ML betapply}~@{text "(t, u)"} produces an application @{text
   "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
   abstraction.
 
-  \item @{ML incr_boundvars}~@{text "j"} increments a term's dangling
+  \<^descr> @{ML incr_boundvars}~@{text "j"} increments a term's dangling
   bound variables by the offset @{text "j"}.  This is required when
   moving a subterm into a context where it is enclosed by a different
   number of abstractions.  Bound variables with a matching abstraction
   are unaffected.
 
-  \item @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
+  \<^descr> @{ML Sign.declare_const}~@{text "ctxt ((c, \<sigma>), mx)"} declares
   a new constant @{text "c :: \<sigma>"} with optional mixfix syntax.
 
-  \item @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
+  \<^descr> @{ML Sign.add_abbrev}~@{text "print_mode (c, t)"}
   introduces a new term abbreviation @{text "c \<equiv> t"}.
 
-  \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
+  \<^descr> @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
   Sign.const_instance}~@{text "thy (c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])"}
   convert between two representations of polymorphic constants: full
   type instance vs.\ compact type arguments form.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -464,27 +452,23 @@
   @@{ML_antiquotation prop} prop
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{const_name c}"} inlines the internalized logical
+  \<^descr> @{text "@{const_name c}"} inlines the internalized logical
   constant name @{text "c"} --- as @{ML_type string} literal.
 
-  \item @{text "@{const_abbrev c}"} inlines the internalized
+  \<^descr> @{text "@{const_abbrev c}"} inlines the internalized
   abbreviated constant name @{text "c"} --- as @{ML_type string}
   literal.
 
-  \item @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
+  \<^descr> @{text "@{const c(\<^vec>\<tau>)}"} inlines the internalized
   constant @{text "c"} with precise type instantiation in the sense of
   @{ML Sign.const_instance} --- as @{ML Const} constructor term for
   datatype @{ML_type term}.
 
-  \item @{text "@{term t}"} inlines the internalized term @{text "t"}
+  \<^descr> @{text "@{term t}"} inlines the internalized term @{text "t"}
   --- as constructor term for datatype @{ML_type term}.
 
-  \item @{text "@{prop \<phi>}"} inlines the internalized proposition
+  \<^descr> @{text "@{prop \<phi>}"} inlines the internalized proposition
   @{text "\<phi>"} --- as constructor term for datatype @{ML_type term}.
-
-  \end{description}
 \<close>
 
 
@@ -681,9 +665,7 @@
   Defs.entry -> Defs.entry list -> theory -> theory"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Thm.peek_status}~@{text "thm"} informs about the current
+  \<^descr> @{ML Thm.peek_status}~@{text "thm"} informs about the current
   status of the derivation object behind the given theorem.  This is a
   snapshot of a potentially ongoing (parallel) evaluation of proofs.
   The three Boolean values indicate the following: @{verbatim oracle}
@@ -692,15 +674,15 @@
   failed} if some future proof has failed, rendering the theorem
   invalid!
 
-  \item @{ML Logic.all}~@{text "a B"} produces a Pure quantification
+  \<^descr> @{ML Logic.all}~@{text "a B"} produces a Pure quantification
   @{text "\<And>a. B"}, where occurrences of the atomic term @{text "a"} in
   the body proposition @{text "B"} are replaced by bound variables.
   (See also @{ML lambda} on terms.)
 
-  \item @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
+  \<^descr> @{ML Logic.mk_implies}~@{text "(A, B)"} produces a Pure
   implication @{text "A \<Longrightarrow> B"}.
 
-  \item Types @{ML_type ctyp} and @{ML_type cterm} represent certified
+  \<^descr> Types @{ML_type ctyp} and @{ML_type cterm} represent certified
   types and terms, respectively.  These are abstract datatypes that
   guarantee that its values have passed the full well-formedness (and
   well-typedness) checks, relative to the declarations of type
@@ -711,7 +693,7 @@
   are located in the @{ML_structure Thm} module, even though theorems are
   not yet involved at that stage.
 
-  \item @{ML Thm.ctyp_of}~@{text "ctxt \<tau>"} and @{ML
+  \<^descr> @{ML Thm.ctyp_of}~@{text "ctxt \<tau>"} and @{ML
   Thm.cterm_of}~@{text "ctxt t"} explicitly check types and terms,
   respectively.  This also involves some basic normalizations, such
   expansion of type and term abbreviations from the underlying
@@ -719,7 +701,7 @@
   Full re-certification is relatively slow and should be avoided in
   tight reasoning loops.
 
-  \item @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
+  \<^descr> @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML
   Drule.mk_implies} etc.\ compose certified terms (or propositions)
   incrementally.  This is equivalent to @{ML Thm.cterm_of} after
   unchecked @{ML_op "$"}, @{ML lambda}, @{ML Logic.all}, @{ML
@@ -728,58 +710,56 @@
   constructions on top.  There are separate operations to decompose
   certified terms and theorems to produce certified terms again.
 
-  \item Type @{ML_type thm} represents proven propositions.  This is
+  \<^descr> Type @{ML_type thm} represents proven propositions.  This is
   an abstract datatype that guarantees that its values have been
   constructed by basic principles of the @{ML_structure Thm} module.
   Every @{ML_type thm} value refers its background theory,
   cf.\ \secref{sec:context-theory}.
 
-  \item @{ML Thm.transfer}~@{text "thy thm"} transfers the given
+  \<^descr> @{ML Thm.transfer}~@{text "thy thm"} transfers the given
   theorem to a \emph{larger} theory, see also \secref{sec:context}.
   This formal adjustment of the background context has no logical
   significance, but is occasionally required for formal reasons, e.g.\
   when theorems that are imported from more basic theories are used in
   the current situation.
 
-  \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
+  \<^descr> @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
   Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
   correspond to the primitive inferences of \figref{fig:prim-rules}.
 
-  \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
+  \<^descr> @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
   corresponds to the @{text "generalize"} rules of
   \figref{fig:subst-rules}.  Here collections of type and term
   variables are generalized simultaneously, specified by the given
   basic names.
 
-  \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
+  \<^descr> @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^sub>s,
   \<^vec>x\<^sub>\<tau>)"} corresponds to the @{text "instantiate"} rules
   of \figref{fig:subst-rules}.  Type variables are substituted before
   term variables.  Note that the types in @{text "\<^vec>x\<^sub>\<tau>"}
   refer to the instantiated versions.
 
-  \item @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
+  \<^descr> @{ML Thm.add_axiom}~@{text "ctxt (name, A)"} declares an
   arbitrary proposition as axiom, and retrieves it as a theorem from
   the resulting theory, cf.\ @{text "axiom"} in
   \figref{fig:prim-rules}.  Note that the low-level representation in
   the axiom table may differ slightly from the returned theorem.
 
-  \item @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
+  \<^descr> @{ML Thm.add_oracle}~@{text "(binding, oracle)"} produces a named
   oracle rule, essentially generating arbitrary axioms on the fly,
   cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
 
-  \item @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
+  \<^descr> @{ML Thm.add_def}~@{text "ctxt unchecked overloaded (name, c
   \<^vec>x \<equiv> t)"} states a definitional axiom for an existing constant
   @{text "c"}.  Dependencies are recorded via @{ML Theory.add_deps},
   unless the @{text "unchecked"} option is set.  Note that the
   low-level representation in the axiom table may differ slightly from
   the returned theorem.
 
-  \item @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
+  \<^descr> @{ML Theory.add_deps}~@{text "ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>"}
   declares dependencies of a named specification for constant @{text
   "c\<^sub>\<tau>"}, relative to existing specifications for constants @{text
   "\<^vec>d\<^sub>\<sigma>"}.  This also works for type constructors.
-
-  \end{description}
 \<close>
 
 
@@ -808,23 +788,21 @@
     @'by' method method?
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
+  \<^descr> @{text "@{ctyp \<tau>}"} produces a certified type wrt.\ the
   current background theory --- as abstract value of type @{ML_type
   ctyp}.
 
-  \item @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
+  \<^descr> @{text "@{cterm t}"} and @{text "@{cprop \<phi>}"} produce a
   certified term wrt.\ the current background theory --- as abstract
   value of type @{ML_type cterm}.
 
-  \item @{text "@{thm a}"} produces a singleton fact --- as abstract
+  \<^descr> @{text "@{thm a}"} produces a singleton fact --- as abstract
   value of type @{ML_type thm}.
 
-  \item @{text "@{thms a}"} produces a general fact --- as abstract
+  \<^descr> @{text "@{thms a}"} produces a general fact --- as abstract
   value of type @{ML_type "thm list"}.
 
-  \item @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
+  \<^descr> @{text "@{lemma \<phi> by meth}"} produces a fact that is proven on
   the spot according to the minimal proof, which imitates a terminal
   Isar proof.  The result is an abstract value of type @{ML_type thm}
   or @{ML_type "thm list"}, depending on the number of propositions
@@ -840,9 +818,6 @@
   "by"} step.  More complex Isar proofs should be done in regular
   theory source, before compiling the corresponding ML text that uses
   the result.
-
-  \end{description}
-
 \<close>
 
 
@@ -915,26 +890,22 @@
   @{index_ML Logic.dest_type: "term -> typ"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
+  \<^descr> @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
   "A"} and @{text "B"}.
 
-  \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
+  \<^descr> @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
   from @{text "A &&& B"}.
 
-  \item @{ML Drule.mk_term} derives @{text "TERM t"}.
+  \<^descr> @{ML Drule.mk_term} derives @{text "TERM t"}.
 
-  \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
+  \<^descr> @{ML Drule.dest_term} recovers term @{text "t"} from @{text
   "TERM t"}.
 
-  \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
+  \<^descr> @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
   "TYPE(\<tau>)"}.
 
-  \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
+  \<^descr> @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
   @{text "\<tau>"}.
-
-  \end{description}
 \<close>
 
 
@@ -972,16 +943,12 @@
   @{index_ML Thm.strip_shyps: "thm -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
+  \<^descr> @{ML Thm.extra_shyps}~@{text "thm"} determines the extraneous
   sort hypotheses of the given theorem, i.e.\ the sorts that are not
   present within type variables of the statement.
 
-  \item @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
+  \<^descr> @{ML Thm.strip_shyps}~@{text "thm"} removes any extraneous
   sort hypotheses that can be witnessed from the type signature.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following artificial example demonstrates the
@@ -1069,8 +1036,6 @@
   Regular user-level inferences in Isabelle/Pure always
   maintain the following canonical form of results:
 
-  \begin{itemize}
-
   \<^item> Normalization by @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"},
   which is a theorem of Pure, means that quantifiers are pushed in
   front of implication at each level of nesting.  The normal form is a
@@ -1081,8 +1046,6 @@
   \<Longrightarrow> A \<^vec>x"} we have @{text "\<^vec>H ?\<^vec>x \<Longrightarrow> A ?\<^vec>x"}.
   Note that this representation looses information about the order of
   parameters, and vacuous quantifiers vanish automatically.
-
-  \end{itemize}
 \<close>
 
 text %mlref \<open>
@@ -1090,14 +1053,10 @@
   @{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
+  \<^descr> @{ML Simplifier.norm_hhf}~@{text "ctxt thm"} normalizes the given
   theorem according to the canonical form specified above.  This is
   occasionally helpful to repair some low-level tools that do not
   handle Hereditary Harrop Formulae properly.
-
-  \end{description}
 \<close>
 
 
@@ -1174,9 +1133,7 @@
   @{index_ML_op "OF": "thm * thm list -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
+  \<^descr> @{text "rule\<^sub>1 RSN (i, rule\<^sub>2)"} resolves the conclusion of
   @{text "rule\<^sub>1"} with the @{text i}-th premise of @{text "rule\<^sub>2"},
   according to the @{inference resolution} principle explained above.
   Unless there is precisely one resolvent it raises exception @{ML
@@ -1185,10 +1142,10 @@
   This corresponds to the rule attribute @{attribute THEN} in Isar
   source language.
 
-  \item @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
+  \<^descr> @{text "rule\<^sub>1 RS rule\<^sub>2"} abbreviates @{text "rule\<^sub>1 RSN (1,
   rule\<^sub>2)"}.
 
-  \item @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
+  \<^descr> @{text "rules\<^sub>1 RLN (i, rules\<^sub>2)"} joins lists of rules.  For
   every @{text "rule\<^sub>1"} in @{text "rules\<^sub>1"} and @{text "rule\<^sub>2"} in
   @{text "rules\<^sub>2"}, it resolves the conclusion of @{text "rule\<^sub>1"} with
   the @{text "i"}-th premise of @{text "rule\<^sub>2"}, accumulating multiple
@@ -1196,23 +1153,21 @@
   higher-order unifications can be inefficient compared to the lazy
   variant seen in elementary tactics like @{ML resolve_tac}.
 
-  \item @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
+  \<^descr> @{text "rules\<^sub>1 RL rules\<^sub>2"} abbreviates @{text "rules\<^sub>1 RLN (1,
   rules\<^sub>2)"}.
 
-  \item @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
+  \<^descr> @{text "[rule\<^sub>1, \<dots>, rule\<^sub>n] MRS rule"} resolves @{text "rule\<^sub>i"}
   against premise @{text "i"} of @{text "rule"}, for @{text "i = n, \<dots>,
   1"}.  By working from right to left, newly emerging premises are
   concatenated in the result, without interfering.
 
-  \item @{text "rule OF rules"} is an alternative notation for @{text
+  \<^descr> @{text "rule OF rules"} is an alternative notation for @{text
   "rules MRS rule"}, which makes rule composition look more like
   function application.  Note that the argument @{text "rules"} need
   not be atomic.
 
   This corresponds to the rule attribute @{attribute OF} in Isar
   source language.
-
-  \end{description}
 \<close>
 
 
@@ -1359,23 +1314,21 @@
   @{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type proof} represents proof terms; this is a
+  \<^descr> Type @{ML_type proof} represents proof terms; this is a
   datatype with constructors @{index_ML Abst}, @{index_ML AbsP},
   @{index_ML_op "%"}, @{index_ML_op "%%"}, @{index_ML PBound},
   @{index_ML MinProof}, @{index_ML Hyp}, @{index_ML PAxm}, @{index_ML
   Oracle}, @{index_ML Promise}, @{index_ML PThm} as explained above.
   %FIXME OfClass (!?)
 
-  \item Type @{ML_type proof_body} represents the nested proof
+  \<^descr> Type @{ML_type proof_body} represents the nested proof
   information of a named theorem, consisting of a digest of oracles
   and named theorem over some proof term.  The digest only covers the
   directly visible part of the proof: in order to get the full
   information, the implicit graph of nested theorems needs to be
   traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
 
-  \item @{ML Thm.proof_of}~@{text "thm"} and @{ML
+  \<^descr> @{ML Thm.proof_of}~@{text "thm"} and @{ML
   Thm.proof_body_of}~@{text "thm"} produce the proof term or proof
   body (with digest of oracles and theorems) from a given theorem.
   Note that this involves a full join of internal futures that fulfill
@@ -1384,14 +1337,14 @@
   Parallel performance may suffer by inspecting proof terms at
   run-time.
 
-  \item @{ML proofs} specifies the detail of proof recording within
+  \<^descr> @{ML proofs} specifies the detail of proof recording within
   @{ML_type thm} values produced by the inference kernel: @{ML 0}
   records only the names of oracles, @{ML 1} records oracle names and
   propositions, @{ML 2} additionally records full proof terms.
   Officially named theorems that contribute to a result are recorded
   in any case.
 
-  \item @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
+  \<^descr> @{ML Reconstruct.reconstruct_proof}~@{text "thy prop prf"}
   turns the implicit proof term @{text "prf"} into a full proof of the
   given proposition.
 
@@ -1401,24 +1354,22 @@
   constructed manually, but not for those produced automatically by
   the inference kernel.
 
-  \item @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
+  \<^descr> @{ML Reconstruct.expand_proof}~@{text "thy [thm\<^sub>1, \<dots>, thm\<^sub>n]
   prf"} expands and reconstructs the proofs of all specified theorems,
   with the given (full) proof.  Theorems that are not unique specified
   via their name may be disambiguated by giving their proposition.
 
-  \item @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
+  \<^descr> @{ML Proof_Checker.thm_of_proof}~@{text "thy prf"} turns the
   given (full) proof into a theorem, by replaying it using only
   primitive rules of the inference kernel.
 
-  \item @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
+  \<^descr> @{ML Proof_Syntax.read_proof}~@{text "thy b\<^sub>1 b\<^sub>2 s"} reads in a
   proof term. The Boolean flags indicate the use of sort and type
   information.  Usually, typing information is left implicit and is
   inferred during proof reconstruction.  %FIXME eliminate flags!?
 
-  \item @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
+  \<^descr> @{ML Proof_Syntax.pretty_proof}~@{text "ctxt prf"}
   pretty-prints the given proof term.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>Detailed proof information of a theorem may be retrieved
--- a/src/Doc/Implementation/ML.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/ML.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -76,8 +76,8 @@
   subsubsections, paragraphs etc.\ using a simple layout via ML
   comments as follows.
 
-  \begin{verbatim}
-  (*** section ***)
+  @{verbatim [display]
+\<open>  (*** section ***)
 
   (** subsection **)
 
@@ -88,8 +88,7 @@
   (*
     long paragraph,
     with more text
-  *)
-  \end{verbatim}
+  *)\<close>}
 
   As in regular typography, there is some extra space \emph{before}
   section headings that are adjacent to plain text, but not other headings
@@ -155,8 +154,8 @@
 
   Example:
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   fun print_foo ctxt foo =
     let
@@ -180,15 +179,12 @@
   fun print_foo ctxt foo =
     let
       fun aux t = ... string_of_term ctxt t ...
-    in ... end;
-  \end{verbatim}
+    in ... end;\<close>}
 
 
   \paragraph{Specific conventions.} Here are some specific name forms
   that occur frequently in the sources.
 
-  \begin{itemize}
-
   \<^item> A function that maps @{ML_text foo} to @{ML_text bar} is
   called @{ML_text foo_to_bar} or @{ML_text bar_of_foo} (never
   @{ML_text foo2bar}, nor @{ML_text bar_from_foo}, nor @{ML_text
@@ -210,21 +206,17 @@
   framework (\secref{sec:context} and \chref{ch:local-theory}) have
   firm naming conventions as follows:
 
-  \begin{itemize}
-
-  \<^item> theories are called @{ML_text thy}, rarely @{ML_text theory}
-  (never @{ML_text thry})
-
-  \<^item> proof contexts are called @{ML_text ctxt}, rarely @{ML_text
-  context} (never @{ML_text ctx})
-
-  \<^item> generic contexts are called @{ML_text context}
-
-  \<^item> local theories are called @{ML_text lthy}, except for local
-  theories that are treated as proof context (which is a semantic
-  super-type)
-
-  \end{itemize}
+    \<^item> theories are called @{ML_text thy}, rarely @{ML_text theory}
+    (never @{ML_text thry})
+
+    \<^item> proof contexts are called @{ML_text ctxt}, rarely @{ML_text
+    context} (never @{ML_text ctx})
+
+    \<^item> generic contexts are called @{ML_text context}
+
+    \<^item> local theories are called @{ML_text lthy}, except for local
+    theories that are treated as proof context (which is a semantic
+    super-type)
 
   Variations with primed or decimal numbers are always possible, as
   well as semantic prefixes like @{ML_text foo_thy} or @{ML_text
@@ -235,32 +227,28 @@
   \<^item> The main logical entities (\secref{ch:logic}) have established
   naming convention as follows:
 
-  \begin{itemize}
-
-  \<^item> sorts are called @{ML_text S}
-
-  \<^item> types are called @{ML_text T}, @{ML_text U}, or @{ML_text
-  ty} (never @{ML_text t})
-
-  \<^item> terms are called @{ML_text t}, @{ML_text u}, or @{ML_text
-  tm} (never @{ML_text trm})
-
-  \<^item> certified types are called @{ML_text cT}, rarely @{ML_text
-  T}, with variants as for types
-
-  \<^item> certified terms are called @{ML_text ct}, rarely @{ML_text
-  t}, with variants as for terms (never @{ML_text ctrm})
-
-  \<^item> theorems are called @{ML_text th}, or @{ML_text thm}
-
-  \end{itemize}
+    \<^item> sorts are called @{ML_text S}
+
+    \<^item> types are called @{ML_text T}, @{ML_text U}, or @{ML_text
+    ty} (never @{ML_text t})
+
+    \<^item> terms are called @{ML_text t}, @{ML_text u}, or @{ML_text
+    tm} (never @{ML_text trm})
+
+    \<^item> certified types are called @{ML_text cT}, rarely @{ML_text
+    T}, with variants as for types
+
+    \<^item> certified terms are called @{ML_text ct}, rarely @{ML_text
+    t}, with variants as for terms (never @{ML_text ctrm})
+
+    \<^item> theorems are called @{ML_text th}, or @{ML_text thm}
 
   Proper semantic names override these conventions completely.  For
   example, the left-hand side of an equation (as a term) can be called
   @{ML_text lhs} (not @{ML_text lhs_tm}).  Or a term that is known
   to be a variable can be called @{ML_text v} or @{ML_text x}.
 
-  \item Tactics (\secref{sec:tactics}) are sufficiently important to
+  \<^item> Tactics (\secref{sec:tactics}) are sufficiently important to
   have specific naming conventions.  The name of a basic tactic
   definition always has a @{ML_text "_tac"} suffix, the subgoal index
   (if applicable) is always called @{ML_text i}, and the goal state
@@ -269,9 +257,7 @@
   before the latter two, and the general context is given first.
   Example:
 
-  \begin{verbatim}
-  fun my_tac ctxt arg1 arg2 i st = ...
-  \end{verbatim}
+  @{verbatim [display] \<open>  fun my_tac ctxt arg1 arg2 i st = ...\<close>}
 
   Note that the goal state @{ML_text st} above is rarely made
   explicit, if tactic combinators (tacticals) are used as usual.
@@ -280,8 +266,6 @@
   in the @{verbatim ctxt} argument above. Do not refer to the background
   theory of @{verbatim st} -- it is not a proper context, but merely a formal
   certificate.
-
-  \end{itemize}
 \<close>
 
 
@@ -307,16 +291,16 @@
   defines positioning of spaces for parentheses, punctuation, and
   infixes as illustrated here:
 
-  \begin{verbatim}
-  val x = y + z * (a + b);
+  @{verbatim [display]
+\<open>  val x = y + z * (a + b);
   val pair = (a, b);
-  val record = {foo = 1, bar = 2};
-  \end{verbatim}
+  val record = {foo = 1, bar = 2};\<close>}
 
   Lines are normally broken \emph{after} an infix operator or
   punctuation character.  For example:
 
-  \begin{verbatim}
+  @{verbatim [display]
+\<open>
   val x =
     a +
     b +
@@ -326,7 +310,7 @@
    (a,
     b,
     c);
-  \end{verbatim}
+\<close>}
 
   Some special infixes (e.g.\ @{ML_text "|>"}) work better at the
   start of the line, but punctuation is always at the end.
@@ -354,8 +338,8 @@
   nesting depth, not the accidental length of the text that initiates
   a level of nesting.  Example:
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   if b then
     expr1_part1
@@ -370,8 +354,7 @@
   if b then expr1_part1
             expr1_part2
   else expr2_part1
-       expr2_part2
-  \end{verbatim}
+       expr2_part2\<close>}
 
   The second form has many problems: it assumes a fixed-width font
   when viewing the sources, it uses more space on the line and thus
@@ -395,8 +378,8 @@
   @{ML_text case} get extra indentation to indicate the nesting
   clearly.  Example:
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   fun foo p1 =
         expr1
@@ -409,16 +392,15 @@
   fun foo p1 =
     expr1
     | foo p2 =
-    expr2
-  \end{verbatim}
+    expr2\<close>}
 
   Body expressions consisting of @{ML_text case} or @{ML_text let}
   require care to maintain compositionality, to prevent loss of
   logical indentation where it is especially important to see the
   structure of the text.  Example:
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   fun foo p1 =
         (case e of
@@ -442,8 +424,7 @@
       ...
     in
       ...
-    end
-  \end{verbatim}
+    end\<close>}
 
   Extra parentheses around @{ML_text case} expressions are optional,
   but help to analyse the nesting based on character matching in the
@@ -453,41 +434,36 @@
   There are two main exceptions to the overall principle of
   compositionality in the layout of complex expressions.
 
-  \begin{enumerate}
-
   \<^enum> @{ML_text "if"} expressions are iterated as if ML had multi-branch
   conditionals, e.g.
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   if b1 then e1
   else if b2 then e2
-  else e3
-  \end{verbatim}
+  else e3\<close>}
 
   \<^enum> @{ML_text fn} abstractions are often layed-out as if they
   would lack any structure by themselves.  This traditional form is
   motivated by the possibility to shift function arguments back and
   forth wrt.\ additional combinators.  Example:
 
-  \begin{verbatim}
-  (* RIGHT *)
+  @{verbatim [display]
+\<open>  (* RIGHT *)
 
   fun foo x y = fold (fn z =>
-    expr)
-  \end{verbatim}
+    expr)\<close>}
 
   Here the visual appearance is that of three arguments @{ML_text x},
   @{ML_text y}, @{ML_text z} in a row.
 
-  \end{enumerate}
 
   Such weakly structured layout should be use with great care.  Here
   are some counter-examples involving @{ML_text let} expressions:
 
-  \begin{verbatim}
-  (* WRONG *)
+  @{verbatim [display]
+\<open>  (* WRONG *)
 
   fun foo x = let
       val y = ...
@@ -515,8 +491,7 @@
     let
       val y = ...
     in
-      ... end
-  \end{verbatim}
+      ... end\<close>}
 
   \<^medskip>
   In general the source layout is meant to emphasize the
@@ -646,25 +621,22 @@
   @{index_ML ML_Thms.bind_thm: "string * thm -> unit"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML "ML_Context.the_generic_context ()"} refers to the theory
+  \<^descr> @{ML "ML_Context.the_generic_context ()"} refers to the theory
   context of the ML toplevel --- at compile time.  ML code needs to
   take care to refer to @{ML "ML_Context.the_generic_context ()"}
   correctly.  Recall that evaluation of a function body is delayed
   until actual run-time.
 
-  \item @{ML "Context.>>"}~@{text f} applies context transformation
+  \<^descr> @{ML "Context.>>"}~@{text f} applies context transformation
   @{text f} to the implicit context of the ML toplevel.
 
-  \item @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
+  \<^descr> @{ML ML_Thms.bind_thms}~@{text "(name, thms)"} stores a list of
   theorems produced in ML both in the (global) theory context and the
   ML toplevel, associating it with the provided name.
 
-  \item @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but
+  \<^descr> @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but
   refers to a singleton fact.
 
-  \end{description}
 
   It is important to note that the above functions are really
   restricted to the compile time, even though the ML compiler is
@@ -725,19 +697,15 @@
   @@{ML_antiquotation print} @{syntax name}?
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{make_string}"} inlines a function to print arbitrary values
+  \<^descr> @{text "@{make_string}"} inlines a function to print arbitrary values
   similar to the ML toplevel. The result is compiler dependent and may fall
   back on "?" in certain situations. The value of configuration option
   @{attribute_ref ML_print_depth} determines further details of output.
 
-  \item @{text "@{print f}"} uses the ML function @{text "f: string ->
+  \<^descr> @{text "@{print f}"} uses the ML function @{text "f: string ->
   unit"} to output the result of @{text "@{make_string}"} above,
   together with the source position of the antiquotation.  The default
   output function is @{ML writeln}.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following artificial examples show how to produce
@@ -907,19 +875,16 @@
   @{index_ML fold_map: "('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML fold}~@{text f} lifts the parametrized update function
+  \<^descr> @{ML fold}~@{text f} lifts the parametrized update function
   @{text "f"} to a list of parameters.
 
-  \item @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
+  \<^descr> @{ML fold_rev}~@{text "f"} is similar to @{ML fold}~@{text
   "f"}, but works inside-out, as if the list would be reversed.
 
-  \item @{ML fold_map}~@{text "f"} lifts the parametrized update
+  \<^descr> @{ML fold_map}~@{text "f"} lifts the parametrized update
   function @{text "f"} (with side-result) to a list of parameters and
   cumulative side-results.
 
-  \end{description}
 
   \begin{warn}
   The literature on functional programming provides a confusing multitude of
@@ -1033,13 +998,11 @@
   @{index_ML error: "string -> 'a"} % FIXME Output.error_message (!?) \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
+  \<^descr> @{ML writeln}~@{text "text"} outputs @{text "text"} as regular
   message.  This is the primary message output operation of Isabelle
   and should be used by default.
 
-  \item @{ML tracing}~@{text "text"} outputs @{text "text"} as special
+  \<^descr> @{ML tracing}~@{text "text"} outputs @{text "text"} as special
   tracing message, indicating potential high-volume output to the
   front-end (hundreds or thousands of messages issued by a single
   command).  The idea is to allow the user-interface to downgrade the
@@ -1049,11 +1012,11 @@
   output, e.g.\ switch to a different output window.  So this channel
   should not be used for regular output.
 
-  \item @{ML warning}~@{text "text"} outputs @{text "text"} as
+  \<^descr> @{ML warning}~@{text "text"} outputs @{text "text"} as
   warning, which typically means some extra emphasis on the front-end
   side (color highlighting, icons, etc.).
 
-  \item @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
+  \<^descr> @{ML error}~@{text "text"} raises exception @{ML ERROR}~@{text
   "text"} and thus lets the Isar toplevel print @{text "text"} on the
   error channel, which typically means some extra emphasis on the
   front-end side (color highlighting, icons, etc.).
@@ -1068,7 +1031,6 @@
   this is normally not used directly in user code.
   \end{warn}
 
-  \end{description}
 
   \begin{warn}
   Regular Isabelle/ML code should output messages exclusively by the
@@ -1215,40 +1177,36 @@
   @{index_ML Runtime.exn_trace: "(unit -> 'a) -> 'a"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML try}~@{text "f x"} makes the partiality of evaluating
+  \<^descr> @{ML try}~@{text "f x"} makes the partiality of evaluating
   @{text "f x"} explicit via the option datatype.  Interrupts are
   \emph{not} handled here, i.e.\ this form serves as safe replacement
   for the \emph{unsafe} version @{ML_text "(SOME"}~@{text "f
   x"}~@{ML_text "handle _ => NONE)"} that is occasionally seen in
   books about SML97, but not in Isabelle/ML.
 
-  \item @{ML can} is similar to @{ML try} with more abstract result.
-
-  \item @{ML ERROR}~@{text "msg"} represents user errors; this
+  \<^descr> @{ML can} is similar to @{ML try} with more abstract result.
+
+  \<^descr> @{ML ERROR}~@{text "msg"} represents user errors; this
   exception is normally raised indirectly via the @{ML error} function
   (see \secref{sec:message-channels}).
 
-  \item @{ML Fail}~@{text "msg"} represents general program failures.
-
-  \item @{ML Exn.is_interrupt} identifies interrupts robustly, without
+  \<^descr> @{ML Fail}~@{text "msg"} represents general program failures.
+
+  \<^descr> @{ML Exn.is_interrupt} identifies interrupts robustly, without
   mentioning concrete exception constructors in user code.  Handled
   interrupts need to be re-raised promptly!
 
-  \item @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
+  \<^descr> @{ML reraise}~@{text "exn"} raises exception @{text "exn"}
   while preserving its implicit position information (if possible,
   depending on the ML platform).
 
-  \item @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
+  \<^descr> @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~@{text
   "e"}@{ML_text ")"} evaluates expression @{text "e"} while printing
   a full trace of its stack of nested exceptions (if possible,
   depending on the ML platform).
 
   Inserting @{ML Runtime.exn_trace} into ML code temporarily is
   useful for debugging, but not suitable for production code.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -1256,14 +1214,10 @@
   @{ML_antiquotation_def "assert"} & : & @{text ML_antiquotation} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{text "@{assert}"} inlines a function
+  \<^descr> @{text "@{assert}"} inlines a function
   @{ML_type "bool -> unit"} that raises @{ML Fail} if the argument is
   @{ML false}.  Due to inlining the source position of failed
   assertions is included in the error output.
-
-  \end{description}
 \<close>
 
 
@@ -1276,8 +1230,6 @@
   in itself a small string, which has either one of the following
   forms:
 
-  \begin{enumerate}
-
   \<^enum> a single ASCII character ``@{text "c"}'', for example
   ``@{verbatim a}'',
 
@@ -1298,7 +1250,6 @@
   "<^raw"}@{text n}@{verbatim ">"}, where @{text n} consists of digits, for
   example ``@{verbatim "\<^raw42>"}''.
 
-  \end{enumerate}
 
   The @{text "ident"} syntax for symbol names is @{text "letter
   (letter | digit)\<^sup>*"}, where @{text "letter = A..Za..z"} and @{text
@@ -1337,33 +1288,30 @@
   @{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type "Symbol.symbol"} represents individual Isabelle
+  \<^descr> Type @{ML_type "Symbol.symbol"} represents individual Isabelle
   symbols.
 
-  \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
+  \<^descr> @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
   from the packed form.  This function supersedes @{ML
   "String.explode"} for virtually all purposes of manipulating text in
   Isabelle!\footnote{The runtime overhead for exploded strings is
   mainly that of the list structure: individual symbols that happen to
   be a singleton string do not require extra memory in Poly/ML.}
 
-  \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
+  \<^descr> @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
   "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
   symbols according to fixed syntactic conventions of Isabelle, cf.\
   @{cite "isabelle-isar-ref"}.
 
-  \item Type @{ML_type "Symbol.sym"} is a concrete datatype that
+  \<^descr> Type @{ML_type "Symbol.sym"} is a concrete datatype that
   represents the different kinds of symbols explicitly, with
   constructors @{ML "Symbol.Char"}, @{ML "Symbol.UTF8"},
   @{ML "Symbol.Sym"}, @{ML "Symbol.Ctrl"}, @{ML "Symbol.Raw"},
   @{ML "Symbol.Malformed"}.
 
-  \item @{ML "Symbol.decode"} converts the string representation of a
+  \<^descr> @{ML "Symbol.decode"} converts the string representation of a
   symbol into the datatype version.
 
-  \end{description}
 
   \paragraph{Historical note.} In the original SML90 standard the
   primitive ML type @{ML_type char} did not exists, and @{ML_text
@@ -1399,13 +1347,9 @@
   @{index_ML_type char} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type char} is \emph{not} used.  The smallest textual
+  \<^descr> Type @{ML_type char} is \emph{not} used.  The smallest textual
   unit in Isabelle is represented as a ``symbol'' (see
   \secref{sec:symbols}).
-
-  \end{description}
 \<close>
 
 
@@ -1416,9 +1360,7 @@
   @{index_ML_type string} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type string} represents immutable vectors of 8-bit
+  \<^descr> Type @{ML_type string} represents immutable vectors of 8-bit
   characters.  There are operations in SML to convert back and forth
   to actual byte vectors, which are seldom used.
 
@@ -1426,22 +1368,16 @@
   Isabelle-specific purposes with the following implicit substructures
   packed into the string content:
 
-  \begin{enumerate}
-
-  \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}),
-  with @{ML Symbol.explode} as key operation;
-
-  \<^enum> XML tree structure via YXML (see also @{cite "isabelle-system"}),
-  with @{ML YXML.parse_body} as key operation.
-
-  \end{enumerate}
+    \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}),
+    with @{ML Symbol.explode} as key operation;
+  
+    \<^enum> XML tree structure via YXML (see also @{cite "isabelle-system"}),
+    with @{ML YXML.parse_body} as key operation.
 
   Note that Isabelle/ML string literals may refer Isabelle symbols like
   ``@{verbatim \<alpha>}'' natively, \emph{without} escaping the backslash. This is a
   consequence of Isabelle treating all source text as strings of symbols,
   instead of raw characters.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The subsequent example illustrates the difference of
@@ -1471,9 +1407,7 @@
   @{index_ML_type int} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type int} represents regular mathematical integers, which
+  \<^descr> Type @{ML_type int} represents regular mathematical integers, which
   are \emph{unbounded}. Overflow is treated properly, but should never happen
   in practice.\footnote{The size limit for integer bit patterns in memory is
   64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.} This works
@@ -1486,8 +1420,6 @@
   @{ML_structure Int}.  Structure @{ML_structure Integer} in @{file
   "~~/src/Pure/General/integer.ML"} provides some additional
   operations.
-
-  \end{description}
 \<close>
 
 
@@ -1499,19 +1431,15 @@
   @{index_ML seconds: "real -> Time.time"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Time.time} represents time abstractly according
+  \<^descr> Type @{ML_type Time.time} represents time abstractly according
   to the SML97 basis library definition.  This is adequate for
   internal ML operations, but awkward in concrete time specifications.
 
-  \item @{ML seconds}~@{text "s"} turns the concrete scalar @{text
+  \<^descr> @{ML seconds}~@{text "s"} turns the concrete scalar @{text
   "s"} (measured in seconds) into an abstract time value.  Floating
   point numbers are easy to use as configuration options in the
   context (see \secref{sec:config-options}) or system options that
   are maintained externally.
-
-  \end{description}
 \<close>
 
 
@@ -1551,15 +1479,13 @@
   @{index_ML update: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
+  \<^descr> @{ML cons}~@{text "x xs"} evaluates to @{text "x :: xs"}.
 
   Tupled infix operators are a historical accident in Standard ML.
   The curried @{ML cons} amends this, but it should be only used when
   partial application is required.
 
-  \item @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
+  \<^descr> @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat
   lists as a set-like container that maintains the order of elements.
   See @{file "~~/src/Pure/library.ML"} for the full specifications
   (written in ML).  There are some further derived operations like
@@ -1571,8 +1497,6 @@
   often more appropriate in declarations of context data
   (\secref{sec:context-data}) that are issued by the user in Isar
   source: later declarations take precedence over earlier ones.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>Using canonical @{ML fold} together with @{ML cons} (or
@@ -1627,9 +1551,7 @@
   @{index_ML AList.update: "('a * 'a -> bool) -> 'a * 'b -> ('a * 'b) list -> ('a * 'b) list"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
+  \<^descr> @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update}
   implement the main ``framework operations'' for mappings in
   Isabelle/ML, following standard conventions for their names and
   types.
@@ -1644,7 +1566,6 @@
   justify its independent existence.  This also gives the
   implementation some opportunity for peep-hole optimization.
 
-  \end{description}
 
   Association lists are adequate as simple implementation of finite mappings
   in many practical situations. A more advanced table structure is defined in
@@ -1762,8 +1683,6 @@
   read/write access to shared resources, which are outside the purely
   functional world of ML.  This covers the following in particular.
 
-  \begin{itemize}
-
   \<^item> Global references (or arrays), i.e.\ mutable memory cells that
   persist over several invocations of associated
   operations.\footnote{This is independent of the visibility of such
@@ -1775,7 +1694,6 @@
   \<^item> Writable resources in the file-system that are shared among
   different threads or external processes.
 
-  \end{itemize}
 
   Isabelle/ML provides various mechanisms to avoid critical shared
   resources in most situations.  As last resort there are some
@@ -1783,8 +1701,6 @@
   help to make Isabelle/ML programs work smoothly in a concurrent
   environment.
 
-  \begin{itemize}
-
   \<^item> Avoid global references altogether.  Isabelle/Isar maintains a
   uniform context that incorporates arbitrary data declared by user
   programs (\secref{sec:context-data}).  This context is passed as
@@ -1824,8 +1740,6 @@
   serial numbers in Isabelle/ML.  Thus temporary files that are passed
   to to some external process will be always disjoint, and thus
   thread-safe.
-
-  \end{itemize}
 \<close>
 
 text %mlref \<open>
@@ -1834,16 +1748,12 @@
   @{index_ML serial_string: "unit -> string"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML File.tmp_path}~@{text "path"} relocates the base
+  \<^descr> @{ML File.tmp_path}~@{text "path"} relocates the base
   component of @{text "path"} into the unique temporary directory of
   the running Isabelle/ML process.
 
-  \item @{ML serial_string}~@{text "()"} creates a new serial number
+  \<^descr> @{ML serial_string}~@{text "()"} creates a new serial number
   that is unique over the runtime of the Isabelle/ML process.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example shows how to create unique
@@ -1881,16 +1791,14 @@
   ('a -> ('b * 'a) option) -> 'b"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type "'a Synchronized.var"} represents synchronized
+  \<^descr> Type @{ML_type "'a Synchronized.var"} represents synchronized
   variables with state of type @{ML_type 'a}.
 
-  \item @{ML Synchronized.var}~@{text "name x"} creates a synchronized
+  \<^descr> @{ML Synchronized.var}~@{text "name x"} creates a synchronized
   variable that is initialized with value @{text "x"}.  The @{text
   "name"} is used for tracing.
 
-  \item @{ML Synchronized.guarded_access}~@{text "var f"} lets the
+  \<^descr> @{ML Synchronized.guarded_access}~@{text "var f"} lets the
   function @{text "f"} operate within a critical section on the state
   @{text "x"} as follows: if @{text "f x"} produces @{ML NONE}, it
   continues to wait on the internal condition variable, expecting that
@@ -1900,7 +1808,6 @@
   signal to all waiting threads on the associated condition variable,
   and returns the result @{text "y"}.
 
-  \end{description}
 
   There are some further variants of the @{ML
   Synchronized.guarded_access} combinator, see @{file
@@ -1994,40 +1901,36 @@
   @{index_ML Par_Exn.release_first: "'a Exn.result list -> 'a list"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type "'a Exn.result"} represents the disjoint sum of
+  \<^descr> Type @{ML_type "'a Exn.result"} represents the disjoint sum of
   ML results explicitly, with constructor @{ML Exn.Res} for regular
   values and @{ML "Exn.Exn"} for exceptions.
 
-  \item @{ML Exn.capture}~@{text "f x"} manages the evaluation of
+  \<^descr> @{ML Exn.capture}~@{text "f x"} manages the evaluation of
   @{text "f x"} such that exceptions are made explicit as @{ML
   "Exn.Exn"}.  Note that this includes physical interrupts (see also
   \secref{sec:exceptions}), so the same precautions apply to user
   code: interrupts must not be absorbed accidentally!
 
-  \item @{ML Exn.interruptible_capture} is similar to @{ML
+  \<^descr> @{ML Exn.interruptible_capture} is similar to @{ML
   Exn.capture}, but interrupts are immediately re-raised as required
   for user code.
 
-  \item @{ML Exn.release}~@{text "result"} releases the original
+  \<^descr> @{ML Exn.release}~@{text "result"} releases the original
   runtime result, exposing its regular value or raising the reified
   exception.
 
-  \item @{ML Par_Exn.release_all}~@{text "results"} combines results
+  \<^descr> @{ML Par_Exn.release_all}~@{text "results"} combines results
   that were produced independently (e.g.\ by parallel evaluation).  If
   all results are regular values, that list is returned.  Otherwise,
   the collection of all exceptions is raised, wrapped-up as collective
   parallel exception.  Note that the latter prevents access to
   individual exceptions by conventional @{verbatim "handle"} of ML.
 
-  \item @{ML Par_Exn.release_first} is similar to @{ML
+  \<^descr> @{ML Par_Exn.release_first} is similar to @{ML
   Par_Exn.release_all}, but only the first (meaningful) exception that has
   occurred in the original evaluation process is raised again, the others are
   ignored.  That single exception may get handled by conventional
   means in ML.
-
-  \end{description}
 \<close>
 
 
@@ -2055,9 +1958,7 @@
   @{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
+  \<^descr> @{ML Par_List.map}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} is like @{ML
   "map"}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"}, but the evaluation of @{text "f x\<^sub>i"}
   for @{text "i = 1, \<dots>, n"} is performed in parallel.
 
@@ -2067,7 +1968,7 @@
   program exception that happened to occur in the parallel evaluation
   is propagated, and all other failures are ignored.
 
-  \item @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
+  \<^descr> @{ML Par_List.get_some}~@{text "f [x\<^sub>1, \<dots>, x\<^sub>n]"} produces some
   @{text "f x\<^sub>i"} that is of the form @{text "SOME y\<^sub>i"}, if that
   exists, otherwise @{text "NONE"}.  Thus it is similar to @{ML
   Library.get_first}, but subject to a non-deterministic parallel
@@ -2078,8 +1979,6 @@
   This generic parallel choice combinator is the basis for derived
   forms, such as @{ML Par_List.find_some}, @{ML Par_List.exists}, @{ML
   Par_List.forall}.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>Subsequently, the Ackermann function is evaluated in
@@ -2128,25 +2027,21 @@
   @{index_ML Lazy.force: "'a lazy -> 'a"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type "'a lazy"} represents lazy values over type @{verbatim
+  \<^descr> Type @{ML_type "'a lazy"} represents lazy values over type @{verbatim
   "'a"}.
 
-  \item @{ML Lazy.lazy}~@{text "(fn () => e)"} wraps the unevaluated
+  \<^descr> @{ML Lazy.lazy}~@{text "(fn () => e)"} wraps the unevaluated
   expression @{text e} as unfinished lazy value.
 
-  \item @{ML Lazy.value}~@{text a} wraps the value @{text a} as finished lazy
+  \<^descr> @{ML Lazy.value}~@{text a} wraps the value @{text a} as finished lazy
   value.  When forced, it returns @{text a} without any further evaluation.
 
   There is very low overhead for this proforma wrapping of strict values as
   lazy values.
 
-  \item @{ML Lazy.force}~@{text x} produces the result of the lazy value in a
+  \<^descr> @{ML Lazy.force}~@{text x} produces the result of the lazy value in a
   thread-safe manner as explained above. Thus it may cause the current thread
   to wait on a pending evaluation attempt by another thread.
-
-  \end{description}
 \<close>
 
 
@@ -2221,59 +2116,53 @@
   @{index_ML Future.fulfill: "'a future -> 'a -> unit"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type "'a future"} represents future values over type
+  \<^descr> Type @{ML_type "'a future"} represents future values over type
   @{verbatim "'a"}.
 
-  \item @{ML Future.fork}~@{text "(fn () => e)"} registers the unevaluated
+  \<^descr> @{ML Future.fork}~@{text "(fn () => e)"} registers the unevaluated
   expression @{text e} as unfinished future value, to be evaluated eventually
   on the parallel worker-thread farm. This is a shorthand for @{ML
   Future.forks} below, with default parameters and a single expression.
 
-  \item @{ML Future.forks}~@{text "params exprs"} is the general interface to
+  \<^descr> @{ML Future.forks}~@{text "params exprs"} is the general interface to
   fork several futures simultaneously. The @{text params} consist of the
   following fields:
 
-  \begin{itemize}
-
-  \<^item> @{text "name : string"} (default @{ML "\"\""}) specifies a common name
-  for the tasks of the forked futures, which serves diagnostic purposes.
-
-  \<^item> @{text "group : Future.group option"} (default @{ML NONE}) specifies
-  an optional task group for the forked futures. @{ML NONE} means that a new
-  sub-group of the current worker-thread task context is created. If this is
-  not a worker thread, the group will be a new root in the group hierarchy.
-
-  \<^item> @{text "deps : Future.task list"} (default @{ML "[]"}) specifies
-  dependencies on other future tasks, i.e.\ the adjacency relation in the
-  global task queue. Dependencies on already finished tasks are ignored.
-
-  \<^item> @{text "pri : int"} (default @{ML 0}) specifies a priority within the
-  task queue.
-
-  Typically there is only little deviation from the default priority @{ML 0}.
-  As a rule of thumb, @{ML "~1"} means ``low priority" and @{ML 1} means
-  ``high priority''.
-
-  Note that the task priority only affects the position in the queue, not the
-  thread priority. When a worker thread picks up a task for processing, it
-  runs with the normal thread priority to the end (or until canceled). Higher
-  priority tasks that are queued later need to wait until this (or another)
-  worker thread becomes free again.
-
-  \<^item> @{text "interrupts : bool"} (default @{ML true}) tells whether the
-  worker thread that processes the corresponding task is initially put into
-  interruptible state. This state may change again while running, by modifying
-  the thread attributes.
-
-  With interrupts disabled, a running future task cannot be canceled.  It is
-  the responsibility of the programmer that this special state is retained
-  only briefly.
-
-  \end{itemize}
-
-  \item @{ML Future.join}~@{text x} retrieves the value of an already finished
+    \<^item> @{text "name : string"} (default @{ML "\"\""}) specifies a common name
+    for the tasks of the forked futures, which serves diagnostic purposes.
+
+    \<^item> @{text "group : Future.group option"} (default @{ML NONE}) specifies
+    an optional task group for the forked futures. @{ML NONE} means that a new
+    sub-group of the current worker-thread task context is created. If this is
+    not a worker thread, the group will be a new root in the group hierarchy.
+
+    \<^item> @{text "deps : Future.task list"} (default @{ML "[]"}) specifies
+    dependencies on other future tasks, i.e.\ the adjacency relation in the
+    global task queue. Dependencies on already finished tasks are ignored.
+
+    \<^item> @{text "pri : int"} (default @{ML 0}) specifies a priority within the
+    task queue.
+
+    Typically there is only little deviation from the default priority @{ML 0}.
+    As a rule of thumb, @{ML "~1"} means ``low priority'' and @{ML 1} means
+    ``high priority''.
+
+    Note that the task priority only affects the position in the queue, not the
+    thread priority. When a worker thread picks up a task for processing, it
+    runs with the normal thread priority to the end (or until canceled). Higher
+    priority tasks that are queued later need to wait until this (or another)
+    worker thread becomes free again.
+
+    \<^item> @{text "interrupts : bool"} (default @{ML true}) tells whether the
+    worker thread that processes the corresponding task is initially put into
+    interruptible state. This state may change again while running, by modifying
+    the thread attributes.
+
+    With interrupts disabled, a running future task cannot be canceled.  It is
+    the responsibility of the programmer that this special state is retained
+    only briefly.
+
+  \<^descr> @{ML Future.join}~@{text x} retrieves the value of an already finished
   future, which may lead to an exception, according to the result of its
   previous evaluation.
 
@@ -2295,7 +2184,7 @@
   explicitly when forked (see @{text deps} above). Thus the evaluation can
   work from the bottom up, without join conflicts and wait states.
 
-  \item @{ML Future.joins}~@{text xs} joins the given list of futures
+  \<^descr> @{ML Future.joins}~@{text xs} joins the given list of futures
   simultaneously, which is more efficient than @{ML "map Future.join"}~@{text
   xs}.
 
@@ -2305,23 +2194,23 @@
   presently evaluated on other threads only happens as last resort, when no
   other unfinished futures are left over.
 
-  \item @{ML Future.value}~@{text a} wraps the value @{text a} as finished
+  \<^descr> @{ML Future.value}~@{text a} wraps the value @{text a} as finished
   future value, bypassing the worker-thread farm. When joined, it returns
   @{text a} without any further evaluation.
 
   There is very low overhead for this proforma wrapping of strict values as
   futures.
 
-  \item @{ML Future.map}~@{text "f x"} is a fast-path implementation of @{ML
+  \<^descr> @{ML Future.map}~@{text "f x"} is a fast-path implementation of @{ML
   Future.fork}~@{text "(fn () => f ("}@{ML Future.join}~@{text "x))"}, which
   avoids the full overhead of the task queue and worker-thread farm as far as
   possible. The function @{text f} is supposed to be some trivial
   post-processing or projection of the future result.
 
-  \item @{ML Future.cancel}~@{text "x"} cancels the task group of the given
+  \<^descr> @{ML Future.cancel}~@{text "x"} cancels the task group of the given
   future, using @{ML Future.cancel_group} below.
 
-  \item @{ML Future.cancel_group}~@{text "group"} cancels all tasks of the
+  \<^descr> @{ML Future.cancel_group}~@{text "group"} cancels all tasks of the
   given task group for all time. Threads that are presently processing a task
   of the given group are interrupted: it may take some time until they are
   actually terminated. Tasks that are queued but not yet processed are
@@ -2329,15 +2218,13 @@
   invalidated, any further attempt to fork a future that belongs to it will
   yield a canceled result as well.
 
-  \item @{ML Future.promise}~@{text abort} registers a passive future with the
+  \<^descr> @{ML Future.promise}~@{text abort} registers a passive future with the
   given @{text abort} operation: it is invoked when the future task group is
   canceled.
 
-  \item @{ML Future.fulfill}~@{text "x a"} finishes the passive future @{text
+  \<^descr> @{ML Future.fulfill}~@{text "x a"} finishes the passive future @{text
   x} by the given value @{text a}. If the promise has already been canceled,
   the attempt to fulfill it causes an exception.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Implementation/Prelim.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Prelim.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -26,8 +26,6 @@
   Contexts and derivations are linked by the following key
   principles:
 
-  \begin{itemize}
-
   \<^item> Transfer: monotonicity of derivations admits results to be
   transferred into a \emph{larger} context, i.e.\ @{text "\<Gamma> \<turnstile>\<^sub>\<Theta>
   \<phi>"} implies @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta>\<^sub>' \<phi>"} for contexts @{text "\<Theta>'
@@ -39,7 +37,6 @@
   @{text "\<Delta> = \<Gamma>' - \<Gamma>"}.  Note that @{text "\<Theta>"} remains unchanged here,
   only the @{text "\<Gamma>"} part is affected.
 
-  \end{itemize}
 
   \<^medskip>
   By modeling the main characteristics of the primitive
@@ -129,30 +126,26 @@
   @{index_ML Theory.ancestors_of: "theory -> theory list"} \\
   \end{mldecls}
 
-  \begin{description}
+  \<^descr> Type @{ML_type theory} represents theory contexts.
 
-  \item Type @{ML_type theory} represents theory contexts.
-
-  \item @{ML "Context.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} check strict
+  \<^descr> @{ML "Context.eq_thy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} checks strict
   identity of two theories.
 
-  \item @{ML "Context.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
+  \<^descr> @{ML "Context.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
   according to the intrinsic graph structure of the construction.
   This sub-theory relation is a nominal approximation of inclusion
   (@{text "\<subseteq>"}) of the corresponding content (according to the
   semantics of the ML modules that implement the data).
 
-  \item @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
+  \<^descr> @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
   a new theory based on the given parents.  This ML function is
   normally not invoked directly.
 
-  \item @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
+  \<^descr> @{ML "Theory.parents_of"}~@{text "thy"} returns the direct
   ancestors of @{text thy}.
 
-  \item @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
+  \<^descr> @{ML "Theory.ancestors_of"}~@{text "thy"} returns all
   ancestors of @{text thy} (not including @{text thy} itself).
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -167,20 +160,16 @@
   @@{ML_antiquotation theory_context} nameref
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{theory}"} refers to the background theory of the
+  \<^descr> @{text "@{theory}"} refers to the background theory of the
   current context --- as abstract value.
 
-  \item @{text "@{theory A}"} refers to an explicitly named ancestor
+  \<^descr> @{text "@{theory A}"} refers to an explicitly named ancestor
   theory @{text "A"} of the background theory of the current context
   --- as abstract value.
 
-  \item @{text "@{theory_context A}"} is similar to @{text "@{theory
+  \<^descr> @{text "@{theory_context A}"} is similar to @{text "@{theory
   A}"}, but presents the result as initial @{ML_type Proof.context}
   (see also @{ML Proof_Context.init_global}).
-
-  \end{description}
 \<close>
 
 
@@ -220,21 +209,17 @@
   @{index_ML Proof_Context.transfer: "theory -> Proof.context -> Proof.context"} \\
   \end{mldecls}
 
-  \begin{description}
+  \<^descr> Type @{ML_type Proof.context} represents proof contexts.
 
-  \item Type @{ML_type Proof.context} represents proof contexts.
-
-  \item @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
+  \<^descr> @{ML Proof_Context.init_global}~@{text "thy"} produces a proof
   context derived from @{text "thy"}, initializing all data.
 
-  \item @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
+  \<^descr> @{ML Proof_Context.theory_of}~@{text "ctxt"} selects the
   background theory from @{text "ctxt"}.
 
-  \item @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
+  \<^descr> @{ML Proof_Context.transfer}~@{text "thy ctxt"} promotes the
   background theory of @{text "ctxt"} to the super theory @{text
   "thy"}.
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -242,16 +227,12 @@
   @{ML_antiquotation_def "context"} & : & @{text ML_antiquotation} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{text "@{context}"} refers to \emph{the} context at
+  \<^descr> @{text "@{context}"} refers to \emph{the} context at
   compile-time --- as abstract value.  Independently of (local) theory
   or proof mode, this always produces a meaningful result.
 
   This is probably the most common antiquotation in interactive
   experimentation with ML inside Isar.
-
-  \end{description}
 \<close>
 
 
@@ -279,22 +260,18 @@
   @{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Context.generic} is the direct sum of @{ML_type
+  \<^descr> Type @{ML_type Context.generic} is the direct sum of @{ML_type
   "theory"} and @{ML_type "Proof.context"}, with the datatype
   constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
 
-  \item @{ML Context.theory_of}~@{text "context"} always produces a
+  \<^descr> @{ML Context.theory_of}~@{text "context"} always produces a
   theory from the generic @{text "context"}, using @{ML
   "Proof_Context.theory_of"} as required.
 
-  \item @{ML Context.proof_of}~@{text "context"} always produces a
+  \<^descr> @{ML Context.proof_of}~@{text "context"} always produces a
   proof context from the generic @{text "context"}, using @{ML
   "Proof_Context.init_global"} as required (note that this re-initializes the
   context data with each invocation).
-
-  \end{description}
 \<close>
 
 
@@ -383,20 +360,16 @@
   @{index_ML_functor Generic_Data} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML_functor Theory_Data}@{text "(spec)"} declares data for
+  \<^descr> @{ML_functor Theory_Data}@{text "(spec)"} declares data for
   type @{ML_type theory} according to the specification provided as
   argument structure.  The resulting structure provides data init and
   access operations as described above.
 
-  \item @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
+  \<^descr> @{ML_functor Proof_Data}@{text "(spec)"} is analogous to
   @{ML_functor Theory_Data} for type @{ML_type Proof.context}.
 
-  \item @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
+  \<^descr> @{ML_functor Generic_Data}@{text "(spec)"} is analogous to
   @{ML_functor Theory_Data} for type @{ML_type Context.generic}.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>
@@ -456,7 +429,6 @@
 
   \<^medskip>
   Our intended invariant is achieved as follows:
-  \begin{enumerate}
 
   \<^enum> @{ML Wellformed_Terms.add} only admits terms that have passed
   the @{ML Sign.cert_term} check of the given theory at that point.
@@ -466,7 +438,6 @@
   upwards in the hierarchy (via extension or merges), and maintain
   wellformedness without further checks.
 
-  \end{enumerate}
 
   Note that all basic operations of the inference kernel (which
   includes @{ML Sign.cert_term}) observe this monotonicity principle,
@@ -544,15 +515,13 @@
   string Config.T"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Config.get}~@{text "ctxt config"} gets the value of
+  \<^descr> @{ML Config.get}~@{text "ctxt config"} gets the value of
   @{text "config"} in the given context.
 
-  \item @{ML Config.map}~@{text "config f ctxt"} updates the context
+  \<^descr> @{ML Config.map}~@{text "config f ctxt"} updates the context
   by updating the value of @{text "config"}.
 
-  \item @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
+  \<^descr> @{text "config ="}~@{ML Attrib.setup_config_bool}~@{text "name
   default"} creates a named configuration option of type @{ML_type
   bool}, with the given @{text "default"} depending on the application
   context.  The resulting @{text "config"} can be used to get/map its
@@ -560,11 +529,9 @@
   background theory that registers the option as attribute with some
   concrete syntax.
 
-  \item @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
+  \<^descr> @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
   Attrib.config_string} work like @{ML Attrib.config_bool}, but for
   types @{ML_type int} and @{ML_type string}, respectively.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example shows how to declare and use a
@@ -621,7 +588,6 @@
   Subsequently, we shall introduce specific categories of
   names.  Roughly speaking these correspond to logical entities as
   follows:
-  \begin{itemize}
 
   \<^item> Basic names (\secref{sec:basic-name}): free and bound
   variables.
@@ -632,8 +598,6 @@
   (type constructors, term constants, other concepts defined in user
   space).  Such entities are typically managed via name spaces
   (\secref{sec:name-space}).
-
-  \end{itemize}
 \<close>
 
 
@@ -690,27 +654,25 @@
   @{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Name.internal}~@{text "name"} produces an internal name
+  \<^descr> @{ML Name.internal}~@{text "name"} produces an internal name
   by adding one underscore.
 
-  \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
+  \<^descr> @{ML Name.skolem}~@{text "name"} produces a Skolem name by
   adding two underscores.
 
-  \item Type @{ML_type Name.context} represents the context of already
+  \<^descr> Type @{ML_type Name.context} represents the context of already
   used names; the initial value is @{ML "Name.context"}.
 
-  \item @{ML Name.declare}~@{text "name"} enters a used name into the
+  \<^descr> @{ML Name.declare}~@{text "name"} enters a used name into the
   context.
 
-  \item @{ML Name.invent}~@{text "context name n"} produces @{text
+  \<^descr> @{ML Name.invent}~@{text "context name n"} produces @{text
   "n"} fresh names derived from @{text "name"}.
 
-  \item @{ML Name.variant}~@{text "name context"} produces a fresh
+  \<^descr> @{ML Name.variant}~@{text "name context"} produces a fresh
   variant of @{text "name"}; the result is declared to the context.
 
-  \item @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
+  \<^descr> @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
   of declared type and term variable names.  Projecting a proof
   context down to a primitive name context is occasionally useful when
   invoking lower-level operations.  Regular management of ``fresh
@@ -718,8 +680,6 @@
   Variable}, which is also able to provide an official status of
   ``locally fixed variable'' within the logical environment (cf.\
   \secref{sec:variables}).
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following simple examples demonstrate how to produce
@@ -775,15 +735,12 @@
   Isabelle syntax observes the following rules for
   representing an indexname @{text "(x, i)"} as a packed string:
 
-  \begin{itemize}
-
   \<^item> @{text "?x"} if @{text "x"} does not end with a digit and @{text "i = 0"},
 
   \<^item> @{text "?xi"} if @{text "x"} does not end with a digit,
 
   \<^item> @{text "?x.i"} otherwise.
 
-  \end{itemize}
 
   Indexnames may acquire large index numbers after several maxidx
   shifts have been applied.  Results are usually normalized towards
@@ -798,15 +755,11 @@
   @{index_ML_type indexname: "string * int"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type indexname} represents indexed names.  This is
+  \<^descr> Type @{ML_type indexname} represents indexed names.  This is
   an abbreviation for @{ML_type "string * int"}.  The second component
   is usually non-negative, except for situations where @{text "(x,
   -1)"} is used to inject basic names into this type.  Other negative
   indexes should not be used.
-
-  \end{description}
 \<close>
 
 
@@ -843,22 +796,18 @@
   @{index_ML Long_Name.explode: "string -> string list"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Long_Name.base_name}~@{text "name"} returns the base name
+  \<^descr> @{ML Long_Name.base_name}~@{text "name"} returns the base name
   of a long name.
 
-  \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
+  \<^descr> @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
   of a long name.
 
-  \item @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
+  \<^descr> @{ML Long_Name.append}~@{text "name\<^sub>1 name\<^sub>2"} appends two long
   names.
 
-  \item @{ML Long_Name.implode}~@{text "names"} and @{ML
+  \<^descr> @{ML Long_Name.implode}~@{text "names"} and @{ML
   Long_Name.explode}~@{text "name"} convert between the packed string
   representation and the explicit list form of long names.
-
-  \end{description}
 \<close>
 
 
@@ -947,68 +896,66 @@
   @{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"}
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type binding} represents the abstract concept of
+  \<^descr> Type @{ML_type binding} represents the abstract concept of
   name bindings.
 
-  \item @{ML Binding.empty} is the empty binding.
+  \<^descr> @{ML Binding.empty} is the empty binding.
 
-  \item @{ML Binding.name}~@{text "name"} produces a binding with base
+  \<^descr> @{ML Binding.name}~@{text "name"} produces a binding with base
   name @{text "name"}.  Note that this lacks proper source position
   information; see also the ML antiquotation @{ML_antiquotation
   binding}.
 
-  \item @{ML Binding.qualify}~@{text "mandatory name binding"}
+  \<^descr> @{ML Binding.qualify}~@{text "mandatory name binding"}
   prefixes qualifier @{text "name"} to @{text "binding"}.  The @{text
   "mandatory"} flag tells if this name component always needs to be
   given in name space accesses --- this is mostly @{text "false"} in
   practice.  Note that this part of qualification is typically used in
   derived specification mechanisms.
 
-  \item @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
+  \<^descr> @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
   affects the system prefix.  This part of extra qualification is
   typically used in the infrastructure for modular specifications,
   notably ``local theory targets'' (see also \chref{ch:local-theory}).
 
-  \item @{ML Binding.concealed}~@{text "binding"} indicates that the
+  \<^descr> @{ML Binding.concealed}~@{text "binding"} indicates that the
   binding shall refer to an entity that serves foundational purposes
   only.  This flag helps to mark implementation details of
   specification mechanism etc.  Other tools should not depend on the
   particulars of concealed entities (cf.\ @{ML
   Name_Space.is_concealed}).
 
-  \item @{ML Binding.print}~@{text "binding"} produces a string
+  \<^descr> @{ML Binding.print}~@{text "binding"} produces a string
   representation for human-readable output, together with some formal
   markup that might get used in GUI front-ends, for example.
 
-  \item Type @{ML_type Name_Space.naming} represents the abstract
+  \<^descr> Type @{ML_type Name_Space.naming} represents the abstract
   concept of a naming policy.
 
-  \item @{ML Name_Space.global_naming} is the default naming policy: it is
+  \<^descr> @{ML Name_Space.global_naming} is the default naming policy: it is
   global and lacks any path prefix.  In a regular theory context this is
   augmented by a path prefix consisting of the theory name.
 
-  \item @{ML Name_Space.add_path}~@{text "path naming"} augments the
+  \<^descr> @{ML Name_Space.add_path}~@{text "path naming"} augments the
   naming policy by extending its path component.
 
-  \item @{ML Name_Space.full_name}~@{text "naming binding"} turns a
+  \<^descr> @{ML Name_Space.full_name}~@{text "naming binding"} turns a
   name binding (usually a basic name) into the fully qualified
   internal name, according to the given naming policy.
 
-  \item Type @{ML_type Name_Space.T} represents name spaces.
+  \<^descr> Type @{ML_type Name_Space.T} represents name spaces.
 
-  \item @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
+  \<^descr> @{ML Name_Space.empty}~@{text "kind"} and @{ML Name_Space.merge}~@{text
   "(space\<^sub>1, space\<^sub>2)"} are the canonical operations for
   maintaining name spaces according to theory data management
   (\secref{sec:context-data}); @{text "kind"} is a formal comment
   to characterize the purpose of a name space.
 
-  \item @{ML Name_Space.declare}~@{text "context strict binding
+  \<^descr> @{ML Name_Space.declare}~@{text "context strict binding
   space"} enters a name binding as fully qualified internal name into
   the name space, using the naming of the context.
 
-  \item @{ML Name_Space.intern}~@{text "space name"} internalizes a
+  \<^descr> @{ML Name_Space.intern}~@{text "space name"} internalizes a
   (partially qualified) external name.
 
   This operation is mostly for parsing!  Note that fully qualified
@@ -1017,17 +964,15 @@
   (or their derivatives for @{ML_type theory} and
   @{ML_type Proof.context}).
 
-  \item @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
+  \<^descr> @{ML Name_Space.extern}~@{text "ctxt space name"} externalizes a
   (fully qualified) internal name.
 
   This operation is mostly for printing!  User code should not rely on
   the precise result too much.
 
-  \item @{ML Name_Space.is_concealed}~@{text "space name"} indicates
+  \<^descr> @{ML Name_Space.is_concealed}~@{text "space name"} indicates
   whether @{text "name"} refers to a strictly private entity that
   other tools are supposed to ignore!
-
-  \end{description}
 \<close>
 
 text %mlantiq \<open>
@@ -1039,14 +984,10 @@
   @@{ML_antiquotation binding} name
   \<close>}
 
-  \begin{description}
-
-  \item @{text "@{binding name}"} produces a binding with base name
+  \<^descr> @{text "@{binding name}"} produces a binding with base name
   @{text "name"} and the source position taken from the concrete
   syntax of this antiquotation.  In many situations this is more
   appropriate than the more basic @{ML Binding.name} function.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example yields the source position of some
--- a/src/Doc/Implementation/Proof.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Proof.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -114,50 +114,46 @@
   ((string * (string * typ)) list * term) * Proof.context"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
+  \<^descr> @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
   variables @{text "xs"}, returning the resulting internal names.  By
   default, the internal representation coincides with the external
   one, which also means that the given variables must not be fixed
   already.  There is a different policy within a local proof body: the
   given names are just hints for newly invented Skolem variables.
 
-  \item @{ML Variable.variant_fixes} is similar to @{ML
+  \<^descr> @{ML Variable.variant_fixes} is similar to @{ML
   Variable.add_fixes}, but always produces fresh variants of the given
   names.
 
-  \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
+  \<^descr> @{ML Variable.declare_term}~@{text "t ctxt"} declares term
   @{text "t"} to belong to the context.  This automatically fixes new
   type variables, but not term variables.  Syntactic constraints for
   type and term variables are declared uniformly, though.
 
-  \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
+  \<^descr> @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
   syntactic constraints from term @{text "t"}, without making it part
   of the context yet.
 
-  \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
+  \<^descr> @{ML Variable.export}~@{text "inner outer thms"} generalizes
   fixed type and term variables in @{text "thms"} according to the
   difference of the @{text "inner"} and @{text "outer"} context,
   following the principles sketched above.
 
-  \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
+  \<^descr> @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
   variables in @{text "ts"} as far as possible, even those occurring
   in fixed term variables.  The default policy of type-inference is to
   fix newly introduced type variables, which is essentially reversed
   with @{ML Variable.polymorphic}: here the given terms are detached
   from the context as far as possible.
 
-  \item @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
+  \<^descr> @{ML Variable.import}~@{text "open thms ctxt"} invents fixed
   type and term variables for the schematic ones occurring in @{text
   "thms"}.  The @{text "open"} flag indicates whether the fixed names
   should be accessible to the user, otherwise newly introduced names
   are marked as ``internal'' (\secref{sec:names}).
 
-  \item @{ML Variable.focus}~@{text "bindings B"} decomposes the outermost @{text
+  \<^descr> @{ML Variable.focus}~@{text "bindings B"} decomposes the outermost @{text
   "\<And>"} prefix of proposition @{text "B"}, using the given name bindings.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example shows how to work with fixed term
@@ -291,36 +287,32 @@
   @{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type Assumption.export} represents arbitrary export
+  \<^descr> Type @{ML_type Assumption.export} represents arbitrary export
   rules, which is any function of type @{ML_type "bool -> cterm list
   -> thm -> thm"}, where the @{ML_type "bool"} indicates goal mode,
   and the @{ML_type "cterm list"} the collection of assumptions to be
   discharged simultaneously.
 
-  \item @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
+  \<^descr> @{ML Assumption.assume}~@{text "ctxt A"} turns proposition @{text
   "A"} into a primitive assumption @{text "A \<turnstile> A'"}, where the
   conclusion @{text "A'"} is in HHF normal form.
 
-  \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
+  \<^descr> @{ML Assumption.add_assms}~@{text "r As"} augments the context
   by assumptions @{text "As"} with export rule @{text "r"}.  The
   resulting facts are hypothetical theorems as produced by the raw
   @{ML Assumption.assume}.
 
-  \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
+  \<^descr> @{ML Assumption.add_assumes}~@{text "As"} is a special case of
   @{ML Assumption.add_assms} where the export rule performs @{text
   "\<Longrightarrow>\<hyphen>intro"} or @{text "#\<Longrightarrow>\<hyphen>intro"}, depending on goal
   mode.
 
-  \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
+  \<^descr> @{ML Assumption.export}~@{text "is_goal inner outer thm"}
  exports result @{text "thm"} from the @{text "inner"} context
   back into the @{text "outer"} one; @{text "is_goal = true"} means
   this is a goal context.  The result is in HHF normal form.  Note
   that @{ML "Proof_Context.export"} combines @{ML "Variable.export"}
   and @{ML "Assumption.export"} in the canonical way.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following example demonstrates how rules can be
@@ -419,33 +411,31 @@
   Proof.context -> ((string * cterm) list * thm list) * Proof.context"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
+  \<^descr> @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
   of the specified sub-goal, producing an extended context and a
   reduced goal, which needs to be solved by the given tactic.  All
   schematic parameters of the goal are imported into the context as
   fixed ones, which may not be instantiated in the sub-proof.
 
-  \item @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
+  \<^descr> @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
   Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are
   slightly more flexible: only the specified parts of the subgoal are
   imported into the context, and the body tactic may introduce new
   subgoals and schematic variables.
 
-  \item @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
+  \<^descr> @{ML Subgoal.focus}, @{ML Subgoal.focus_prems}, @{ML
   Subgoal.focus_params} extract the focus information from a goal
   state in the same way as the corresponding tacticals above.  This is
   occasionally useful to experiment without writing actual tactics
   yet.
 
-  \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
+  \<^descr> @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
   "C"} in the context augmented by fixed variables @{text "xs"} and
   assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
   it.  The latter may depend on the local assumptions being presented
   as facts.  The result is in HHF normal form.
 
-  \item @{ML Goal.prove_common}~@{text "ctxt fork_pri"} is the common form
+  \<^descr> @{ML Goal.prove_common}~@{text "ctxt fork_pri"} is the common form
   to state and prove a simultaneous goal statement, where @{ML Goal.prove}
   is a convenient shorthand that is most frequently used in applications.
 
@@ -462,12 +452,10 @@
   transaction. Thus the system is able to expose error messages ultimately
   to the end-user, even though the subsequent ML code misses them.
 
-  \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
+  \<^descr> @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
   given facts using a tactic, which results in additional fixed
   variables and assumptions in the context.  Final results need to be
   exported explicitly.
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The following minimal example illustrates how to access
--- a/src/Doc/Implementation/Syntax.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Syntax.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -36,13 +36,10 @@
   \secref{sec:term-check}, respectively.  This results in the
   following decomposition of the main operations:
 
-  \begin{itemize}
-
   \<^item> @{text "read = parse; check"}
 
   \<^item> @{text "pretty = uncheck; unparse"}
 
-  \end{itemize}
 
   For example, some specification package might thus intercept syntax
  processing at a well-defined stage after @{text "parse"}, to augment the
@@ -88,12 +85,10 @@
   @{index_ML Syntax.string_of_term: "Proof.context -> term -> string"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Syntax.read_typs}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_typs}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as types of the logic.
 
-  \item @{ML Syntax.read_terms}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_terms}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as terms of the logic.
   Type-reconstruction puts all parsed terms into the same scope: types of
   free variables ultimately need to coincide.
@@ -103,32 +98,31 @@
   is possible to use @{ML Type.constraint} on the intermediate pre-terms
   (\secref{sec:term-check}).
 
-  \item @{ML Syntax.read_props}~@{text "ctxt strs"} parses and checks a
+  \<^descr> @{ML Syntax.read_props}~@{text "ctxt strs"} parses and checks a
   simultaneous list of source strings as terms of the logic, with an implicit
   type-constraint for each argument to enforce type @{typ prop}; this also
   affects the inner syntax for parsing. The remaining type-reconstruction
   works as for @{ML Syntax.read_terms}.
 
-  \item @{ML Syntax.read_typ}, @{ML Syntax.read_term}, @{ML Syntax.read_prop}
+  \<^descr> @{ML Syntax.read_typ}, @{ML Syntax.read_term}, @{ML Syntax.read_prop}
   are like the simultaneous versions, but operate on a single argument only.
   This convenient shorthand is adequate in situations where a single item in
   its own scope is processed. Do not use @{ML "map o Syntax.read_term"} where
   @{ML Syntax.read_terms} is actually intended!
 
-  \item @{ML Syntax.pretty_typ}~@{text "ctxt T"} and @{ML
+  \<^descr> @{ML Syntax.pretty_typ}~@{text "ctxt T"} and @{ML
   Syntax.pretty_term}~@{text "ctxt t"} uncheck and pretty-print the given type
   or term, respectively. Although the uncheck phase acts on a simultaneous
   list as well, this is rarely used in practice, so only the singleton case is
   provided as combined pretty operation. There is no distinction of term vs.\
   proposition.
 
-  \item @{ML Syntax.string_of_typ} and @{ML Syntax.string_of_term} are
+  \<^descr> @{ML Syntax.string_of_typ} and @{ML Syntax.string_of_term} are
   convenient compositions of @{ML Syntax.pretty_typ} and @{ML
   Syntax.pretty_term} with @{ML Pretty.string_of} for output. The result may
   be concatenated with other strings, as long as there is no further
   formatting and line-breaking involved.
 
-  \end{description}
 
   @{ML Syntax.read_term}, @{ML Syntax.read_prop}, and @{ML
   Syntax.string_of_term} are the most important operations in practice.
@@ -179,28 +173,25 @@
   @{index_ML Syntax.unparse_term: "Proof.context -> term -> Pretty.T"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Syntax.parse_typ}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_typ}~@{text "ctxt str"} parses a source string as
   pre-type that is ready to be used with subsequent check operations.
 
-  \item @{ML Syntax.parse_term}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_term}~@{text "ctxt str"} parses a source string as
   pre-term that is ready to be used with subsequent check operations.
 
-  \item @{ML Syntax.parse_prop}~@{text "ctxt str"} parses a source string as
+  \<^descr> @{ML Syntax.parse_prop}~@{text "ctxt str"} parses a source string as
   pre-term that is ready to be used with subsequent check operations. The
   inner syntax category is @{typ prop} and a suitable type-constraint is
   included to ensure that this information is observed in subsequent type
   reconstruction.
 
-  \item @{ML Syntax.unparse_typ}~@{text "ctxt T"} unparses a type after
+  \<^descr> @{ML Syntax.unparse_typ}~@{text "ctxt T"} unparses a type after
   uncheck operations, to turn it into a pretty tree.
 
-  \item @{ML Syntax.unparse_term}~@{text "ctxt T"} unparses a term after
+  \<^descr> @{ML Syntax.unparse_term}~@{text "ctxt T"} unparses a term after
   uncheck operations, to turn it into a pretty tree. There is no distinction
   for propositions here.
 
-  \end{description}
 
   These operations always operate on a single item; use the combinator @{ML
   map} to apply them to a list.
@@ -247,13 +238,11 @@
   @{index_ML Syntax.uncheck_terms: "Proof.context -> term list -> term list"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Syntax.check_typs}~@{text "ctxt Ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_typs}~@{text "ctxt Ts"} checks a simultaneous list
   of pre-types as types of the logic.  Typically, this involves normalization
   of type synonyms.
 
-  \item @{ML Syntax.check_terms}~@{text "ctxt ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_terms}~@{text "ctxt ts"} checks a simultaneous list
   of pre-terms as terms of the logic. Typically, this involves type-inference
  and normalization of term abbreviations. The types within the given terms are
   treated in the same way as for @{ML Syntax.check_typs}.
@@ -264,19 +253,18 @@
   is checked; afterwards the type arguments are recovered with @{ML
   Logic.dest_type}.
 
-  \item @{ML Syntax.check_props}~@{text "ctxt ts"} checks a simultaneous list
+  \<^descr> @{ML Syntax.check_props}~@{text "ctxt ts"} checks a simultaneous list
   of pre-terms as terms of the logic, such that all terms are constrained by
   type @{typ prop}. The remaining check operation works as @{ML
   Syntax.check_terms} above.
 
-  \item @{ML Syntax.uncheck_typs}~@{text "ctxt Ts"} unchecks a simultaneous
+  \<^descr> @{ML Syntax.uncheck_typs}~@{text "ctxt Ts"} unchecks a simultaneous
   list of types of the logic, in preparation of pretty printing.
 
-  \item @{ML Syntax.uncheck_terms}~@{text "ctxt ts"} unchecks a simultaneous
+  \<^descr> @{ML Syntax.uncheck_terms}~@{text "ctxt ts"} unchecks a simultaneous
   list of terms of the logic, in preparation of pretty printing. There is no
   distinction for propositions here.
 
-  \end{description}
 
   These operations always operate simultaneously on a list; use the combinator
   @{ML singleton} to apply them to a single item.
--- a/src/Doc/Implementation/Tactic.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Implementation/Tactic.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -70,24 +70,20 @@
   @{index_ML Goal.conclude: "thm -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
+  \<^descr> @{ML "Goal.init"}~@{text C} initializes a tactical goal from
   the well-formed proposition @{text C}.
 
-  \item @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
+  \<^descr> @{ML "Goal.finish"}~@{text "ctxt thm"} checks whether theorem
   @{text "thm"} is a solved goal (no subgoals), and concludes the
   result by removing the goal protection.  The context is only
   required for printing error messages.
 
-  \item @{ML "Goal.protect"}~@{text "n thm"} protects the statement
+  \<^descr> @{ML "Goal.protect"}~@{text "n thm"} protects the statement
   of theorem @{text "thm"}.  The parameter @{text n} indicates the
   number of premises to be retained.
 
-  \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
+  \<^descr> @{ML "Goal.conclude"}~@{text "thm"} removes the goal
   protection, even if there are pending subgoals.
-
-  \end{description}
 \<close>
 
 
@@ -150,8 +146,6 @@
   The main well-formedness conditions for proper tactics are
   summarized as follows.
 
-  \begin{itemize}
-
   \<^item> General tactic failure is indicated by an empty result, only
   serious faults may produce an exception.
 
@@ -164,7 +158,6 @@
 
   \<^item> Range errors in subgoal addressing produce an empty result.
 
-  \end{itemize}
 
   Some of these conditions are checked by higher-level goal
   infrastructure (\secref{sec:struct-goals}); others are not checked
@@ -187,54 +180,50 @@
   @{index_ML PREFER_GOAL: "tactic -> int -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item Type @{ML_type tactic} represents tactics.  The
+  \<^descr> Type @{ML_type tactic} represents tactics.  The
   well-formedness conditions described above need to be observed.  See
   also @{file "~~/src/Pure/General/seq.ML"} for the underlying
   implementation of lazy sequences.
 
-  \item Type @{ML_type "int -> tactic"} represents tactics with
+  \<^descr> Type @{ML_type "int -> tactic"} represents tactics with
   explicit subgoal addressing, with well-formedness conditions as
   described above.
 
-  \item @{ML no_tac} is a tactic that always fails, returning the
+  \<^descr> @{ML no_tac} is a tactic that always fails, returning the
   empty sequence.
 
-  \item @{ML all_tac} is a tactic that always succeeds, returning a
+  \<^descr> @{ML all_tac} is a tactic that always succeeds, returning a
   singleton sequence with unchanged goal state.
 
-  \item @{ML print_tac}~@{text "ctxt message"} is like @{ML all_tac}, but
+  \<^descr> @{ML print_tac}~@{text "ctxt message"} is like @{ML all_tac}, but
   prints a message together with the goal state on the tracing
   channel.
 
-  \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
+  \<^descr> @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
   into a tactic with unique result.  Exception @{ML THM} is considered
   a regular tactic failure and produces an empty result; other
   exceptions are passed through.
 
-  \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
+  \<^descr> @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
   most basic form to produce a tactic with subgoal addressing.  The
   given abstraction over the subgoal term and subgoal number allows to
   peek at the relevant information of the full goal state.  The
   subgoal range is checked as required above.
 
-  \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
+  \<^descr> @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
   subgoal as @{ML_type cterm} instead of raw @{ML_type term}.  This
   avoids expensive re-certification in situations where the subgoal is
   used directly for primitive inferences.
 
-  \item @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
+  \<^descr> @{ML SELECT_GOAL}~@{text "tac i"} confines a tactic to the
   specified subgoal @{text "i"}.  This rearranges subgoals and the
   main goal protection (\secref{sec:tactical-goals}), while retaining
   the syntactic context of the overall goal state (concerning
   schematic variables etc.).
 
-  \item @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
+  \<^descr> @{ML PREFER_GOAL}~@{text "tac i"} rearranges subgoals to put
   @{text "i"} in front.  This is similar to @{ML SELECT_GOAL}, but
   without changing the main goal protection.
-
-  \end{description}
 \<close>
 
 
@@ -264,8 +253,6 @@
   sequence enumerates all possibilities of the following choices (if
   applicable):
 
-  \begin{enumerate}
-
   \<^enum> selecting one of the rules given as argument to the tactic;
 
   \<^enum> selecting a subgoal premise to eliminate, unifying it against
@@ -274,7 +261,6 @@
   \<^enum> unifying the conclusion of the subgoal to the conclusion of
   the rule.
 
-  \end{enumerate}
 
   Recall that higher-order unification may produce multiple results
   that are enumerated here.
@@ -295,31 +281,29 @@
   @{index_ML bimatch_tac: "Proof.context -> (bool * thm) list -> int -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML resolve_tac}~@{text "ctxt thms i"} refines the goal state
+  \<^descr> @{ML resolve_tac}~@{text "ctxt thms i"} refines the goal state
   using the given theorems, which should normally be introduction
   rules.  The tactic resolves a rule's conclusion with subgoal @{text
   i}, replacing it by the corresponding versions of the rule's
   premises.
 
-  \item @{ML eresolve_tac}~@{text "ctxt thms i"} performs elim-resolution
+  \<^descr> @{ML eresolve_tac}~@{text "ctxt thms i"} performs elim-resolution
  with the given theorems, which are normally elimination rules.
 
   Note that @{ML_text "eresolve_tac ctxt [asm_rl]"} is equivalent to @{ML_text
   "assume_tac ctxt"}, which facilitates mixing of assumption steps with
   genuine eliminations.
 
-  \item @{ML dresolve_tac}~@{text "ctxt thms i"} performs
+  \<^descr> @{ML dresolve_tac}~@{text "ctxt thms i"} performs
   destruct-resolution with the given theorems, which should normally
   be destruction rules.  This replaces an assumption by the result of
   applying one of the rules.
 
-  \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
+  \<^descr> @{ML forward_tac} is like @{ML dresolve_tac} except that the
   selected assumption is not deleted.  It applies a rule to an
   assumption, adding the result as a new assumption.
 
-  \item @{ML biresolve_tac}~@{text "ctxt brls i"} refines the proof state
+  \<^descr> @{ML biresolve_tac}~@{text "ctxt brls i"} refines the proof state
   by resolution or elim-resolution on each rule, as indicated by its
   flag.  It affects subgoal @{text "i"} of the proof state.
 
@@ -329,16 +313,16 @@
   elimination rules, which is useful to organize the search process
   systematically in proof tools.
 
-  \item @{ML assume_tac}~@{text "ctxt i"} attempts to solve subgoal @{text i}
+  \<^descr> @{ML assume_tac}~@{text "ctxt i"} attempts to solve subgoal @{text i}
   by assumption (modulo higher-order unification).
 
-  \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
+  \<^descr> @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
   only for immediate @{text "\<alpha>"}-convertibility instead of using
   unification.  It succeeds (with a unique next state) if one of the
   assumptions is equal to the subgoal's conclusion.  Since it does not
   instantiate variables, it cannot make other subgoals unprovable.
 
-  \item @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
+  \<^descr> @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
   bimatch_tac} are similar to @{ML resolve_tac}, @{ML eresolve_tac},
   @{ML dresolve_tac}, and @{ML biresolve_tac}, respectively, but do
   not instantiate schematic variables in the goal state.%
@@ -350,7 +334,6 @@
   These tactics were written for a specific application within the classical reasoner.
 
   Flexible subgoals are not updated at will, but are left alone.
-  \end{description}
 \<close>
 
 
@@ -419,37 +402,34 @@
   @{index_ML rename_tac: "string list -> int -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML Rule_Insts.res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
+  \<^descr> @{ML Rule_Insts.res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
   rule @{text thm} with the instantiations @{text insts}, as described
   above, and then performs resolution on subgoal @{text i}.
   
-  \item @{ML Rule_Insts.eres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
+  \<^descr> @{ML Rule_Insts.eres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
   but performs elim-resolution.
 
-  \item @{ML Rule_Insts.dres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
+  \<^descr> @{ML Rule_Insts.dres_inst_tac} is like @{ML Rule_Insts.res_inst_tac},
   but performs destruct-resolution.
 
-  \item @{ML Rule_Insts.forw_inst_tac} is like @{ML Rule_Insts.dres_inst_tac}
+  \<^descr> @{ML Rule_Insts.forw_inst_tac} is like @{ML Rule_Insts.dres_inst_tac}
   except that the selected assumption is not deleted.
 
-  \item @{ML Rule_Insts.subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
+  \<^descr> @{ML Rule_Insts.subgoal_tac}~@{text "ctxt \<phi> i"} adds the proposition
   @{text "\<phi>"} as local premise to subgoal @{text "i"}, and poses the
   same as a new subgoal @{text "i + 1"} (in the original context).
 
-  \item @{ML Rule_Insts.thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
+  \<^descr> @{ML Rule_Insts.thin_tac}~@{text "ctxt \<phi> i"} deletes the specified
   premise from subgoal @{text i}.  Note that @{text \<phi>} may contain
   schematic variables, to abbreviate the intended proposition; the
   first matching subgoal premise will be deleted.  Removing useless
   premises from a subgoal increases its readability and can make
   search tactics run faster.
 
-  \item @{ML rename_tac}~@{text "names i"} renames the innermost
+  \<^descr> @{ML rename_tac}~@{text "names i"} renames the innermost
   parameters of subgoal @{text i} according to the provided @{text
   names} (which need to be distinct identifiers).
 
-  \end{description}
 
   For historical reasons, the above instantiation tactics take
   unparsed string arguments, which makes them hard to use in general
@@ -473,16 +453,14 @@
   @{index_ML flexflex_tac: "Proof.context -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
+  \<^descr> @{ML rotate_tac}~@{text "n i"} rotates the premises of subgoal
   @{text i} by @{text n} positions: from right to left if @{text n} is
   positive, and from left to right if @{text n} is negative.
 
-  \item @{ML distinct_subgoals_tac} removes duplicate subgoals from a
+  \<^descr> @{ML distinct_subgoals_tac} removes duplicate subgoals from a
   proof state.  This is potentially inefficient.
 
-  \item @{ML flexflex_tac} removes all flex-flex pairs from the proof
+  \<^descr> @{ML flexflex_tac} removes all flex-flex pairs from the proof
   state by applying the trivial unifier.  This drastic step loses
   information.  It is already part of the Isar infrastructure for
   facts resulting from goals, and rarely needs to be invoked manually.
@@ -491,8 +469,6 @@
   unification.  To prevent this, use @{ML Rule_Insts.res_inst_tac} to
   instantiate some variables in a rule.  Normally flex-flex constraints
   can be ignored; they often disappear as unknowns get instantiated.
-
-  \end{description}
 \<close>
 
 
@@ -513,9 +489,7 @@
   @{index_ML_op COMP: "thm * thm -> thm"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML compose_tac}~@{text "ctxt (flag, rule, m) i"} refines subgoal
+  \<^descr> @{ML compose_tac}~@{text "ctxt (flag, rule, m) i"} refines subgoal
   @{text "i"} using @{text "rule"}, without lifting.  The @{text
   "rule"} is taken to have the form @{text "\<psi>\<^sub>1 \<Longrightarrow> \<dots> \<psi>\<^sub>m \<Longrightarrow> \<psi>"}, where
   @{text "\<psi>"} need not be atomic; thus @{text "m"} determines the
@@ -523,7 +497,7 @@
   performs elim-resolution --- it solves the first premise of @{text
   "rule"} by assumption and deletes that assumption.
 
-  \item @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
+  \<^descr> @{ML Drule.compose}~@{text "(thm\<^sub>1, i, thm\<^sub>2)"} uses @{text "thm\<^sub>1"},
   regarded as an atomic formula, to solve premise @{text "i"} of
   @{text "thm\<^sub>2"}.  Let @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} be @{text
   "\<psi>"} and @{text "\<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>"}.  The unique @{text "s"} that
@@ -531,10 +505,9 @@
   \<dots> \<phi>\<^sub>i\<^sub>-\<^sub>1 \<Longrightarrow> \<phi>\<^sub>i\<^sub>+\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> \<phi>)s"}.  Multiple results are considered as
   error (exception @{ML THM}).
 
-  \item @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
+  \<^descr> @{text "thm\<^sub>1 COMP thm\<^sub>2"} is the same as @{text "Drule.compose
   (thm\<^sub>1, 1, thm\<^sub>2)"}.
 
-  \end{description}
 
   \begin{warn}
   These low-level operations are stepping outside the structure
@@ -581,9 +554,7 @@
   @{index_ML "FIRST'": "('a -> tactic) list -> 'a -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op THEN}~@{text "tac\<^sub>2"} is the sequential
   composition of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a goal
   state, it returns all states reachable in two steps by applying
   @{text "tac\<^sub>1"} followed by @{text "tac\<^sub>2"}.  First, it applies @{text
@@ -592,37 +563,35 @@
   concatenates the results to produce again one flat sequence of
   states.
 
-  \item @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "tac\<^sub>2"} makes a choice
   between @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Applied to a state, it
   tries @{text "tac\<^sub>1"} and returns the result if non-empty; if @{text
   "tac\<^sub>1"} fails then it uses @{text "tac\<^sub>2"}.  This is a deterministic
   choice: if @{text "tac\<^sub>1"} succeeds then @{text "tac\<^sub>2"} is excluded
   from the result.
 
-  \item @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
+  \<^descr> @{text "tac\<^sub>1"}~@{ML_op APPEND}~@{text "tac\<^sub>2"} concatenates the
   possible results of @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"}.  Unlike
   @{ML_op "ORELSE"} there is \emph{no commitment} to either tactic, so
   @{ML_op "APPEND"} helps to avoid incompleteness during search, at
   the cost of potential inefficiencies.
 
-  \item @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  \<^descr> @{ML EVERY}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
   "tac\<^sub>1"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac\<^sub>n"}.
   Note that @{ML "EVERY []"} is the same as @{ML all_tac}: it always
   succeeds.
 
-  \item @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
+  \<^descr> @{ML FIRST}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>n]"} abbreviates @{text
   "tac\<^sub>1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op "ORELSE"}~@{text
   "tac\<^sub>n"}.  Note that @{ML "FIRST []"} is the same as @{ML no_tac}: it
   always fails.
 
-  \item @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
+  \<^descr> @{ML_op "THEN'"} is the lifted version of @{ML_op "THEN"}, for
   tactics with explicit subgoal addressing.  So @{text
   "(tac\<^sub>1"}~@{ML_op THEN'}~@{text "tac\<^sub>2) i"} is the same as @{text
   "(tac\<^sub>1 i"}~@{ML_op THEN}~@{text "tac\<^sub>2 i)"}.
 
   The other primed tacticals work analogously.
-
-  \end{description}
 \<close>
 
 
@@ -641,9 +610,7 @@
   @{index_ML "REPEAT_DETERM_N": "int -> tactic -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML TRY}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns the resulting sequence, if non-empty; otherwise it
   returns the original state.  Thus, it applies @{text "tac"} at most
   once.
@@ -652,7 +619,7 @@
   applied via functional composition: @{ML "TRY"}~@{ML_op o}~@{text
   "tac"}.  There is no need for @{verbatim TRY'}.
 
-  \item @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML REPEAT}~@{text "tac"} applies @{text "tac"} to the goal
   state and, recursively, to each element of the resulting sequence.
   The resulting sequence consists of those states that make @{text
   "tac"} fail.  Thus, it applies @{text "tac"} as many times as
@@ -660,27 +627,23 @@
   invocation of @{text "tac"}.  @{ML REPEAT} is more general than @{ML
   REPEAT_DETERM}, but requires more space.
 
-  \item @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
+  \<^descr> @{ML REPEAT1}~@{text "tac"} is like @{ML REPEAT}~@{text "tac"}
   but it always applies @{text "tac"} at least once, failing if this
   is impossible.
 
-  \item @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
+  \<^descr> @{ML REPEAT_DETERM}~@{text "tac"} applies @{text "tac"} to the
   goal state and, recursively, to the head of the resulting sequence.
   It returns the first state to make @{text "tac"} fail.  It is
   deterministic, discarding alternative outcomes.
 
-  \item @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
+  \<^descr> @{ML REPEAT_DETERM_N}~@{text "n tac"} is like @{ML
   REPEAT_DETERM}~@{text "tac"} but the number of repetitions is bound
   by @{text "n"} (where @{ML "~1"} means @{text "\<infinity>"}).
-
-  \end{description}
 \<close>
 
 text %mlex \<open>The basic tactics and tacticals considered above follow
   some algebraic laws:
 
-  \begin{itemize}
-
   \<^item> @{ML all_tac} is the identity element of the tactical @{ML_op
   "THEN"}.
 
@@ -692,8 +655,6 @@
   \<^item> @{ML TRY} and @{ML REPEAT} can be expressed as (recursive)
   functions over more basic combinators (ignoring some internal
   implementation tricks):
-
-  \end{itemize}
 \<close>
 
 ML \<open>
@@ -747,35 +708,31 @@
   @{index_ML RANGE: "(int -> tactic) list -> int -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML ALLGOALS}~@{text "tac"} is equivalent to @{text "tac
   n"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op THEN}~@{text "tac 1"}.  It
   applies the @{text tac} to all the subgoals, counting downwards.
 
-  \item @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML SOMEGOAL}~@{text "tac"} is equivalent to @{text "tac
   n"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac 1"}.  It
   applies @{text "tac"} to one subgoal, counting downwards.
 
-  \item @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
+  \<^descr> @{ML FIRSTGOAL}~@{text "tac"} is equivalent to @{text "tac
   1"}~@{ML_op ORELSE}~@{text "\<dots>"}~@{ML_op ORELSE}~@{text "tac n"}.  It
   applies @{text "tac"} to one subgoal, counting upwards.
 
-  \item @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
+  \<^descr> @{ML HEADGOAL}~@{text "tac"} is equivalent to @{text "tac 1"}.
   It applies @{text "tac"} unconditionally to the first subgoal.
 
-  \item @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
+  \<^descr> @{ML REPEAT_SOME}~@{text "tac"} applies @{text "tac"} once or
   more to a subgoal, counting downwards.
 
-  \item @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
+  \<^descr> @{ML REPEAT_FIRST}~@{text "tac"} applies @{text "tac"} once or
   more to a subgoal, counting upwards.
 
-  \item @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
+  \<^descr> @{ML RANGE}~@{text "[tac\<^sub>1, \<dots>, tac\<^sub>k] i"} is equivalent to
   @{text "tac\<^sub>k (i + k - 1)"}~@{ML_op THEN}~@{text "\<dots>"}~@{ML_op
   THEN}~@{text "tac\<^sub>1 i"}.  It applies the given list of tactics to the
   corresponding range of subgoals, counting downwards.
-
-  \end{description}
 \<close>
 
 
@@ -800,18 +757,14 @@
   @{index_ML CHANGED: "tactic -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
+  \<^descr> @{ML FILTER}~@{text "sat tac"} applies @{text "tac"} to the
   goal state and returns a sequence consisting of those result goal
   states that are satisfactory in the sense of @{text "sat"}.
 
-  \item @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML CHANGED}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns precisely those states that differ from the
   original state (according to @{ML Thm.eq_thm}).  Thus @{ML
   CHANGED}~@{text "tac"} always has some effect on the state.
-
-  \end{description}
 \<close>
 
 
@@ -824,23 +777,19 @@
   @{index_ML DEPTH_SOLVE_1: "tactic -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
+  \<^descr> @{ML DEPTH_FIRST}~@{text "sat tac"} returns the goal state if
   @{text "sat"} returns true.  Otherwise it applies @{text "tac"},
   then recursively searches from each element of the resulting
   sequence.  The code uses a stack for efficiency, in effect applying
   @{text "tac"}~@{ML_op THEN}~@{ML DEPTH_FIRST}~@{text "sat tac"} to
   the state.
 
-  \item @{ML DEPTH_SOLVE}@{text "tac"} uses @{ML DEPTH_FIRST} to
+  \<^descr> @{ML DEPTH_SOLVE}@{text "tac"} uses @{ML DEPTH_FIRST} to
   search for states having no subgoals.
 
-  \item @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
+  \<^descr> @{ML DEPTH_SOLVE_1}~@{text "tac"} uses @{ML DEPTH_FIRST} to
   search for states having fewer subgoals than the given state.  Thus,
   it insists upon solving at least one subgoal.
-
-  \end{description}
 \<close>
 
 
@@ -857,13 +806,11 @@
   However, they do not enumerate all solutions; they terminate after
   the first satisfactory result from @{text "tac"}.
 
-  \begin{description}
-
-  \item @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
+  \<^descr> @{ML BREADTH_FIRST}~@{text "sat tac"} uses breadth-first
   search to find states for which @{text "sat"} is true.  For most
   applications, it is too slow.
 
-  \item @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
+  \<^descr> @{ML BEST_FIRST}~@{text "(sat, dist) tac"} does a heuristic
   search, using @{text "dist"} to estimate the distance from a
   satisfactory state (in the sense of @{text "sat"}).  It maintains a
   list of states ordered by distance.  It applies @{text "tac"} to the
@@ -875,13 +822,11 @@
   the size of the state.  The smaller the state, the fewer and simpler
   subgoals it has.
 
-  \item @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
+  \<^descr> @{ML THEN_BEST_FIRST}~@{text "tac\<^sub>0 (sat, dist) tac"} is like
   @{ML BEST_FIRST}, except that the priority queue initially contains
   the result of applying @{text "tac\<^sub>0"} to the goal state.  This
   tactical permits separate tactics for starting the search and
   continuing the search.
-
-  \end{description}
 \<close>
 
 
@@ -895,28 +840,24 @@
   @{index_ML DETERM: "tactic -> tactic"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
+  \<^descr> @{ML COND}~@{text "sat tac\<^sub>1 tac\<^sub>2"} applies @{text "tac\<^sub>1"} to
   the goal state if it satisfies predicate @{text "sat"}, and applies
   @{text "tac\<^sub>2"}.  It is a conditional tactical in that only one of
   @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} is applied to a goal state.
   However, both @{text "tac\<^sub>1"} and @{text "tac\<^sub>2"} are evaluated
   because ML uses eager evaluation.
 
-  \item @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
+  \<^descr> @{ML IF_UNSOLVED}~@{text "tac"} applies @{text "tac"} to the
   goal state if it has any subgoals, and simply returns the goal state
   otherwise.  Many common tactics, such as @{ML resolve_tac}, fail if
   applied to a goal state that has no subgoals.
 
-  \item @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML SOLVE}~@{text "tac"} applies @{text "tac"} to the goal
   state and then fails iff there are subgoals left.
 
-  \item @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
+  \<^descr> @{ML DETERM}~@{text "tac"} applies @{text "tac"} to the goal
   state and returns the head of the resulting sequence.  @{ML DETERM}
   limits the search space by making its argument deterministic.
-
-  \end{description}
 \<close>
 
 
@@ -930,26 +871,22 @@
   @{index_ML size_of_thm: "thm -> int"} \\
   \end{mldecls}
 
-  \begin{description}
-
-  \item @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
+  \<^descr> @{ML has_fewer_prems}~@{text "n thm"} reports whether @{text
   "thm"} has fewer than @{text "n"} premises.
 
-  \item @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
+  \<^descr> @{ML Thm.eq_thm}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether @{text
   "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.  Both theorems must have the
   same conclusions, the same set of hypotheses, and the same set of sort
   hypotheses.  Names of bound variables are ignored as usual.
 
-  \item @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
+  \<^descr> @{ML Thm.eq_thm_prop}~@{text "(thm\<^sub>1, thm\<^sub>2)"} reports whether
   the propositions of @{text "thm\<^sub>1"} and @{text "thm\<^sub>2"} are equal.
   Names of bound variables are ignored.
 
-  \item @{ML size_of_thm}~@{text "thm"} computes the size of @{text
+  \<^descr> @{ML size_of_thm}~@{text "thm"} computes the size of @{text
   "thm"}, namely the number of variables, constants and abstractions
   in its conclusion.  It may serve as a distance function for
   @{ML BEST_FIRST}.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Isar_Ref/Document_Preparation.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -26,6 +26,8 @@
     @{command_def "section"} & : & @{text "any \<rightarrow> any"} \\
     @{command_def "subsection"} & : & @{text "any \<rightarrow> any"} \\
     @{command_def "subsubsection"} & : & @{text "any \<rightarrow> any"} \\
+    @{command_def "paragraph"} & : & @{text "any \<rightarrow> any"} \\
+    @{command_def "subparagraph"} & : & @{text "any \<rightarrow> any"} \\
     @{command_def "text"} & : & @{text "any \<rightarrow> any"} \\
     @{command_def "txt"} & : & @{text "any \<rightarrow> any"} \\
     @{command_def "text_raw"} & : & @{text "any \<rightarrow> any"} \\
@@ -44,36 +46,29 @@
 
   @{rail \<open>
     (@@{command chapter} | @@{command section} | @@{command subsection} |
-      @@{command subsubsection} | @@{command text} | @@{command txt}) @{syntax text}
-    ;
-    @@{command text_raw} @{syntax text}
+      @@{command subsubsection} | @@{command paragraph} | @@{command subparagraph} |
+      @@{command text} | @@{command txt} | @@{command text_raw}) @{syntax text}
   \<close>}
 
-  \begin{description}
+  \<^descr> @{command chapter}, @{command section}, @{command subsection} etc.\ mark
+  section headings within the theory source. This works in any context, even
+  before the initial @{command theory} command. The corresponding {\LaTeX}
+  macros are @{verbatim \<open>\isamarkupchapter\<close>}, @{verbatim
+  \<open>\isamarkupsection\<close>}, @{verbatim \<open>\isamarkupsubsection\<close>} etc.\
 
-  \item @{command chapter}, @{command section}, @{command subsection}, and
-  @{command subsubsection} mark chapter and section headings within the
-  theory source; this works in any context, even before the initial
-  @{command theory} command. The corresponding {\LaTeX} macros are
-  @{verbatim \<open>\isamarkupchapter\<close>}, @{verbatim \<open>\isamarkupsection\<close>},
-  @{verbatim \<open>\isamarkupsubsection\<close>}, @{verbatim \<open>\isamarkupsubsubsection\<close>}.
-
-  \item @{command text} and @{command txt} specify paragraphs of plain text.
+  \<^descr> @{command text} and @{command txt} specify paragraphs of plain text.
   This corresponds to a {\LaTeX} environment @{verbatim
   \<open>\begin{isamarkuptext}\<close>} @{text "\<dots>"} @{verbatim \<open>\end{isamarkuptext}\<close>}
   etc.
 
-  \item @{command text_raw} inserts {\LaTeX} source directly into the
-  output, without additional markup. Thus the full range of document
-  manipulations becomes available, at the risk of messing up document
-  output.
+  \<^descr> @{command text_raw} is similar to @{command text}, but without
+  any surrounding markup environment. This allows injecting arbitrary
+  {\LaTeX} source into the generated document.
 
-  \end{description}
 
-  Except for @{command "text_raw"}, the text passed to any of the above
-  markup commands may refer to formal entities via \emph{document
-  antiquotations}, see also \secref{sec:antiq}. These are interpreted in the
-  present theory or proof context.
+  All text passed to any of the above markup commands may refer to formal
+  entities via \emph{document antiquotations}, see also \secref{sec:antiq}.
+  These are interpreted in the present theory or proof context.
 
   \<^medskip>
   The proof markup commands closely resemble those for theory
@@ -187,57 +182,55 @@
   comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
   text @{verbatim "{*"}~@{text "\<dots>"}~@{verbatim "*}"}.
 
-  \begin{description}
-
-  \item @{command "print_antiquotations"} prints all document antiquotations
+  \<^descr> @{command "print_antiquotations"} prints all document antiquotations
   that are defined in the current context; the ``@{text "!"}'' option
   indicates extra verbosity.
 
-  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
+  \<^descr> @{text "@{theory A}"} prints the name @{text "A"}, which is
   guaranteed to refer to a valid ancestor theory in the current
   context.
 
-  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
+  \<^descr> @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
   Full fact expressions are allowed here, including attributes
   (\secref{sec:syn-att}).
 
-  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
+  \<^descr> @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
   "\<phi>"}.
 
-  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
+  \<^descr> @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
   @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
 
-  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
+  \<^descr> @{text "@{term t}"} prints a well-typed term @{text "t"}.
   
-  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
+  \<^descr> @{text "@{value t}"} evaluates a term @{text "t"} and prints
   its result, see also @{command_ref (HOL) value}.
 
-  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
+  \<^descr> @{text "@{term_type t}"} prints a well-typed term @{text "t"}
   annotated with its type.
 
-  \item @{text "@{typeof t}"} prints the type of a well-typed term
+  \<^descr> @{text "@{typeof t}"} prints the type of a well-typed term
   @{text "t"}.
 
-  \item @{text "@{const c}"} prints a logical or syntactic constant
+  \<^descr> @{text "@{const c}"} prints a logical or syntactic constant
   @{text "c"}.
   
-  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
+  \<^descr> @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
   @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
 
-  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
+  \<^descr> @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
 
-  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
+  \<^descr> @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
     constructor @{text "\<kappa>"}.
 
-  \item @{text "@{class c}"} prints a class @{text c}.
+  \<^descr> @{text "@{class c}"} prints a class @{text c}.
 
-  \item @{text "@{text s}"} prints uninterpreted source text @{text
+  \<^descr> @{text "@{text s}"} prints uninterpreted source text @{text
   s}.  This is particularly useful to print portions of text according
   to the Isabelle document style, without demanding well-formedness,
   e.g.\ small pieces of terms that should not be parsed or
   type-checked yet.
 
-  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
+  \<^descr> @{text "@{goals}"} prints the current \emph{dynamic} goal
   state.  This is mainly for support of tactic-emulation scripts
   within Isar.  Presentation of goal states does not conform to the
   idea of human-readable proof documents!
@@ -246,38 +239,38 @@
   the reasoning via proper Isar proof commands, instead of peeking at
   the internal machine configuration.
   
-  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
+  \<^descr> @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
   does not print the main goal.
   
-  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
+  \<^descr> @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
   corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
   requires proof terms to be switched on for the current logic
   session.
   
-  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
+  \<^descr> @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
   a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
   information omitted in the compact proof term, which is denoted by
   ``@{text _}'' placeholders there.
   
-  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
+  \<^descr> @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
   s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
   check text @{text s} as ML value, infix operator, type, structure,
   and functor respectively.  The source is printed verbatim.
 
-  \item @{text "@{verbatim s}"} prints uninterpreted source text literally
+  \<^descr> @{text "@{verbatim s}"} prints uninterpreted source text literally
   as ASCII characters, using some type-writer font style.
 
-  \item @{text "@{file path}"} checks that @{text "path"} refers to a
+  \<^descr> @{text "@{file path}"} checks that @{text "path"} refers to a
   file (or directory) and prints it verbatim.
 
-  \item @{text "@{file_unchecked path}"} is like @{text "@{file
+  \<^descr> @{text "@{file_unchecked path}"} is like @{text "@{file
   path}"}, but does not check the existence of the @{text "path"}
   within the file-system.
 
-  \item @{text "@{url name}"} produces markup for the given URL, which
+  \<^descr> @{text "@{url name}"} produces markup for the given URL, which
   results in an active hyperlink within the text.
 
-  \item @{text "@{cite name}"} produces a citation @{verbatim
+  \<^descr> @{text "@{cite name}"} produces a citation @{verbatim
   \<open>\cite{name}\<close>} in {\LaTeX}, where the name refers to some Bib{\TeX}
   database entry.
 
@@ -290,8 +283,6 @@
   @{antiquotation_option_def cite_macro}, or the configuration option
   @{attribute cite_macro} in the context. For example, @{text "@{cite
   [cite_macro = nocite] foobar}"} produces @{verbatim \<open>\nocite{foobar}\<close>}.
-
-  \end{description}
 \<close>
 
 
@@ -303,23 +294,19 @@
   empty number of arguments;  multiple styles can be sequenced with
   commas.  The following standard styles are available:
 
-  \begin{description}
-  
-  \item @{text lhs} extracts the first argument of any application
+  \<^descr> @{text lhs} extracts the first argument of any application
   form with at least two arguments --- typically meta-level or
   object-level equality, or any other binary relation.
   
-  \item @{text rhs} is like @{text lhs}, but extracts the second
+  \<^descr> @{text rhs} is like @{text lhs}, but extracts the second
   argument.
   
-  \item @{text "concl"} extracts the conclusion @{text C} from a rule
+  \<^descr> @{text "concl"} extracts the conclusion @{text C} from a rule
   in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
   
-  \item @{text "prem"} @{text n} extract premise number
+  \<^descr> @{text "prem"} @{text n} extracts premise number
  @{text "n"} from a rule in Horn-clause
   normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
-
-  \end{description}
 \<close>
 
 
@@ -329,36 +316,34 @@
   of antiquotations.  Note that many of these coincide with system and
   configuration options of the same names.
 
-  \begin{description}
-
-  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
+  \<^descr> @{antiquotation_option_def show_types}~@{text "= bool"} and
   @{antiquotation_option_def show_sorts}~@{text "= bool"} control
   printing of explicit type and sort constraints.
 
-  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def show_structs}~@{text "= bool"}
   controls printing of implicit structures.
 
-  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
   controls folding of abbreviations.
 
-  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
+  \<^descr> @{antiquotation_option_def names_long}~@{text "= bool"} forces
   names of types and constants etc.\ to be printed in their fully
   qualified internal form.
 
-  \item @{antiquotation_option_def names_short}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def names_short}~@{text "= bool"}
   forces names of types and constants etc.\ to be printed unqualified.
   Note that internalizing the output again in the current context may
   well yield a different result.
 
-  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def names_unique}~@{text "= bool"}
   determines whether the printed version of qualified names should be
   made sufficiently long to avoid overlap with names declared further
   back.  Set to @{text false} for more concise output.
 
-  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
+  \<^descr> @{antiquotation_option_def eta_contract}~@{text "= bool"}
   prints terms in @{text \<eta>}-contracted form.
 
-  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
+  \<^descr> @{antiquotation_option_def display}~@{text "= bool"} indicates
   if the text is to be output as multi-line ``display material'',
   rather than a small piece of text without line breaks (which is the
   default).
@@ -366,26 +351,26 @@
   In this mode the embedded entities are printed in the same style as
   the main theory text.
 
-  \item @{antiquotation_option_def break}~@{text "= bool"} controls
+  \<^descr> @{antiquotation_option_def break}~@{text "= bool"} controls
   line breaks in non-display material.
 
-  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
+  \<^descr> @{antiquotation_option_def quotes}~@{text "= bool"} indicates
   if the output should be enclosed in double quotes.
 
-  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
+  \<^descr> @{antiquotation_option_def mode}~@{text "= name"} adds @{text
   name} to the print mode to be used for presentation.  Note that the
   standard setup for {\LaTeX} output is already present by default,
   including the modes @{text latex} and @{text xsymbols}.
 
-  \item @{antiquotation_option_def margin}~@{text "= nat"} and
+  \<^descr> @{antiquotation_option_def margin}~@{text "= nat"} and
   @{antiquotation_option_def indent}~@{text "= nat"} change the margin
   or indentation for pretty printing of display material.
 
-  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
+  \<^descr> @{antiquotation_option_def goals_limit}~@{text "= nat"}
   determines the maximum number of subgoals to be printed (for goal-based
   antiquotation).
 
-  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
+  \<^descr> @{antiquotation_option_def source}~@{text "= bool"} prints the
   original source text of the antiquotation arguments, rather than its
   internal representation.  Note that formal checking of
   @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
@@ -404,12 +389,12 @@
   well-formedness check in the background, but without modification of
   the printed text.
 
-  \end{description}
 
   For Boolean flags, ``@{text "name = true"}'' may be abbreviated as
   ``@{text name}''.  All of the above flags are disabled by default,
   unless changed specifically for a logic session in the corresponding
-  @{verbatim "ROOT"} file.\<close>
+  @{verbatim "ROOT"} file.
+\<close>
 
 
 section \<open>Markup via command tags \label{sec:tags}\<close>
@@ -514,8 +499,6 @@
   recursion.  The meaning and visual appearance of these rail language
   elements is illustrated by the following representative examples.
 
-  \begin{itemize}
-
   \<^item> Empty @{verbatim "()"}
 
   @{rail \<open>()\<close>}
@@ -574,8 +557,6 @@
   \<^item> Strict repetition with separator @{verbatim "A + sep"}
 
   @{rail \<open>A + sep\<close>}
-
-  \end{itemize}
 \<close>
 
 
@@ -590,14 +571,10 @@
     @@{command display_drafts} (@{syntax name} +)
   \<close>}
 
-  \begin{description}
-
-  \item @{command "display_drafts"}~@{text paths} performs simple output of a
+  \<^descr> @{command "display_drafts"}~@{text paths} performs simple output of a
   given list of raw source files. Only those symbols that do not require
   additional {\LaTeX} packages are displayed properly, everything else is left
   verbatim.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Isar_Ref/Generic.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Generic.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -38,18 +38,14 @@
     @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "print_options"} prints the available configuration
+  \<^descr> @{command "print_options"} prints the available configuration
   options, with names, types, and current values; the ``@{text "!"}'' option
   indicates extra verbosity.
   
-  \item @{text "name = value"} as an attribute expression modifies the
+  \<^descr> @{text "name = value"} as an attribute expression modifies the
   named option, with the syntax of the value depending on the option's
   type.  For @{ML_type bool} the default value is @{text true}.  Any
   attempt to change a global option in a local context is ignored.
-
-  \end{description}
 \<close>
 
 
@@ -83,18 +79,16 @@
     @@{method sleep} @{syntax real}
   \<close>}
 
-  \begin{description}
-  
-  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
+  \<^descr> @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
   "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
   all goals; any chained facts provided are inserted into the goal and
   subject to rewriting as well.
 
-  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
+  \<^descr> @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
   into all goals of the proof state.  Note that current facts
   indicated for forward chaining are ignored.
 
-  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
+  \<^descr> @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
   drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
   "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
   method (see \secref{sec:pure-meth-att}), but apply rules by
@@ -111,25 +105,24 @@
   the plain @{method rule} method, with forward chaining of current
   facts.
 
-  \item @{method intro} and @{method elim} repeatedly refine some goal
+  \<^descr> @{method intro} and @{method elim} repeatedly refine some goal
   by intro- or elim-resolution, after having inserted any chained
   facts.  Exactly the rules given as arguments are taken into account;
   this allows fine-tuned decomposition of a proof problem, in contrast
   to common automated tools.
 
-  \item @{method fail} yields an empty result sequence; it is the
+  \<^descr> @{method fail} yields an empty result sequence; it is the
   identity of the ``@{text "|"}'' method combinator (cf.\
   \secref{sec:proof-meth}).
 
-  \item @{method succeed} yields a single (unchanged) result; it is
+  \<^descr> @{method succeed} yields a single (unchanged) result; it is
   the identity of the ``@{text ","}'' method combinator (cf.\
   \secref{sec:proof-meth}).
 
-  \item @{method sleep}~@{text s} succeeds after a real-time delay of @{text
+  \<^descr> @{method sleep}~@{text s} succeeds after a real-time delay of @{text
   s} seconds. This is occasionally useful for demonstration and testing
   purposes.
 
-  \end{description}
 
   \begin{matharray}{rcl}
     @{attribute_def tagged} & : & @{text attribute} \\
@@ -155,42 +148,38 @@
     @@{attribute rotated} @{syntax int}?
   \<close>}
 
-  \begin{description}
-
-  \item @{attribute tagged}~@{text "name value"} and @{attribute
+  \<^descr> @{attribute tagged}~@{text "name value"} and @{attribute
   untagged}~@{text name} add and remove \emph{tags} of some theorem.
   Tags may be any list of string pairs that serve as formal comment.
   The first string is considered the tag name, the second its value.
   Note that @{attribute untagged} removes any tags of the same name.
 
-  \item @{attribute THEN}~@{text a} composes rules by resolution; it
+  \<^descr> @{attribute THEN}~@{text a} composes rules by resolution; it
   resolves with the first premise of @{text a} (an alternative
   position may be also specified).  See also @{ML_op "RS"} in
   @{cite "isabelle-implementation"}.
   
-  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
+  \<^descr> @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
   folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
   definitions throughout a rule.
 
-  \item @{attribute abs_def} turns an equation of the form @{prop "f x
+  \<^descr> @{attribute abs_def} turns an equation of the form @{prop "f x
   y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
   simp} or @{method unfold} steps always expand it.  This also works
   for object-logic equality.
 
-  \item @{attribute rotated}~@{text n} rotate the premises of a
+  \<^descr> @{attribute rotated}~@{text n} rotate the premises of a
   theorem by @{text n} (default 1).
 
-  \item @{attribute (Pure) elim_format} turns a destruction rule into
+  \<^descr> @{attribute (Pure) elim_format} turns a destruction rule into
   elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
   (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
   
   Note that the Classical Reasoner (\secref{sec:classical}) provides
   its own version of this operation.
 
-  \item @{attribute no_vars} replaces schematic variables by free
+  \<^descr> @{attribute no_vars} replaces schematic variables by free
   ones; this is mainly for tuning output of pretty printed theorems.
-
-  \end{description}
 \<close>
 
 
@@ -216,16 +205,14 @@
   provide the canonical way for automated normalization (see
   \secref{sec:simplifier}).
 
-  \begin{description}
-
-  \item @{method subst}~@{text eq} performs a single substitution step
+  \<^descr> @{method subst}~@{text eq} performs a single substitution step
   using rule @{text eq}, which may be either a meta or object
   equality.
 
-  \item @{method subst}~@{text "(asm) eq"} substitutes in an
+  \<^descr> @{method subst}~@{text "(asm) eq"} substitutes in an
   assumption.
 
-  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
+  \<^descr> @{method subst}~@{text "(i \<dots> j) eq"} performs several
   substitutions in the conclusion. The numbers @{text i} to @{text j}
   indicate the positions to substitute at.  Positions are ordered from
   the top of the term tree moving down from left to right. For
@@ -238,18 +225,18 @@
   assume all substitutions are performed simultaneously.  Otherwise
   the behaviour of @{text subst} is not specified.
 
-  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
+  \<^descr> @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
   substitutions in the assumptions. The positions refer to the
   assumptions in order from left to right.  For example, given in a
   goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
   commutativity of @{text "+"} is the subterm @{text "a + b"} and
   position 2 is the subterm @{text "c + d"}.
 
-  \item @{method hypsubst} performs substitution using some
+  \<^descr> @{method hypsubst} performs substitution using some
   assumption; this only works for equations of the form @{text "x =
   t"} where @{text x} is a free or bound variable.
 
-  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
+  \<^descr> @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
   splitting using the given rules.  Splitting is performed in the
   conclusion or some assumption of the subgoal, depending of the
   structure of the rule.
@@ -257,8 +244,6 @@
   Note that the @{method simp} method already involves repeated
   application of split rules as declared in the current context, using
   @{attribute split}, for example.
-
-  \end{description}
 \<close>
 
 
@@ -305,9 +290,7 @@
       'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
   \<close>}
 
-  \begin{description}
-
-  \item @{method simp} invokes the Simplifier on the first subgoal,
+  \<^descr> @{method simp} invokes the Simplifier on the first subgoal,
   after inserting chained facts as additional goal premises; further
   rule declarations may be included via @{text "(simp add: facts)"}.
   The proof method fails if the subgoal remains unchanged after
@@ -348,7 +331,7 @@
   congruence rules (see also \secref{sec:simp-rules}); the default is
   to add.
 
-  \item @{method simp_all} is similar to @{method simp}, but acts on
+  \<^descr> @{method simp_all} is similar to @{method simp}, but acts on
   all goals, working backwards from the last to the first one as usual
   in Isabelle.\footnote{The order is irrelevant for goals without
   schematic variables, so simplification might actually be performed
@@ -361,10 +344,9 @@
   The proof method fails if all subgoals remain unchanged after
   simplification.
 
-  \item @{attribute simp_depth_limit} limits the number of recursive
+  \<^descr> @{attribute simp_depth_limit} limits the number of recursive
   invocations of the Simplifier during conditional rewriting.
 
-  \end{description}
 
   By default the Simplifier methods above take local assumptions fully
   into account, using equational assumptions in the subsequent
@@ -513,9 +495,7 @@
     @@{command print_simpset} ('!'?)
   \<close>}
 
-  \begin{description}
-
-  \item @{attribute simp} declares rewrite rules, by adding or
+  \<^descr> @{attribute simp} declares rewrite rules, by adding or
   deleting them from the simpset within the theory or proof context.
   Rewrite rules are theorems expressing some form of equality, for
   example:
@@ -541,44 +521,40 @@
   The Simplifier accepts the following formats for the @{text "lhs"}
   term:
 
-  \begin{enumerate}
-
-  \<^enum> First-order patterns, considering the sublanguage of
-  application of constant operators to variable operands, without
-  @{text "\<lambda>"}-abstractions or functional variables.
-  For example:
+    \<^enum> First-order patterns, considering the sublanguage of
+    application of constant operators to variable operands, without
+    @{text "\<lambda>"}-abstractions or functional variables.
+    For example:
 
-  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
-  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
+    @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
+    @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
 
-  \<^enum> Higher-order patterns in the sense of @{cite "nipkow-patterns"}.
-  These are terms in @{text "\<beta>"}-normal form (this will always be the
-  case unless you have done something strange) where each occurrence
-  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
-  @{text "x\<^sub>i"} are distinct bound variables.
+    \<^enum> Higher-order patterns in the sense of @{cite "nipkow-patterns"}.
+    These are terms in @{text "\<beta>"}-normal form (this will always be the
+    case unless you have done something strange) where each occurrence
+    of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
+    @{text "x\<^sub>i"} are distinct bound variables.
 
-  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
-  or its symmetric form, since the @{text "rhs"} is also a
-  higher-order pattern.
+    For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
+    or its symmetric form, since the @{text "rhs"} is also a
+    higher-order pattern.
 
-  \<^enum> Physical first-order patterns over raw @{text "\<lambda>"}-term
-  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
-  variables are treated like quasi-constant term material.
+    \<^enum> Physical first-order patterns over raw @{text "\<lambda>"}-term
+    structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
+    variables are treated like quasi-constant term material.
 
-  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
-  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
-  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
-  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
-  be replaced by adding new variables and conditions like this: @{text
-  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
-  rewrite rule of the second category since conditions can be
-  arbitrary terms.
+    For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
+    term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
+    match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
+    subterms (in our case @{text "?f ?x"}, which is not a pattern) can
+    be replaced by adding new variables and conditions like this: @{text
+    "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
+    rewrite rule of the second category since conditions can be
+    arbitrary terms.
 
-  \end{enumerate}
+  \<^descr> @{attribute split} declares case split rules.
 
-  \item @{attribute split} declares case split rules.
-
-  \item @{attribute cong} declares congruence rules to the Simplifier
+  \<^descr> @{attribute cong} declares congruence rules to the Simplifier
   context.
 
   Congruence rules are equalities of the form @{text [display]
@@ -619,7 +595,7 @@
   This can make simplification much faster, but may require an extra
   case split over the condition @{text "?q"} to prove the goal.
 
-  \item @{command "print_simpset"} prints the collection of rules declared
+  \<^descr> @{command "print_simpset"} prints the collection of rules declared
   to the Simplifier, which is also known as ``simpset'' internally; the
   ``@{text "!"}'' option indicates extra verbosity.
 
@@ -634,7 +610,6 @@
   simpset and the context of the problem being simplified may lead to
   unexpected results.
 
-  \end{description}
 
   The implicit simpset of the theory context is propagated
   monotonically through the theory hierarchy: forming a new theory,
@@ -706,8 +681,6 @@
   permutative.)  When dealing with an AC-operator @{text "f"}, keep
   the following points in mind:
 
-  \begin{itemize}
-
   \<^item> The associative law must always be oriented from left to
   right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
   orientation, if used with commutativity, leads to looping in
@@ -717,7 +690,6 @@
   associativity (A) and commutativity (C) but also a derived rule
   \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
 
-  \end{itemize}
 
   Ordered rewriting with the combination of A, C, and LC sorts a term
   lexicographically --- the rewriting engine imitates bubble-sort.
@@ -794,21 +766,19 @@
   These attributes and configurations options control various aspects of
   Simplifier tracing and debugging.
 
-  \begin{description}
-
-  \item @{attribute simp_trace} makes the Simplifier output internal
+  \<^descr> @{attribute simp_trace} makes the Simplifier output internal
   operations.  This includes rewrite steps, but also bookkeeping like
   modifications of the simpset.
 
-  \item @{attribute simp_trace_depth_limit} limits the effect of
+  \<^descr> @{attribute simp_trace_depth_limit} limits the effect of
   @{attribute simp_trace} to the given depth of recursive Simplifier
   invocations (when solving conditions of rewrite rules).
 
-  \item @{attribute simp_debug} makes the Simplifier output some extra
+  \<^descr> @{attribute simp_debug} makes the Simplifier output some extra
   information about internal operations.  This includes any attempted
   invocation of simplification procedures.
 
-  \item @{attribute simp_trace_new} controls Simplifier tracing within
+  \<^descr> @{attribute simp_trace_new} controls Simplifier tracing within
   Isabelle/PIDE applications, notably Isabelle/jEdit @{cite "isabelle-jedit"}.
   This provides a hierarchical representation of the rewriting steps
   performed by the Simplifier.
@@ -820,13 +790,11 @@
   Interactive mode interrupts the normal flow of the Simplifier and defers
   the decision how to continue to the user via some GUI dialog.
 
-  \item @{attribute simp_break} declares term or theorem breakpoints for
+  \<^descr> @{attribute simp_break} declares term or theorem breakpoints for
   @{attribute simp_trace_new} as described above. Term breakpoints are
   patterns which are checked for matches on the redex of a rule application.
   Theorem breakpoints trigger when the corresponding theorem is applied in a
   rewrite step. For example:
-
-  \end{description}
 \<close>
 
 (*<*)experiment begin(*>*)
@@ -866,9 +834,7 @@
     @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
   \<close>}
 
-  \begin{description}
-
-  \item @{command "simproc_setup"} defines a named simplification
+  \<^descr> @{command "simproc_setup"} defines a named simplification
   procedure that is invoked by the Simplifier whenever any of the
   given term patterns match the current redex.  The implementation,
   which is provided as ML source text, needs to be of type @{ML_type
@@ -887,12 +853,10 @@
   Morphisms and identifiers are only relevant for simprocs that are
   defined within a local target context, e.g.\ in a locale.
 
-  \item @{text "simproc add: name"} and @{text "simproc del: name"}
+  \<^descr> @{text "simproc add: name"} and @{text "simproc del: name"}
   add or delete named simprocs to the current Simplifier context.  The
   default is to add a simproc.  Note that @{command "simproc_setup"}
   already adds the new simproc to the subsequent context.
-
-  \end{description}
 \<close>
 
 
@@ -944,18 +908,15 @@
   rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
   ?m < ?n"}, the default strategy could loop.  % FIXME !??
 
-  \begin{description}
-
-  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
+  \<^descr> @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
   subgoaler of the context to @{text "tac"}.  The tactic will
   be applied to the context of the running Simplifier instance.
 
-  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
+  \<^descr> @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
   set of premises from the context.  This may be non-empty only if
   the Simplifier has been told to utilize local assumptions in the
   first place (cf.\ the options in \secref{sec:simp-meth}).
 
-  \end{description}
 
   As an example, consider the following alternative subgoaler:
 \<close>
@@ -1014,27 +975,24 @@
   tactic is not totally safe: it may instantiate unknowns that appear
   also in other subgoals.
 
-  \begin{description}
-
-  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
+  \<^descr> @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
   "tac"} into a solver; the @{text "name"} is only attached as a
   comment and has no further significance.
 
-  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
+  \<^descr> @{text "ctxt setSSolver solver"} installs @{text "solver"} as
   the safe solver of @{text "ctxt"}.
 
-  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
+  \<^descr> @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
   additional safe solver; it will be tried after the solvers which had
   already been present in @{text "ctxt"}.
 
-  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
+  \<^descr> @{text "ctxt setSolver solver"} installs @{text "solver"} as the
   unsafe solver of @{text "ctxt"}.
 
-  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
+  \<^descr> @{text "ctxt addSolver solver"} adds @{text "solver"} as an
   additional unsafe solver; it will be tried after the solvers which
   had already been present in @{text "ctxt"}.
 
-  \end{description}
 
   \<^medskip>
   The solver tactic is invoked with the context of the
@@ -1100,28 +1058,25 @@
   conditional.  Another possibility is to apply an elimination rule on
   the assumptions.  More adventurous loopers could start an induction.
 
-  \begin{description}
-
-  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
+  \<^descr> @{text "ctxt setloop tac"} installs @{text "tac"} as the only
   looper tactic of @{text "ctxt"}.
 
-  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
+  \<^descr> @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
   additional looper tactic with name @{text "name"}, which is
   significant for managing the collection of loopers.  The tactic will
   be tried after the looper tactics that had already been present in
   @{text "ctxt"}.
 
-  \item @{text "ctxt delloop name"} deletes the looper tactic that was
+  \<^descr> @{text "ctxt delloop name"} deletes the looper tactic that was
   associated with @{text "name"} from @{text "ctxt"}.
 
-  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
+  \<^descr> @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
   for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
 
-  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
+  \<^descr> @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
   tactic corresponding to @{text thm} from the looper tactics of
   @{text "ctxt"}.
 
-  \end{description}
 
   The splitter replaces applications of a given function; the
   right-hand side of the replacement can be anything.  For example,
@@ -1174,9 +1129,7 @@
     opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
   \<close>}
 
-  \begin{description}
-  
-  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
+  \<^descr> @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
   be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
   a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
   The result is fully simplified by default, including assumptions and
@@ -1188,8 +1141,6 @@
   (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
   @{attribute simplified} attribute should be only rarely required
   under normal circumstances.
-
-  \end{description}
 \<close>
 
 
@@ -1436,13 +1387,11 @@
     @@{attribute iff} (((() | 'add') '?'?) | 'del')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "print_claset"} prints the collection of rules
+  \<^descr> @{command "print_claset"} prints the collection of rules
   declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
   within the context.
 
-  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
+  \<^descr> @{attribute intro}, @{attribute elim}, and @{attribute dest}
   declare introduction, elimination, and destruction rules,
   respectively.  By default, rules are considered as \emph{unsafe}
   (i.e.\ not applied blindly without backtracking), while ``@{text
@@ -1467,11 +1416,11 @@
   added with some other classification, but the rule is added anyway
   as requested.
 
-  \item @{attribute rule}~@{text del} deletes all occurrences of a
+  \<^descr> @{attribute rule}~@{text del} deletes all occurrences of a
   rule from the classical context, regardless of its classification as
   introduction~/ elimination~/ destruction and safe~/ unsafe.
 
-  \item @{attribute iff} declares logical equivalences to the
+  \<^descr> @{attribute iff} declares logical equivalences to the
   Simplifier and the Classical reasoner at the same time.
   Non-conditional rules result in a safe introduction and elimination
   pair; conditional ones are considered unsafe.  Rules with negative
@@ -1482,13 +1431,11 @@
   the Isabelle/Pure context only, and omits the Simplifier
   declaration.
 
-  \item @{attribute swapped} turns an introduction rule into an
+  \<^descr> @{attribute swapped} turns an introduction rule into an
   elimination, by resolving with the classical swap principle @{text
   "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
   illustrative purposes: the Classical Reasoner already swaps rules
   internally as explained above.
-
-  \end{description}
 \<close>
 
 
@@ -1504,9 +1451,7 @@
     @@{method rule} @{syntax thmrefs}?
   \<close>}
 
-  \begin{description}
-
-  \item @{method rule} as offered by the Classical Reasoner is a
+  \<^descr> @{method rule} as offered by the Classical Reasoner is a
   refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
   versions work the same, but the classical version observes the
   classical rule context in addition to that of Isabelle/Pure.
@@ -1516,12 +1461,10 @@
   ones), but only few declarations to the rule context of
   Isabelle/Pure (\secref{sec:pure-meth-att}).
 
-  \item @{method contradiction} solves some goal by contradiction,
+  \<^descr> @{method contradiction} solves some goal by contradiction,
   deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
   facts, which are guaranteed to participate, may appear in either
   order.
-
-  \end{description}
 \<close>
 
 
@@ -1564,9 +1507,7 @@
       (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
   \<close>}
 
-  \begin{description}
-
-  \item @{method blast} is a separate classical tableau prover that
+  \<^descr> @{method blast} is a separate classical tableau prover that
   uses the same classical rule declarations as explained before.
 
   Proof search is coded directly in ML using special data structures.
@@ -1574,27 +1515,23 @@
   inferences.  It is faster and more powerful than the other classical
   reasoning tools, but has major limitations too.
 
-  \begin{itemize}
-
-  \<^item> It does not use the classical wrapper tacticals, such as the
-  integration with the Simplifier of @{method fastforce}.
+    \<^item> It does not use the classical wrapper tacticals, such as the
+    integration with the Simplifier of @{method fastforce}.
 
-  \<^item> It does not perform higher-order unification, as needed by the
-  rule @{thm [source=false] rangeI} in HOL.  There are often
-  alternatives to such rules, for example @{thm [source=false]
-  range_eqI}.
+    \<^item> It does not perform higher-order unification, as needed by the
+    rule @{thm [source=false] rangeI} in HOL.  There are often
+    alternatives to such rules, for example @{thm [source=false]
+    range_eqI}.
 
-  \<^item> Function variables may only be applied to parameters of the
-  subgoal.  (This restriction arises because the prover does not use
-  higher-order unification.)  If other function variables are present
-  then the prover will fail with the message
-  @{verbatim [display] \<open>Function unknown's argument not a bound variable\<close>}
+    \<^item> Function variables may only be applied to parameters of the
+    subgoal.  (This restriction arises because the prover does not use
+    higher-order unification.)  If other function variables are present
+    then the prover will fail with the message
+    @{verbatim [display] \<open>Function unknown's argument not a bound variable\<close>}
 
-  \<^item> Its proof strategy is more general than @{method fast} but can
-  be slower.  If @{method blast} fails or seems to be running forever,
-  try @{method fast} and the other proof tools described below.
-
-  \end{itemize}
+    \<^item> Its proof strategy is more general than @{method fast} but can
+    be slower.  If @{method blast} fails or seems to be running forever,
+    try @{method fast} and the other proof tools described below.
 
   The optional integer argument specifies a bound for the number of
   unsafe steps used in a proof.  By default, @{method blast} starts
@@ -1604,7 +1541,7 @@
   be made much faster by supplying the successful search bound to this
   proof method instead.
 
-  \item @{method auto} combines classical reasoning with
+  \<^descr> @{method auto} combines classical reasoning with
   simplification.  It is intended for situations where there are a lot
   of mostly trivial subgoals; it proves all the easy ones, leaving the
   ones it cannot prove.  Occasionally, attempting to prove the hard
@@ -1616,12 +1553,12 @@
   for a slower but more general alternative that also takes wrappers
   into account.
 
-  \item @{method force} is intended to prove the first subgoal
+  \<^descr> @{method force} is intended to prove the first subgoal
   completely, using many fancy proof tools and performing a rather
   exhaustive search.  As a result, proof attempts may take rather long
   or diverge easily.
 
-  \item @{method fast}, @{method best}, @{method slow} attempt to
+  \<^descr> @{method fast}, @{method best}, @{method slow} attempt to
   prove the first subgoal using sequent-style reasoning as explained
   before.  Unlike @{method blast}, they construct proofs directly in
   Isabelle.
@@ -1635,13 +1572,13 @@
   search: it may, when backtracking from a failed proof attempt, undo
   even the step of proving a subgoal by assumption.
 
-  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
+  \<^descr> @{method fastforce}, @{method slowsimp}, @{method bestsimp}
   are like @{method fast}, @{method slow}, @{method best},
   respectively, but use the Simplifier as additional wrapper. The name
   @{method fastforce}, reflects the behaviour of this popular method
   better without requiring an understanding of its implementation.
 
-  \item @{method deepen} works by exhaustive search up to a certain
+  \<^descr> @{method deepen} works by exhaustive search up to a certain
   depth.  The start depth is 4 (unless specified explicitly), and the
   depth is increased iteratively up to 10.  Unsafe rules are modified
   to preserve the formula they act on, so that it be used repeatedly.
@@ -1649,7 +1586,6 @@
   slower, for example if the assumptions have many universal
   quantifiers.
 
-  \end{description}
 
   Any of the above methods support additional modifiers of the context
   of classical (and simplifier) rules, but the ones related to the
@@ -1679,19 +1615,15 @@
     @@{method clarsimp} (@{syntax clasimpmod} * )
   \<close>}
 
-  \begin{description}
-
-  \item @{method safe} repeatedly performs safe steps on all subgoals.
+  \<^descr> @{method safe} repeatedly performs safe steps on all subgoals.
   It is deterministic, with at most one outcome.
 
-  \item @{method clarify} performs a series of safe steps without
+  \<^descr> @{method clarify} performs a series of safe steps without
   splitting subgoals; see also @{method clarify_step}.
 
-  \item @{method clarsimp} acts like @{method clarify}, but also does
+  \<^descr> @{method clarsimp} acts like @{method clarify}, but also does
   simplification.  Note that if the Simplifier context includes a
   splitter for the premises, the subgoal may still be split.
-
-  \end{description}
 \<close>
 
 
@@ -1710,34 +1642,30 @@
   of the Classical Reasoner.  By calling them yourself, you can
   execute these procedures one step at a time.
 
-  \begin{description}
-
-  \item @{method safe_step} performs a safe step on the first subgoal.
+  \<^descr> @{method safe_step} performs a safe step on the first subgoal.
   The safe wrapper tacticals are applied to a tactic that may include
   proof by assumption or Modus Ponens (taking care not to instantiate
   unknowns), or substitution.
 
-  \item @{method inst_step} is like @{method safe_step}, but allows
+  \<^descr> @{method inst_step} is like @{method safe_step}, but allows
   unknowns to be instantiated.
 
-  \item @{method step} is the basic step of the proof procedure, it
+  \<^descr> @{method step} is the basic step of the proof procedure; it
   operates on the first subgoal.  The unsafe wrapper tacticals are
   applied to a tactic that tries @{method safe}, @{method inst_step},
   or applies an unsafe rule from the context.
 
-  \item @{method slow_step} resembles @{method step}, but allows
+  \<^descr> @{method slow_step} resembles @{method step}, but allows
   backtracking between using safe rules with instantiation (@{method
   inst_step}) and using unsafe rules.  The resulting search space is
   larger.
 
-  \item @{method clarify_step} performs a safe step on the first
+  \<^descr> @{method clarify_step} performs a safe step on the first
   subgoal; no splitting step is applied.  For example, the subgoal
   @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
   Modus Ponens, etc., may be performed provided they do not
   instantiate unknowns.  Assumptions of the form @{text "x = t"} may
   be eliminated.  The safe wrapper tactical is applied.
-
-  \end{description}
 \<close>
 
 
@@ -1790,46 +1718,42 @@
   wrapper names.  These names may be used to selectively delete
   wrappers.
 
-  \begin{description}
-
-  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
+  \<^descr> @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
   which should yield a safe tactic, to modify the existing safe step
   tactic.
 
-  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
+  \<^descr> @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
   safe wrapper, such that it is tried \emph{before} each safe step of
   the search.
 
-  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
+  \<^descr> @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
   safe wrapper, such that it is tried when a safe step of the search
   would fail.
 
-  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
+  \<^descr> @{text "ctxt delSWrapper name"} deletes the safe wrapper with
   the given name.
 
-  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
+  \<^descr> @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
   modify the existing (unsafe) step tactic.
 
-  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
+  \<^descr> @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
   unsafe wrapper, such that it its result is concatenated
   \emph{before} the result of each unsafe step.
 
-  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
+  \<^descr> @{text "ctxt addafter (name, tac)"} adds the given tactic as an
   unsafe wrapper, such that it its result is concatenated \emph{after}
   the result of each unsafe step.
 
-  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
+  \<^descr> @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
   the given name.
 
-  \item @{text "addSss"} adds the simpset of the context to its
+  \<^descr> @{text "addSss"} adds the simpset of the context to its
   classical set. The assumptions and goal will be simplified, in a
   rather safe way, after each safe step of the search.
 
-  \item @{text "addss"} adds the simpset of the context to its
+  \<^descr> @{text "addss"} adds the simpset of the context to its
   classical set. The assumptions and goal will be simplified, before
   the each unsafe step of the search.
-
-  \end{description}
 \<close>
 
 
@@ -1872,9 +1796,7 @@
     @@{attribute rule_format} ('(' 'noasm' ')')?
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
+  \<^descr> @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
   @{text c} as the truth judgment of the current object-logic.  Its
   type @{text \<sigma>} should specify a coercion of the category of
   object-level propositions to @{text prop} of the Pure meta-logic;
@@ -1883,7 +1805,7 @@
   with that of @{text prop}.  Only one @{command "judgment"}
   declaration may be given in any theory development.
   
-  \item @{method atomize} (as a method) rewrites any non-atomic
+  \<^descr> @{method atomize} (as a method) rewrites any non-atomic
   premises of a sub-goal, using the meta-level equations declared via
   @{attribute atomize} (as an attribute) beforehand.  As a result,
   heavily nested goals become amenable to fundamental operations such
@@ -1898,7 +1820,7 @@
   Meta-level conjunction should be covered as well (this is
   particularly important for locales, see \secref{sec:locale}).
 
-  \item @{attribute rule_format} rewrites a theorem by the equalities
+  \<^descr> @{attribute rule_format} rewrites a theorem by the equalities
   declared as @{attribute rulify} rules in the current object-logic.
   By default, the result is fully normalized, including assumptions
   and conclusions at any depth.  The @{text "(no_asm)"} option
@@ -1908,8 +1830,6 @@
   rule_format} is to replace (bounded) universal quantification
   (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
   rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
-
-  \end{description}
 \<close>
 
 
@@ -1928,28 +1848,25 @@
   but sometimes needs extra care to identify problems.  These tracing
   options may help.
 
-  \begin{description}
-
-  \item @{attribute unify_trace_simp} controls tracing of the
+  \<^descr> @{attribute unify_trace_simp} controls tracing of the
   simplification phase of higher-order unification.
 
-  \item @{attribute unify_trace_types} controls warnings of
+  \<^descr> @{attribute unify_trace_types} controls warnings of
   incompleteness, when unification is not considering all possible
   instantiations of schematic type variables.
 
-  \item @{attribute unify_trace_bound} determines the depth where
+  \<^descr> @{attribute unify_trace_bound} determines the depth where
   unification starts to print tracing information once it reaches
   depth; 0 for full tracing.  At the default value, tracing
   information is almost never printed in practice.
 
-  \item @{attribute unify_search_bound} prevents unification from
+  \<^descr> @{attribute unify_search_bound} prevents unification from
   searching past the given depth.  Because of this bound, higher-order
   unification cannot return an infinite sequence, though it can return
   an exponentially long one.  The search rarely approaches the default
   value in practice.  If the search is cut off, unification prints a
   warning ``Unification bound exceeded''.
 
-  \end{description}
 
   \begin{warn}
   Options for unification cannot be modified in a local context.  Only
--- a/src/Doc/Isar_Ref/HOL_Specific.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/HOL_Specific.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -116,9 +116,7 @@
     @@{attribute (HOL) mono} (() | 'add' | 'del')
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "inductive"} and @{command (HOL)
+  \<^descr> @{command (HOL) "inductive"} and @{command (HOL)
   "coinductive"} define (co)inductive predicates from the introduction
   rules.
 
@@ -139,19 +137,17 @@
   \emph{monotonicity theorems}, which are required for each operator
   applied to a recursive set in the introduction rules.
 
-  \item @{command (HOL) "inductive_set"} and @{command (HOL)
+  \<^descr> @{command (HOL) "inductive_set"} and @{command (HOL)
   "coinductive_set"} are wrappers for to the previous commands for
   native HOL predicates.  This allows to define (co)inductive sets,
   where multiple arguments are simulated via tuples.
 
-  \item @{command "print_inductives"} prints (co)inductive definitions and
+  \<^descr> @{command "print_inductives"} prints (co)inductive definitions and
   monotonicity rules; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{attribute (HOL) mono} declares monotonicity rules in the
+  \<^descr> @{attribute (HOL) mono} declares monotonicity rules in the
   context.  These rule are involved in the automated monotonicity
   proof of the above inductive and coinductive definitions.
-
-  \end{description}
 \<close>
 
 
@@ -160,22 +156,19 @@
 text \<open>A (co)inductive definition of @{text R} provides the following
   main theorems:
 
-  \begin{description}
-
-  \item @{text R.intros} is the list of introduction rules as proven
+  \<^descr> @{text R.intros} is the list of introduction rules as proven
   theorems, for the recursive predicates (or sets).  The rules are
   also available individually, using the names given them in the
   theory file;
 
-  \item @{text R.cases} is the case analysis (or elimination) rule;
-
-  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
+  \<^descr> @{text R.cases} is the case analysis (or elimination) rule;
+
+  \<^descr> @{text R.induct} or @{text R.coinduct} is the (co)induction
   rule;
 
-  \item @{text R.simps} is the equation unrolling the fixpoint of the
+  \<^descr> @{text R.simps} is the equation unrolling the fixpoint of the
   predicate one step.
 
-  \end{description}
 
   When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are defined simultaneously,
   the list of introduction rules is called @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the
@@ -192,8 +185,6 @@
   sources for some examples.  The general format of such monotonicity
   theorems is as follows:
 
-  \begin{itemize}
-
   \<^item> Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
   monotonicity of inductive definitions whose introduction rules have
   premises involving terms such as @{text "\<M> R t"}.
@@ -218,8 +209,6 @@
   @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
   @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
   \]
-
-  \end{itemize}
 \<close>
 
 subsubsection \<open>Examples\<close>
@@ -290,9 +279,7 @@
     @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "primrec"} defines primitive recursive functions
+  \<^descr> @{command (HOL) "primrec"} defines primitive recursive functions
   over datatypes (see also @{command_ref (HOL) datatype}). The given @{text
   equations} specify reduction rules that are produced by instantiating the
   generic combinator for primitive recursion that is available for each
@@ -315,7 +302,7 @@
   normalize expressions of @{text "f"} applied to datatype constructions, by
   simulating symbolic computation via rewriting.
 
-  \item @{command (HOL) "function"} defines functions by general wellfounded
+  \<^descr> @{command (HOL) "function"} defines functions by general wellfounded
   recursion. A detailed description with examples can be found in @{cite
   "isabelle-function"}. The function is specified by a set of (possibly
   conditional) recursive equations with arbitrary pattern matching. The
@@ -328,23 +315,22 @@
   "f_dom"}. The @{command (HOL) "termination"} command can then be used to
   establish that the function is total.
 
-  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command (HOL)
+  \<^descr> @{command (HOL) "fun"} is a shorthand notation for ``@{command (HOL)
   "function"}~@{text "(sequential)"}'', followed by automated proof attempts
   regarding pattern matching and termination. See @{cite
   "isabelle-function"} for further details.
 
-  \item @{command (HOL) "termination"}~@{text f} commences a termination
+  \<^descr> @{command (HOL) "termination"}~@{text f} commences a termination
   proof for the previously defined function @{text f}. If this is omitted,
   the command refers to the most recent function definition. After the proof
   is closed, the recursive equations and the induction principle is
   established.
 
-  \item @{command (HOL) "fun_cases"} generates specialized elimination rules
+  \<^descr> @{command (HOL) "fun_cases"} generates specialized elimination rules
   for function equations. It expects one or more function equations and
   produces rules that eliminate the given equalities, following the cases
   given in the function definition.
 
-  \end{description}
 
   Recursive definitions introduced by the @{command (HOL) "function"}
   command accommodate reasoning by induction (cf.\ @{method induct}): rule
@@ -360,9 +346,7 @@
 
   The @{command (HOL) "function"} command accepts the following options.
 
-  \begin{description}
-
-  \item @{text sequential} enables a preprocessor which disambiguates
+  \<^descr> @{text sequential} enables a preprocessor which disambiguates
   overlapping patterns by making them mutually disjoint. Earlier equations
   take precedence over later ones. This allows to give the specification in
   a format very similar to functional programming. Note that the resulting
@@ -371,11 +355,9 @@
   equation given by the user may result in several theorems. Also note that
   this automatic transformation only works for ML-style datatype patterns.
 
-  \item @{text domintros} enables the automated generation of introduction
+  \<^descr> @{text domintros} enables the automated generation of introduction
   rules for the domain predicate. While mostly not needed, they can be
   helpful in some proofs about partial functions.
-
-  \end{description}
 \<close>
 
 
@@ -535,21 +517,19 @@
     orders: ( 'max' | 'min' | 'ms' ) *
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) pat_completeness} is a specialized method to
+  \<^descr> @{method (HOL) pat_completeness} is a specialized method to
   solve goals regarding the completeness of pattern matching, as
   required by the @{command (HOL) "function"} package (cf.\
   @{cite "isabelle-function"}).
 
-  \item @{method (HOL) relation}~@{text R} introduces a termination
+  \<^descr> @{method (HOL) relation}~@{text R} introduces a termination
   proof using the relation @{text R}.  The resulting proof state will
   contain goals expressing that @{text R} is wellfounded, and that the
   arguments of recursive calls decrease with respect to @{text R}.
   Usually, this method is used as the initial proof step of manual
   termination proofs.
 
-  \item @{method (HOL) "lexicographic_order"} attempts a fully
+  \<^descr> @{method (HOL) "lexicographic_order"} attempts a fully
   automated termination proof by searching for a lexicographic
   combination of size measures on the arguments of the function. The
   method accepts the same arguments as the @{method auto} method,
@@ -559,7 +539,7 @@
   In case of failure, extensive information is printed, which can help
   to analyse the situation (cf.\ @{cite "isabelle-function"}).
 
-  \item @{method (HOL) "size_change"} also works on termination goals,
+  \<^descr> @{method (HOL) "size_change"} also works on termination goals,
   using a variation of the size-change principle, together with a
   graph decomposition technique (see @{cite krauss_phd} for details).
   Three kinds of orders are used internally: @{text max}, @{text min},
@@ -571,13 +551,11 @@
   For local descent proofs, the @{syntax clasimpmod} modifiers are
   accepted (as for @{method auto}).
 
-  \item @{method (HOL) induction_schema} derives user-specified
+  \<^descr> @{method (HOL) induction_schema} derives user-specified
   induction rules from well-founded induction and completeness of
   patterns. This factors out some operations that are done internally
   by the function package and makes them available separately. See
   @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
-
-  \end{description}
 \<close>
 
 
@@ -594,9 +572,7 @@
       @'where' @{syntax thmdecl}? @{syntax prop}
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
+  \<^descr> @{command (HOL) "partial_function"}~@{text "(mode)"} defines
   recursive functions based on fixpoints in complete partial
   orders. No termination proof is required from the user or
   constructed internally. Instead, the possibility of non-termination
@@ -619,34 +595,28 @@
   order on the result type. By default, the following modes are
   defined:
 
-  \begin{description}
-
-  \item @{text option} defines functions that map into the @{type
-  option} type. Here, the value @{term None} is used to model a
-  non-terminating computation. Monotonicity requires that if @{term
-  None} is returned by a recursive call, then the overall result must
-  also be @{term None}. This is best achieved through the use of the
-  monadic operator @{const "Option.bind"}.
-
-  \item @{text tailrec} defines functions with an arbitrary result
-  type and uses the slightly degenerated partial order where @{term
-  "undefined"} is the bottom element.  Now, monotonicity requires that
-  if @{term undefined} is returned by a recursive call, then the
-  overall result must also be @{term undefined}. In practice, this is
-  only satisfied when each recursive call is a tail call, whose result
-  is directly returned. Thus, this mode of operation allows the
-  definition of arbitrary tail-recursive functions.
-
-  \end{description}
+    \<^descr> @{text option} defines functions that map into the @{type
+    option} type. Here, the value @{term None} is used to model a
+    non-terminating computation. Monotonicity requires that if @{term
+    None} is returned by a recursive call, then the overall result must
+    also be @{term None}. This is best achieved through the use of the
+    monadic operator @{const "Option.bind"}.
+
+    \<^descr> @{text tailrec} defines functions with an arbitrary result
+    type and uses the slightly degenerated partial order where @{term
+    "undefined"} is the bottom element.  Now, monotonicity requires that
+    if @{term undefined} is returned by a recursive call, then the
+    overall result must also be @{term undefined}. In practice, this is
+    only satisfied when each recursive call is a tail call, whose result
+    is directly returned. Thus, this mode of operation allows the
+    definition of arbitrary tail-recursive functions.
 
   Experienced users may define new modes by instantiating the locale
   @{const "partial_function_definitions"} appropriately.
 
-  \item @{attribute (HOL) partial_function_mono} declares rules for
+  \<^descr> @{attribute (HOL) partial_function_mono} declares rules for
   use in the internal monotonicity proofs of partial function
   definitions.
-
-  \end{description}
 \<close>
 
 
@@ -671,9 +641,7 @@
       (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "recdef"} defines general well-founded
+  \<^descr> @{command (HOL) "recdef"} defines general well-founded
   recursive functions (using the TFL package), see also
   @{cite "isabelle-HOL"}.  The ``@{text "(permissive)"}'' option tells
   TFL to recover from failed proof attempts, returning unfinished
@@ -684,7 +652,6 @@
   (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
   \secref{sec:classical}).
 
-  \end{description}
 
   \<^medskip>
   Hints for @{command (HOL) "recdef"} may be also declared
@@ -725,22 +692,18 @@
       (@{syntax nameref} (@{syntax term} + ) + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
+  \<^descr> @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
   associates variants with an existing constant.
 
-  \item @{command "no_adhoc_overloading"} is similar to
+  \<^descr> @{command "no_adhoc_overloading"} is similar to
   @{command "adhoc_overloading"}, but removes the specified variants
   from the present context.
 
-  \item @{attribute "show_variants"} controls printing of variants
+  \<^descr> @{attribute "show_variants"} controls printing of variants
   of overloaded constants. If enabled, the internally used variants
   are printed instead of their respective overloaded constants. This
   is occasionally useful to check whether the system agrees with a
   user's expectations about derived variants.
-
-  \end{description}
 \<close>
 
 
@@ -758,9 +721,7 @@
     decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
+  \<^descr> @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
   goal stating the existence of terms with the properties specified to
   hold for the constants given in @{text decls}.  After finishing the
   proof, the theory will be augmented with definitions for the given
@@ -771,8 +732,6 @@
   specification given.  The definition for the constant @{text c} is
   bound to the name @{text c_def} unless a theorem name is given in
   the declaration.  Overloaded constants should be declared as such.
-
-  \end{description}
 \<close>
 
 
@@ -795,15 +754,12 @@
     cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "old_datatype"} defines old-style inductive
+  \<^descr> @{command (HOL) "old_datatype"} defines old-style inductive
   datatypes in HOL.
 
-  \item @{command (HOL) "old_rep_datatype"} represents existing types as
+  \<^descr> @{command (HOL) "old_rep_datatype"} represents existing types as
   old-style datatypes.
 
-  \end{description}
 
   These commands are mostly obsolete; @{command (HOL) "datatype"}
   should be used instead.
@@ -906,15 +862,12 @@
   Two key observations make extensible records in a simply
   typed language like HOL work out:
 
-  \begin{enumerate}
-
   \<^enum> the more part is internalized, as a free term or type
   variable,
 
   \<^enum> field names are externalized, they cannot be accessed within
   the logic as first-class values.
 
-  \end{enumerate}
 
   \<^medskip>
   In Isabelle/HOL record types have to be defined explicitly,
@@ -943,9 +896,7 @@
     constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
+  \<^descr> @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
   \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
   derived from the optional parent record @{text "\<tau>"} by adding new
   field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
@@ -971,8 +922,6 @@
   "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
   @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
   \<zeta>\<rparr>"}.
-
-  \end{description}
 \<close>
 
 
@@ -1062,8 +1011,6 @@
   reason about record structures quite conveniently.  Assume that
   @{text t} is a record type as specified above.
 
-  \begin{enumerate}
-
   \<^enum> Standard conversions for selectors or updates applied to record
   constructor terms are made part of the default Simplifier context; thus
   proofs by reduction of basic operations merely require the @{method simp}
@@ -1098,8 +1045,6 @@
   @{text "t.extend"}, @{text "t.truncate"} are \emph{not} treated
   automatically, but usually need to be expanded by hand, using the
   collective fact @{text "t.defs"}.
-
-  \end{enumerate}
 \<close>
 
 
@@ -1177,9 +1122,7 @@
   dependent type: the meaning relies on the operations provided by different
   type-class instances.
 
-  \begin{description}
-
-  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"} defines a
+  \<^descr> @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"} defines a
   new type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} from the set @{text A} over an existing
   type. The set @{text A} may contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"}
   as specified on the LHS, but no term variables. Non-emptiness of @{text A}
@@ -1223,8 +1166,6 @@
   surjectivity.  These rules are already declared as set or type rules
   for the generic @{method cases} and @{method induct} methods,
   respectively.
-
-  \end{description}
 \<close>
 
 
@@ -1272,9 +1213,7 @@
     @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to prove and
+  \<^descr> @{command (HOL) "functor"}~@{text "prefix: m"} allows to prove and
   register properties about the functorial structure of type constructors.
   These properties then can be used by other packages to deal with those
   type constructors in certain type constructions. Characteristic theorems
@@ -1296,8 +1235,6 @@
   theory and @{text "\<sigma>\<^sub>1"}, \ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text
   "\<alpha>\<^sub>1 \<Rightarrow> \<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots, @{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text
   "\<beta>\<^sub>n \<Rightarrow> \<alpha>\<^sub>n"}.
-
-  \end{description}
 \<close>
 
 
@@ -1330,9 +1267,7 @@
     quot_parametric: @'parametric' @{syntax thmref}
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text
+  \<^descr> @{command (HOL) "quotient_type"} defines a new quotient type @{text
   \<tau>}. The injection from a quotient type to a raw type is called @{text
   rep_\<tau>}, its inverse @{text abs_\<tau>} unless explicit @{keyword (HOL)
   "morphisms"} specification provides alternative names. @{command (HOL)
@@ -1350,8 +1285,6 @@
   extra argument of the command and is passed to the corresponding internal
   call of @{command (HOL) setup_lifting}. This theorem allows the Lifting
   package to generate a stronger transfer rule for equality.
-
-  \end{description}
 \<close>
 
 
@@ -1405,35 +1338,29 @@
       @{syntax thmref} (@{syntax thmref} @{syntax thmref})?
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package to work
+  \<^descr> @{command (HOL) "setup_lifting"} Sets up the Lifting package to work
   with a user-defined type. The command supports two modes.
 
-  \begin{enumerate}
-
-  \<^enum> The first one is a low-level mode when the user must provide as a
-  first argument of @{command (HOL) "setup_lifting"} a quotient theorem
-  @{term "Quotient R Abs Rep T"}. The package configures a transfer rule for
-  equality, a domain transfer rules and sets up the @{command_def (HOL)
-  "lift_definition"} command to work with the abstract type. An optional
-  theorem @{term "reflp R"}, which certifies that the equivalence relation R
-  is total, can be provided as a second argument. This allows the package to
-  generate stronger transfer rules. And finally, the parametricity theorem
-  for @{term R} can be provided as a third argument. This allows the package
-  to generate a stronger transfer rule for equality.
-
-  Users generally will not prove the @{text Quotient} theorem manually for
-  new types, as special commands exist to automate the process.
-
-  \<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command
-  (HOL) "lift_definition"} can be used in its second mode, where only the
-  @{term type_definition} theorem @{term "type_definition Rep Abs A"} is
-  used as an argument of the command. The command internally proves the
-  corresponding @{term Quotient} theorem and registers it with @{command
-  (HOL) setup_lifting} using its first mode.
-
-  \end{enumerate}
+    \<^enum> The first one is a low-level mode when the user must provide as a
+    first argument of @{command (HOL) "setup_lifting"} a quotient theorem
+    @{term "Quotient R Abs Rep T"}. The package configures a transfer rule for
+    equality, a domain transfer rules and sets up the @{command_def (HOL)
+    "lift_definition"} command to work with the abstract type. An optional
+    theorem @{term "reflp R"}, which certifies that the equivalence relation R
+    is total, can be provided as a second argument. This allows the package to
+    generate stronger transfer rules. And finally, the parametricity theorem
+    for @{term R} can be provided as a third argument. This allows the package
+    to generate a stronger transfer rule for equality.
+
+    Users generally will not prove the @{text Quotient} theorem manually for
+    new types, as special commands exist to automate the process.
+
+    \<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command
+    (HOL) "lift_definition"} can be used in its second mode, where only the
+    @{term type_definition} theorem @{term "type_definition Rep Abs A"} is
+    used as an argument of the command. The command internally proves the
+    corresponding @{term Quotient} theorem and registers it with @{command
+    (HOL) setup_lifting} using its first mode.
 
   For quotients, the command @{command (HOL) quotient_type} can be used. The
   command defines a new quotient type and similarly to the previous case,
@@ -1446,7 +1373,7 @@
   by @{command (HOL) "lift_definition"}, the Lifting package proves and
   registers a code equation (if there is one) for the new constant.
 
-  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL)
+  \<^descr> @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL)
   "is"} @{text t} Defines a new function @{text f} with an abstract type
   @{text \<tau>} in terms of a corresponding operation @{text t} on a
   representation type. More formally, if @{text "t :: \<sigma>"}, then the command
@@ -1488,24 +1415,22 @@
   code execution through series of internal type and lifting definitions if
   the return type @{text "\<tau>"} meets the following inductive conditions:
 
-  \begin{description}
-
-  \item @{text "\<tau>"} is a type variable \item @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"},
-  where @{text "\<kappa>"} is an abstract type constructor and @{text "\<tau>\<^sub>1 \<dots> \<tau>\<^sub>n"}
-  do not contain abstract types (i.e.\ @{typ "int dlist"} is allowed whereas
-  @{typ "int dlist dlist"} not)
-
-  \item @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"}, @{text "\<kappa>"} is a type constructor that
-  was defined as a (co)datatype whose constructor argument types do not
-  contain either non-free datatypes or the function type.
-
-  \end{description}
+    \<^descr> @{text "\<tau>"} is a type variable
+
+    \<^descr> @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"},
+    where @{text "\<kappa>"} is an abstract type constructor and @{text "\<tau>\<^sub>1 \<dots> \<tau>\<^sub>n"}
+    do not contain abstract types (i.e.\ @{typ "int dlist"} is allowed whereas
+    @{typ "int dlist dlist"} not)
+
+    \<^descr> @{text "\<tau> = \<tau>\<^sub>1 \<dots> \<tau>\<^sub>n \<kappa>"}, @{text "\<kappa>"} is a type constructor that
+    was defined as a (co)datatype whose constructor argument types do not
+    contain either non-free datatypes or the function type.
 
   Integration with [@{attribute code} equation]: For total quotients,
   @{command (HOL) "lift_definition"} uses @{text f.abs_eq} as a code
   equation.
 
-  \item @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
+  \<^descr> @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
   These two commands serve for storing and deleting the set-up of the
   Lifting package and corresponding transfer rules defined by this package.
   This is useful for hiding of type construction details of an abstract type
@@ -1528,19 +1453,19 @@
   including a bundle (@{command "include"}, @{keyword "includes"} and
   @{command "including"}).
 
-  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
+  \<^descr> @{command (HOL) "print_quot_maps"} prints stored quotient map
   theorems.
 
-  \item @{command (HOL) "print_quotients"} prints stored quotient theorems.
-
-  \item @{attribute (HOL) quot_map} registers a quotient map theorem, a
+  \<^descr> @{command (HOL) "print_quotients"} prints stored quotient theorems.
+
+  \<^descr> @{attribute (HOL) quot_map} registers a quotient map theorem, a
   theorem showing how to ``lift'' quotients over type constructors. E.g.\
   @{term "Quotient R Abs Rep T \<Longrightarrow> Quotient (rel_set R) (image Abs) (image
   Rep) (rel_set T)"}. For examples see @{file "~~/src/HOL/Lifting_Set.thy"}
   or @{file "~~/src/HOL/Lifting.thy"}. This property is proved automatically
   if the involved type is BNF without dead variables.
 
-  \item @{attribute (HOL) relator_eq_onp} registers a theorem that shows
+  \<^descr> @{attribute (HOL) relator_eq_onp} registers a theorem that shows
   that a relator applied to an equality restricted by a predicate @{term P}
   (i.e.\ @{term "eq_onp P"}) is equal to a predicator applied to the @{term
   P}. The combinator @{const eq_onp} is used for internal encoding of proper
@@ -1550,7 +1475,7 @@
   This property is proved automatically if the involved type is BNF without
   dead variables.
 
-  \item @{attribute (HOL) "relator_mono"} registers a property describing a
+  \<^descr> @{attribute (HOL) "relator_mono"} registers a property describing a
   monotonicity of a relator. E.g.\ @{prop "A \<le> B \<Longrightarrow> rel_set A \<le> rel_set B"}.
   This property is needed for proving a stronger transfer rule in
   @{command_def (HOL) "lift_definition"} when a parametricity theorem for
@@ -1559,7 +1484,7 @@
   "~~/src/HOL/Lifting.thy"}. This property is proved automatically if the
   involved type is BNF without dead variables.
 
-  \item @{attribute (HOL) "relator_distr"} registers a property describing a
+  \<^descr> @{attribute (HOL) "relator_distr"} registers a property describing a
   distributivity of the relation composition and a relator. E.g.\ @{text
   "rel_set R \<circ>\<circ> rel_set S = rel_set (R \<circ>\<circ> S)"}. This property is needed for
   proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
@@ -1573,14 +1498,14 @@
   property is proved automatically if the involved type is BNF without dead
   variables.
 
-  \item @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
+  \<^descr> @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
   from the Lifting infrastructure and thus de-register the corresponding
   quotient. This effectively causes that @{command (HOL) lift_definition}
   will not do any lifting for the corresponding type. This attribute is
   rather used for low-level manipulation with set-up of the Lifting package
   because @{command (HOL) lifting_forget} is preferred for normal usage.
 
-  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def
+  \<^descr> @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def
   pcr_cr_eq_thm"} registers the Quotient theorem @{text Quotient_thm} in the
   Lifting infrastructure and thus sets up lifting for an abstract type
   @{text \<tau>} (that is defined by @{text Quotient_thm}). Optional theorems
@@ -1593,7 +1518,7 @@
   together with the commands @{command (HOL) lifting_forget} and @{command
   (HOL) lifting_update} is preferred for normal usage.
 
-  \item Integration with the BNF package @{cite "isabelle-datatypes"}: As
+  \<^descr> Integration with the BNF package @{cite "isabelle-datatypes"}: As
   already mentioned, the theorems that are registered by the following
   attributes are proved and registered automatically if the involved type is
   BNF without dead variables: @{attribute (HOL) quot_map}, @{attribute (HOL)
@@ -1601,8 +1526,6 @@
   "relator_distr"}. Also the definition of a relator and predicator is
   provided automatically. Moreover, if the BNF represents a datatype,
   simplification rules for a predicator are again proved automatically.
-
-  \end{description}
 \<close>
 
 
@@ -1626,27 +1549,25 @@
     @{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{method (HOL) "transfer"} method replaces the current subgoal with
+  \<^descr> @{method (HOL) "transfer"} method replaces the current subgoal with
   a logically equivalent one that uses different types and constants. The
   replacement of types and constants is guided by the database of transfer
   rules. Goals are generalized over all free variables by default; this is
   necessary for variables whose types change, but can be overridden for
   specific variables with e.g. @{text "transfer fixing: x y z"}.
 
-  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL) transfer}
+  \<^descr> @{method (HOL) "transfer'"} is a variant of @{method (HOL) transfer}
   that allows replacing a subgoal with one that is logically stronger
   (rather than equivalent). For example, a subgoal involving equality on a
   quotient type could be replaced with a subgoal involving equality (instead
   of the corresponding equivalence relation) on the underlying raw type.
 
-  \item @{method (HOL) "transfer_prover"} method assists with proving a
+  \<^descr> @{method (HOL) "transfer_prover"} method assists with proving a
   transfer rule for a new constant, provided the constant is defined in
   terms of other constants that already have transfer rules. It should be
   applied after unfolding the constant definitions.
 
-  \item @{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"},
+  \<^descr> @{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"},
   @{method (HOL) "transfer_end"}, @{method (HOL) "transfer_prover_start"}
   and @{method (HOL) "transfer_prover_end"} methods are meant to be used
   for debugging of @{method (HOL) "transfer"} and @{method (HOL) "transfer_prover"},
@@ -1657,17 +1578,17 @@
   @{method (HOL) "transfer_step"}+, @{method (HOL) "transfer_prover_end"}).
   For usage examples see @{file "~~/src/HOL/ex/Transfer_Debug.thy"} 
 
-  \item @{attribute (HOL) "untransferred"} proves the same equivalent
+  \<^descr> @{attribute (HOL) "untransferred"} proves the same equivalent
   theorem as @{method (HOL) "transfer"} internally does.
 
-  \item @{attribute (HOL) Transfer.transferred} works in the opposite
+  \<^descr> @{attribute (HOL) Transfer.transferred} works in the opposite
   direction than @{method (HOL) "transfer'"}. E.g.\ given the transfer
   relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and
   the theorem @{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would
   prove @{text "\<forall>n::nat. n < n + 1"}. The attribute is still in experimental
   phase of development.
 
-  \item @{attribute (HOL) "transfer_rule"} attribute maintains a collection
+  \<^descr> @{attribute (HOL) "transfer_rule"} attribute maintains a collection
   of transfer rules, which relate constants at two different types. Typical
   transfer rules may relate different type instances of the same polymorphic
   constant, or they may relate an operation on a raw type to a corresponding
@@ -1687,14 +1608,14 @@
   a relator is proved automatically if the involved type is BNF @{cite
   "isabelle-datatypes"} without dead variables.
 
-  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a
+  \<^descr> @{attribute (HOL) "transfer_domain_rule"} attribute maintains a
   collection of rules, which specify a domain of a transfer relation by a
   predicate. E.g.\ given the transfer relation @{text "ZN x n \<equiv> (x = int
   n)"}, one can register the following transfer domain rule: @{text "Domainp
   ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce more readable
   transferred goals, e.g.\ when quantifiers are transferred.
 
-  \item @{attribute (HOL) relator_eq} attribute collects identity laws for
+  \<^descr> @{attribute (HOL) relator_eq} attribute collects identity laws for
   relators of various type constructors, e.g. @{term "rel_set (op =) = (op
   =)"}. The @{method (HOL) transfer} method uses these lemmas to infer
   transfer rules for non-polymorphic constants on the fly. For examples see
@@ -1702,7 +1623,7 @@
   This property is proved automatically if the involved type is BNF without
   dead variables.
 
-  \item @{attribute_def (HOL) "relator_domain"} attribute collects rules
+  \<^descr> @{attribute (HOL) "relator_domain"} attribute collects rules
   describing domains of relators by predicators. E.g.\ @{term "Domainp
   (rel_set T) = (\<lambda>A. Ball A (Domainp T))"}. This allows the package to lift
   transfer domain rules through type constructors. For examples see @{file
@@ -1710,7 +1631,6 @@
   property is proved automatically if the involved type is BNF without dead
   variables.
 
-  \end{description}
 
   Theoretical background can be found in @{cite
   "Huffman-Kuncar:2013:lifting_transfer"}.
@@ -1751,18 +1671,16 @@
     @@{method (HOL) lifting_setup} @{syntax thmrefs}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "quotient_definition"} defines a constant on the
+  \<^descr> @{command (HOL) "quotient_definition"} defines a constant on the
   quotient type.
 
-  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map functions.
-
-  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
-
-  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
-
-  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
+  \<^descr> @{command (HOL) "print_quotmapsQ3"} prints quotient map functions.
+
+  \<^descr> @{command (HOL) "print_quotientsQ3"} prints quotients.
+
+  \<^descr> @{command (HOL) "print_quotconsts"} prints quotient constants.
+
+  \<^descr> @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
   methods match the current goal with the given raw theorem to be lifted
   producing three new subgoals: regularization, injection and cleaning
   subgoals. @{method (HOL) "lifting"} tries to apply the heuristics for
@@ -1770,7 +1688,7 @@
   unsolved by the heuristics to the user as opposed to @{method (HOL)
   "lifting_setup"} which leaves the three subgoals unsolved.
 
-  \item @{method (HOL) "descending"} and @{method (HOL) "descending_setup"}
+  \<^descr> @{method (HOL) "descending"} and @{method (HOL) "descending_setup"}
   try to guess a raw statement that would lift to the current subgoal. Such
   statement is assumed as a new subgoal and @{method (HOL) "descending"}
   continues in the same way as @{method (HOL) "lifting"} does. @{method
@@ -1778,34 +1696,34 @@
   and cleaning subgoals with the analogous method @{method (HOL)
   "descending_setup"} which leaves the four unsolved subgoals.
 
-  \item @{method (HOL) "partiality_descending"} finds the regularized
+  \<^descr> @{method (HOL) "partiality_descending"} finds the regularized
   theorem that would lift to the current subgoal, lifts it and leaves as a
   subgoal. This method can be used with partial equivalence quotients where
   the non regularized statements would not be true. @{method (HOL)
   "partiality_descending_setup"} leaves the injection and cleaning subgoals
   unchanged.
 
-  \item @{method (HOL) "regularize"} applies the regularization heuristics
+  \<^descr> @{method (HOL) "regularize"} applies the regularization heuristics
   to the current subgoal.
 
-  \item @{method (HOL) "injection"} applies the injection heuristics to the
+  \<^descr> @{method (HOL) "injection"} applies the injection heuristics to the
   current goal using the stored quotient respectfulness theorems.
 
-  \item @{method (HOL) "cleaning"} applies the injection cleaning heuristics
+  \<^descr> @{method (HOL) "cleaning"} applies the injection cleaning heuristics
   to the current subgoal using the stored quotient preservation theorems.
 
-  \item @{attribute (HOL) quot_lifted} attribute tries to automatically
+  \<^descr> @{attribute (HOL) quot_lifted} attribute tries to automatically
   transport the theorem to the quotient type. The attribute uses all the
   defined quotients types and quotient constants often producing undesired
   results or theorems that cannot be lifted.
 
-  \item @{attribute (HOL) quot_respect} and @{attribute (HOL) quot_preserve}
+  \<^descr> @{attribute (HOL) quot_respect} and @{attribute (HOL) quot_preserve}
   attributes declare a theorem as a respectfulness and preservation theorem
   respectively. These are stored in the local theory store and used by the
   @{method (HOL) "injection"} and @{method (HOL) "cleaning"} methods
   respectively.
 
-  \item @{attribute (HOL) quot_thm} declares that a certain theorem is a
+  \<^descr> @{attribute (HOL) quot_thm} declares that a certain theorem is a
   quotient extension theorem. Quotient extension theorems allow for
   quotienting inside container types. Given a polymorphic type that serves
   as a container, a map function defined for this container using @{command
@@ -1813,8 +1731,6 @@
   the quotient extension theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow>
   Quotient3 (rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
   are stored in a database and are used all the steps of lifting theorems.
-
-  \end{description}
 \<close>
 
 
@@ -1853,33 +1769,29 @@
     facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
   \<close>} % FIXME check args "value"
 
-  \begin{description}
-
-  \item @{command (HOL) "solve_direct"} checks whether the current
+  \<^descr> @{command (HOL) "solve_direct"} checks whether the current
   subgoals can be solved directly by an existing theorem. Duplicate
   lemmas can be detected in this way.
 
-  \item @{command (HOL) "try0"} attempts to prove a subgoal
+  \<^descr> @{command (HOL) "try0"} attempts to prove a subgoal
   using a combination of standard proof methods (@{method auto},
   @{method simp}, @{method blast}, etc.).  Additional facts supplied
   via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
   "dest:"} are passed to the appropriate proof methods.
 
-  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
+  \<^descr> @{command (HOL) "try"} attempts to prove or disprove a subgoal
   using a combination of provers and disprovers (@{command (HOL)
   "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
   "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
   "nitpick"}).
 
-  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
+  \<^descr> @{command (HOL) "sledgehammer"} attempts to prove a subgoal
   using external automatic provers (resolution provers and SMT
   solvers). See the Sledgehammer manual @{cite "isabelle-sledgehammer"}
   for details.
 
-  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
+  \<^descr> @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
   "sledgehammer"} configuration options persistently.
-
-  \end{description}
 \<close>
 
 
@@ -1927,9 +1839,7 @@
     args: ( @{syntax name} '=' value + ',' )
   \<close>} % FIXME check "value"
 
-  \begin{description}
-
-  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
+  \<^descr> @{command (HOL) "value"}~@{text t} evaluates and prints a
   term; optionally @{text modes} can be specified, which are appended
   to the current print mode; see \secref{sec:print-modes}.
   Evaluation is tried first using ML, falling
@@ -1940,13 +1850,13 @@
   using the simplifier, @{text nbe} for \emph{normalization by
   evaluation} and \emph{code} for code generation in SML.
 
-  \item @{command (HOL) "values"}~@{text t} enumerates a set
+  \<^descr> @{command (HOL) "values"}~@{text t} enumerates a set
   comprehension by evaluation and prints its values up to the given
   number of solutions; optionally @{text modes} can be specified,
   which are appended to the current print mode; see
   \secref{sec:print-modes}.
 
-  \item @{command (HOL) "quickcheck"} tests the current goal for
+  \<^descr> @{command (HOL) "quickcheck"} tests the current goal for
   counterexamples using a series of assignments for its free
   variables; by default the first subgoal is tested, an other can be
   selected explicitly using an optional goal index.  Assignments can
@@ -1956,9 +1866,7 @@
   quickcheck uses exhaustive testing.  A number of configuration
   options are supported for @{command (HOL) "quickcheck"}, notably:
 
-    \begin{description}
-
-    \item[@{text tester}] specifies which testing approach to apply.
+    \<^descr>[@{text tester}] specifies which testing approach to apply.
     There are three testers, @{text exhaustive}, @{text random}, and
     @{text narrowing}.  An unknown configuration option is treated as
     an argument to tester, making @{text "tester ="} optional.  When
@@ -1969,31 +1877,31 @@
     quickcheck_random_active}, @{attribute
     quickcheck_narrowing_active} are set to true.
 
-    \item[@{text size}] specifies the maximum size of the search space
+    \<^descr>[@{text size}] specifies the maximum size of the search space
     for assignment values.
 
-    \item[@{text genuine_only}] sets quickcheck only to return genuine
+    \<^descr>[@{text genuine_only}] sets quickcheck only to return genuine
    counterexamples, but not potentially spurious counterexamples due
     to underspecified functions.
 
-    \item[@{text abort_potential}] sets quickcheck to abort once it
+    \<^descr>[@{text abort_potential}] sets quickcheck to abort once it
     found a potentially spurious counterexample and to not continue
     to search for a further genuine counterexample.
     For this option to be effective, the @{text genuine_only} option
     must be set to false.
 
-    \item[@{text eval}] takes a term or a list of terms and evaluates
+    \<^descr>[@{text eval}] takes a term or a list of terms and evaluates
     these terms under the variable assignment found by quickcheck.
     This option is currently only supported by the default
     (exhaustive) tester.
 
-    \item[@{text iterations}] sets how many sets of assignments are
+    \<^descr>[@{text iterations}] sets how many sets of assignments are
     generated for each particular size.
 
-    \item[@{text no_assms}] specifies whether assumptions in
+    \<^descr>[@{text no_assms}] specifies whether assumptions in
     structured proofs should be ignored.
 
-    \item[@{text locale}] specifies how to process conjectures in
+    \<^descr>[@{text locale}] specifies how to process conjectures in
     a locale context, i.e.\ they can be interpreted or expanded.
     The option is a whitespace-separated list of the two words
     @{text interpret} and @{text expand}. The list determines the
@@ -2002,113 +1910,108 @@
     The option is only provided as attribute declaration, but not
     as parameter to the command.
 
-    \item[@{text timeout}] sets the time limit in seconds.
-
-    \item[@{text default_type}] sets the type(s) generally used to
+    \<^descr>[@{text timeout}] sets the time limit in seconds.
+
+    \<^descr>[@{text default_type}] sets the type(s) generally used to
     instantiate type variables.
 
-    \item[@{text report}] if set quickcheck reports how many tests
+    \<^descr>[@{text report}] if set quickcheck reports how many tests
     fulfilled the preconditions.
 
-    \item[@{text use_subtype}] if set quickcheck automatically lifts
+    \<^descr>[@{text use_subtype}] if set quickcheck automatically lifts
     conjectures to registered subtypes if possible, and tests the
     lifted conjecture.
 
-    \item[@{text quiet}] if set quickcheck does not output anything
+    \<^descr>[@{text quiet}] if set quickcheck does not output anything
     while testing.
 
-    \item[@{text verbose}] if set quickcheck informs about the current
+    \<^descr>[@{text verbose}] if set quickcheck informs about the current
     size and cardinality while testing.
 
-    \item[@{text expect}] can be used to check if the user's
+    \<^descr>[@{text expect}] can be used to check if the user's
     expectation was met (@{text no_expectation}, @{text
     no_counterexample}, or @{text counterexample}).
 
-    \end{description}
-
   These option can be given within square brackets.
 
   Using the following type classes, the testers generate values and convert
   them back into Isabelle terms for displaying counterexamples.
-    \begin{description}
-    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
-      and @{class full_exhaustive} implement the testing. They take a
-      testing function as a parameter, which takes a value of type @{typ "'a"}
-      and optionally produces a counterexample, and a size parameter for the test values.
-      In @{class full_exhaustive}, the testing function parameter additionally
-      expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
-      of the tested value.
-
-      The canonical implementation for @{text exhaustive} testers calls the given
-      testing function on all values up to the given size and stops as soon
-      as a counterexample is found.
-
-    \item[@{text random}] The operation @{const Quickcheck_Random.random}
-      of the type class @{class random} generates a pseudo-random
-      value of the given size and a lazy term reconstruction of the value
-      in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
-      is defined in theory @{theory Random}.
-
-    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"}
-      using the type classes @{class narrowing} and @{class partial_term_of}.
-      Variables in the current goal are initially represented as symbolic variables.
-      If the execution of the goal tries to evaluate one of them, the test engine
-      replaces it with refinements provided by @{const narrowing}.
-      Narrowing views every value as a sum-of-products which is expressed using the operations
-      @{const Quickcheck_Narrowing.cons} (embedding a value),
-      @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
-      The refinement should enable further evaluation of the goal.
-
-      For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
-      can be recursively defined as
-      @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
+
+    \<^descr>[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
+    and @{class full_exhaustive} implement the testing. They take a
+    testing function as a parameter, which takes a value of type @{typ "'a"}
+    and optionally produces a counterexample, and a size parameter for the test values.
+    In @{class full_exhaustive}, the testing function parameter additionally
+    expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
+    of the tested value.
+
+    The canonical implementation for @{text exhaustive} testers calls the given
+    testing function on all values up to the given size and stops as soon
+    as a counterexample is found.
+
+    \<^descr>[@{text random}] The operation @{const Quickcheck_Random.random}
+    of the type class @{class random} generates a pseudo-random
+    value of the given size and a lazy term reconstruction of the value
+    in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
+    is defined in theory @{theory Random}.
+
+    \<^descr>[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"}
+    using the type classes @{class narrowing} and @{class partial_term_of}.
+    Variables in the current goal are initially represented as symbolic variables.
+    If the execution of the goal tries to evaluate one of them, the test engine
+    replaces it with refinements provided by @{const narrowing}.
+    Narrowing views every value as a sum-of-products which is expressed using the operations
+    @{const Quickcheck_Narrowing.cons} (embedding a value),
+    @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
+    The refinement should enable further evaluation of the goal.
+
+    For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
+    can be recursively defined as
+    @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
+              (Quickcheck_Narrowing.apply
                 (Quickcheck_Narrowing.apply
-                  (Quickcheck_Narrowing.apply
-                    (Quickcheck_Narrowing.cons (op #))
-                    narrowing)
-                  narrowing)"}.
-      If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
-      list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively
-      refined if needed.
-
-      To reconstruct counterexamples, the operation @{const partial_term_of} transforms
-      @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
-      The deep representation models symbolic variables as
-      @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
-      @{const Code_Evaluation.Free}, and refined values as
-      @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
-      denotes the index in the sum of refinements. In the above example for lists,
-      @{term "0"} corresponds to @{term "[]"} and @{term "1"}
-      to @{term "op #"}.
-
-      The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
-      such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
-      but it does not ensures consistency with @{const narrowing}.
-    \end{description}
-
-  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
+                  (Quickcheck_Narrowing.cons (op #))
+                  narrowing)
+                narrowing)"}.
+    If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
+    list @{term "[]"} and (ii)~a non-empty list whose head and tail can then be recursively
+    refined if needed.
+
+    To reconstruct counterexamples, the operation @{const partial_term_of} transforms
+    @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
+    The deep representation models symbolic variables as
+    @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
+    @{const Code_Evaluation.Free}, and refined values as
+    @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
+    denotes the index in the sum of refinements. In the above example for lists,
+    @{term "0"} corresponds to @{term "[]"} and @{term "1"}
+    to @{term "op #"}.
+
+    The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
+    such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
+    but it does not ensure consistency with @{const narrowing}.
+
+  \<^descr> @{command (HOL) "quickcheck_params"} changes @{command (HOL)
   "quickcheck"} configuration options persistently.
 
-  \item @{command (HOL) "quickcheck_generator"} creates random and
+  \<^descr> @{command (HOL) "quickcheck_generator"} creates random and
   exhaustive value generators for a given type and operations.  It
   generates values by using the operations as if they were
   constructors of that type.
 
-  \item @{command (HOL) "nitpick"} tests the current goal for
+  \<^descr> @{command (HOL) "nitpick"} tests the current goal for
   counterexamples using a reduction to first-order relational
   logic. See the Nitpick manual @{cite "isabelle-nitpick"} for details.
 
-  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
+  \<^descr> @{command (HOL) "nitpick_params"} changes @{command (HOL)
   "nitpick"} configuration options persistently.
 
-  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
+  \<^descr> @{command (HOL) "find_unused_assms"} finds potentially superfluous
   assumptions in theorems using quickcheck.
   It takes the theory name to be checked for superfluous assumptions as
   optional argument. If not provided, it checks the current theory.
   Options to the internal quickcheck invocations can be changed with
   common configuration declarations.
-
-  \end{description}
 \<close>
 
 
@@ -2138,20 +2041,18 @@
     @@{attribute (HOL) coercion_args} (@{syntax const}) (('+' | '0' | '-')+)
   \<close>}
 
-  \begin{description}
-
-  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
+  \<^descr> @{attribute (HOL) "coercion"}~@{text "f"} registers a new
   coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
   @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
   composed by the inference algorithm if needed.  Note that the type
   inference algorithm is complete only if the registered coercions
   form a lattice.
 
-  \item @{attribute (HOL) "coercion_delete"}~@{text "f"} deletes a
+  \<^descr> @{attribute (HOL) "coercion_delete"}~@{text "f"} deletes a
   preceding declaration (using @{attribute (HOL) "coercion"}) of the
   function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} as a coercion.
 
-  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
+  \<^descr> @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
   new map function to lift coercions through type constructors. The
   function @{text "map"} must conform to the following type pattern
 
@@ -2165,7 +2066,7 @@
   overwrites any existing map function for this particular type
   constructor.
 
-  \item @{attribute (HOL) "coercion_args"} can be used to disallow
+  \<^descr> @{attribute (HOL) "coercion_args"} can be used to disallow
   coercions to be inserted in certain positions in a term. For example,
   given the constant @{text "c :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2 \<Rightarrow> \<sigma>\<^sub>3 \<Rightarrow> \<sigma>\<^sub>4"} and the list
   of policies @{text "- + 0"} as arguments, coercions will not be
@@ -2180,10 +2081,8 @@
   insertion of coercions (see, for example, the setup for the case syntax
   in @{theory Ctr_Sugar}). 
 
-  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
+  \<^descr> @{attribute (HOL) "coercion_enabled"} enables the coercion
   inference algorithm.
-
-  \end{description}
 \<close>
 
 
@@ -2196,19 +2095,16 @@
     @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{method (HOL) arith} decides linear arithmetic problems (on
+  \<^descr> @{method (HOL) arith} decides linear arithmetic problems (on
   types @{text nat}, @{text int}, @{text real}).  Any current facts
   are inserted into the goal before running the procedure.
 
-  \item @{attribute (HOL) arith} declares facts that are supplied to
+  \<^descr> @{attribute (HOL) arith} declares facts that are supplied to
   the arithmetic provers implicitly.
 
-  \item @{attribute (HOL) arith_split} attribute declares case split
+  \<^descr> @{attribute (HOL) arith_split} attribute declares case split
   rules to be expanded before @{method (HOL) arith} is invoked.
 
-  \end{description}
 
   Note that a simpler (but faster) arithmetic prover is already
   invoked by the Simplifier.
@@ -2226,9 +2122,7 @@
     @@{method (HOL) iprover} (@{syntax rulemod} *)
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) iprover} performs intuitionistic proof search,
+  \<^descr> @{method (HOL) iprover} performs intuitionistic proof search,
   depending on specifically declared rules from the context, or given
   as explicit arguments.  Chained facts are inserted into the goal
   before commencing proof search.
@@ -2241,8 +2135,6 @@
   single-step @{method (Pure) rule} method still observes these).  An
   explicit weight annotation may be given as well; otherwise the
   number of rule premises will be taken into account here.
-
-  \end{description}
 \<close>
 
 
@@ -2262,20 +2154,16 @@
       @{syntax thmrefs}?
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) meson} implements Loveland's model elimination
+  \<^descr> @{method (HOL) meson} implements Loveland's model elimination
   procedure @{cite "loveland-78"}.  See @{file
   "~~/src/HOL/ex/Meson_Test.thy"} for examples.
 
-  \item @{method (HOL) metis} combines ordered resolution and ordered
+  \<^descr> @{method (HOL) metis} combines ordered resolution and ordered
   paramodulation to find first-order (or mildly higher-order) proofs.
   The first optional argument specifies a type encoding; see the
   Sledgehammer manual @{cite "isabelle-sledgehammer"} for details.  The
   directory @{file "~~/src/HOL/Metis_Examples"} contains several small
   theories developed to a large extent using @{method (HOL) metis}.
-
-  \end{description}
 \<close>
 
 
@@ -2295,49 +2183,41 @@
     @@{attribute (HOL) algebra} (() | 'add' | 'del')
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) algebra} performs algebraic reasoning via
+  \<^descr> @{method (HOL) algebra} performs algebraic reasoning via
   Gr\"obner bases, see also @{cite "Chaieb-Wenzel:2007"} and
   @{cite \<open>\S3.2\<close> "Chaieb-thesis"}. The method handles deals with two main
   classes of problems:
 
-  \begin{enumerate}
-
-  \<^enum> Universal problems over multivariate polynomials in a
-  (semi)-ring/field/idom; the capabilities of the method are augmented
-  according to properties of these structures. For this problem class
-  the method is only complete for algebraically closed fields, since
-  the underlying method is based on Hilbert's Nullstellensatz, where
-  the equivalence only holds for algebraically closed fields.
-
-  The problems can contain equations @{text "p = 0"} or inequations
-  @{text "q \<noteq> 0"} anywhere within a universal problem statement.
-
-  \<^enum> All-exists problems of the following restricted (but useful)
-  form:
-
-  @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
-    e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
-    (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
-      p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
-      \<dots> \<and>
-      p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
-
-  Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
-  polynomials only in the variables mentioned as arguments.
-
-  \end{enumerate}
+    \<^enum> Universal problems over multivariate polynomials in a
+    (semi)-ring/field/idom; the capabilities of the method are augmented
+    according to properties of these structures. For this problem class
+    the method is only complete for algebraically closed fields, since
+    the underlying method is based on Hilbert's Nullstellensatz, where
+    the equivalence only holds for algebraically closed fields.
+
+    The problems can contain equations @{text "p = 0"} or inequations
+    @{text "q \<noteq> 0"} anywhere within a universal problem statement.
+
+    \<^enum> All-exists problems of the following restricted (but useful)
+    form:
+
+    @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
+      e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
+      (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
+        p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
+        \<dots> \<and>
+        p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
+
+    Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
+    polynomials only in the variables mentioned as arguments.
 
   The proof method is preceded by a simplification step, which may be
   modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
   This acts like declarations for the Simplifier
   (\secref{sec:simplifier}) on a private simpset for this tool.
 
-  \item @{attribute algebra} (as attribute) manages the default
+  \<^descr> @{attribute algebra} (as attribute) manages the default
   collection of pre-simplification rules of the above proof method.
-
-  \end{description}
 \<close>
 
 
@@ -2376,14 +2256,10 @@
     @@{method (HOL) coherent} @{syntax thmrefs}?
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) coherent} solves problems of \emph{Coherent
+  \<^descr> @{method (HOL) coherent} solves problems of \emph{Coherent
   Logic} @{cite "Bezem-Coquand:2005"}, which covers applications in
   confluence theory, lattice theory and projective geometry.  See
   @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
-
-  \end{description}
 \<close>
 
 
@@ -2413,9 +2289,7 @@
     rule: 'rule' ':' @{syntax thmref}
   \<close>}
 
-  \begin{description}
-
-  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
+  \<^descr> @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
   to reason about inductive types.  Rules are selected according to
   the declarations by the @{attribute cases} and @{attribute induct}
   attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
@@ -2429,7 +2303,7 @@
   statements, only the compact object-logic conclusion of the subgoal
   being addressed.
 
-  \item @{method (HOL) ind_cases} and @{command (HOL)
+  \<^descr> @{method (HOL) ind_cases} and @{command (HOL)
   "inductive_cases"} provide an interface to the internal @{ML_text
   mk_cases} operation.  Rules are simplified in an unrestricted
   forward manner.
@@ -2440,12 +2314,9 @@
   for later use.  The @{keyword "for"} argument of the @{method (HOL)
   ind_cases} method allows to specify a list of variables that should
   be generalized before applying the resulting rule.
-
-  \end{description}
 \<close>
 
 
-
 section \<open>Adhoc tuples\<close>
 
 text \<open>
@@ -2457,16 +2328,12 @@
     @@{attribute (HOL) split_format} ('(' 'complete' ')')?
   \<close>}
 
-  \begin{description}
-
-  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
+  \<^descr> @{attribute (HOL) split_format}\ @{text "(complete)"} causes
   arguments in function applications to be represented canonically
   according to their tuple type structure.
 
   Note that this operation tends to invent funny names for new local
   parameters introduced.
-
-  \end{description}
 \<close>
 
 
@@ -2597,9 +2464,7 @@
     modes: mode @'as' const
   \<close>}
 
-  \begin{description}
-
-  \item @{command (HOL) "export_code"} generates code for a given list of
+  \<^descr> @{command (HOL) "export_code"} generates code for a given list of
   constants in the specified target language(s). If no serialization
   instruction is given, only abstract code is generated internally.
 
@@ -2627,7 +2492,7 @@
   "deriving (Read, Show)"}'' clause to each appropriate datatype
   declaration.
 
-  \item @{attribute (HOL) code} declare code equations for code generation.
+  \<^descr> @{attribute (HOL) code} declare code equations for code generation.
   Variant @{text "code equation"} declares a conventional equation as code
   equation. Variants @{text "code abstype"} and @{text "code abstract"}
   declare abstract datatype certificates or code equations on abstract
@@ -2645,49 +2510,49 @@
   Usually packages introducing code equations provide a reasonable default
   setup for selection.
 
-  \item @{command (HOL) "code_datatype"} specifies a constructor set for a
+  \<^descr> @{command (HOL) "code_datatype"} specifies a constructor set for a
   logical type.
 
-  \item @{command (HOL) "print_codesetup"} gives an overview on selected
+  \<^descr> @{command (HOL) "print_codesetup"} gives an overview on selected
   code equations and code generator datatypes.
 
-  \item @{attribute (HOL) code_unfold} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_unfold} declares (or with option ``@{text
   "del"}'' removes) theorems which during preprocessing are applied as
   rewrite rules to any code equation or evaluation input.
 
-  \item @{attribute (HOL) code_post} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_post} declares (or with option ``@{text
   "del"}'' removes) theorems which are applied as rewrite rules to any
   result of an evaluation.
 
-  \item @{attribute (HOL) code_abbrev} declares (or with option ``@{text
+  \<^descr> @{attribute (HOL) code_abbrev} declares (or with option ``@{text
   "del"}'' removes) equations which are applied as rewrite rules to any
   result of an evaluation and symmetrically during preprocessing to any code
   equation or evaluation input.
 
-  \item @{command (HOL) "print_codeproc"} prints the setup of the code
+  \<^descr> @{command (HOL) "print_codeproc"} prints the setup of the code
   generator preprocessor.
 
-  \item @{command (HOL) "code_thms"} prints a list of theorems representing
+  \<^descr> @{command (HOL) "code_thms"} prints a list of theorems representing
   the corresponding program containing all given constants after
   preprocessing.
 
-  \item @{command (HOL) "code_deps"} visualizes dependencies of theorems
+  \<^descr> @{command (HOL) "code_deps"} visualizes dependencies of theorems
   representing the corresponding program containing all given constants
   after preprocessing.
 
-  \item @{command (HOL) "code_reserved"} declares a list of names as
+  \<^descr> @{command (HOL) "code_reserved"} declares a list of names as
   reserved for a given target, preventing it to be shadowed by any generated
   code.
 
-  \item @{command (HOL) "code_printing"} associates a series of symbols
+  \<^descr> @{command (HOL) "code_printing"} associates a series of symbols
   (constants, type constructors, classes, class relations, instances, module
   names) with target-specific serializations; omitting a serialization
   deletes an existing serialization.
 
-  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism to
+  \<^descr> @{command (HOL) "code_monad"} provides an auxiliary mechanism to
   generate monadic code for Haskell.
 
-  \item @{command (HOL) "code_identifier"} associates a a series of symbols
+  \<^descr> @{command (HOL) "code_identifier"} associates a a series of symbols
   (constants, type constructors, classes, class relations, instances, module
   names) with target-specific hints how these symbols shall be named. These
   hints gain precedence over names for symbols with no hints at all.
@@ -2696,7 +2561,7 @@
   identifiers in compound statements like type classes or datatypes are
   still the same.
 
-  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
+  \<^descr> @{command (HOL) "code_reflect"} without a ``@{text "file"}''
   argument compiles code into the system runtime environment and modifies
   the code generator setup that future invocations of system runtime code
   generation referring to one of the ``@{text "datatypes"}'' or ``@{text
@@ -2704,13 +2569,11 @@
   "file"}'' argument, the corresponding code is generated into that
   specified file without modifying the code generator setup.
 
-  \item @{command (HOL) "code_pred"} creates code equations for a predicate
+  \<^descr> @{command (HOL) "code_pred"} creates code equations for a predicate
   given a set of introduction rules. Optional mode annotations determine
   which arguments are supposed to be input or output. If alternative
   introduction rules are declared, one must prove a corresponding
   elimination rule.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -62,46 +62,43 @@
     @{syntax_def modes}: '(' (@{syntax name} + ) ')'
   \<close>}
 
-  \begin{description}
-
-  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
+  \<^descr> @{command "typ"}~@{text \<tau>} reads and prints a type expression
   according to the current context.
 
-  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
+  \<^descr> @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
   determine the most general way to make @{text "\<tau>"} conform to sort
   @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
   belongs to that sort.  Dummy type parameters ``@{text "_"}''
   (underscore) are assigned to fresh type variables with most general
   sorts, according the the principles of type-inference.
 
-  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
+  \<^descr> @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
   read, type-check and print terms or propositions according to the
   current theory or proof context; the inferred type of @{text t} is
   output as well.  Note that these commands are also useful in
   inspecting the current environment of term abbreviations.
 
-  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
+  \<^descr> @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
   theorems from the current theory or proof context.  Note that any
   attributes included in the theorem specifications are applied to a
   temporary context derived from the current theory or proof; the
   result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
   \<dots>, a\<^sub>n"} do not have any permanent effect.
 
-  \item @{command "prf"} displays the (compact) proof term of the
+  \<^descr> @{command "prf"} displays the (compact) proof term of the
   current proof state (if present), or of the given theorems. Note
   that this requires proof terms to be switched on for the current
   object logic (see the ``Proof terms'' section of the Isabelle
   reference manual for information on how to do this).
 
-  \item @{command "full_prf"} is like @{command "prf"}, but displays
+  \<^descr> @{command "full_prf"} is like @{command "prf"}, but displays
   the full proof term, i.e.\ also displays information omitted in the
   compact proof term, which is denoted by ``@{text _}'' placeholders
   there.
 
-  \item @{command "print_state"} prints the current proof state (if
+  \<^descr> @{command "print_state"} prints the current proof state (if
   present), including current facts and goals.
 
-  \end{description}
 
   All of the diagnostic commands above admit a list of @{text modes}
   to be specified, which is appended to the current print mode; see
@@ -144,16 +141,14 @@
   is displayed for types, terms, theorems, goals etc.  See also
   \secref{sec:config}.
 
-  \begin{description}
-
-  \item @{attribute show_markup} controls direct inlining of markup
+  \<^descr> @{attribute show_markup} controls direct inlining of markup
   into the printed representation of formal entities --- notably type
   and sort constraints.  This enables Prover IDE users to retrieve
   that information via tooltips or popups while hovering with the
   mouse over the output window, for example.  Consequently, this
   option is enabled by default for Isabelle/jEdit.
 
-  \item @{attribute show_types} and @{attribute show_sorts} control
+  \<^descr> @{attribute show_types} and @{attribute show_sorts} control
   printing of type constraints for term variables, and sort
   constraints for type variables.  By default, neither of these are
   shown in output.  If @{attribute show_sorts} is enabled, types are
@@ -165,29 +160,29 @@
   inference rule fails to resolve with some goal, or why a rewrite
   rule does not apply as expected.
 
-  \item @{attribute show_consts} controls printing of types of
+  \<^descr> @{attribute show_consts} controls printing of types of
   constants when displaying a goal state.
 
   Note that the output can be enormous, because polymorphic constants
   often occur at several different type instances.
 
-  \item @{attribute show_abbrevs} controls folding of constant
+  \<^descr> @{attribute show_abbrevs} controls folding of constant
   abbreviations.
 
-  \item @{attribute show_brackets} controls bracketing in pretty
+  \<^descr> @{attribute show_brackets} controls bracketing in pretty
   printed output.  If enabled, all sub-expressions of the pretty
   printing tree will be parenthesized, even if this produces malformed
   term syntax!  This crude way of showing the internal structure of
   pretty printed entities may occasionally help to diagnose problems
   with operator priorities, for example.
 
-  \item @{attribute names_long}, @{attribute names_short}, and
+  \<^descr> @{attribute names_long}, @{attribute names_short}, and
   @{attribute names_unique} control the way of printing fully
   qualified internal names in external form.  See also
   \secref{sec:antiq} for the document antiquotation options of the
   same names.
 
-  \item @{attribute eta_contract} controls @{text "\<eta>"}-contracted
+  \<^descr> @{attribute eta_contract} controls @{text "\<eta>"}-contracted
   printing of terms.
 
   The @{text \<eta>}-contraction law asserts @{prop "(\<lambda>x. f x) \<equiv> f"},
@@ -207,15 +202,15 @@
   rewriting operate modulo @{text "\<alpha>\<beta>\<eta>"}-conversion, some other tools
   might look at terms more discretely.
 
-  \item @{attribute goals_limit} controls the maximum number of
+  \<^descr> @{attribute goals_limit} controls the maximum number of
   subgoals to be printed.
 
-  \item @{attribute show_main_goal} controls whether the main result
+  \<^descr> @{attribute show_main_goal} controls whether the main result
   to be proven should be displayed.  This information might be
   relevant for schematic goals, to inspect the current claim that has
   been synthesized so far.
 
-  \item @{attribute show_hyps} controls printing of implicit
+  \<^descr> @{attribute show_hyps} controls printing of implicit
   hypotheses of local facts.  Normally, only those hypotheses are
   displayed that are \emph{not} covered by the assumptions of the
   current context: this situation indicates a fault in some tool being
@@ -225,7 +220,7 @@
   can be enforced, which is occasionally useful for diagnostic
   purposes.
 
-  \item @{attribute show_tags} controls printing of extra annotations
+  \<^descr> @{attribute show_tags} controls printing of extra annotations
   within theorems, such as internal position information, or the case
   names being attached by the attribute @{attribute case_names}.
 
@@ -233,13 +228,11 @@
   attributes provide low-level access to the collection of tags
   associated with a theorem.
 
-  \item @{attribute show_question_marks} controls printing of question
+  \<^descr> @{attribute show_question_marks} controls printing of question
   marks for schematic variables, such as @{text ?x}.  Only the leading
   question mark is affected, the remaining text is unchanged
   (including proper markup for schematic variables that might be
   relevant for user interfaces).
-
-  \end{description}
 \<close>
 
 
@@ -257,21 +250,18 @@
   modes as optional argument.  The underlying ML operations are as
   follows.
 
-  \begin{description}
-
-  \item @{ML "print_mode_value ()"} yields the list of currently
+  \<^descr> @{ML "print_mode_value ()"} yields the list of currently
   active print mode names.  This should be understood as symbolic
   representation of certain individual features for printing (with
   precedence from left to right).
 
-  \item @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
+  \<^descr> @{ML Print_Mode.with_modes}~@{text "modes f x"} evaluates
   @{text "f x"} in an execution context where the print mode is
   prepended by the given @{text "modes"}.  This provides a thread-safe
   way to augment print modes.  It is also monotonic in the set of mode
   names: it retains the default print mode that certain
   user-interfaces might have installed for their proper functioning!
 
-  \end{description}
 
   \<^medskip>
   The pretty printer for inner syntax maintains alternative
@@ -280,8 +270,6 @@
   Mode names can be arbitrary, but the following ones have a specific
   meaning by convention:
 
-  \begin{itemize}
-
   \<^item> @{verbatim \<open>""\<close>} (the empty string): default mode;
   implicitly active as last element in the list of modes.
 
@@ -302,8 +290,6 @@
   \<^item> @{verbatim latex}: additional mode that is active in {\LaTeX}
   document preparation of Isabelle theory sources; allows to provide
   alternative output notation.
-
-  \end{itemize}
 \<close>
 
 
@@ -377,9 +363,7 @@
   general template format is a sequence over any of the following
   entities.
 
-  \begin{description}
-
-  \item @{text "d"} is a delimiter, namely a non-empty sequence of
+  \<^descr> @{text "d"} is a delimiter, namely a non-empty sequence of
   characters other than the following special characters:
 
   \<^medskip>
@@ -393,7 +377,7 @@
   \end{tabular}
   \<^medskip>
 
-  \item @{verbatim "'"} escapes the special meaning of these
+  \<^descr> @{verbatim "'"} escapes the special meaning of these
   meta-characters, producing a literal version of the following
   character, unless that is a blank.
 
@@ -401,30 +385,29 @@
   affecting printing, but input tokens may have additional white space
   here.
 
-  \item @{verbatim "_"} is an argument position, which stands for a
+  \<^descr> @{verbatim "_"} is an argument position, which stands for a
   certain syntactic category in the underlying grammar.
 
-  \item @{text "\<index>"} is an indexed argument position; this is the place
+  \<^descr> @{text "\<index>"} is an indexed argument position; this is the place
   where implicit structure arguments can be attached.
 
-  \item @{text "s"} is a non-empty sequence of spaces for printing.
+  \<^descr> @{text "s"} is a non-empty sequence of spaces for printing.
   This and the following specifications do not affect parsing at all.
 
-  \item @{verbatim "("}@{text n} opens a pretty printing block.  The
+  \<^descr> @{verbatim "("}@{text n} opens a pretty printing block.  The
   optional number specifies how much indentation to add when a line
   break occurs within the block.  If the parenthesis is not followed
   by digits, the indentation defaults to 0.  A block specified via
   @{verbatim "(00"} is unbreakable.
 
-  \item @{verbatim ")"} closes a pretty printing block.
+  \<^descr> @{verbatim ")"} closes a pretty printing block.
 
-  \item @{verbatim "//"} forces a line break.
+  \<^descr> @{verbatim "//"} forces a line break.
 
-  \item @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
+  \<^descr> @{verbatim "/"}@{text s} allows a line break.  Here @{text s}
   stands for the string of spaces (zero or more) right after the
   slash.  These spaces are printed if the break is \emph{not} taken.
 
-  \end{description}
 
   The general idea of pretty printing with blocks and breaks is also
   described in @{cite "paulson-ml2"}; it goes back to @{cite "Oppen:1980"}.
@@ -532,28 +515,24 @@
     @@{command write} @{syntax mode}? (@{syntax nameref} @{syntax mixfix} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "type_notation"}~@{text "c (mx)"} associates mixfix
+  \<^descr> @{command "type_notation"}~@{text "c (mx)"} associates mixfix
   syntax with an existing type constructor.  The arity of the
   constructor is retrieved from the context.
 
-  \item @{command "no_type_notation"} is similar to @{command
+  \<^descr> @{command "no_type_notation"} is similar to @{command
   "type_notation"}, but removes the specified syntax annotation from
   the present context.
 
-  \item @{command "notation"}~@{text "c (mx)"} associates mixfix
+  \<^descr> @{command "notation"}~@{text "c (mx)"} associates mixfix
   syntax with an existing constant or fixed variable.  The type
   declaration of the given entity is retrieved from the context.
 
-  \item @{command "no_notation"} is similar to @{command "notation"},
+  \<^descr> @{command "no_notation"} is similar to @{command "notation"},
   but removes the specified syntax annotation from the present
   context.
 
-  \item @{command "write"} is similar to @{command "notation"}, but
+  \<^descr> @{command "write"} is similar to @{command "notation"}, but
   works within an Isar proof body.
-
-  \end{description}
 \<close>
 
 
@@ -565,15 +544,12 @@
   (\secref{sec:outer-lex}), but some details are different.  There are
   two main categories of inner syntax tokens:
 
-  \begin{enumerate}
-
   \<^enum> \emph{delimiters} --- the literal tokens occurring in
   productions of the given priority grammar (cf.\
   \secref{sec:priority-grammar});
 
   \<^enum> \emph{named tokens} --- various categories of identifiers etc.
 
-  \end{enumerate}
 
   Delimiters override named tokens and may thus render certain
   identifiers inaccessible.  Sometimes the logical context admits
@@ -659,7 +635,6 @@
 
   \<^medskip>
   For clarity, grammars obey these conventions:
-  \begin{itemize}
 
   \<^item> All priorities must lie between 0 and 1000.
 
@@ -675,7 +650,6 @@
   \<^item> Repetition is indicated by dots @{text "(\<dots>)"} in an informal
   but obvious way.
 
-  \end{itemize}
 
   Using these conventions, the example grammar specification above
   takes the form:
@@ -771,17 +745,15 @@
   inner syntax.  The meaning of the nonterminals defined by the above
   grammar is as follows:
 
-  \begin{description}
+  \<^descr> @{syntax_ref (inner) any} denotes any term.
 
-  \item @{syntax_ref (inner) any} denotes any term.
-
-  \item @{syntax_ref (inner) prop} denotes meta-level propositions,
+  \<^descr> @{syntax_ref (inner) prop} denotes meta-level propositions,
   which are terms of type @{typ prop}.  The syntax of such formulae of
   the meta-logic is carefully distinguished from usual conventions for
   object-logics.  In particular, plain @{text "\<lambda>"}-term notation is
   \emph{not} recognized as @{syntax (inner) prop}.
 
-  \item @{syntax_ref (inner) aprop} denotes atomic propositions, which
+  \<^descr> @{syntax_ref (inner) aprop} denotes atomic propositions, which
   are embedded into regular @{syntax (inner) prop} by means of an
   explicit @{verbatim PROP} token.
 
@@ -791,7 +763,7 @@
   the printed version will appear like @{syntax (inner) logic} and
   cannot be parsed again as @{syntax (inner) prop}.
 
-  \item @{syntax_ref (inner) logic} denotes arbitrary terms of a
+  \<^descr> @{syntax_ref (inner) logic} denotes arbitrary terms of a
   logical type, excluding type @{typ prop}.  This is the main
   syntactic category of object-logic entities, covering plain @{text
   \<lambda>}-term notation (variables, abstraction, application), plus
@@ -801,35 +773,32 @@
   (excluding @{typ prop}) are \emph{collapsed} to this single category
   of @{syntax (inner) logic}.
 
-  \item @{syntax_ref (inner) index} denotes an optional index term for
+  \<^descr> @{syntax_ref (inner) index} denotes an optional index term for
   indexed syntax.  If omitted, it refers to the first @{keyword_ref
   "structure"} variable in the context.  The special dummy ``@{text
   "\<index>"}'' serves as pattern variable in mixfix annotations that
   introduce indexed notation.
 
-  \item @{syntax_ref (inner) idt} denotes identifiers, possibly
+  \<^descr> @{syntax_ref (inner) idt} denotes identifiers, possibly
   constrained by types.
 
-  \item @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
+  \<^descr> @{syntax_ref (inner) idts} denotes a sequence of @{syntax_ref
   (inner) idt}.  This is the most basic category for variables in
   iterated binders, such as @{text "\<lambda>"} or @{text "\<And>"}.
 
-  \item @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
+  \<^descr> @{syntax_ref (inner) pttrn} and @{syntax_ref (inner) pttrns}
   denote patterns for abstraction, cases bindings etc.  In Pure, these
   categories start as a merely copy of @{syntax (inner) idt} and
   @{syntax (inner) idts}, respectively.  Object-logics may add
   additional productions for binding forms.
 
-  \item @{syntax_ref (inner) type} denotes types of the meta-logic.
+  \<^descr> @{syntax_ref (inner) type} denotes types of the meta-logic.
 
-  \item @{syntax_ref (inner) sort} denotes meta-level sorts.
+  \<^descr> @{syntax_ref (inner) sort} denotes meta-level sorts.
 
-  \end{description}
 
   Here are some further explanations of certain syntax features.
 
-  \begin{itemize}
-
   \<^item> In @{syntax (inner) idts}, note that @{text "x :: nat y"} is
   parsed as @{text "x :: (nat y)"}, treating @{text y} like a type
   constructor applied to @{text nat}.  To avoid this interpretation,
@@ -849,46 +818,40 @@
   \<^item> Dummy variables (written as underscore) may occur in different
   roles.
 
-  \begin{description}
+    \<^descr> A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
+    anonymous inference parameter, which is filled-in according to the
+    most general type produced by the type-checking phase.
 
-  \item A type ``@{text "_"}'' or ``@{text "_ :: sort"}'' acts like an
-  anonymous inference parameter, which is filled-in according to the
-  most general type produced by the type-checking phase.
-
-  \item A bound ``@{text "_"}'' refers to a vacuous abstraction, where
-  the body does not refer to the binding introduced here.  As in the
-  term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
-  "\<lambda>x y. x"}.
+    \<^descr> A bound ``@{text "_"}'' refers to a vacuous abstraction, where
+    the body does not refer to the binding introduced here.  As in the
+    term @{term "\<lambda>x _. x"}, which is @{text "\<alpha>"}-equivalent to @{text
+    "\<lambda>x y. x"}.
 
-  \item A free ``@{text "_"}'' refers to an implicit outer binding.
-  Higher definitional packages usually allow forms like @{text "f x _
-  = x"}.
+    \<^descr> A free ``@{text "_"}'' refers to an implicit outer binding.
+    Higher definitional packages usually allow forms like @{text "f x _
+    = x"}.
 
-  \item A schematic ``@{text "_"}'' (within a term pattern, see
-  \secref{sec:term-decls}) refers to an anonymous variable that is
-  implicitly abstracted over its context of locally bound variables.
-  For example, this allows pattern matching of @{text "{x. f x = g
-  x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
-  using both bound and schematic dummies.
+    \<^descr> A schematic ``@{text "_"}'' (within a term pattern, see
+    \secref{sec:term-decls}) refers to an anonymous variable that is
+    implicitly abstracted over its context of locally bound variables.
+    For example, this allows pattern matching of @{text "{x. f x = g
+    x}"} against @{text "{x. _ = _}"}, or even @{text "{_. _ = _}"} by
+    using both bound and schematic dummies.
 
-  \end{description}
-
-  \item The three literal dots ``@{verbatim "..."}'' may be also
+  \<^descr> The three literal dots ``@{verbatim "..."}'' may be also
   written as ellipsis symbol @{verbatim "\<dots>"}.  In both cases this
   refers to a special schematic variable, which is bound in the
   context.  This special term abbreviation works nicely with
   calculational reasoning (\secref{sec:calculation}).
 
-  \item @{verbatim CONST} ensures that the given identifier is treated
+  \<^descr> @{verbatim CONST} ensures that the given identifier is treated
   as constant term, and passed through the parse tree in fully
   internalized form.  This is particularly relevant for translation
   rules (\secref{sec:syn-trans}), notably on the RHS.
 
-  \item @{verbatim XCONST} is similar to @{verbatim CONST}, but
+  \<^descr> @{verbatim XCONST} is similar to @{verbatim CONST}, but
   retains the constant name as given.  This is only relevant to
   translation rules (\secref{sec:syn-trans}), notably on the LHS.
-
-  \end{itemize}
 \<close>
 
 
@@ -899,56 +862,48 @@
     @{command_def "print_syntax"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
   \end{matharray}
 
-  \begin{description}
-
-  \item @{command "print_syntax"} prints the inner syntax of the
+  \<^descr> @{command "print_syntax"} prints the inner syntax of the
   current context.  The output can be quite large; the most important
   sections are explained below.
 
-  \begin{description}
+    \<^descr> @{text "lexicon"} lists the delimiters of the inner token
+    language; see \secref{sec:inner-lex}.
 
-  \item @{text "lexicon"} lists the delimiters of the inner token
-  language; see \secref{sec:inner-lex}.
-
-  \item @{text "prods"} lists the productions of the underlying
-  priority grammar; see \secref{sec:priority-grammar}.
+    \<^descr> @{text "prods"} lists the productions of the underlying
+    priority grammar; see \secref{sec:priority-grammar}.
 
-  The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
-  "A[p]"}; delimiters are quoted.  Many productions have an extra
-  @{text "\<dots> => name"}.  These names later become the heads of parse
-  trees; they also guide the pretty printer.
+    The nonterminal @{text "A\<^sup>(\<^sup>p\<^sup>)"} is rendered in plain text as @{text
+    "A[p]"}; delimiters are quoted.  Many productions have an extra
+    @{text "\<dots> => name"}.  These names later become the heads of parse
+    trees; they also guide the pretty printer.
 
-  Productions without such parse tree names are called \emph{copy
-  productions}.  Their right-hand side must have exactly one
-  nonterminal symbol (or named token).  The parser does not create a
-  new parse tree node for copy productions, but simply returns the
-  parse tree of the right-hand symbol.
+    Productions without such parse tree names are called \emph{copy
+    productions}.  Their right-hand side must have exactly one
+    nonterminal symbol (or named token).  The parser does not create a
+    new parse tree node for copy productions, but simply returns the
+    parse tree of the right-hand symbol.
 
-  If the right-hand side of a copy production consists of a single
-  nonterminal without any delimiters, then it is called a \emph{chain
-  production}.  Chain productions act as abbreviations: conceptually,
-  they are removed from the grammar by adding new productions.
-  Priority information attached to chain productions is ignored; only
-  the dummy value @{text "-1"} is displayed.
+    If the right-hand side of a copy production consists of a single
+    nonterminal without any delimiters, then it is called a \emph{chain
+    production}.  Chain productions act as abbreviations: conceptually,
+    they are removed from the grammar by adding new productions.
+    Priority information attached to chain productions is ignored; only
+    the dummy value @{text "-1"} is displayed.
 
-  \item @{text "print modes"} lists the alternative print modes
-  provided by this grammar; see \secref{sec:print-modes}.
-
-  \item @{text "parse_rules"} and @{text "print_rules"} relate to
-  syntax translations (macros); see \secref{sec:syn-trans}.
+    \<^descr> @{text "print modes"} lists the alternative print modes
+    provided by this grammar; see \secref{sec:print-modes}.
 
-  \item @{text "parse_ast_translation"} and @{text
-  "print_ast_translation"} list sets of constants that invoke
-  translation functions for abstract syntax trees, which are only
-  required in very special situations; see \secref{sec:tr-funs}.
+    \<^descr> @{text "parse_rules"} and @{text "print_rules"} relate to
+    syntax translations (macros); see \secref{sec:syn-trans}.
 
-  \item @{text "parse_translation"} and @{text "print_translation"}
-  list the sets of constants that invoke regular translation
-  functions; see \secref{sec:tr-funs}.
+    \<^descr> @{text "parse_ast_translation"} and @{text
+    "print_ast_translation"} list sets of constants that invoke
+    translation functions for abstract syntax trees, which are only
+    required in very special situations; see \secref{sec:tr-funs}.
 
-  \end{description}
-
-  \end{description}
+    \<^descr> @{text "parse_translation"} and @{text "print_translation"}
+    list the sets of constants that invoke regular translation
+    functions; see \secref{sec:tr-funs}.
 \<close>
 
 
@@ -974,16 +929,12 @@
   situation and the given configuration options.  Parsing ultimately
   fails, if multiple results remain after the filtering phase.
 
-  \begin{description}
-
-  \item @{attribute syntax_ambiguity_warning} controls output of
+  \<^descr> @{attribute syntax_ambiguity_warning} controls output of
   explicit warning messages about syntax ambiguity.
 
-  \item @{attribute syntax_ambiguity_limit} determines the number of
+  \<^descr> @{attribute syntax_ambiguity_limit} determines the number of
   resulting parse trees that are shown as part of the printed message
   in case of an ambiguity.
-
-  \end{description}
 \<close>
 
 
@@ -1133,8 +1084,6 @@
   bound variables is excluded as well.  Authentic syntax names work
   implicitly in the following situations:
 
-  \begin{itemize}
-
   \<^item> Input of term constants (or fixed variables) that are
   introduced by concrete syntax via @{command notation}: the
   correspondence of a particular grammar production to some known term
@@ -1148,7 +1097,6 @@
   this information is already available from the internal term to be
   printed.
 
-  \end{itemize}
 
   In other words, syntax transformations that operate on input terms
   written as prefix applications are difficult to make robust.
@@ -1195,13 +1143,11 @@
     transpat: ('(' @{syntax nameref} ')')? @{syntax string}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "nonterminal"}~@{text c} declares a type
+  \<^descr> @{command "nonterminal"}~@{text c} declares a type
   constructor @{text c} (without arguments) to act as purely syntactic
   type: a nonterminal symbol of the inner syntax.
 
-  \item @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
+  \<^descr> @{command "syntax"}~@{text "(mode) c :: \<sigma> (mx)"} augments the
   priority grammar and the pretty printer table for the given print
   mode (default @{verbatim \<open>""\<close>}). An optional keyword @{keyword_ref
   "output"} means that only the pretty printer table is affected.
@@ -1213,19 +1159,16 @@
   (@{verbatim "_"}).  The latter correspond to nonterminal symbols
   @{text "A\<^sub>i"} derived from the argument types @{text "\<tau>\<^sub>i"} as
   follows:
-  \begin{itemize}
 
-  \<^item> @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
-
-  \<^item> @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
-  constructor @{text "\<kappa> \<noteq> prop"}
+    \<^item> @{text "prop"} if @{text "\<tau>\<^sub>i = prop"}
 
-  \<^item> @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
+    \<^item> @{text "logic"} if @{text "\<tau>\<^sub>i = (\<dots>)\<kappa>"} for logical type
+    constructor @{text "\<kappa> \<noteq> prop"}
 
-  \<^item> @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
-  (syntactic type constructor)
+    \<^item> @{text any} if @{text "\<tau>\<^sub>i = \<alpha>"} for type variables
 
-  \end{itemize}
+    \<^item> @{text "\<kappa>"} if @{text "\<tau>\<^sub>i = \<kappa>"} for nonterminal @{text "\<kappa>"}
+    (syntactic type constructor)
 
   Each @{text "A\<^sub>i"} is decorated by priority @{text "p\<^sub>i"} from the
   given list @{text "ps"}; missing priorities default to 0.
@@ -1251,11 +1194,11 @@
   resulting parse tree @{text "t"} is copied directly, without any
   further decoration.
 
-  \item @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
+  \<^descr> @{command "no_syntax"}~@{text "(mode) decls"} removes grammar
   declarations (and translations) resulting from @{text decls}, which
   are interpreted in the same manner as for @{command "syntax"} above.
 
-  \item @{command "translations"}~@{text rules} specifies syntactic
+  \<^descr> @{command "translations"}~@{text rules} specifies syntactic
   translation rules (i.e.\ macros) as first-order rewrite rules on
   ASTs (\secref{sec:ast}).  The theory context maintains two
   independent lists translation rules: parse rules (@{verbatim "=>"}
@@ -1289,29 +1232,24 @@
   AST rewrite rules @{text "(lhs, rhs)"} need to obey the following
   side-conditions:
 
-  \begin{itemize}
+    \<^item> Rules must be left linear: @{text "lhs"} must not contain
+    repeated variables.\footnote{The deeper reason for this is that AST
+    equality is not well-defined: different occurrences of the ``same''
+    AST could be decorated differently by accidental type-constraints or
+    source position information, for example.}
 
-  \<^item> Rules must be left linear: @{text "lhs"} must not contain
-  repeated variables.\footnote{The deeper reason for this is that AST
-  equality is not well-defined: different occurrences of the ``same''
-  AST could be decorated differently by accidental type-constraints or
-  source position information, for example.}
+    \<^item> Every variable in @{text "rhs"} must also occur in @{text
+    "lhs"}.
 
-  \<^item> Every variable in @{text "rhs"} must also occur in @{text
-  "lhs"}.
-
-  \end{itemize}
-
-  \item @{command "no_translations"}~@{text rules} removes syntactic
+  \<^descr> @{command "no_translations"}~@{text rules} removes syntactic
   translation rules, which are interpreted in the same manner as for
   @{command "translations"} above.
 
-  \item @{attribute syntax_ast_trace} and @{attribute
+  \<^descr> @{attribute syntax_ast_trace} and @{attribute
   syntax_ast_stats} control diagnostic output in the AST normalization
   process, when translation rules are applied to concrete input or
   output.
 
-  \end{description}
 
   Raw syntax and translations provides a slightly more low-level
   access to the grammar and the form of resulting parse trees.  It is
@@ -1320,8 +1258,6 @@
   Some important situations where @{command syntax} and @{command
   translations} are really need are as follows:
 
-  \begin{itemize}
-
   \<^item> Iterated replacement via recursive @{command translations}.
   For example, consider list enumeration @{term "[a, b, c, d]"} as
   defined in theory @{theory List} in Isabelle/HOL.
@@ -1331,9 +1267,8 @@
   syntax translations.  For example, consider list filter
   comprehension @{term "[x \<leftarrow> xs . P]"} as defined in theory @{theory
   List} in Isabelle/HOL.
+\<close>
 
-  \end{itemize}
-\<close>
 
 subsubsection \<open>Applying translation rules\<close>
 
@@ -1356,8 +1291,6 @@
   More precisely, the matching of the object @{text "u"} against the
   pattern @{text "lhs"} is performed as follows:
 
-  \begin{itemize}
-
   \<^item> Objects of the form @{ML Ast.Variable}~@{text "x"} or @{ML
   Ast.Constant}~@{text "x"} are matched by pattern @{ML
   Ast.Constant}~@{text "x"}.  Thus all atomic ASTs in the object are
@@ -1374,7 +1307,6 @@
 
   \<^item> In every other case, matching fails.
 
-  \end{itemize}
 
   A successful match yields a substitution that is applied to @{text
   "rhs"}, generating the instance that replaces @{text "u"}.
@@ -1439,9 +1371,7 @@
    @@{ML_antiquotation syntax_const}) name
   \<close>}
 
-  \begin{description}
-
-  \item @{command parse_translation} etc. declare syntax translation
+  \<^descr> @{command parse_translation} etc. declare syntax translation
   functions to the theory.  Any of these commands have a single
   @{syntax text} argument that refers to an ML expression of
   appropriate type as follows:
@@ -1473,21 +1403,19 @@
   associated with the translation functions of a theory under @{text
   "parse_ast_translation"} etc.
 
-  \item @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
+  \<^descr> @{text "@{class_syntax c}"}, @{text "@{type_syntax c}"},
   @{text "@{const_syntax c}"} inline the authentic syntax name of the
   given formal entities into the ML source.  This is the
   fully-qualified logical name prefixed by a special marker to
   indicate its kind: thus different logical name spaces are properly
   distinguished within parse trees.
 
-  \item @{text "@{const_syntax c}"} inlines the name @{text "c"} of
+  \<^descr> @{text "@{const_syntax c}"} inlines the name @{text "c"} of
   the given syntax constant, having checked that it has been declared
   via some @{command syntax} commands within the theory context.  Note
   that the usual naming convention makes syntax constants start with
   underscore, to reduce the chance of accidental clashes with other
   names occurring in parse trees (unqualified constants etc.).
-
-  \end{description}
 \<close>
 
 
@@ -1515,14 +1443,12 @@
   functions called during the parsing process differ from those for
   printing in their overall behaviour:
 
-  \begin{description}
-
-  \item [Parse translations] are applied bottom-up.  The arguments are
+  \<^descr>[Parse translations] are applied bottom-up.  The arguments are
   already in translated form.  The translations must not fail;
   exceptions trigger an error message.  There may be at most one
   function associated with any syntactic name.
 
-  \item [Print translations] are applied top-down.  They are supplied
+  \<^descr>[Print translations] are applied top-down.  They are supplied
   with arguments that are partly still in internal form.  The result
   again undergoes translation; therefore a print translation should
   not introduce as head the very constant that invoked it.  The
@@ -1531,7 +1457,6 @@
   some syntactic name are tried in the order of declaration in the
   theory.
 
-  \end{description}
 
   Only constant atoms --- constructor @{ML Ast.Constant} for ASTs and
   @{ML Const} for terms --- can invoke translation functions.  This
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -47,15 +47,11 @@
     @@{command help} (@{syntax name} * )
   \<close>}
 
-  \begin{description}
-
-  \item @{command "print_commands"} prints all outer syntax keywords
+  \<^descr> @{command "print_commands"} prints all outer syntax keywords
   and commands.
 
-  \item @{command "help"}~@{text "pats"} retrieves outer syntax
+  \<^descr> @{command "help"}~@{text "pats"} retrieves outer syntax
   commands according to the specified name patterns.
-
-  \end{description}
 \<close>
 
 
@@ -73,8 +69,6 @@
 text \<open>The outer lexical syntax consists of three main categories of
   syntax tokens:
 
-  \begin{enumerate}
-
   \<^enum> \emph{major keywords} --- the command names that are available
   in the present logic session;
 
@@ -83,7 +77,6 @@
 
   \<^enum> \emph{named tokens} --- various categories of identifiers etc.
 
-  \end{enumerate}
 
   Major keywords and minor keywords are guaranteed to be disjoint.
   This helps user-interfaces to determine the overall structure of a
@@ -412,7 +405,6 @@
   result.
 
   There are three forms of theorem references:
-  \begin{enumerate}
 
   \<^enum> named facts @{text "a"},
 
@@ -422,7 +414,6 @@
   @{verbatim "`"}@{text "\<phi>"}@{verbatim "`"} or @{syntax_ref cartouche}
   @{text "\<open>\<phi>\<close>"} (see also method @{method_ref fact}).
 
-  \end{enumerate}
 
   Any kind of theorem specification may include lists of attributes
   both on the left and right hand sides; attributes are applied to any
@@ -503,37 +494,35 @@
   Note that there are some further ones available, such as for the set
   of rules declared for simplifications.
 
-  \begin{description}
-
-  \item @{command "print_theory"} prints the main logical content of the
+  \<^descr> @{command "print_theory"} prints the main logical content of the
   background theory; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{command "print_definitions"} prints dependencies of definitional
+  \<^descr> @{command "print_definitions"} prints dependencies of definitional
   specifications within the background theory, which may be constants
   \secref{sec:consts} or types (\secref{sec:types-pure},
   \secref{sec:hol-typedef}); the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_methods"} prints all proof methods available in the
+  \<^descr> @{command "print_methods"} prints all proof methods available in the
   current theory context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_attributes"} prints all attributes available in the
+  \<^descr> @{command "print_attributes"} prints all attributes available in the
   current theory context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command "print_theorems"} prints theorems of the background theory
+  \<^descr> @{command "print_theorems"} prints theorems of the background theory
   resulting from the last command; the ``@{text "!"}'' option indicates
   extra verbosity.
 
-  \item @{command "print_facts"} prints all local facts of the current
+  \<^descr> @{command "print_facts"} prints all local facts of the current
   context, both named and unnamed ones; the ``@{text "!"}'' option indicates
   extra verbosity.
 
-  \item @{command "print_term_bindings"} prints all term bindings that
+  \<^descr> @{command "print_term_bindings"} prints all term bindings that
   are present in the context.
 
-  \item @{command "find_theorems"}~@{text criteria} retrieves facts
+  \<^descr> @{command "find_theorems"}~@{text criteria} retrieves facts
   from the theory or proof context matching all of given search
   criteria.  The criterion @{text "name: p"} selects all theorems
   whose fully qualified name matches pattern @{text p}, which may
@@ -555,7 +544,7 @@
   default, duplicates are removed from the search result. Use
   @{text with_dups} to display duplicates.
 
-  \item @{command "find_consts"}~@{text criteria} prints all constants
+  \<^descr> @{command "find_consts"}~@{text criteria} prints all constants
   whose type meets all of the given criteria. The criterion @{text
   "strict: ty"} is met by any type that matches the type pattern
   @{text ty}.  Patterns may contain both the dummy type ``@{text _}''
@@ -564,19 +553,17 @@
   the prefix ``@{text "-"}'' function as described for @{command
   "find_theorems"}.
 
-  \item @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
+  \<^descr> @{command "thm_deps"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}
   visualizes dependencies of facts, using Isabelle's graph browser
   tool (see also @{cite "isabelle-system"}).
 
-  \item @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
+  \<^descr> @{command "unused_thms"}~@{text "A\<^sub>1 \<dots> A\<^sub>m - B\<^sub>1 \<dots> B\<^sub>n"}
   displays all theorems that are proved in theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}
   or their parents but not in @{text "A\<^sub>1 \<dots> A\<^sub>m"} or their parents and
   that are never used.
   If @{text n} is @{text 0}, the end of the range of theories
   defaults to the current theory. If no range is specified,
   only the unused theorems in the current theory are displayed.
-
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Isar_Ref/Proof.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Proof.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -11,23 +11,20 @@
   facts, and open goals.  Isar/VM transitions are typed according to
   the following three different modes of operation:
 
-  \begin{description}
-
-  \item @{text "proof(prove)"} means that a new goal has just been
+  \<^descr> @{text "proof(prove)"} means that a new goal has just been
   stated that is now to be \emph{proven}; the next command may refine
   it by some proof method, and enter a sub-proof to establish the
   actual result.
 
-  \item @{text "proof(state)"} is like a nested theory mode: the
+  \<^descr> @{text "proof(state)"} is like a nested theory mode: the
   context may be augmented by \emph{stating} additional assumptions,
   intermediate results etc.
 
-  \item @{text "proof(chain)"} is intermediate between @{text
+  \<^descr> @{text "proof(chain)"} is intermediate between @{text
   "proof(state)"} and @{text "proof(prove)"}: existing facts (i.e.\ the
   contents of the special @{fact_ref this} register) have been just picked
   up in order to be used when refining the goal claimed next.
 
-  \end{description}
 
   The proof mode indicator may be understood as an instruction to the
   writer, telling what kind of operation may be performed next.  The
@@ -58,13 +55,9 @@
     @@{command end}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "notepad"}~@{keyword "begin"} opens a proof state without
+  \<^descr> @{command "notepad"}~@{keyword "begin"} opens a proof state without
   any goal statement. This allows to experiment with Isar, without producing
   any persistent result. The notepad is closed by @{command "end"}.
-
-  \end{description}
 \<close>
 
 
@@ -91,12 +84,10 @@
   parentheses as well.  These typically achieve a stronger forward
   style of reasoning.
 
-  \begin{description}
-
-  \item @{command "next"} switches to a fresh block within a
+  \<^descr> @{command "next"} switches to a fresh block within a
   sub-proof, resetting the local context to the initial one.
 
-  \item @{command "{"} and @{command "}"} explicitly open and close
+  \<^descr> @{command "{"} and @{command "}"} explicitly open and close
   blocks.  Any current facts pass through ``@{command "{"}''
   unchanged, while ``@{command "}"}'' causes any result to be
   \emph{exported} into the enclosing context.  Thus fixed variables
@@ -105,8 +96,6 @@
   of @{command "assume"} and @{command "presume"} in this mode of
   forward reasoning --- in contrast to plain backward reasoning with
   the result exported at @{command "show"} time.
-
-  \end{description}
 \<close>
 
 
@@ -190,12 +179,10 @@
       @{syntax name} ('==' | '\<equiv>') @{syntax term} @{syntax term_pat}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "fix"}~@{text x} introduces a local variable @{text
+  \<^descr> @{command "fix"}~@{text x} introduces a local variable @{text
   x} that is \emph{arbitrary, but fixed.}
 
-  \item @{command "assume"}~@{text "a: \<phi>"} and @{command
+  \<^descr> @{command "assume"}~@{text "a: \<phi>"} and @{command
   "presume"}~@{text "a: \<phi>"} introduce a local fact @{text "\<phi> \<turnstile> \<phi>"} by
   assumption.  Subsequent results applied to an enclosing goal (e.g.\
   by @{command_ref "show"}) are handled as follows: @{command
@@ -206,7 +193,7 @@
   @{keyword_ref "and"}; the resulting list of current facts consists
   of all of these concatenated.
 
-  \item @{command "def"}~@{text "x \<equiv> t"} introduces a local
+  \<^descr> @{command "def"}~@{text "x \<equiv> t"} introduces a local
   (non-polymorphic) definition.  In results exported from the context,
   @{text x} is replaced by @{text t}.  Basically, ``@{command
   "def"}~@{text "x \<equiv> t"}'' abbreviates ``@{command "fix"}~@{text
@@ -215,8 +202,6 @@
 
   The default name for the definitional equation is @{text x_def}.
   Several simultaneous definitions may be given at the same time.
-
-  \end{description}
 \<close>
 
 
@@ -262,18 +247,15 @@
   The syntax of @{keyword "is"} patterns follows @{syntax term_pat} or
   @{syntax prop_pat} (see \secref{sec:term-decls}).
 
-  \begin{description}
-
-  \item @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
+  \<^descr> @{command "let"}~@{text "p\<^sub>1 = t\<^sub>1 \<AND> \<dots> p\<^sub>n = t\<^sub>n"} binds any
   text variables in patterns @{text "p\<^sub>1, \<dots>, p\<^sub>n"} by simultaneous
   higher-order matching against terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"}.
 
-  \item @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
+  \<^descr> @{text "(\<IS> p\<^sub>1 \<dots> p\<^sub>n)"} resembles @{command "let"}, but
   matches @{text "p\<^sub>1, \<dots>, p\<^sub>n"} against the preceding statement.  Also
   note that @{keyword "is"} is not a separate command, but part of
   others (such as @{command "assume"}, @{command "have"} etc.).
 
-  \end{description}
 
   Some \emph{implicit} term abbreviations\index{term abbreviations}
   for goals and facts are available as well.  For any open goal,
@@ -318,14 +300,12 @@
       (@{syntax thmrefs} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
+  \<^descr> @{command "note"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"} recalls existing facts
   @{text "b\<^sub>1, \<dots>, b\<^sub>n"}, binding the result as @{text a}.  Note that
   attributes may be involved as well, both on the left and right hand
   sides.
 
-  \item @{command "then"} indicates forward chaining by the current
+  \<^descr> @{command "then"} indicates forward chaining by the current
   facts in order to establish the goal to be claimed next.  The
   initial proof method invoked to refine that will be offered the
   facts to do ``anything appropriate'' (see also
@@ -335,23 +315,22 @@
   facts into the goal state before operation.  This provides a simple
   scheme to control relevance of facts in automated proof search.
 
-  \item @{command "from"}~@{text b} abbreviates ``@{command
+  \<^descr> @{command "from"}~@{text b} abbreviates ``@{command
   "note"}~@{text b}~@{command "then"}''; thus @{command "then"} is
   equivalent to ``@{command "from"}~@{text this}''.
 
-  \item @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
+  \<^descr> @{command "with"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} abbreviates ``@{command
   "from"}~@{text "b\<^sub>1 \<dots> b\<^sub>n \<AND> this"}''; thus the forward chaining
   is from earlier facts together with the current ones.
 
-  \item @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
+  \<^descr> @{command "using"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} augments the facts being
   currently indicated for use by a subsequent refinement step (such as
   @{command_ref "apply"} or @{command_ref "proof"}).
 
-  \item @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
+  \<^descr> @{command "unfolding"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is structurally
   similar to @{command "using"}, but unfolds definitional equations
   @{text "b\<^sub>1, \<dots> b\<^sub>n"} throughout the goal state and facts.
 
-  \end{description}
 
   Forward chaining with an empty list of theorems is the same as not
   chaining at all.  Thus ``@{command "from"}~@{text nothing}'' has no
@@ -443,9 +422,7 @@
       (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
+  \<^descr> @{command "lemma"}~@{text "a: \<phi>"} enters proof mode with
   @{text \<phi>} as main goal, eventually resulting in some fact @{text "\<turnstile>
   \<phi>"} to be put back into the target context.  An additional @{syntax
   context} specification may build up an initial proof context for the
@@ -453,11 +430,11 @@
   well, see also @{syntax "includes"} in \secref{sec:bundle} and
   @{syntax context_elem} in \secref{sec:locale}.
 
-  \item @{command "theorem"}, @{command "corollary"}, and @{command
+  \<^descr> @{command "theorem"}, @{command "corollary"}, and @{command
   "proposition"} are the same as @{command "lemma"}. The different command
   names merely serve as a formal comment in the theory source.
 
-  \item @{command "schematic_goal"} is similar to @{command "theorem"},
+  \<^descr> @{command "schematic_goal"} is similar to @{command "theorem"},
   but allows the statement to contain unbound schematic variables.
 
   Under normal circumstances, an Isar proof text needs to specify
@@ -467,14 +444,14 @@
   proofs is lost, which also impacts performance, because proof
   checking is forced into sequential mode.
 
-  \item @{command "have"}~@{text "a: \<phi>"} claims a local goal,
+  \<^descr> @{command "have"}~@{text "a: \<phi>"} claims a local goal,
   eventually resulting in a fact within the current logical context.
   This operation is completely independent of any pending sub-goals of
   an enclosing goal statements, so @{command "have"} may be freely
   used for experimental exploration of potential results within a
   proof body.
 
-  \item @{command "show"}~@{text "a: \<phi>"} is like @{command
+  \<^descr> @{command "show"}~@{text "a: \<phi>"} is like @{command
   "have"}~@{text "a: \<phi>"} plus a second stage to refine some pending
   sub-goal for each one of the finished result, after having been
   exported into the corresponding context (at the head of the
@@ -487,20 +464,19 @@
   following message:
   @{verbatim [display] \<open>Local statement fails to refine any pending goal\<close>}
 
-  \item @{command "hence"} abbreviates ``@{command "then"}~@{command
+  \<^descr> @{command "hence"} abbreviates ``@{command "then"}~@{command
   "have"}'', i.e.\ claims a local goal to be proven by forward
   chaining the current facts.  Note that @{command "hence"} is also
   equivalent to ``@{command "from"}~@{text this}~@{command "have"}''.
 
-  \item @{command "thus"} abbreviates ``@{command "then"}~@{command
+  \<^descr> @{command "thus"} abbreviates ``@{command "then"}~@{command
   "show"}''.  Note that @{command "thus"} is also equivalent to
   ``@{command "from"}~@{text this}~@{command "show"}''.
 
-  \item @{command "print_statement"}~@{text a} prints facts from the
+  \<^descr> @{command "print_statement"}~@{text a} prints facts from the
   current theory or proof context in long statement form, according to
   the syntax for @{command "lemma"} given above.
 
-  \end{description}
 
   Any goal statement causes some term abbreviations (such as
   @{variable_ref "?thesis"}) to be bound automatically, see also
@@ -574,9 +550,7 @@
     @@{attribute trans} (() | 'add' | 'del')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
+  \<^descr> @{command "also"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains the auxiliary
   @{fact calculation} register as follows.  The first occurrence of
   @{command "also"} in some calculational thread initializes @{fact
   calculation} by @{fact this}. Any subsequent @{command "also"} on
@@ -586,7 +560,7 @@
   current context, unless alternative rules are given as explicit
   arguments.
 
-  \item @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintaining @{fact
+  \<^descr> @{command "finally"}~@{text "(a\<^sub>1 \<dots> a\<^sub>n)"} maintains @{fact
   calculation} in the same way as @{command "also"}, and concludes the
   current calculational thread.  The final result is exhibited as fact
   for forward chaining towards the next goal. Basically, @{command
@@ -596,22 +570,22 @@
   "show"}~@{text ?thesis}~@{command "."}'' and ``@{command
   "finally"}~@{command "have"}~@{text \<phi>}~@{command "."}''.
 
-  \item @{command "moreover"} and @{command "ultimately"} are
+  \<^descr> @{command "moreover"} and @{command "ultimately"} are
   analogous to @{command "also"} and @{command "finally"}, but collect
   results only, without applying rules.
 
-  \item @{command "print_trans_rules"} prints the list of transitivity
+  \<^descr> @{command "print_trans_rules"} prints the list of transitivity
   rules (for calculational commands @{command "also"} and @{command
   "finally"}) and symmetry rules (for the @{attribute symmetric}
   operation and single step elimination patters) of the current
   context.
 
-  \item @{attribute trans} declares theorems as transitivity rules.
+  \<^descr> @{attribute trans} declares theorems as transitivity rules.
 
-  \item @{attribute sym} declares symmetry rules, as well as
+  \<^descr> @{attribute sym} declares symmetry rules, as well as
   @{attribute "Pure.elim"}@{text "?"} rules.
 
-  \item @{attribute symmetric} resolves a theorem with some rule
+  \<^descr> @{attribute symmetric} resolves a theorem with some rule
   declared as @{attribute sym} in the current context.  For example,
   ``@{command "assume"}~@{text "[symmetric]: x = y"}'' produces a
   swapped fact derived from that assumption.
@@ -620,8 +594,6 @@
   explicit single-step elimination proof, such as ``@{command
   "assume"}~@{text "x = y"}~@{command "then"}~@{command "have"}~@{text
   "y = x"}~@{command ".."}''.
-
-  \end{description}
 \<close>
 
 
@@ -704,8 +676,6 @@
   Structured proof composition in Isar admits proof methods to be
   invoked in two places only.
 
-  \begin{enumerate}
-
   \<^enum> An \emph{initial} refinement step @{command_ref
   "proof"}~@{text "m\<^sub>1"} reduces a newly stated goal to a number
   of sub-goals that are to be solved later.  Facts are passed to
@@ -716,7 +686,6 @@
   "m\<^sub>2"} is intended to solve remaining goals.  No facts are
   passed to @{text "m\<^sub>2"}.
 
-  \end{enumerate}
 
   The only other (proper) way to affect pending goals in a proof body
   is by @{command_ref "show"}, which involves an explicit statement of
@@ -749,13 +718,11 @@
     (@@{command "."} | @@{command ".."} | @@{command sorry})
   \<close>}
 
-  \begin{description}
-
-  \item @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
+  \<^descr> @{command "proof"}~@{text "m\<^sub>1"} refines the goal by proof
   method @{text "m\<^sub>1"}; facts for forward chaining are passed if so
   indicated by @{text "proof(chain)"} mode.
 
-  \item @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
+  \<^descr> @{command "qed"}~@{text "m\<^sub>2"} refines any remaining goals by
   proof method @{text "m\<^sub>2"} and concludes the sub-proof by assumption.
   If the goal had been @{text "show"} (or @{text "thus"}), some
   pending sub-goal is solved as well by the rule resulting from the
@@ -768,7 +735,7 @@
   @{command "have"}, or weakening the local context by replacing
   occurrences of @{command "assume"} by @{command "presume"}.
 
-  \item @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
+  \<^descr> @{command "by"}~@{text "m\<^sub>1 m\<^sub>2"} is a \emph{terminal
   proof}\index{proof!terminal}; it abbreviates @{command
   "proof"}~@{text "m\<^sub>1"}~@{command "qed"}~@{text "m\<^sub>2"}, but with
   backtracking across both methods.  Debugging an unsuccessful
@@ -777,15 +744,15 @@
   @{text "apply"}~@{text "m\<^sub>1"}) is already sufficient to see the
   problem.
 
-  \item ``@{command ".."}'' is a \emph{standard
+  \<^descr> ``@{command ".."}'' is a \emph{standard
   proof}\index{proof!standard}; it abbreviates @{command "by"}~@{text
   "standard"}.
 
-  \item ``@{command "."}'' is a \emph{trivial
+  \<^descr> ``@{command "."}'' is a \emph{trivial
   proof}\index{proof!trivial}; it abbreviates @{command "by"}~@{text
   "this"}.
 
-  \item @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
+  \<^descr> @{command "sorry"} is a \emph{fake proof}\index{proof!fake}
   pretending to solve the pending claim without further ado.  This
   only works in interactive development, or if the @{attribute
   quick_and_dirty} is enabled.  Facts emerging from fake
@@ -796,7 +763,7 @@
   The most important application of @{command "sorry"} is to support
   experimentation and top-down proof development.
 
-  \item @{method standard} refers to the default refinement step of some
+  \<^descr> @{method standard} refers to the default refinement step of some
   Isar language elements (notably @{command proof} and ``@{command ".."}'').
   It is \emph{dynamically scoped}, so the behaviour depends on the
   application environment.
@@ -808,8 +775,6 @@
 
   In Isabelle/HOL, @{method standard} also takes classical rules into
   account (cf.\ \secref{sec:classical}).
-
-  \end{description}
 \<close>
 
 
@@ -860,9 +825,7 @@
     @@{attribute "where"} @{syntax named_insts} @{syntax for_fixes}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "print_rules"} prints rules declared via attributes
+  \<^descr> @{command "print_rules"} prints rules declared via attributes
   @{attribute (Pure) intro}, @{attribute (Pure) elim}, @{attribute
   (Pure) dest} of Isabelle/Pure.
 
@@ -870,7 +833,7 @@
   rule declarations of the classical reasoner
   (\secref{sec:classical}).
 
-  \item ``@{method "-"}'' (minus) inserts the forward chaining facts as
+  \<^descr> ``@{method "-"}'' (minus) inserts the forward chaining facts as
   premises into the goal, and nothing else.
 
   Note that command @{command_ref "proof"} without any method actually
@@ -878,7 +841,7 @@
   method; thus a plain \emph{do-nothing} proof step would be ``@{command
   "proof"}~@{text "-"}'' rather than @{command "proof"} alone.
 
-  \item @{method "goal_cases"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} turns the current subgoals
+  \<^descr> @{method "goal_cases"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} turns the current subgoals
   into cases within the context (see also \secref{sec:cases-induct}). The
   specified case names are used if present; otherwise cases are numbered
   starting from 1.
@@ -888,7 +851,7 @@
   premises, and @{command let} variable @{variable_ref ?case} refer to the
   conclusion.
 
-  \item @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
+  \<^descr> @{method "fact"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} composes some fact from
   @{text "a\<^sub>1, \<dots>, a\<^sub>n"} (or implicitly from the current proof context)
   modulo unification of schematic type and term variables.  The rule
   structure is not taken into account, i.e.\ meta-level implication is
@@ -899,7 +862,7 @@
   @{text "\<turnstile> \<phi>"} is an instance of some known @{text "\<turnstile> \<phi>"} in the
   proof context.
 
-  \item @{method assumption} solves some goal by a single assumption
+  \<^descr> @{method assumption} solves some goal by a single assumption
   step.  All given facts are guaranteed to participate in the
   refinement; this means there may be only 0 or 1 in the first place.
   Recall that @{command "qed"} (\secref{sec:proof-steps}) already
@@ -907,11 +870,11 @@
   proofs usually need not quote the @{method assumption} method at
   all.
 
-  \item @{method this} applies all of the current facts directly as
+  \<^descr> @{method this} applies all of the current facts directly as
   rules.  Recall that ``@{command "."}'' (dot) abbreviates ``@{command
   "by"}~@{text this}''.
 
-  \item @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
+  \<^descr> @{method (Pure) rule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some rule given as
   argument in backward manner; facts are used to reduce the rule
   before applying it to the goal.  Thus @{method (Pure) rule} without facts
   is plain introduction, while with facts it becomes elimination.
@@ -923,7 +886,7 @@
   behaviour of @{command "proof"} and ``@{command ".."}'' (double-dot) steps
   (see \secref{sec:proof-steps}).
 
-  \item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
+  \<^descr> @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
   @{attribute (Pure) dest} declare introduction, elimination, and
   destruct rules, to be used with method @{method (Pure) rule}, and similar
   tools.  Note that the latter will ignore rules declared with
@@ -934,10 +897,10 @@
   present versions of Isabelle/Pure, i.e.\ @{attribute (Pure)
   "Pure.intro"}.
 
-  \item @{attribute (Pure) rule}~@{text del} undeclares introduction,
+  \<^descr> @{attribute (Pure) rule}~@{text del} undeclares introduction,
   elimination, or destruct rules.
 
-  \item @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
+  \<^descr> @{attribute OF}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} applies some theorem to all
   of the given rules @{text "a\<^sub>1, \<dots>, a\<^sub>n"} in canonical right-to-left
   order, which means that premises stemming from the @{text "a\<^sub>i"}
   emerge in parallel in the result, without interfering with each
@@ -949,7 +912,7 @@
   (underscore), which refers to the propositional identity rule in the
   Pure theory.
 
-  \item @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
+  \<^descr> @{attribute of}~@{text "t\<^sub>1 \<dots> t\<^sub>n"} performs positional
   instantiation of term variables.  The terms @{text "t\<^sub>1, \<dots>, t\<^sub>n"} are
   substituted for any schematic variables occurring in a theorem from
   left to right; ``@{text _}'' (underscore) indicates to skip a
@@ -960,7 +923,7 @@
   be specified: the instantiated theorem is exported, and these
   variables become schematic (usually with some shifting of indices).
 
-  \item @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
+  \<^descr> @{attribute "where"}~@{text "x\<^sub>1 = t\<^sub>1 \<AND> \<dots> x\<^sub>n = t\<^sub>n"}
   performs named instantiation of schematic type and term variables
   occurring in a theorem.  Schematic variables have to be specified on
   the left-hand side (e.g.\ @{text "?x1.3"}).  The question mark may
@@ -970,8 +933,6 @@
 
   An optional context of local variables @{text "\<FOR> x\<^sub>1 \<dots> x\<^sub>m"} may
   be specified as for @{attribute "of"} above.
-
-  \end{description}
 \<close>
 
 
@@ -986,9 +947,7 @@
     @@{command method_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "method_setup"}~@{text "name = text description"}
+  \<^descr> @{command "method_setup"}~@{text "name = text description"}
   defines a proof method in the current context.  The given @{text
   "text"} has to be an ML expression of type
   @{ML_type "(Proof.context -> Proof.method) context_parser"}, cf.\
@@ -999,8 +958,6 @@
   addressing.
 
   Here are some example method definitions:
-
-  \end{description}
 \<close>
 
 (*<*)experiment begin(*>*)
@@ -1094,9 +1051,7 @@
     @@{attribute consumes} @{syntax int}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
+  \<^descr> @{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"} invokes a named local
   context @{text "c: x\<^sub>1, \<dots>, x\<^sub>m, \<phi>\<^sub>1, \<dots>, \<phi>\<^sub>m"}, as provided by an
   appropriate proof method (such as @{method_ref cases} and @{method_ref
   induct}). The command ``@{command "case"}~@{text "a: (c x\<^sub>1 \<dots> x\<^sub>m)"}''
@@ -1109,17 +1064,17 @@
   re-use @{text c}. So @{command "case"}~@{text "(c x\<^sub>1 \<dots> x\<^sub>m)"} is the same
   as @{command "case"}~@{text "c: (c x\<^sub>1 \<dots> x\<^sub>m)"}.
 
-  \item @{command "print_cases"} prints all local contexts of the
+  \<^descr> @{command "print_cases"} prints all local contexts of the
   current state, using Isar proof language notation.
 
-  \item @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
+  \<^descr> @{attribute case_names}~@{text "c\<^sub>1 \<dots> c\<^sub>k"} declares names for
   the local contexts of premises of a theorem; @{text "c\<^sub>1, \<dots>, c\<^sub>k"}
   refers to the \emph{prefix} of the list of premises. Each of the
   cases @{text "c\<^sub>i"} can be of the form @{text "c[h\<^sub>1 \<dots> h\<^sub>n]"} where
   the @{text "h\<^sub>1 \<dots> h\<^sub>n"} are the names of the hypotheses in case @{text "c\<^sub>i"}
   from left to right.
 
-  \item @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
+  \<^descr> @{attribute case_conclusion}~@{text "c d\<^sub>1 \<dots> d\<^sub>k"} declares
   names for the conclusions of a named premise @{text c}; here @{text
   "d\<^sub>1, \<dots>, d\<^sub>k"} refers to the prefix of arguments of a logical formula
   built by nesting a binary connective (e.g.\ @{text "\<or>"}).
@@ -1129,7 +1084,7 @@
   whole.  The need to name subformulas only arises with cases that
   split into several sub-cases, as in common co-induction rules.
 
-  \item @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
+  \<^descr> @{attribute params}~@{text "p\<^sub>1 \<dots> p\<^sub>m \<AND> \<dots> q\<^sub>1 \<dots> q\<^sub>n"} renames
   the innermost parameters of premises @{text "1, \<dots>, n"} of some
   theorem.  An empty list of names may be given to skip positions,
   leaving the present parameters unchanged.
@@ -1137,7 +1092,7 @@
   Note that the default usage of case rules does \emph{not} directly
   expose parameters to the proof context.
 
-  \item @{attribute consumes}~@{text n} declares the number of ``major
+  \<^descr> @{attribute consumes}~@{text n} declares the number of ``major
   premises'' of a rule, i.e.\ the number of facts to be consumed when
   it is applied by an appropriate proof method.  The default value of
   @{attribute consumes} is @{text "n = 1"}, which is appropriate for
@@ -1158,8 +1113,6 @@
   rarely needed; this is already taken care of automatically by the
   higher-level @{attribute cases}, @{attribute induct}, and
   @{attribute coinduct} declarations.
-
-  \end{description}
 \<close>
 
 
@@ -1214,9 +1167,7 @@
     taking: 'taking' ':' @{syntax insts}
   \<close>}
 
-  \begin{description}
-
-  \item @{method cases}~@{text "insts R"} applies method @{method
+  \<^descr> @{method cases}~@{text "insts R"} applies method @{method
   rule} with an appropriate case distinction theorem, instantiated to
   the subjects @{text insts}.  Symbolic case names are bound according
   to the rule's local contexts.
@@ -1243,7 +1194,7 @@
   "(no_simp)"} option can be used to disable pre-simplification of
   cases (see the description of @{method induct} below for details).
 
-  \item @{method induct}~@{text "insts R"} and
+  \<^descr> @{method induct}~@{text "insts R"} and
   @{method induction}~@{text "insts R"} are analogous to the
   @{method cases} method, but refer to induction rules, which are
   determined as follows:
@@ -1300,7 +1251,7 @@
   pending variables in the rule.  Such schematic induction rules
   rarely occur in practice, though.
 
-  \item @{method coinduct}~@{text "inst R"} is analogous to the
+  \<^descr> @{method coinduct}~@{text "inst R"} is analogous to the
   @{method induct} method, but refers to coinduction rules, which are
   determined as follows:
 
@@ -1327,7 +1278,6 @@
   specification may be required in order to specify the bisimulation
   to be used in the coinduction step.
 
-  \end{description}
 
   Above methods produce named local contexts, as determined by the
   instantiated rule as given in the text.  Beyond that, the @{method
@@ -1404,12 +1354,10 @@
     spec: (('type' | 'pred' | 'set') ':' @{syntax nameref}) | 'del'
   \<close>}
 
-  \begin{description}
-
-  \item @{command "print_induct_rules"} prints cases and induct rules
+  \<^descr> @{command "print_induct_rules"} prints cases and induct rules
   for predicates (or sets) and types of the current context.
 
-  \item @{attribute cases}, @{attribute induct}, and @{attribute
+  \<^descr> @{attribute cases}, @{attribute induct}, and @{attribute
   coinduct} (as attributes) declare rules for reasoning about
   (co)inductive predicates (or sets) and types, using the
   corresponding methods of the same name.  Certain definitional
@@ -1428,8 +1376,6 @@
   declaration is taken care of automatically: @{attribute
   consumes}~@{text 0} is specified for ``type'' rules and @{attribute
   consumes}~@{text 1} for ``predicate'' / ``set'' rules.
-
-  \end{description}
 \<close>
 
 
@@ -1470,9 +1416,7 @@
     @@{command guess} (@{syntax "fixes"} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command consider}~@{text "(a) \<^vec>x \<WHERE> \<^vec>A \<^vec>x
+  \<^descr> @{command consider}~@{text "(a) \<^vec>x \<WHERE> \<^vec>A \<^vec>x
   | (b) \<^vec>y \<WHERE> \<^vec>B \<^vec>y | \<dots> "} states a rule for case
   splitting into separate subgoals, such that each case involves new
   parameters and premises. After the proof is finished, the resulting rule
@@ -1502,7 +1446,7 @@
   statements, as well as @{command print_statement} to print existing rules
   in a similar format.
 
-  \item @{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x"}
+  \<^descr> @{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x"}
   states a generalized elimination rule with exactly one case. After the
   proof is finished, it is activated for the subsequent proof text: the
   context is augmented via @{command fix}~@{text "\<^vec>x"} @{command
@@ -1529,7 +1473,7 @@
     \quad @{command "fix"}~@{text "\<^vec>x"}~@{command "assume"}@{text "\<^sup>* a: \<^vec>A \<^vec>x"} \\
   \end{matharray}
 
-  \item @{command guess} is similar to @{command obtain}, but it derives the
+  \<^descr> @{command guess} is similar to @{command obtain}, but it derives the
   obtained context elements from the course of tactical reasoning in the
   proof. Thus it can considerably obscure the proof: it is classified as
   \emph{improper}.
@@ -1543,7 +1487,6 @@
   The variable names and type constraints given as arguments for @{command
   "guess"} specify a prefix of accessible parameters.
 
-  \end{description}
 
   In the proof of @{command consider} and @{command obtain} the local
   premises are always bound to the fact name @{fact_ref that}, according to
--- a/src/Doc/Isar_Ref/Proof_Script.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Proof_Script.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -41,13 +41,11 @@
     @@{command prefer} @{syntax nat}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "supply"} supports fact definitions during goal
+  \<^descr> @{command "supply"} supports fact definitions during goal
   refinement: it is similar to @{command "note"}, but it operates in
   backwards mode and does not have any impact on chained facts.
 
-  \item @{command "apply"}~@{text m} applies proof method @{text m} in
+  \<^descr> @{command "apply"}~@{text m} applies proof method @{text m} in
   initial position, but unlike @{command "proof"} it retains ``@{text
   "proof(prove)"}'' mode.  Thus consecutive method applications may be
   given just as in tactic scripts.
@@ -57,7 +55,7 @@
   further @{command "apply"} command would always work in a purely
   backward manner.
 
-  \item @{command "apply_end"}~@{text "m"} applies proof method @{text
+  \<^descr> @{command "apply_end"}~@{text "m"} applies proof method @{text
   m} as if in terminal position.  Basically, this simulates a
   multi-step tactic script for @{command "qed"}, but may be given
   anywhere within the proof body.
@@ -67,24 +65,22 @@
   "qed"}).  Thus the proof method may not refer to any assumptions
   introduced in the current body, for example.
 
-  \item @{command "done"} completes a proof script, provided that the
+  \<^descr> @{command "done"} completes a proof script, provided that the
   current goal state is solved completely.  Note that actual
   structured proof commands (e.g.\ ``@{command "."}'' or @{command
   "sorry"}) may be used to conclude proof scripts as well.
 
-  \item @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
+  \<^descr> @{command "defer"}~@{text n} and @{command "prefer"}~@{text n}
   shuffle the list of pending goals: @{command "defer"} puts off
   sub-goal @{text n} to the end of the list (@{text "n = 1"} by
   default), while @{command "prefer"} brings sub-goal @{text n} to the
   front.
 
-  \item @{command "back"} does back-tracking over the result sequence
+  \<^descr> @{command "back"} does back-tracking over the result sequence
   of the latest proof command.  Any proof command may return multiple
   results, and this command explores the possibilities step-by-step.
   It is mainly useful for experimentation and interactive exploration,
   and should be avoided in finished proofs.
-
-  \end{description}
 \<close>
 
 
@@ -103,9 +99,7 @@
     params: @'for' '\<dots>'? (('_' | @{syntax name})+)
   \<close>}
 
-  \begin{description}
-
-  \item @{command "subgoal"} allows to impose some structure on backward
+  \<^descr> @{command "subgoal"} allows to impose some structure on backward
   refinements, to avoid proof scripts degenerating into long lists of @{command
   apply} sequences.
 
@@ -133,7 +127,6 @@
   of a proven subgoal. Thus it may be re-used in further reasoning, similar
   to the result of @{command show} in structured Isar proofs.
 
-  \end{description}
 
   Here are some abstract examples:
 \<close>
@@ -245,9 +238,7 @@
     (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
   \<close>}
 
-\begin{description}
-
-  \item @{method rule_tac} etc. do resolution of rules with explicit
+  \<^descr> @{method rule_tac} etc. do resolution of rules with explicit
   instantiation.  This works the same way as the ML tactics @{ML
   Rule_Insts.res_inst_tac} etc.\ (see @{cite "isabelle-implementation"}).
 
@@ -255,47 +246,45 @@
   @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
   @{cite "isabelle-implementation"}).
 
-  \item @{method cut_tac} inserts facts into the proof state as
+  \<^descr> @{method cut_tac} inserts facts into the proof state as
   assumption of a subgoal; instantiations may be given as well.  Note
   that the scope of schematic variables is spread over the main goal
   statement and rule premises are turned into new subgoals.  This is
   in contrast to the regular method @{method insert} which inserts
   closed rule statements.
 
-  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
+  \<^descr> @{method thin_tac}~@{text \<phi>} deletes the specified premise
   from a subgoal.  Note that @{text \<phi>} may contain schematic
   variables, to abbreviate the intended proposition; the first
   matching subgoal premise will be deleted.  Removing useless premises
   from a subgoal increases its readability and can make search tactics
   run faster.
 
-  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
+  \<^descr> @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
   @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
   as new subgoals (in the original context).
 
-  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
+  \<^descr> @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
   goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
   \emph{suffix} of variables.
 
-  \item @{method rotate_tac}~@{text n} rotates the premises of a
+  \<^descr> @{method rotate_tac}~@{text n} rotates the premises of a
   subgoal by @{text n} positions: from right to left if @{text n} is
   positive, and from left to right if @{text n} is negative; the
   default value is 1.
 
-  \item @{method tactic}~@{text "text"} produces a proof method from
+  \<^descr> @{method tactic}~@{text "text"} produces a proof method from
   any ML text of type @{ML_type tactic}.  Apart from the usual ML
   environment and the current proof context, the ML code may refer to
   the locally bound values @{ML_text facts}, which indicates any
   current facts used for forward-chaining.
 
-  \item @{method raw_tactic} is similar to @{method tactic}, but
+  \<^descr> @{method raw_tactic} is similar to @{method tactic}, but
   presents the goal state in its raw internal form, where simultaneous
   subgoals appear as conjunction of the logical framework instead of
   the usual split into several subgoals.  While this feature is useful
   for debugging of complex method definitions, it should never
   appear in production theories.
-
-  \end{description}
 \<close>
 
 end
\ No newline at end of file
--- a/src/Doc/Isar_Ref/Spec.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Spec.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -72,9 +72,7 @@
     thy_bounds: @{syntax name} | '(' (@{syntax name} + @'|') ')'
   \<close>}
 
-  \begin{description}
-
-  \item @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
+  \<^descr> @{command "theory"}~@{text "A \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"}
   starts a new theory @{text A} based on the merge of existing
   theories @{text "B\<^sub>1 \<dots> B\<^sub>n"}.  Due to the possibility to import more
   than one ancestor, the resulting theory structure of an Isabelle
@@ -104,19 +102,17 @@
   It is possible to specify an alternative completion via @{verbatim
   "=="}~@{text "text"}, while the default is the corresponding keyword name.
   
-  \item @{command (global) "end"} concludes the current theory
+  \<^descr> @{command (global) "end"} concludes the current theory
   definition.  Note that some other commands, e.g.\ local theory
   targets @{command locale} or @{command class} may involve a
   @{keyword "begin"} that needs to be matched by @{command (local)
   "end"}, according to the usual rules for nested blocks.
 
-  \item @{command thy_deps} visualizes the theory hierarchy as a directed
+  \<^descr> @{command thy_deps} visualizes the theory hierarchy as a directed
   acyclic graph. By default, all imported theories are shown, taking the
   base session as a starting point. Alternatively, it is possible to
   restrict the full theory graph by giving bounds, analogously to
   @{command_ref class_deps}.
-
-  \end{description}
 \<close>
 
 
@@ -153,15 +149,13 @@
     @{syntax_def target}: '(' @'in' @{syntax nameref} ')'
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "context"}~@{text "c \<BEGIN>"} opens a named
+  \<^descr> @{command "context"}~@{text "c \<BEGIN>"} opens a named
   context, by recommencing an existing locale or class @{text c}.
   Note that locale and class definitions allow to include the
   @{keyword "begin"} keyword as well, in order to continue the local
   theory immediately after the initial specification.
 
-  \item @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
+  \<^descr> @{command "context"}~@{text "bundles elements \<BEGIN>"} opens
   an unnamed context, by extending the enclosing global or local
   theory target by the given declaration bundles (\secref{sec:bundle})
   and context elements (@{text "\<FIXES>"}, @{text "\<ASSUMES>"}
@@ -169,12 +163,12 @@
   in the extended context will be exported into the enclosing target
   by lifting over extra parameters and premises.
   
-  \item @{command (local) "end"} concludes the current local theory,
+  \<^descr> @{command (local) "end"} concludes the current local theory,
   according to the nesting of contexts.  Note that a global @{command
   (global) "end"} has a different meaning: it concludes the theory
   itself (\secref{sec:begin-thy}).
   
-  \item @{keyword "private"} or @{keyword "qualified"} may be given as
+  \<^descr> @{keyword "private"} or @{keyword "qualified"} may be given as
   modifiers before any local theory command. This restricts name space
   accesses to the local scope, as determined by the enclosing @{command
   "context"}~@{keyword "begin"}~\dots~@{command "end"} block. Outside its
@@ -185,7 +179,7 @@
   a local scope by itself: an extra unnamed context is required to use
   @{keyword "private"} or @{keyword "qualified"} here.
 
-  \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any local
+  \<^descr> @{text "("}@{keyword_def "in"}~@{text "c)"} given after any local
   theory command specifies an immediate target, e.g.\ ``@{command
   "definition"}~@{text "(\<IN> c)"}'' or ``@{command "theorem"}~@{text
   "(\<IN> c)"}''. This works both in a local or global theory context; the
@@ -193,7 +187,6 @@
   ``@{text "(\<IN> -)"}'' will always produce a global result independently
   of the current target context.
 
-  \end{description}
 
   Any specification element that operates on @{text local_theory} according
   to this manual implicitly allows the above target syntax @{text
@@ -256,38 +249,36 @@
     @{syntax_def "includes"}: @'includes' (@{syntax nameref}+)
   \<close>}
 
-  \begin{description}
-
-  \item @{command bundle}~@{text "b = decls"} defines a bundle of
+  \<^descr> @{command bundle}~@{text "b = decls"} defines a bundle of
   declarations in the current context.  The RHS is similar to the one
   of the @{command declare} command.  Bundles defined in local theory
   targets are subject to transformations via morphisms, when moved
   into different application contexts; this works analogously to any
   other local theory specification.
 
-  \item @{command print_bundles} prints the named bundles that are available
+  \<^descr> @{command print_bundles} prints the named bundles that are available
   in the current context; the ``@{text "!"}'' option indicates extra
   verbosity.
 
-  \item @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
+  \<^descr> @{command include}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} includes the declarations
   from the given bundles into the current proof body context.  This is
   analogous to @{command "note"} (\secref{sec:proof-facts}) with the
   expanded bundles.
 
-  \item @{command including} is similar to @{command include}, but
+  \<^descr> @{command including} is similar to @{command include}, but
   works in proof refinement (backward mode).  This is analogous to
   @{command "using"} (\secref{sec:proof-facts}) with the expanded
   bundles.
 
-  \item @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
+  \<^descr> @{keyword "includes"}~@{text "b\<^sub>1 \<dots> b\<^sub>n"} is similar to
   @{command include}, but works in situations where a specification
   context is constructed, notably for @{command context} and long
   statements of @{command theorem} etc.
 
-  \end{description}
 
   Here is an artificial example of bundling various configuration
-  options:\<close>
+  options:
+\<close>
 
 (*<*)experiment begin(*>*)
 bundle trace = [[simp_trace, linarith_trace, metis_trace, smt_trace]]
@@ -324,9 +315,7 @@
     @@{command print_abbrevs} ('!'?)
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "definition"}~@{text "c \<WHERE> eq"} produces an
+  \<^descr> @{command "definition"}~@{text "c \<WHERE> eq"} produces an
   internal definition @{text "c \<equiv> t"} according to the specification
   given as @{text eq}, which is then turned into a proven fact.  The
   given proposition may deviate from internal meta-level equality
@@ -340,10 +329,10 @@
   @{text "f \<equiv> \<lambda>x y. t"} and @{text "y \<noteq> 0 \<Longrightarrow> g x y = u"} instead of an
   unrestricted @{text "g \<equiv> \<lambda>x y. u"}.
 
-  \item @{command "print_defn_rules"} prints the definitional rewrite rules
+  \<^descr> @{command "print_defn_rules"} prints the definitional rewrite rules
   declared via @{attribute defn} in the current context.
 
-  \item @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
+  \<^descr> @{command "abbreviation"}~@{text "c \<WHERE> eq"} introduces a
   syntactic constant which is associated with a certain term according
   to the meta-level equality @{text eq}.
   
@@ -360,10 +349,8 @@
   declared for abbreviations, cf.\ @{command "syntax"} in
   \secref{sec:syn-trans}.
   
-  \item @{command "print_abbrevs"} prints all constant abbreviations of the
+  \<^descr> @{command "print_abbrevs"} prints all constant abbreviations of the
   current context; the ``@{text "!"}'' option indicates extra verbosity.
-  
-  \end{description}
 \<close>
 
 
@@ -380,9 +367,7 @@
     specs: (@{syntax thmdecl}? @{syntax props} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+  \<^descr> @{command "axiomatization"}~@{text "c\<^sub>1 \<dots> c\<^sub>m \<WHERE> \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
   introduces several constants simultaneously and states axiomatic
   properties for these. The constants are marked as being specified once and
   for all, which prevents additional specifications for the same constants
@@ -400,8 +385,6 @@
   within Isabelle/Pure, but in an application environment like Isabelle/HOL
   the user normally stays within definitional mechanisms provided by the
   logic and its libraries.
-
-  \end{description}
 \<close>
 
 
@@ -430,9 +413,7 @@
     @@{command declare} (@{syntax thmrefs} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "declaration"}~@{text d} adds the declaration
+  \<^descr> @{command "declaration"}~@{text d} adds the declaration
   function @{text d} of ML type @{ML_type declaration}, to the current
   local theory under construction.  In later application contexts, the
   function is transformed according to the morphisms being involved in
@@ -442,17 +423,15 @@
   declaration is applied to all possible contexts involved, including
   the global background theory.
 
-  \item @{command "syntax_declaration"} is similar to @{command
+  \<^descr> @{command "syntax_declaration"} is similar to @{command
   "declaration"}, but is meant to affect only ``syntactic'' tools by
   convention (such as notation and type-checking information).
 
-  \item @{command "declare"}~@{text thms} declares theorems to the
+  \<^descr> @{command "declare"}~@{text thms} declares theorems to the
   current local theory context.  No theorem binding is involved here,
   unlike @{command "lemmas"} (cf.\ \secref{sec:theorems}), so
   @{command "declare"} only has the effect of applying attributes as
   included in the theorem specification.
-
-  \end{description}
 \<close>
 
 
@@ -563,9 +542,7 @@
       @'notes' (@{syntax thmdef}? @{syntax thmrefs} + @'and')
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "locale"}~@{text "loc = import + body"} defines a
+  \<^descr> @{command "locale"}~@{text "loc = import + body"} defines a
   new locale @{text loc} as a context consisting of a certain view of
   existing locales (@{text import}) plus some additional elements
   (@{text body}).  Both @{text import} and @{text body} are optional;
@@ -588,36 +565,32 @@
 
   The @{text body} consists of context elements.
 
-  \begin{description}
+    \<^descr> @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
+    parameter of type @{text \<tau>} and mixfix annotation @{text mx} (both
+    are optional).  The special syntax declaration ``@{text
+    "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may
+    be referenced implicitly in this context.
 
-  \item @{element "fixes"}~@{text "x :: \<tau> (mx)"} declares a local
-  parameter of type @{text \<tau>} and mixfix annotation @{text mx} (both
-  are optional).  The special syntax declaration ``@{text
-  "("}@{keyword_ref "structure"}@{text ")"}'' means that @{text x} may
-  be referenced implicitly in this context.
-
-  \item @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
-  constraint @{text \<tau>} on the local parameter @{text x}.  This
-  element is deprecated.  The type constraint should be introduced in
-  the @{keyword "for"} clause or the relevant @{element "fixes"} element.
+    \<^descr> @{element "constrains"}~@{text "x :: \<tau>"} introduces a type
+    constraint @{text \<tau>} on the local parameter @{text x}.  This
+    element is deprecated.  The type constraint should be introduced in
+    the @{keyword "for"} clause or the relevant @{element "fixes"} element.
 
-  \item @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
-  introduces local premises, similar to @{command "assume"} within a
-  proof (cf.\ \secref{sec:proof-context}).
+    \<^descr> @{element "assumes"}~@{text "a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"}
+    introduces local premises, similar to @{command "assume"} within a
+    proof (cf.\ \secref{sec:proof-context}).
 
-  \item @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
-  declared parameter.  This is similar to @{command "def"} within a
-  proof (cf.\ \secref{sec:proof-context}), but @{element "defines"}
-  takes an equational proposition instead of variable-term pair.  The
-  left-hand side of the equation may have additional arguments, e.g.\
-  ``@{element "defines"}~@{text "f x\<^sub>1 \<dots> x\<^sub>n \<equiv> t"}''.
+    \<^descr> @{element "defines"}~@{text "a: x \<equiv> t"} defines a previously
+    declared parameter.  This is similar to @{command "def"} within a
+    proof (cf.\ \secref{sec:proof-context}), but @{element "defines"}
+    takes an equational proposition instead of variable-term pair.  The
+    left-hand side of the equation may have additional arguments, e.g.\
+    ``@{element "defines"}~@{text "f x\<^sub>1 \<dots> x\<^sub>n \<equiv> t"}''.
 
-  \item @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
-  reconsiders facts within a local context.  Most notably, this may
-  include arbitrary declarations in any attribute specifications
-  included here, e.g.\ a local @{attribute simp} rule.
-
-  \end{description}
+    \<^descr> @{element "notes"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}
+    reconsiders facts within a local context.  Most notably, this may
+    include arbitrary declarations in any attribute specifications
+    included here, e.g.\ a local @{attribute simp} rule.
 
   Both @{element "assumes"} and @{element "defines"} elements
   contribute to the locale specification.  When defining an operation
@@ -649,25 +622,25 @@
   \secref{sec:object-logic}).  Separate introduction rules @{text
   loc_axioms.intro} and @{text loc.intro} are provided as well.
 
-  \item @{command experiment}~@{text exprs}~@{keyword "begin"} opens an
+  \<^descr> @{command experiment}~@{text exprs}~@{keyword "begin"} opens an
   anonymous locale context with private naming policy. Specifications in its
   body are inaccessible from outside. This is useful to perform experiments,
   without polluting the name space.
 
-  \item @{command "print_locale"}~@{text "locale"} prints the
+  \<^descr> @{command "print_locale"}~@{text "locale"} prints the
   contents of the named locale.  The command omits @{element "notes"}
   elements by default.  Use @{command "print_locale"}@{text "!"} to
   have them included.
 
-  \item @{command "print_locales"} prints the names of all locales of the
+  \<^descr> @{command "print_locales"} prints the names of all locales of the
   current theory; the ``@{text "!"}'' option indicates extra verbosity.
 
-  \item @{command "locale_deps"} visualizes all locales and their
+  \<^descr> @{command "locale_deps"} visualizes all locales and their
   relations as a Hasse diagram. This includes locales defined as type
   classes (\secref{sec:class}).  See also @{command
   "print_dependencies"} below.
 
-  \item @{method intro_locales} and @{method unfold_locales}
+  \<^descr> @{method intro_locales} and @{method unfold_locales}
   repeatedly expand all introduction rules of locale predicates of the
   theory.  While @{method intro_locales} only applies the @{text
   loc.intro} introduction rules and therefore does not descend to
@@ -676,8 +649,6 @@
   specifications entailed by the context, both from target statements,
   and from interpretations (see below).  New goals that are entailed
   by the current context are discharged automatically.
-
-  \end{description}
 \<close>
 
 
@@ -716,9 +687,7 @@
     equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
+  \<^descr> @{command "interpretation"}~@{text "expr \<WHERE> eqns"}
   interprets @{text expr} in a global or local theory.  The command
   generates proof obligations for the instantiated specifications.
   Once these are discharged by the user, instantiated declarations (in
@@ -759,13 +728,13 @@
   concepts introduced through definitions.  The equations must be
   proved.
 
-  \item @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
+  \<^descr> @{command "interpret"}~@{text "expr \<WHERE> eqns"} interprets
   @{text expr} in the proof context and is otherwise similar to
   interpretation in local theories.  Note that for @{command
   "interpret"} the @{text eqns} should be
   explicitly universally quantified.
 
-  \item @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
+  \<^descr> @{command "sublocale"}~@{text "name \<subseteq> expr \<WHERE>
   eqns"}
   interprets @{text expr} in the locale @{text name}.  A proof that
   the specification of @{text name} implies the specification of
@@ -798,7 +767,7 @@
   be used, but the locale argument must be omitted.  The command then
   refers to the locale (or class) target of the context block.
 
-  \item @{command "print_dependencies"}~@{text "expr"} is useful for
+  \<^descr> @{command "print_dependencies"}~@{text "expr"} is useful for
   understanding the effect of an interpretation of @{text "expr"} in
   the current context.  It lists all locale instances for which
   interpretations would be added to the current context.  Variant
@@ -808,13 +777,12 @@
   latter is useful for understanding the dependencies of a locale
   expression.
 
-  \item @{command "print_interps"}~@{text "locale"} lists all
+  \<^descr> @{command "print_interps"}~@{text "locale"} lists all
   interpretations of @{text "locale"} in the current theory or proof
   context, including those due to a combination of an @{command
   "interpretation"} or @{command "interpret"} and one or several
   @{command "sublocale"} declarations.
 
-  \end{description}
 
   \begin{warn}
     If a global theory inherits declarations (body elements) for a
@@ -851,13 +819,10 @@
   available by importing theory @{file "~~/src/Tools/Permanent_Interpretation.thy"}
   and provides
 
-  \begin{enumerate}
-
   \<^enum> a unified view on arbitrary suitable local theories as interpretation target;
 
   \<^enum> rewrite morphisms by means of \emph{rewrite definitions}.
 
-  \end{enumerate}
   
   \begin{matharray}{rcl}
     @{command_def "permanent_interpretation"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
@@ -872,9 +837,7 @@
     equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
   \<close>}
 
-  \begin{description}
-
-  \item @{command "permanent_interpretation"}~@{text "expr \<DEFINING> defs \<WHERE> eqns"}
+  \<^descr> @{command "permanent_interpretation"}~@{text "expr \<DEFINING> defs \<WHERE> eqns"}
   interprets @{text expr} in the current local theory.  The command
   generates proof obligations for the instantiated specifications.
   Instantiated declarations (in particular, facts) are added to the
@@ -898,33 +861,23 @@
   also \emph{rewrite definitions} may be specified.  Semantically, a
   rewrite definition
   
-  \begin{itemize}
-  
-  \<^item> produces a corresponding definition in
-  the local theory's underlying target \emph{and}
-  
-  \<^item> augments the rewrite morphism with the equation
-  stemming from the symmetric of the corresponding definition.
-  
-  \end{itemize}
+    \<^item> produces a corresponding definition in
+    the local theory's underlying target \emph{and}
+
+    \<^item> augments the rewrite morphism with the equation
+    stemming from the symmetric of the corresponding definition.
   
   This is technically different to a naive combination
   of a conventional definition and an explicit rewrite equation:
   
-  \begin{itemize}
-  
-  \<^item> Definitions are parsed in the syntactic interpretation
-  context, just like equations.
+    \<^item> Definitions are parsed in the syntactic interpretation
+    context, just like equations.
 
-  \<^item> The proof needs not consider the equations stemming from
-  definitions -- they are proved implicitly by construction.
-      
-  \end{itemize}
+    \<^item> The proof need not consider the equations stemming from
+    definitions -- they are proved implicitly by construction.
   
   Rewrite definitions yield a pattern for introducing new explicit
   operations for existing terms after interpretation.
-  
-  \end{description}
 \<close>
 
 
@@ -970,9 +923,7 @@
     class_bounds: @{syntax sort} | '(' (@{syntax sort} + @'|') ')'
   \<close>}
 
-  \begin{description}
-
-  \item @{command "class"}~@{text "c = superclasses + body"} defines
+  \<^descr> @{command "class"}~@{text "c = superclasses + body"} defines
   a new class @{text c}, inheriting from @{text superclasses}.  This
   introduces a locale @{text c} with import of all locales @{text
   superclasses}.
@@ -990,7 +941,7 @@
   --- the @{method intro_classes} method takes care of the details of
   class membership proofs.
 
-  \item @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
+  \<^descr> @{command "instantiation"}~@{text "t :: (s\<^sub>1, \<dots>, s\<^sub>n)s
   \<BEGIN>"} opens a target (cf.\ \secref{sec:target}) which
   allows to specify class operations @{text "f\<^sub>1, \<dots>, f\<^sub>n"} corresponding
   to sort @{text s} at the particular type instance @{text "(\<alpha>\<^sub>1 :: s\<^sub>1,
@@ -1002,7 +953,7 @@
   this corresponds nicely to mutually recursive type definitions, e.g.\
   in Isabelle/HOL.
 
-  \item @{command "instance"} in an instantiation target body sets
+  \<^descr> @{command "instance"} in an instantiation target body sets
   up a goal stating the type arities claimed at the opening @{command
   "instantiation"}.  The proof would usually proceed by @{method
   intro_classes}, and then establish the characteristic theorems of
@@ -1014,7 +965,7 @@
   need to specify operations: one can continue with the
   instantiation proof immediately.
 
-  \item @{command "subclass"}~@{text c} in a class context for class
+  \<^descr> @{command "subclass"}~@{text c} in a class context for class
   @{text d} sets up a goal stating that class @{text c} is logically
   contained in class @{text d}.  After finishing the proof, class
   @{text d} is proven to be subclass @{text c} and the locale @{text
@@ -1027,10 +978,10 @@
   the logical connection are not sufficient on the locale level but on
   the theory level.
 
-  \item @{command "print_classes"} prints all classes in the current
+  \<^descr> @{command "print_classes"} prints all classes in the current
   theory.
 
-  \item @{command "class_deps"} visualizes classes and their subclass
+  \<^descr> @{command "class_deps"} visualizes classes and their subclass
   relations as a directed acyclic graph. By default, all classes from the
   current theory context are shown. This may be restricted by optional bounds
   as follows: @{command "class_deps"}~@{text upper} or @{command
@@ -1038,14 +989,12 @@
   subclass of some sort from @{text upper} and a superclass of some sort
   from @{text lower}.
 
-  \item @{method intro_classes} repeatedly expands all class
+  \<^descr> @{method intro_classes} repeatedly expands all class
   introduction rules of this theory.  Note that this method usually
   need not be named explicitly, as it is already included in the
   default proof step (e.g.\ of @{command "proof"}).  In particular,
   instantiation of trivial (syntactic) classes may be performed by a
   single ``@{command ".."}'' proof step.
-
-  \end{description}
 \<close>
 
 
@@ -1058,8 +1007,6 @@
   If this locale is also a class @{text c}, apart from the common
   locale target behaviour the following happens.
 
-  \begin{itemize}
-
   \<^item> Local constant declarations @{text "g[\<alpha>]"} referring to the
   local type parameter @{text \<alpha>} and local parameters @{text "f[\<alpha>]"}
   are accompanied by theory-level constants @{text "g[?\<alpha> :: c]"}
@@ -1071,8 +1018,6 @@
   global operations @{text "g[?\<alpha> :: c]"} uniformly.  Type inference
   resolves ambiguities.  In rare cases, manual type annotations are
   needed.
-  
-  \end{itemize}
 \<close>
 
 
@@ -1141,9 +1086,7 @@
     spec: @{syntax name} ( '==' | '\<equiv>' ) @{syntax term} ( '(' @'unchecked' ')' )?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
+  \<^descr> @{command "overloading"}~@{text "x\<^sub>1 \<equiv> c\<^sub>1 :: \<tau>\<^sub>1 \<AND> \<dots> x\<^sub>n \<equiv> c\<^sub>n :: \<tau>\<^sub>n \<BEGIN>"}
   opens a theory target (cf.\ \secref{sec:target}) which allows to
   specify constants with overloaded definitions.  These are identified
   by an explicitly given mapping from variable names @{text "x\<^sub>i"} to
@@ -1158,8 +1101,6 @@
   exotic overloading (see \secref{sec:consts} for a precise description).
   It is at the discretion of the user to avoid
   malformed theory specifications!
-
-  \end{description}
 \<close>
 
 
@@ -1192,9 +1133,7 @@
     @@{command attribute_setup} @{syntax name} '=' @{syntax text} @{syntax text}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "SML_file"}~@{text "name"} reads and evaluates the
+  \<^descr> @{command "SML_file"}~@{text "name"} reads and evaluates the
   given Standard ML file.  Top-level SML bindings are stored within
   the theory context; the initial environment is restricted to the
   Standard ML implementation of Poly/ML, without the many add-ons of
@@ -1202,41 +1141,41 @@
   build larger Standard ML projects, independently of the regular
   Isabelle/ML environment.
 
-  \item @{command "ML_file"}~@{text "name"} reads and evaluates the
+  \<^descr> @{command "ML_file"}~@{text "name"} reads and evaluates the
   given ML file.  The current theory context is passed down to the ML
   toplevel and may be modified, using @{ML "Context.>>"} or derived ML
   commands.  Top-level ML bindings are stored within the (global or
   local) theory context.
   
-  \item @{command "ML"}~@{text "text"} is similar to @{command
+  \<^descr> @{command "ML"}~@{text "text"} is similar to @{command
   "ML_file"}, but evaluates directly the given @{text "text"}.
   Top-level ML bindings are stored within the (global or local) theory
   context.
 
-  \item @{command "ML_prf"} is analogous to @{command "ML"} but works
+  \<^descr> @{command "ML_prf"} is analogous to @{command "ML"} but works
   within a proof context.  Top-level ML bindings are stored within the
   proof context in a purely sequential fashion, disregarding the
   nested proof structure.  ML bindings introduced by @{command
   "ML_prf"} are discarded at the end of the proof.
 
-  \item @{command "ML_val"} and @{command "ML_command"} are diagnostic
+  \<^descr> @{command "ML_val"} and @{command "ML_command"} are diagnostic
   versions of @{command "ML"}, which means that the context may not be
   updated.  @{command "ML_val"} echos the bindings produced at the ML
   toplevel, but @{command "ML_command"} is silent.
   
-  \item @{command "setup"}~@{text "text"} changes the current theory
+  \<^descr> @{command "setup"}~@{text "text"} changes the current theory
   context by applying @{text "text"}, which refers to an ML expression
   of type @{ML_type "theory -> theory"}.  This enables to initialize
   any object-logic specific tools and packages written in ML, for
   example.
 
-  \item @{command "local_setup"} is similar to @{command "setup"} for
+  \<^descr> @{command "local_setup"} is similar to @{command "setup"} for
   a local theory context, and an ML expression of type @{ML_type
   "local_theory -> local_theory"}.  This allows to
   invoke local theory specification packages without going through
   concrete outer syntax, for example.
 
-  \item @{command "attribute_setup"}~@{text "name = text description"}
+  \<^descr> @{command "attribute_setup"}~@{text "name = text description"}
   defines an attribute in the current context.  The given @{text
   "text"} has to be an ML expression of type
   @{ML_type "attribute context_parser"}, cf.\ basic parsers defined in
@@ -1245,8 +1184,6 @@
   In principle, attributes can operate both on a given theorem and the
   implicit context, although in practice only one is modified and the
   other serves as parameter.  Here are examples for these two cases:
-
-  \end{description}
 \<close>
 
 (*<*)experiment begin(*>*)
@@ -1266,18 +1203,16 @@
 (*<*)end(*>*)
 
 text \<open>
-  \begin{description}
-
-  \item @{attribute ML_print_depth} controls the printing depth of the ML
+  \<^descr> @{attribute ML_print_depth} controls the printing depth of the ML
   toplevel pretty printer; the precise effect depends on the ML compiler and
   run-time system. Typically the limit should be less than 10. Bigger values
   such as 100--1000 are occasionally useful for debugging.
 
-  \item @{attribute ML_source_trace} indicates whether the source text that
+  \<^descr> @{attribute ML_source_trace} indicates whether the source text that
   is given to the ML compiler should be output: it shows the raw Standard ML
   after expansion of Isabelle/ML antiquotations.
 
-  \item @{attribute ML_exception_trace} indicates whether the ML run-time
+  \<^descr> @{attribute ML_exception_trace} indicates whether the ML run-time
   system should print a detailed stack trace on exceptions. The result is
   dependent on the particular ML compiler version. Note that after Poly/ML
   5.3 some optimizations in the run-time systems may hinder exception
@@ -1288,8 +1223,6 @@
   Runtime.exn_trace} into ML code for debugging @{cite
   "isabelle-implementation"}, closer to the point where it actually
   happens.
-
-  \end{description}
 \<close>
 
 
@@ -1306,9 +1239,7 @@
     @@{command default_sort} @{syntax sort}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "default_sort"}~@{text s} makes sort @{text s} the
+  \<^descr> @{command "default_sort"}~@{text s} makes sort @{text s} the
   new default sort for any type variable that is given explicitly in
   the text, but lacks a sort constraint (wrt.\ the current context).
   Type variables generated by type inference are not affected.
@@ -1320,8 +1251,6 @@
   When merging theories, the default sorts of the parents are
   logically intersected, i.e.\ the representations as lists of classes
   are joined.
-
-  \end{description}
 \<close>
 
 
@@ -1339,20 +1268,17 @@
     @@{command typedecl} @{syntax typespec} @{syntax mixfix}?
   \<close>}
 
-  \begin{description}
-
-  \item @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"} introduces a
+  \<^descr> @{command "type_synonym"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = \<tau>"} introduces a
   \emph{type synonym} @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} for the existing type @{text
   "\<tau>"}. Unlike the semantic type definitions in Isabelle/HOL, type synonyms
   are merely syntactic abbreviations without any logical significance.
   Internally, type synonyms are fully expanded.
   
-  \item @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
+  \<^descr> @{command "typedecl"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} declares a new
   type constructor @{text t}.  If the object-logic defines a base sort
   @{text s}, then the constructor is declared to operate on that, via
   the axiomatic type-class instance @{text "t :: (s, \<dots>, s)s"}.
 
-  \end{description}
 
   \begin{warn}
   If you introduce a new type axiomatically, i.e.\ via @{command_ref
@@ -1389,8 +1315,6 @@
   The built-in well-formedness conditions for definitional
   specifications are:
 
-  \begin{itemize}
-
   \<^item> Arguments (on the left-hand side) must be distinct variables.
 
   \<^item> All variables on the right-hand side must also appear on the
@@ -1404,7 +1328,6 @@
   provide definitional principles that can be used to express
   recursion safely.
 
-  \end{itemize}
 
   The right-hand side of overloaded definitions may mention overloaded constants
   recursively at type instances corresponding to the immediate
@@ -1422,14 +1345,12 @@
     opt: '(' @'unchecked'? @'overloaded'? ')'
   \<close>}
 
-  \begin{description}
-
-  \item @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
+  \<^descr> @{command "consts"}~@{text "c :: \<sigma>"} declares constant @{text
   c} to have any instance of type scheme @{text \<sigma>}.  The optional
   mixfix annotations may attach concrete syntax to the constants
   declared.
   
-  \item @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
+  \<^descr> @{command "defs"}~@{text "name: eqn"} introduces @{text eqn}
   as a definitional axiom for some existing constant.
   
   The @{text "(unchecked)"} option disables global dependency checks
@@ -1441,8 +1362,6 @@
   potentially overloaded.  Unless this option is given, a warning
   message would be issued for any definitional equation with a more
   special type than that of the corresponding constant declaration.
-  
-  \end{description}
 \<close>
 
 
@@ -1461,21 +1380,17 @@
     @@{command named_theorems} (@{syntax name} @{syntax text}? + @'and')
   \<close>}
 
-  \begin{description}
-  
-  \item @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
+  \<^descr> @{command "lemmas"}~@{text "a = b\<^sub>1 \<dots> b\<^sub>n"}~@{keyword_def
   "for"}~@{text "x\<^sub>1 \<dots> x\<^sub>m"} evaluates given facts (with attributes) in
   the current context, which may be augmented by local variables.
   Results are standardized before being stored, i.e.\ schematic
   variables are renamed to enforce index @{text "0"} uniformly.
 
-  \item @{command "named_theorems"}~@{text "name description"} declares a
+  \<^descr> @{command "named_theorems"}~@{text "name description"} declares a
   dynamic fact within the context. The same @{text name} is used to define
   an attribute with the usual @{text add}/@{text del} syntax (e.g.\ see
   \secref{sec:simp-rules}) to maintain the content incrementally, in
   canonical declaration order of the text structure.
-
-  \end{description}
 \<close>
 
 
@@ -1505,16 +1420,13 @@
     @@{command oracle} @{syntax name} '=' @{syntax text}
   \<close>}
 
-  \begin{description}
-
-  \item @{command "oracle"}~@{text "name = text"} turns the given ML
+  \<^descr> @{command "oracle"}~@{text "name = text"} turns the given ML
   expression @{text "text"} of type @{ML_text "'a -> cterm"} into an
   ML function of type @{ML_text "'a -> thm"}, which is bound to the
   global identifier @{ML_text name}.  This acts like an infinitary
   specification of axioms!  Invoking the oracle only works within the
   scope of the resulting theory.
 
-  \end{description}
 
   See @{file "~~/src/HOL/ex/Iff_Oracle.thy"} for a worked example of
   defining a new primitive rule as oracle, and turning it into a proof
@@ -1543,9 +1455,7 @@
   name spaces by hand, yet the following commands provide some way to
   do so.
 
-  \begin{description}
-
-  \item @{command "hide_class"}~@{text names} fully removes class
+  \<^descr> @{command "hide_class"}~@{text names} fully removes class
   declarations from a given name space; with the @{text "(open)"}
   option, only the unqualified base name is hidden.
 
@@ -1554,11 +1464,9 @@
   longer accessible to the user are printed with the special qualifier
   ``@{text "??"}'' prefixed to the full internal name.
 
-  \item @{command "hide_type"}, @{command "hide_const"}, and @{command
+  \<^descr> @{command "hide_type"}, @{command "hide_const"}, and @{command
   "hide_fact"} are similar to @{command "hide_class"}, but hide types,
   constants, and facts, respectively.
-  
-  \end{description}
 \<close>
 
 end
--- a/src/Doc/Isar_Ref/Synopsis.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/Isar_Ref/Synopsis.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -129,8 +129,6 @@
 subsubsection \<open>Naming conventions\<close>
 
 text \<open>
-  \begin{itemize}
-
   \<^item> Lower-case identifiers are usually preferred.
 
   \<^item> Facts can be named after the main term within the proposition.
@@ -144,8 +142,6 @@
 
   \<^item> Symbolic identifiers are supported (e.g. @{text "*"}, @{text
   "**"}, @{text "***"}).
-
-  \end{itemize}
 \<close>
 
 
@@ -224,8 +220,6 @@
 subsection \<open>Special names in Isar proofs\<close>
 
 text \<open>
-  \begin{itemize}
-
   \<^item> term @{text "?thesis"} --- the main conclusion of the
   innermost pending claim
 
@@ -233,8 +227,6 @@
   stated result (for infix application this is the right-hand side)
 
   \<^item> fact @{text "this"} --- the last result produced in the text
-
-  \end{itemize}
 \<close>
 
 notepad
@@ -313,15 +305,11 @@
 subsubsection \<open>Notes\<close>
 
 text \<open>
-  \begin{itemize}
-
   \<^item> The notion of @{text trans} rule is very general due to the
   flexibility of Isabelle/Pure rule composition.
 
   \<^item> User applications may declare their own rules, with some care
   about the operational details of higher-order unification.
-
-  \end{itemize}
 \<close>
 
 
@@ -391,7 +379,6 @@
   In practice, much more proof infrastructure is required.
 
   The proof method @{method induct} provides:
-  \begin{itemize}
 
   \<^item> implicit rule selection and robust instantiation
 
@@ -399,8 +386,6 @@
 
   \<^item> support for rule-structured induction statements, with local
   parameters, premises, etc.
-
-  \end{itemize}
 \<close>
 
 notepad
@@ -421,7 +406,6 @@
 
 text \<open>
   The subsequent example combines the following proof patterns:
-  \begin{itemize}
 
   \<^item> outermost induction (over the datatype structure of natural
   numbers), to decompose the proof problem in top-down manner
@@ -430,8 +414,6 @@
   to compose the result in each case
 
   \<^item> solving local claims within the calculation by simplification
-
-  \end{itemize}
 \<close>
 
 lemma
@@ -682,16 +664,14 @@
 text \<open>
   The Pure framework provides means for:
 
-  \begin{itemize}
-
   \<^item> backward-chaining of rules by @{inference resolution}
 
   \<^item> closing of branches by @{inference assumption}
 
-  \end{itemize}
 
   Both principles involve higher-order unification of @{text \<lambda>}-terms
-  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).\<close>
+  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller).
+\<close>
 
 notepad
 begin
@@ -972,14 +952,10 @@
   Combining these characteristics leads to the following general scheme
   for elimination rules with cases:
 
-  \begin{itemize}
-
   \<^item> prefix of assumptions (or ``major premises'')
 
   \<^item> one or more cases that enable to establish the main conclusion
   in an augmented context
-
-  \end{itemize}
 \<close>
 
 notepad
@@ -1015,16 +991,12 @@
 
   Isar provides some infrastructure to support this:
 
-  \begin{itemize}
-
   \<^item> native language elements to state eliminations
 
   \<^item> symbolic case names
 
   \<^item> method @{method cases} to recover this structure in a
   sub-proof
-
-  \end{itemize}
 \<close>
 
 print_statement exE
--- a/src/Doc/JEdit/JEdit.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/JEdit/JEdit.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -18,41 +18,38 @@
   components are fit together in order to make this work. The main building
   blocks are as follows.
 
-  \begin{description}
-
-  \item [PIDE] is a general framework for Prover IDEs based on Isabelle/Scala.
+  \<^descr>[PIDE] is a general framework for Prover IDEs based on Isabelle/Scala.
   It is built around a concept of parallel and asynchronous document
   processing, which is supported natively by the parallel proof engine that is
   implemented in Isabelle/ML. The traditional prover command loop is given up;
   instead there is direct support for editing of source text, with rich formal
   markup for GUI rendering.
 
-  \item [Isabelle/ML] is the implementation and extension language of
+  \<^descr>[Isabelle/ML] is the implementation and extension language of
   Isabelle, see also @{cite "isabelle-implementation"}. It is integrated
   into the logical context of Isabelle/Isar and allows to manipulate
   logical entities directly. Arbitrary add-on tools may be implemented
   for object-logics such as Isabelle/HOL.
 
-  \item [Isabelle/Scala] is the system programming language of
+  \<^descr>[Isabelle/Scala] is the system programming language of
   Isabelle. It extends the pure logical environment of Isabelle/ML
   towards the ``real world'' of graphical user interfaces, text
   editors, IDE frameworks, web services etc.  Special infrastructure
   allows to transfer algebraic datatypes and formatted text easily
   between ML and Scala, using asynchronous protocol commands.
 
-  \item [jEdit] is a sophisticated text editor implemented in
+  \<^descr>[jEdit] is a sophisticated text editor implemented in
   Java.\footnote{@{url "http://www.jedit.org"}} It is easily extensible
   by plugins written in languages that work on the JVM, e.g.\
   Scala\footnote{@{url "http://www.scala-lang.org"}}.
 
-  \item [Isabelle/jEdit] is the main example application of the PIDE
+  \<^descr>[Isabelle/jEdit] is the main example application of the PIDE
   framework and the default user-interface for Isabelle. It targets
   both beginners and experts. Technically, Isabelle/jEdit combines a
   slightly modified version of the jEdit code base with a special
   plugin for Isabelle, integrated as standalone application for the
   main operating system platforms: Linux, Windows, Mac OS X.
 
-  \end{description}
 
   The subtle differences of Isabelle/ML versus Standard ML,
   Isabelle/Scala versus Scala, Isabelle/jEdit versus jEdit need to be
@@ -286,9 +283,7 @@
   Isabelle/jEdit enables platform-specific look-and-feel by default as
   follows.
 
-  \begin{description}
-
-  \item[Linux:] The platform-independent \emph{Nimbus} is used by
+  \<^descr>[Linux:] The platform-independent \emph{Nimbus} is used by
   default.
 
   \emph{GTK+} also works under the side-condition that the overall GTK theme
@@ -300,10 +295,10 @@
   ``4K'' or ``UHD'' models), because the rendering by the external library is
   subject to global system settings for font scaling.}
 
-  \item[Windows:] Regular \emph{Windows} is used by default, but
+  \<^descr>[Windows:] Regular \emph{Windows} is used by default, but
   \emph{Windows Classic} also works.
 
-  \item[Mac OS X:] Regular \emph{Mac OS X} is used by default.
+  \<^descr>[Mac OS X:] Regular \emph{Mac OS X} is used by default.
 
   The bundled \emph{MacOSX} plugin provides various functions that are
   expected from applications on that particular platform: quit from menu or
@@ -311,7 +306,6 @@
   full-screen mode for main editor windows. It is advisable to have the
   \emph{MacOSX} plugin enabled all the time on that platform.
 
-  \end{description}
 
   Users may experiment with different look-and-feels, but need to keep
   in mind that this extra variance of GUI functionality is unlikely to
@@ -349,8 +343,6 @@
   The Isabelle/jEdit \textbf{application} and its plugins provide
   various font properties that are summarized below.
 
-  \begin{itemize}
-
   \<^item> \emph{Global Options / Text Area / Text font}: the main text area
   font, which is also used as reference point for various derived font sizes,
   e.g.\ the Output panel (\secref{sec:output}).
@@ -372,7 +364,6 @@
   \<^item> \emph{Plugin Options / Console / General / Font}: the console window
   font, e.g.\ relevant for Isabelle/Scala command-line.
 
-  \end{itemize}
 
   In \figref{fig:isabelle-jedit-hdpi} the \emph{Metal} look-and-feel is
   configured with custom fonts at 30 pixels, and the main text area and
@@ -419,8 +410,6 @@
   Compared to plain jEdit, dockable window management in Isabelle/jEdit is
   slightly augmented according to the following principles:
 
-  \begin{itemize}
-
   \<^item> Floating windows are dependent on the main window as \emph{dialog} in
   the sense of Java/AWT/Swing. Dialog windows always stay on top of the view,
   which is particularly important in full-screen mode. The desktop environment
@@ -441,8 +430,6 @@
   independently of ongoing changes of the PIDE document-model. Note that
   Isabelle/jEdit popup windows (\secref{sec:tooltips-hyperlinks}) provide a
   similar \emph{Detach} operation as an icon.
-
-  \end{itemize}
 \<close>
 
 
@@ -518,8 +505,6 @@
   This is a summary for practically relevant input methods for Isabelle
   symbols.
 
-  \begin{enumerate}
-
   \<^enum> The \emph{Symbols} panel: some GUI buttons allow to insert
   certain symbols in the text buffer.  There are also tooltips to
   reveal the official Isabelle representation with some additional
@@ -579,7 +564,6 @@
   explicit completion (see also @{verbatim "C+b"} explained in
   \secref{sec:completion}).
 
-  \end{enumerate}
 
   \paragraph{Control symbols.} There are some special control symbols
   to modify the display style of a single symbol (without
@@ -753,8 +737,6 @@
   \emph{document nodes}. The overall document structure is defined by the
   theory nodes in two dimensions:
 
-  \begin{enumerate}
-
   \<^enum> via \textbf{theory imports} that are specified in the \emph{theory
   header} using concrete syntax of the @{command_ref theory} command
   @{cite "isabelle-isar-ref"};
@@ -763,7 +745,6 @@
   \emph{load commands}, notably @{command_ref ML_file} and @{command_ref
   SML_file} @{cite "isabelle-isar-ref"}.
 
-  \end{enumerate}
 
   In any case, source files are managed by the PIDE infrastructure: the
   physical file-system only plays a subordinate role. The relevant version of
@@ -985,7 +966,6 @@
 
   \<^medskip>
   The following GUI elements are common to all query modes:
-  \begin{itemize}
 
   \<^item> The spinning wheel provides feedback about the status of a pending
   query wrt.\ the evaluation of its context and its own operation.
@@ -1001,7 +981,6 @@
 
   \<^item> The \emph{Zoom} box controls the font size of the output area.
 
-  \end{itemize}
 
   All query operations are asynchronous: there is no need to wait for the
   evaluation of the document for the query context, nor for the query
@@ -1154,22 +1133,19 @@
   kinds and purposes. The completion mechanism supports this by the following
   built-in templates:
 
-  \begin{description}
-
-  \item[] @{verbatim "`"} (single ASCII back-quote) supports \emph{quotations}
+  \<^descr> @{verbatim "`"} (single ASCII back-quote) supports \emph{quotations}
   via text cartouches. There are three selections, which are always presented
   in the same order and do not depend on any context information. The default
   choice produces a template ``@{text "\<open>\<box>\<close>"}'', where the box indicates the
   cursor position after insertion; the other choices help to repair the block
   structure of unbalanced text cartouches.
 
-  \item[] @{verbatim "@{"} is completed to the template ``@{text "@{\<box>}"}'',
+  \<^descr> @{verbatim "@{"} is completed to the template ``@{text "@{\<box>}"}'',
   where the box indicates the cursor position after insertion. Here it is
   convenient to use the wildcard ``@{verbatim __}'' or a more specific name
   prefix to let semantic completion of name-space entries propose
   antiquotation names.
 
-  \end{description}
 
   With some practice, input of quoted sub-languages and antiquotations of
   embedded languages should work fluently. Note that national keyboard layouts
@@ -1342,48 +1318,40 @@
   optional delay after keyboard input according to @{system_option
   jedit_completion_delay}.
 
-  \begin{description}
-
-  \item[Explicit completion] works via action @{action_ref
+  \<^descr>[Explicit completion] works via action @{action_ref
   "isabelle.complete"} with keyboard shortcut @{verbatim "C+b"}. This
   overrides the shortcut for @{action_ref "complete-word"} in jEdit, but it is
   possible to restore the original jEdit keyboard mapping of @{action
   "complete-word"} via \emph{Global Options~/ Shortcuts} and invent a
   different one for @{action "isabelle.complete"}.
 
-  \item[Explicit spell-checker completion] works via @{action_ref
+  \<^descr>[Explicit spell-checker completion] works via @{action_ref
   "isabelle.complete-word"}, which is exposed in the jEdit context menu, if
   the mouse points to a word that the spell-checker can complete.
 
-  \item[Implicit completion] works via regular keyboard input of the editor.
+  \<^descr>[Implicit completion] works via regular keyboard input of the editor.
   It depends on further side-conditions:
 
-  \begin{enumerate}
-
-  \<^enum> The system option @{system_option_ref jedit_completion} needs to
-  be enabled (default).
+    \<^enum> The system option @{system_option_ref jedit_completion} needs to
+    be enabled (default).
 
-  \<^enum> Completion of syntax keywords requires at least 3 relevant
-  characters in the text.
-
-  \<^enum> The system option @{system_option_ref jedit_completion_delay}
-  determines an additional delay (0.5 by default), before opening a completion
-  popup.  The delay gives the prover a chance to provide semantic completion
-  information, notably the context (\secref{sec:completion-context}).
+    \<^enum> Completion of syntax keywords requires at least 3 relevant
+    characters in the text.
 
-  \<^enum> The system option @{system_option_ref jedit_completion_immediate}
-  (enabled by default) controls whether replacement text should be inserted
-  immediately without popup, regardless of @{system_option
-  jedit_completion_delay}. This aggressive mode of completion is restricted to
-  Isabelle symbols and their abbreviations (\secref{sec:symbols}).
+    \<^enum> The system option @{system_option_ref jedit_completion_delay}
+    determines an additional delay (0.5 by default), before opening a completion
+    popup.  The delay gives the prover a chance to provide semantic completion
+    information, notably the context (\secref{sec:completion-context}).
 
-  \<^enum> Completion of symbol abbreviations with only one relevant
-  character in the text always enforces an explicit popup,
-  regardless of @{system_option_ref jedit_completion_immediate}.
+    \<^enum> The system option @{system_option_ref jedit_completion_immediate}
+    (enabled by default) controls whether replacement text should be inserted
+    immediately without popup, regardless of @{system_option
+    jedit_completion_delay}. This aggressive mode of completion is restricted to
+    Isabelle symbols and their abbreviations (\secref{sec:symbols}).
 
-  \end{enumerate}
-
-  \end{description}
+    \<^enum> Completion of symbol abbreviations with only one relevant
+    character in the text always enforces an explicit popup,
+    regardless of @{system_option_ref jedit_completion_immediate}.
 \<close>
 
 
@@ -1434,21 +1402,18 @@
   all combinations make sense. At least the following important cases are
   well-defined:
 
-  \begin{description}
-
-  \item[No selection.] The original is removed and the replacement inserted,
+  \<^descr>[No selection.] The original is removed and the replacement inserted,
   depending on the caret position.
 
-  \item[Rectangular selection of zero width.] This special case is treated by
+  \<^descr>[Rectangular selection of zero width.] This special case is treated by
   jEdit as ``tall caret'' and insertion of completion imitates its normal
   behaviour: separate copies of the replacement are inserted for each line of
   the selection.
 
-  \item[Other rectangular selection or multiple selections.] Here the original
+  \<^descr>[Other rectangular selection or multiple selections.] Here the original
   is removed and the replacement is inserted for each line (or segment) of the
   selection.
 
-  \end{description}
 
   Support for multiple selections is particularly useful for
   \emph{HyperSearch}: clicking on one of the items in the \emph{HyperSearch
@@ -1473,8 +1438,6 @@
   completion. They may be configured in \emph{Plugin Options~/ Isabelle~/
   General} as usual.
 
-  \begin{itemize}
-
   \<^item> @{system_option_def completion_limit} specifies the maximum number of
   items for various semantic completion operations (name-space entries etc.)
 
@@ -1520,8 +1483,6 @@
   \<^item> @{system_option_def spell_checker_elements} specifies a
   comma-separated list of markup elements that delimit words in the source
   that is subject to spell-checking, including various forms of comments.
-
-  \end{itemize}
 \<close>
 
 
@@ -1561,8 +1522,6 @@
   \emph{Plugin Options~/ Isabelle~/ General~/ Automatically tried
   tools}):
 
-  \begin{itemize}
-
   \<^item> @{system_option_ref auto_methods} controls automatic use of a
   combination of standard proof methods (@{method auto}, @{method
   simp}, @{method blast}, etc.).  This corresponds to the Isar command
@@ -1603,13 +1562,10 @@
 
   This tool is \emph{enabled} by default.
 
-  \end{itemize}
 
   Invocation of automatically tried tools is subject to some global
   policies of parallel execution, which may be configured as follows:
 
-  \begin{itemize}
-
   \<^item> @{system_option_ref auto_time_limit} (default 2.0) determines the
   timeout (in seconds) for each tool execution.
 
@@ -1617,7 +1573,6 @@
   start delay (in seconds) for automatically tried tools, after the
   main command evaluation is finished.
 
-  \end{itemize}
 
   Each tool is submitted independently to the pool of parallel
   execution tasks in Isabelle/ML, using hardwired priorities according
@@ -1782,8 +1737,6 @@
   Beyond this, it is occasionally useful to inspect low-level output
   channels via some of the following additional panels:
 
-  \begin{itemize}
-
   \<^item> \emph{Protocol} shows internal messages between the
   Isabelle/Scala and Isabelle/ML side of the PIDE document editing protocol.
   Recording of messages starts with the first activation of the
@@ -1823,16 +1776,12 @@
 
   Under normal situations, such low-level system output can be
   ignored.
-
-  \end{itemize}
 \<close>
 
 
 chapter \<open>Known problems and workarounds \label{sec:problems}\<close>
 
 text \<open>
-  \begin{itemize}
-
   \<^item> \textbf{Problem:} Odd behavior of some diagnostic commands with
   global side-effects, like writing a physical file.
 
@@ -1899,8 +1848,6 @@
 
   \textbf{Workaround:} Use native full-screen control of the window
   manager (notably on Mac OS X).
-
-  \end{itemize}
 \<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Basics.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/System/Basics.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -16,8 +16,6 @@
   The Isabelle system environment provides the following
   basic infrastructure to integrate tools smoothly.
 
-  \begin{enumerate}
-
   \<^enum> The \emph{Isabelle settings} mechanism provides process
   environment variables to all Isabelle executables (including tools
   and user interfaces).
@@ -32,8 +30,6 @@
   isabelle}) provides a generic startup environment Isabelle related
   utilities, user interfaces etc.  Such tools automatically benefit
   from the settings mechanism.
-
-  \end{enumerate}
 \<close>
 
 
@@ -72,8 +68,6 @@
   process tree, i.e.\ the environment is passed to subprocesses
   according to regular Unix conventions.
 
-  \begin{enumerate}
-
   \<^enum> The special variable @{setting_def ISABELLE_HOME} is
   determined automatically from the location of the binary that has
   been run.
@@ -106,8 +100,7 @@
   Typically, a user settings file contains only a few lines, with some
   assignments that are actually changed.  Never copy the central
   @{file "$ISABELLE_HOME/etc/settings"} file!
-  
-  \end{enumerate}
+
 
   Since settings files are regular GNU @{executable_def bash} scripts,
   one may use complex shell commands, such as @{verbatim "if"} or
@@ -120,8 +113,6 @@
   \<^medskip>
   A few variables are somewhat special:
 
-  \begin{itemize}
-
   \<^item> @{setting_def ISABELLE_PROCESS} and @{setting_def ISABELLE_TOOL} are set
   automatically to the absolute path names of the @{executable
   "isabelle_process"} and @{executable isabelle} executables,
@@ -132,7 +123,6 @@
   the ML system (cf.\ @{setting ML_IDENTIFIER}) appended automatically
   to its value.
 
-  \end{itemize}
 
   \<^medskip>
   Note that the settings environment may be inspected with
@@ -148,9 +138,7 @@
   may add their own selection. Variables that are special in some
   sense are marked with @{text "\<^sup>*"}.
 
-  \begin{description}
-
-  \item[@{setting_def USER_HOME}@{text "\<^sup>*"}] Is the cross-platform
+  \<^descr>[@{setting_def USER_HOME}@{text "\<^sup>*"}] Is the cross-platform
   user home directory.  On Unix systems this is usually the same as
   @{setting HOME}, but on Windows it is the regular home directory of
   the user, not the one of within the Cygwin root
@@ -158,12 +146,12 @@
   its HOME should point to the @{file_unchecked "/home"} directory tree or the
   Windows user home.}
 
-  \item[@{setting_def ISABELLE_HOME}@{text "\<^sup>*"}] is the location of the
+  \<^descr>[@{setting_def ISABELLE_HOME}@{text "\<^sup>*"}] is the location of the
   top-level Isabelle distribution directory. This is automatically
   determined from the Isabelle executable that has been invoked.  Do
   not attempt to set @{setting ISABELLE_HOME} yourself from the shell!
   
-  \item[@{setting_def ISABELLE_HOME_USER}] is the user-specific
+  \<^descr>[@{setting_def ISABELLE_HOME_USER}] is the user-specific
   counterpart of @{setting ISABELLE_HOME}. The default value is
   relative to @{file_unchecked "$USER_HOME/.isabelle"}, under rare
   circumstances this may be changed in the global setting file.
@@ -172,21 +160,21 @@
   defaults may be overridden by a private @{verbatim
   "$ISABELLE_HOME_USER/etc/settings"}.
 
-  \item[@{setting_def ISABELLE_PLATFORM_FAMILY}@{text "\<^sup>*"}] is
+  \<^descr>[@{setting_def ISABELLE_PLATFORM_FAMILY}@{text "\<^sup>*"}] is
   automatically set to the general platform family: @{verbatim linux},
   @{verbatim macos}, @{verbatim windows}.  Note that
   platform-dependent tools usually need to refer to the more specific
   identification according to @{setting ISABELLE_PLATFORM}, @{setting
   ISABELLE_PLATFORM32}, @{setting ISABELLE_PLATFORM64}.
 
-  \item[@{setting_def ISABELLE_PLATFORM}@{text "\<^sup>*"}] is automatically
+  \<^descr>[@{setting_def ISABELLE_PLATFORM}@{text "\<^sup>*"}] is automatically
   set to a symbolic identifier for the underlying hardware and
   operating system.  The Isabelle platform identification always
   refers to the 32 bit variant, even this is a 64 bit machine.  Note
   that the ML or Java runtime may have a different idea, depending on
   which binaries are actually run.
 
-  \item[@{setting_def ISABELLE_PLATFORM64}@{text "\<^sup>*"}] is similar to
+  \<^descr>[@{setting_def ISABELLE_PLATFORM64}@{text "\<^sup>*"}] is similar to
   @{setting ISABELLE_PLATFORM} but refers to the proper 64 bit variant
   on a platform that supports this; the value is empty for 32 bit.
   Note that the following bash expression (including the quotes)
@@ -194,18 +182,18 @@
 
   @{verbatim [display] \<open>"${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM}"\<close>}
 
-  \item[@{setting_def ISABELLE_PROCESS}@{text "\<^sup>*"}, @{setting
+  \<^descr>[@{setting_def ISABELLE_PROCESS}@{text "\<^sup>*"}, @{setting
   ISABELLE_TOOL}@{text "\<^sup>*"}] are automatically set to the full path
   names of the @{executable "isabelle_process"} and @{executable
   isabelle} executables, respectively.  Thus other tools and scripts
   need not assume that the @{file "$ISABELLE_HOME/bin"} directory is
   on the current search path of the shell.
   
-  \item[@{setting_def ISABELLE_IDENTIFIER}@{text "\<^sup>*"}] refers
+  \<^descr>[@{setting_def ISABELLE_IDENTIFIER}@{text "\<^sup>*"}] refers
   to the name of this Isabelle distribution, e.g.\ ``@{verbatim
   Isabelle2012}''.
 
-  \item[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME},
+  \<^descr>[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME},
   @{setting_def ML_OPTIONS}, @{setting_def ML_PLATFORM}, @{setting_def
   ML_IDENTIFIER}@{text "\<^sup>*"}] specify the underlying ML system
   to be used for Isabelle.  There is only a fixed set of admissable
@@ -220,64 +208,62 @@
   automatically obtained by composing the values of @{setting
   ML_SYSTEM}, @{setting ML_PLATFORM} and the Isabelle version values.
 
-  \item[@{setting_def ML_SYSTEM_POLYML}@{text "\<^sup>*"}] is @{verbatim true}
+  \<^descr>[@{setting_def ML_SYSTEM_POLYML}@{text "\<^sup>*"}] is @{verbatim true}
   for @{setting ML_SYSTEM} values derived from Poly/ML, as opposed to
   SML/NJ where it is empty.  This is particularly useful with the
   build option @{system_option condition}
   (\secref{sec:system-options}) to restrict big sessions to something
   that SML/NJ can still handle.
 
-  \item[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK
+  \<^descr>[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK
   (Java Development Kit) installation with @{verbatim javac} and
   @{verbatim jar} executables.  This is essential for Isabelle/Scala
   and other JVM-based tools to work properly.  Note that conventional
   @{verbatim JAVA_HOME} usually points to the JRE (Java Runtime
   Environment), not JDK.
   
-  \item[@{setting_def ISABELLE_PATH}] is a list of directories
+  \<^descr>[@{setting_def ISABELLE_PATH}] is a list of directories
   (separated by colons) where Isabelle logic images may reside.  When
   looking up heaps files, the value of @{setting ML_IDENTIFIER} is
   appended to each component internally.
   
-  \item[@{setting_def ISABELLE_OUTPUT}@{text "\<^sup>*"}] is a
+  \<^descr>[@{setting_def ISABELLE_OUTPUT}@{text "\<^sup>*"}] is a
   directory where output heap files should be stored by default. The
   ML system and Isabelle version identifier is appended here, too.
   
-  \item[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where
+  \<^descr>[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where
   theory browser information (HTML text, graph data, and printable
   documents) is stored (see also \secref{sec:info}).  The default
   value is @{file_unchecked "$ISABELLE_HOME_USER/browser_info"}.
   
-  \item[@{setting_def ISABELLE_LOGIC}] specifies the default logic to
+  \<^descr>[@{setting_def ISABELLE_LOGIC}] specifies the default logic to
   load if none is given explicitely by the user.  The default value is
   @{verbatim HOL}.
   
-  \item[@{setting_def ISABELLE_LINE_EDITOR}] specifies the
+  \<^descr>[@{setting_def ISABELLE_LINE_EDITOR}] specifies the
   line editor for the @{tool_ref console} interface.
 
-  \item[@{setting_def ISABELLE_LATEX}, @{setting_def
+  \<^descr>[@{setting_def ISABELLE_LATEX}, @{setting_def
   ISABELLE_PDFLATEX}, @{setting_def ISABELLE_BIBTEX}] refer to {\LaTeX}
   related tools for Isabelle document preparation (see also
   \secref{sec:tool-latex}).
   
-  \item[@{setting_def ISABELLE_TOOLS}] is a colon separated list of
+  \<^descr>[@{setting_def ISABELLE_TOOLS}] is a colon separated list of
   directories that are scanned by @{executable isabelle} for external
   utility programs (see also \secref{sec:isabelle-tool}).
   
-  \item[@{setting_def ISABELLE_DOCS}] is a colon separated list of
+  \<^descr>[@{setting_def ISABELLE_DOCS}] is a colon separated list of
   directories with documentation files.
 
-  \item[@{setting_def PDF_VIEWER}] specifies the program to be used
+  \<^descr>[@{setting_def PDF_VIEWER}] specifies the program to be used
   for displaying @{verbatim pdf} files.
 
-  \item[@{setting_def DVI_VIEWER}] specifies the program to be used
+  \<^descr>[@{setting_def DVI_VIEWER}] specifies the program to be used
   for displaying @{verbatim dvi} files.
   
-  \item[@{setting_def ISABELLE_TMP_PREFIX}@{text "\<^sup>*"}] is the
+  \<^descr>[@{setting_def ISABELLE_TMP_PREFIX}@{text "\<^sup>*"}] is the
   prefix from which any running @{executable "isabelle_process"}
   derives an individual directory for temporary files.
-  
-  \end{description}
 \<close>
 
 
@@ -288,8 +274,6 @@
   Isabelle distribution itself, and the following two files (both
   optional) have a special meaning:
 
-  \begin{itemize}
-
   \<^item> @{verbatim "etc/settings"} holds additional settings that are
   initialized when bootstrapping the overall Isabelle environment,
   cf.\ \secref{sec:boot}.  As usual, the content is interpreted as a
@@ -311,7 +295,6 @@
   given here can be either absolute (with leading @{verbatim "/"}) or
   relative to the component's main directory.
 
-  \end{itemize}
 
   The root of component initialization is @{setting ISABELLE_HOME}
   itself.  After initializing all of its sub-components recursively,
--- a/src/Doc/System/Misc.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/System/Misc.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -75,8 +75,6 @@
   We describe the usage of the directory browser and the meaning of
   the different items in the browser window.
 
-  \begin{itemize}
-
   \<^item> A red arrow before a directory name indicates that the
   directory is currently ``folded'', i.e.~the nodes in this directory
   are collapsed to one single node. In the right sub-window, the names
@@ -95,8 +93,6 @@
   focuses to the corresponding node. Double clicking invokes a text
   viewer window in which the contents of the theory file are
   displayed.
-
-  \end{itemize}
 \<close>
 
 
@@ -124,21 +120,17 @@
   in the full application version. The meaning of the menu items is as
   follows:
 
-  \begin{description}
+  \<^descr>[Open \dots] Open a new graph file.
 
-  \item[Open \dots] Open a new graph file.
-
-  \item[Export to PostScript] Outputs the current graph in Postscript
+  \<^descr>[Export to PostScript] Outputs the current graph in Postscript
   format, appropriately scaled to fit on one single sheet of A4 paper.
   The resulting file can be printed directly.
 
-  \item[Export to EPS] Outputs the current graph in Encapsulated
+  \<^descr>[Export to EPS] Outputs the current graph in Encapsulated
   Postscript format. The resulting file can be included in other
   documents.
 
-  \item[Quit] Quit the graph browser.
-
-  \end{description}
+  \<^descr>[Quit] Quit the graph browser.
 \<close>
 
 
@@ -156,29 +148,25 @@
 
   The meaning of the items in a vertex description is as follows:
 
-  \begin{description}
+  \<^descr>[@{text vertex_name}] The name of the vertex.
 
-  \item[@{text vertex_name}] The name of the vertex.
-
-  \item[@{text vertex_ID}] The vertex identifier. Note that there may
+  \<^descr>[@{text vertex_ID}] The vertex identifier. Note that there may
   be several vertices with equal names, whereas identifiers must be
   unique.
 
-  \item[@{text dir_name}] The name of the ``directory'' the vertex
+  \<^descr>[@{text dir_name}] The name of the ``directory'' the vertex
   should be placed in.  A ``@{verbatim "+"}'' sign after @{text
   dir_name} indicates that the nodes in the directory are initially
   visible. Directories are initially invisible by default.
 
-  \item[@{text path}] The path of the corresponding theory file. This
+  \<^descr>[@{text path}] The path of the corresponding theory file. This
   is specified relatively to the path of the graph definition file.
 
-  \item[List of successor/predecessor nodes] A ``@{verbatim "<"}''
+  \<^descr>[List of successor/predecessor nodes] A ``@{verbatim "<"}''
   sign before the list means that successor nodes are listed, a
   ``@{verbatim ">"}'' sign means that predecessor nodes are listed. If
   neither ``@{verbatim "<"}'' nor ``@{verbatim ">"}'' is found, the
   browser assumes that successor nodes are listed.
-
-  \end{description}
 \<close>
 
 
@@ -449,8 +437,6 @@
   to the much simpler and more efficient YXML format of Isabelle
   (stdout).  The YXML format is defined as follows.
 
-  \begin{enumerate}
-
   \<^enum> The encoding is always UTF-8.
 
   \<^enum> Body text is represented verbatim (no escaping, no special
@@ -472,7 +458,6 @@
   @{text "\<^bold>X"} and @{text "\<^bold>Y"} may never occur in
   well-formed XML documents.
 
-  \end{enumerate}
 
   Parsing YXML is pretty straight-forward: split the text into chunks
   separated by @{text "\<^bold>X"}, then split each chunk into
--- a/src/Doc/System/Sessions.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/System/Sessions.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -77,9 +77,7 @@
     document_files: @'document_files' ('(' dir ')')? (@{syntax name}+)
   \<close>}
 
-  \begin{description}
-
-  \item \isakeyword{session}~@{text "A = B + body"} defines a new
+  \<^descr> \isakeyword{session}~@{text "A = B + body"} defines a new
   session @{text "A"} based on parent session @{text "B"}, with its
   content given in @{text body} (theories and auxiliary source files).
   Note that a parent (like @{text "HOL"}) is mandatory in practical
@@ -90,7 +88,7 @@
   @{text "A"} should be sufficiently long and descriptive to stand on
   its own in a potentially large library.
 
-  \item \isakeyword{session}~@{text "A (groups)"} indicates a
+  \<^descr> \isakeyword{session}~@{text "A (groups)"} indicates a
   collection of groups where the new session is a member.  Group names
   are uninterpreted and merely follow certain conventions.  For
   example, the Isabelle distribution tags some important sessions by
@@ -98,7 +96,7 @@
   their own conventions, but this requires some care to avoid clashes
   within this unchecked name space.
 
-  \item \isakeyword{session}~@{text "A"}~\isakeyword{in}~@{text "dir"}
+  \<^descr> \isakeyword{session}~@{text "A"}~\isakeyword{in}~@{text "dir"}
   specifies an explicit directory for this session; by default this is
   the current directory of the @{verbatim ROOT} file.
 
@@ -106,30 +104,30 @@
   the session directory.  The prover process is run within the same as
   its current working directory.
 
-  \item \isakeyword{description}~@{text "text"} is a free-form
+  \<^descr> \isakeyword{description}~@{text "text"} is a free-form
   annotation for this session.
 
-  \item \isakeyword{options}~@{text "[x = a, y = b, z]"} defines
+  \<^descr> \isakeyword{options}~@{text "[x = a, y = b, z]"} defines
   separate options (\secref{sec:system-options}) that are used when
   processing this session, but \emph{without} propagation to child
   sessions.  Note that @{text "z"} abbreviates @{text "z = true"} for
   Boolean options.
 
-  \item \isakeyword{theories}~@{text "options names"} specifies a
+  \<^descr> \isakeyword{theories}~@{text "options names"} specifies a
   block of theories that are processed within an environment that is
   augmented by the given options, in addition to the global session
   options given before.  Any number of blocks of \isakeyword{theories}
   may be given.  Options are only active for each
   \isakeyword{theories} block separately.
 
-  \item \isakeyword{files}~@{text "files"} lists additional source
+  \<^descr> \isakeyword{files}~@{text "files"} lists additional source
   files that are involved in the processing of this session.  This
   should cover anything outside the formal content of the theory
   sources.  In contrast, files that are loaded formally
   within a theory, e.g.\ via @{command "ML_file"}, need not be
   declared again.
 
-  \item \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
+  \<^descr> \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
   "base_dir) files"} lists source files for document preparation,
   typically @{verbatim ".tex"} and @{verbatim ".sty"} for {\LaTeX}.
   Only these explicitly given files are copied from the base directory
@@ -138,12 +136,10 @@
   structure of the @{text files} is preserved, which allows to
   reconstruct the original directory hierarchy of @{text "base_dir"}.
 
-  \item \isakeyword{document_files}~@{text "files"} abbreviates
+  \<^descr> \isakeyword{document_files}~@{text "files"} abbreviates
   \isakeyword{document_files}~@{text "("}\isakeyword{in}~@{text
   "document) files"}, i.e.\ document sources are taken from the base
   directory @{verbatim document} within the session root directory.
-
-  \end{description}
 \<close>
 
 
@@ -169,8 +165,6 @@
   sessions, in particular with document preparation
   (\chref{ch:present}).
 
-  \begin{itemize}
-
   \<^item> @{system_option_def "browser_info"} controls output of HTML
   browser info, see also \secref{sec:info}.
 
@@ -229,7 +223,6 @@
   processes that get out of control, even if there is a deadlock
   without CPU time usage.
 
-  \end{itemize}
 
   The @{tool_def options} tool prints Isabelle system options.  Its
   command-line usage is:
--- a/src/Doc/antiquote_setup.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Doc/antiquote_setup.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -132,7 +132,7 @@
                   (Pretty.indent (Config.get ctxt Thy_Output.indent) p)) ^
               "\\rulename{" ^ Output.output (Pretty.str_of (Thy_Output.pretty_text ctxt name)) ^ "}")
             #> space_implode "\\par\\smallskip%\n"
-            #> enclose "\\begin{isabelle}%\n" "%\n\\end{isabelle}"
+            #> Latex.environment "isabelle"
           else
             map (fn (p, name) =>
               Output.output (Pretty.str_of p) ^
--- a/src/HOL/ex/Cartouche_Examples.thy	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/HOL/ex/Cartouche_Examples.thy	Sat Oct 17 21:42:18 2015 +0200
@@ -179,7 +179,8 @@
 ML \<open>
   Outer_Syntax.command
     @{command_keyword text_cartouche} ""
-    (Parse.opt_target -- Parse.input Parse.cartouche >> Thy_Output.document_command)
+    (Parse.opt_target -- Parse.input Parse.cartouche
+      >> Thy_Output.document_command {markdown = true})
 \<close>
 
 text_cartouche
--- a/src/Pure/General/antiquote.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/General/antiquote.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -8,12 +8,14 @@
 sig
   type antiq = Symbol_Pos.T list * {start: Position.T, stop: Position.T, range: Position.range}
   datatype 'a antiquote = Text of 'a | Antiq of antiq
-  val antiq_reports: antiq -> Position.report list
-  val antiquote_reports: ('a -> Position.report_text list) ->
-    'a antiquote list -> Position.report_text list
+  type text_antiquote = Symbol_Pos.T list antiquote
+  val range: text_antiquote list -> Position.range
+  val split_lines: text_antiquote list -> text_antiquote list list
+  val antiq_reports: 'a antiquote list -> Position.report list
   val scan_antiq: Symbol_Pos.T list -> antiq * Symbol_Pos.T list
-  val scan_antiquote: Symbol_Pos.T list -> Symbol_Pos.T list antiquote * Symbol_Pos.T list
-  val read: Input.source -> Symbol_Pos.T list antiquote list
+  val scan_antiquote: Symbol_Pos.T list -> text_antiquote * Symbol_Pos.T list
+  val read': Position.T -> Symbol_Pos.T list -> text_antiquote list
+  val read: Input.source -> text_antiquote list
 end;
 
 structure Antiquote: ANTIQUOTE =
@@ -24,15 +26,41 @@
 type antiq = Symbol_Pos.T list * {start: Position.T, stop: Position.T, range: Position.range};
 datatype 'a antiquote = Text of 'a | Antiq of antiq;
 
+type text_antiquote = Symbol_Pos.T list antiquote;
+
+fun antiquote_range (Text ss) = Symbol_Pos.range ss
+  | antiquote_range (Antiq (_, {range, ...})) = range;
+
+fun range ants =
+  if null ants then Position.no_range
+  else Position.range (#1 (antiquote_range (hd ants))) (#2 (antiquote_range (List.last ants)));
+
+
+(* split lines *)
+
+fun split_lines input =
+  let
+    fun add a (line, lines) = (a :: line, lines);
+    fun flush (line, lines) = ([], rev line :: lines);
+    fun split (a as Text ss) =
+          (case take_prefix (fn ("\n", _) => false | _ => true) ss of
+            ([], []) => I
+          | (_, []) => add a
+          | ([], _ :: rest) => flush #> split (Text rest)
+          | (prefix, _ :: rest) => add (Text prefix) #> flush #> split (Text rest))
+      | split a = add a;
+  in if null input then [] else rev (#2 (flush (fold split input ([], [])))) end;
+
 
 (* reports *)
 
-fun antiq_reports ((_, {start, stop, range = (pos, _)}): antiq) =
-  [(start, Markup.antiquote), (stop, Markup.antiquote),
-   (pos, Markup.antiquoted), (pos, Markup.language_antiquotation)];
-
-fun antiquote_reports text =
-  maps (fn Text x => text x | Antiq antiq => map (rpair "") (antiq_reports antiq));
+fun antiq_reports ants = ants |> maps
+  (fn Antiq (_, {start, stop, range = (pos, _)}) =>
+      [(start, Markup.antiquote),
+       (stop, Markup.antiquote),
+       (pos, Markup.antiquoted),
+       (pos, Markup.language_antiquotation)]
+    | _ => []);
 
 
 (* scan *)
@@ -71,10 +99,11 @@
 
 (* read *)
 
-fun read source =
-  (case Scan.read Symbol_Pos.stopper (Scan.repeat scan_antiquote) (Input.source_explode source) of
-    SOME xs => (Position.reports_text (antiquote_reports (K []) xs); xs)
-  | NONE =>
-      error ("Malformed quotation/antiquotation source" ^ Position.here (Input.pos_of source)));
+fun read' pos syms =
+  (case Scan.read Symbol_Pos.stopper (Scan.repeat scan_antiquote) syms of
+    SOME ants => (Position.reports (antiq_reports ants); ants)
+  | NONE => error ("Malformed quotation/antiquotation source" ^ Position.here pos));
+
+fun read source = read' (Input.pos_of source) (Input.source_explode source);
 
 end;
--- a/src/Pure/General/symbol.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/General/symbol.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -59,7 +59,7 @@
   val scanner: string -> (string list -> 'a * string list) -> symbol list -> 'a
   val split_words: symbol list -> string list
   val explode_words: string -> string list
-  val strip_blanks: string -> string
+  val trim_blanks: string -> string
   val bump_init: string -> string
   val bump_string: string -> string
   val length: symbol list -> int
@@ -503,7 +503,7 @@
 
 (* blanks *)
 
-fun strip_blanks s =
+fun trim_blanks s =
   sym_explode s
   |> take_prefix is_blank |> #2
   |> take_suffix is_blank |> #1
--- a/src/Pure/General/symbol_pos.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/General/symbol_pos.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -14,6 +14,7 @@
   val ~$$$ : Symbol.symbol -> T list -> T list * T list
   val content: T list -> string
   val range: T list -> Position.range
+  val trim_blanks: T list -> T list
   val is_eof: T -> bool
   val stopper: T Scan.stopper
   val !!! : Scan.message -> (T list -> 'a) -> T list -> 'a
@@ -59,6 +60,10 @@
       in Position.range pos pos' end
   | range [] = Position.no_range;
 
+val trim_blanks =
+  take_prefix (Symbol.is_blank o symbol) #> #2 #>
+  take_suffix (Symbol.is_blank o symbol) #> #1;
+
 
 (* stopper *)
 
--- a/src/Pure/Isar/outer_syntax.scala	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Isar/outer_syntax.scala	Sat Oct 17 21:42:18 2015 +0200
@@ -240,9 +240,11 @@
       case Thy_Header.SECTION | Thy_Header.HEADER => Some(1)
       case Thy_Header.SUBSECTION => Some(2)
       case Thy_Header.SUBSUBSECTION => Some(3)
+      case Thy_Header.PARAGRAPH => Some(4)
+      case Thy_Header.SUBPARAGRAPH => Some(5)
       case _ =>
         keywords.command_kind(name) match {
-          case Some(kind) if Keyword.theory(kind) && !Keyword.theory_end(kind) => Some(4)
+          case Some(kind) if Keyword.theory(kind) && !Keyword.theory_end(kind) => Some(6)
           case _ => None
         }
     }
--- a/src/Pure/ML/ml_lex.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/ML/ml_lex.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -157,7 +157,7 @@
 
 in
 
-fun report_of_token SML (tok as Token ((pos, _), (kind, x))) =
+fun token_report SML (tok as Token ((pos, _), (kind, x))) =
   let
     val (markup, txt) =
       if not (is_keyword tok) then token_kind_markup SML kind
@@ -328,7 +328,8 @@
         (Scan.recover (Scan.bulk (!!! "bad input" scan))
           (fn msg => recover msg >> map Antiquote.Text))
       |> Source.exhaust
-      |> tap (Position.reports_text o Antiquote.antiquote_reports (single o report_of_token SML))
+      |> tap (Position.reports o Antiquote.antiq_reports)
+      |> tap (Position.reports_text o maps (fn Antiquote.Text t => [token_report SML t] | _ => []))
       |> tap (List.app check);
   in input @ termination end;
 
--- a/src/Pure/PIDE/command.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/PIDE/command.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -193,7 +193,7 @@
   Toplevel.setmp_thread_position tr
     (fn () =>
       Outer_Syntax.side_comments span |> maps (fn cmt =>
-        (Thy_Output.check_text (Token.input_of cmt) st'; [])
+        (Thy_Output.output_text st' {markdown = false} (Token.input_of cmt); [])
           handle exn =>
             if Exn.is_interrupt exn then reraise exn
             else Runtime.exn_messages_ids exn)) ();
--- a/src/Pure/PIDE/markup.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/PIDE/markup.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -117,6 +117,9 @@
   val document_antiquotation_optionN: string
   val paragraphN: string val paragraph: T
   val text_foldN: string val text_fold: T
+  val markdown_paragraphN: string val markdown_paragraph: T
+  val markdown_listN: string val markdown_list: string -> T
+  val markdown_itemN: string val markdown_item: int -> T
   val inputN: string val input: bool -> Properties.T -> T
   val command_keywordN: string val command_keyword: T
   val commandN: string val command: T
@@ -465,6 +468,13 @@
 val (text_foldN, text_fold) = markup_elem "text_fold";
 
 
+(* Markdown document structure *)
+
+val (markdown_paragraphN, markdown_paragraph) = markup_elem "markdown_paragraph";
+val (markdown_listN, markdown_list) = markup_string "markdown_list" kindN;
+val (markdown_itemN, markdown_item) = markup_int "markdown_item" "depth";
+
+
 (* formal input *)
 
 val inputN = "input";
--- a/src/Pure/PIDE/markup.scala	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/PIDE/markup.scala	Sat Oct 17 21:42:18 2015 +0200
@@ -243,6 +243,13 @@
   val TEXT_FOLD = "text_fold"
 
 
+  /* Markdown document structure */
+
+  val MARKDOWN_PARAGRAPH = "markdown_paragraph"
+  val Markdown_List = new Markup_String("markdown_list", "kind")
+  val Markdown_Item = new Markup_Int("markdown_item", "depth")
+
+
   /* ML */
 
   val ML_KEYWORD1 = "ML_keyword1"
--- a/src/Pure/ROOT	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/ROOT	Sat Oct 17 21:42:18 2015 +0200
@@ -229,6 +229,7 @@
     "System/system_channel.ML"
     "Thy/html.ML"
     "Thy/latex.ML"
+    "Thy/markdown.ML"
     "Thy/present.ML"
     "Thy/term_style.ML"
     "Thy/thy_header.ML"
--- a/src/Pure/ROOT.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/ROOT.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -236,6 +236,7 @@
 use "Thy/thy_header.ML";
 use "PIDE/command_span.ML";
 use "Thy/thy_syntax.ML";
+use "Thy/markdown.ML";
 use "Thy/html.ML";
 use "Thy/latex.ML";
 
--- a/src/Pure/Thy/latex.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Thy/latex.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -11,16 +11,12 @@
     Symbol.symbol list -> string
   val output_symbols: Symbol.symbol list -> string
   val output_ctrl_symbols: Symbol.symbol list -> string
-  val output_basic: Token.T -> string
-  val output_markup: string -> string -> string
-  val output_markup_env: string -> string -> string
-  val output_verbatim: string -> string
-  val markup_true: string
-  val markup_false: string
+  val output_token: Token.T -> string
   val begin_delim: string -> string
   val end_delim: string -> string
   val begin_tag: string -> string
   val end_tag: string -> string
+  val environment: string -> string -> string
   val tex_trailer: string
   val isabelle_theory: string -> string -> string
   val symbol_source: (string -> bool) * (string -> bool) ->
@@ -32,7 +28,7 @@
 structure Latex: LATEX =
 struct
 
-(* literal ASCII *)
+(* output verbatim ASCII *)
 
 val output_ascii =
   translate_string
@@ -44,7 +40,7 @@
           then enclose "{\\char`\\" "}" s else s);
 
 
-(* symbol output *)
+(* output symbols *)
 
 local
 
@@ -125,9 +121,9 @@
 end;
 
 
-(* token output *)
+(* output token *)
 
-fun output_basic tok =
+fun output_token tok =
   let val s = Token.content_of tok in
     if Token.is_kind Token.Comment tok then ""
     else if Token.is_command tok then
@@ -148,20 +144,8 @@
     else output_syms s
   end handle ERROR msg => error (msg ^ Position.here (Token.pos_of tok));
 
-val output_text =
-  Symbol.strip_blanks #> Symbol.explode #> output_ctrl_symbols;
 
-fun output_markup cmd txt = "%\n\\isamarkup" ^ cmd ^ "{" ^ output_text txt ^ "%\n}\n";
-
-fun output_markup_env cmd txt =
-  "%\n\\begin{isamarkup" ^ cmd ^ "}%\n" ^
-  output_text txt ^
-  "%\n\\end{isamarkup" ^ cmd ^ "}%\n";
-
-fun output_verbatim txt = "%\n" ^ Symbol.strip_blanks txt ^ "\n";
-
-val markup_true = "\\isamarkuptrue%\n";
-val markup_false = "\\isamarkupfalse%\n";
+(* tags *)
 
 val begin_delim = enclose "%\n\\isadelim" "\n";
 val end_delim = enclose "%\n\\endisadelim" "\n";
@@ -171,6 +155,9 @@
 
 (* theory presentation *)
 
+fun environment name =
+  enclose ("%\n\\begin{" ^ name ^ "}%\n") ("%\n\\end{" ^ name ^ "}%\n");
+
 val tex_trailer =
   "%%% Local Variables:\n\
   \%%% mode: latex\n\
@@ -180,7 +167,7 @@
 fun isabelle_theory name txt =
   "%\n\\begin{isabellebody}%\n\
   \\\setisabellecontext{" ^ output_syms name ^ "}%\n" ^ txt ^
-  "\\end{isabellebody}%\n" ^ tex_trailer;
+  "%\n\\end{isabellebody}%\n" ^ tex_trailer;
 
 fun symbol_source known name syms =
   isabelle_theory name
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Thy/markdown.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -0,0 +1,208 @@
+(*  Title:      Pure/Thy/markdown.ML
+    Author:     Makarius
+
+Minimal support for Markdown documents (see also http://commonmark.org)
+that consist only of paragraphs and (nested) lists:
+
+  * list items start with marker \<^item> (itemize), \<^enum> (enumerate), \<^descr> (description)
+  * adjacent list items with same indentation and same marker are grouped
+    into a single list
+  * singleton blank lines separate paragraphs
+  * multiple blank lines escape from the current list hierarchy
+
+Notable differences to official Markdown:
+
+  * indentation of list items needs to match exactly
+  * indentation is unlimited (Markdown interprets 4 spaces as block quote)
+  * list items always consist of paragraphs -- no notion of "tight" list
+*)
+
+signature MARKDOWN =
+sig
+  val is_control: Symbol.symbol -> bool
+  datatype kind = Itemize | Enumerate | Description
+  val print_kind: kind -> string
+  type line
+  val line_source: line -> Antiquote.text_antiquote list
+  val line_is_item: line -> bool
+  val line_content: line -> Antiquote.text_antiquote list
+  val make_line: Antiquote.text_antiquote list -> line
+  val empty_line: line
+  datatype block = Paragraph of line list | List of {indent: int, kind: kind, body: block list}
+  val read_lines: line list -> block list
+  val read_antiquotes: Antiquote.text_antiquote list -> block list
+  val read_source: Input.source -> block list
+  val text_reports: Antiquote.text_antiquote list -> Position.report list
+  val reports: block list -> Position.report list
+end;
+
+structure Markdown: MARKDOWN =
+struct
+
+(* document lines *)
+
+val is_control = member (op =) ["\\<^item>", "\\<^enum>", "\\<^descr>"];
+
+datatype kind = Itemize | Enumerate | Description;
+
+fun print_kind Itemize = "itemize"
+  | print_kind Enumerate = "enumerate"
+  | print_kind Description = "description";
+
+datatype line =
+  Line of
+   {source: Antiquote.text_antiquote list,
+    is_empty: bool,
+    indent: int,
+    item: kind option,
+    item_pos: Position.T,
+    content: Antiquote.text_antiquote list};
+
+val eof_line =
+  Line {source = [Antiquote.Text [(Symbol.eof, Position.none)]],
+    is_empty = false, indent = 0, item = NONE, item_pos = Position.none, content = []};
+
+fun line_source (Line {source, ...}) = source;
+fun line_is_empty (Line {is_empty, ...}) = is_empty;
+fun line_is_item (Line {item, ...}) = is_some item;
+fun line_content (Line {content, ...}) = content;
+
+
+(* make line *)
+
+local
+
+fun bad_blank ((s, _): Symbol_Pos.T) = Symbol.is_ascii_blank s andalso s <> Symbol.space;
+val bad_blanks = maps (fn Antiquote.Text ss => filter bad_blank ss | _ => []);
+
+fun check_blanks source =
+  (case bad_blanks source of
+    [] => ()
+  | (c, pos) :: _ =>
+      error ("Bad blank character " ^ quote (ML_Syntax.print_char c) ^ Position.here pos));
+
+fun is_space ((s, _): Symbol_Pos.T) = s = Symbol.space;
+val is_empty = forall (fn Antiquote.Text ss => forall is_space ss | _ => false);
+
+val scan_marker =
+  Scan.many is_space -- Symbol_Pos.scan_pos --
+  Scan.option
+   (Symbol_Pos.$$ "\\<^item>" >> K Itemize ||
+    Symbol_Pos.$$ "\\<^enum>" >> K Enumerate ||
+    Symbol_Pos.$$ "\\<^descr>" >> K Description) --| Scan.many is_space
+  >> (fn ((sp, pos), item) => (length sp, item, if is_some item then pos else Position.none));
+
+fun read_marker (Antiquote.Text ss :: rest) =
+      (case Scan.finite Symbol_Pos.stopper scan_marker ss of
+        (marker, []) => (marker, rest)
+      | (marker, ss') => (marker, Antiquote.Text ss' :: rest))
+  | read_marker source = ((0, NONE, Position.none), source);
+
+in
+
+fun make_line source =
+  let
+    val _ = check_blanks source;
+    val ((indent, item, item_pos), content) = read_marker source;
+  in
+    Line {source = source, is_empty = is_empty source, indent = indent,
+      item = item, item_pos = item_pos, content = content}
+  end;
+
+val empty_line = make_line [];
+
+end;
+
+
+(* document blocks *)
+
+datatype block =
+  Paragraph of line list | List of {indent: int, kind: kind, body: block list};
+
+fun block_lines (Paragraph lines) = lines
+  | block_lines (List {body, ...}) = maps block_lines body;
+
+fun block_range (Paragraph lines) = Antiquote.range (maps line_content lines)
+  | block_range (List {body, ...}) = Antiquote.range (maps line_source (maps block_lines body));
+
+fun block_indent (List {indent, ...}) = indent
+  | block_indent (Paragraph (Line {indent, ...} :: _)) = indent
+  | block_indent _ = 0;
+
+fun block_list indent0 kind0 (List {indent, kind, body}) =
+      if indent0 = indent andalso kind0 = kind then SOME body else NONE
+  | block_list _ _ _ = NONE;
+
+val is_list = fn List _ => true | _ => false;
+
+
+(* read document *)
+
+local
+
+fun build (indent, item, rev_body) document =
+  (case (item, document) of
+    (SOME kind, block :: blocks) =>
+      (case block_list indent kind block of
+        SOME list => List {indent = indent, kind = kind, body = fold cons rev_body list} :: blocks
+      | NONE =>
+          if (if is_list block then indent < block_indent block else indent <= block_indent block)
+          then build (indent, item, block :: rev_body) blocks
+          else List {indent = indent, kind = kind, body = rev rev_body} :: document)
+  | (SOME kind, []) => [List {indent = indent, kind = kind, body = rev rev_body}]
+  | (NONE, _) => fold cons rev_body document);
+
+fun plain_line (line as Line {is_empty, item, ...}) =
+  not is_empty andalso is_none item andalso line <> eof_line;
+
+val parse_paragraph =
+  Scan.one (fn line => line <> eof_line) -- Scan.many plain_line >> (fn (line, lines) =>
+    let
+      val Line {indent, item, ...} = line;
+      val block = Paragraph (line :: lines);
+    in (indent, item, [block]) end);
+
+val parse_document =
+  parse_paragraph ::: Scan.repeat (Scan.option (Scan.one line_is_empty) |-- parse_paragraph)
+    >> (fn pars => fold_rev build pars []);
+
+in
+
+val read_lines =
+  Scan.read (Scan.stopper (K eof_line) (fn line => line = eof_line))
+    (Scan.repeat (Scan.many line_is_empty |-- parse_document) --| Scan.many line_is_empty) #>
+  the_default [] #> flat;
+
+val read_antiquotes = Antiquote.split_lines #> map make_line #> read_lines;
+val read_source = Antiquote.read #> read_antiquotes;
+
+end;
+
+
+(* PIDE reports *)
+
+val text_reports =
+  maps (fn Antiquote.Text ss => [(#1 (Symbol_Pos.range ss), Markup.words)] | _ => []);
+
+local
+
+fun line_reports depth (Line {item_pos, content, ...}) =
+  cons (item_pos, Markup.markdown_item depth) #> append (text_reports content);
+
+fun block_reports depth block =
+  (case block of
+    Paragraph lines =>
+      cons (#1 (block_range block), Markup.markdown_paragraph) #>
+      fold (line_reports depth) lines
+  | List {kind, body, ...} =>
+      cons (#1 (block_range block), Markup.markdown_list (print_kind kind)) #>
+      fold (block_reports (depth + 1)) body);
+
+in
+
+fun reports blocks =
+  filter (Position.is_reported o #1) (fold (block_reports 0) blocks []);
+
+end;
+
+end;
--- a/src/Pure/Thy/thy_header.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Thy/thy_header.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -42,11 +42,14 @@
 
 (* bootstrap keywords *)
 
-val headerN = "header";
+val headerN = "header";  (* FIXME legacy *)
+
 val chapterN = "chapter";
 val sectionN = "section";
 val subsectionN = "subsection";
 val subsubsectionN = "subsubsection";
+val paragraphN = "paragraph";
+val subparagraphN = "subparagraph";
 val textN = "text";
 val txtN = "txt";
 val text_rawN = "text_raw";
@@ -74,6 +77,8 @@
      ((sectionN, @{here}), SOME ((Keyword.document_heading, []), [])),
      ((subsectionN, @{here}), SOME ((Keyword.document_heading, []), [])),
      ((subsubsectionN, @{here}), SOME ((Keyword.document_heading, []), [])),
+     ((paragraphN, @{here}), SOME ((Keyword.document_heading, []), [])),
+     ((subparagraphN, @{here}), SOME ((Keyword.document_heading, []), [])),
      ((textN, @{here}), SOME ((Keyword.document_body, []), [])),
      ((txtN, @{here}), SOME ((Keyword.document_body, []), [])),
      ((text_rawN, @{here}), SOME ((Keyword.document_raw, []), [])),
@@ -147,6 +152,8 @@
     Parse.command sectionN ||
     Parse.command subsectionN ||
     Parse.command subsubsectionN ||
+    Parse.command paragraphN ||
+    Parse.command subparagraphN ||
     Parse.command textN ||
     Parse.command txtN ||
     Parse.command text_rawN) --
--- a/src/Pure/Thy/thy_header.scala	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Thy/thy_header.scala	Sat Oct 17 21:42:18 2015 +0200
@@ -19,11 +19,14 @@
 
   type Keywords = List[(String, Option[Keyword.Spec], Option[String])]
 
-  val HEADER = "header"
+  val HEADER = "header"  /* FIXME legacy */
+
   val CHAPTER = "chapter"
   val SECTION = "section"
   val SUBSECTION = "subsection"
   val SUBSUBSECTION = "subsubsection"
+  val PARAGRAPH = "paragraph"
+  val SUBPARAGRAPH = "subparagraph"
   val TEXT = "text"
   val TXT = "txt"
   val TEXT_RAW = "text_raw"
@@ -51,6 +54,8 @@
       (SECTION, Some(((Keyword.DOCUMENT_HEADING, Nil), Nil)), None),
       (SUBSECTION, Some(((Keyword.DOCUMENT_HEADING, Nil), Nil)), None),
       (SUBSUBSECTION, Some(((Keyword.DOCUMENT_HEADING, Nil), Nil)), None),
+      (PARAGRAPH, Some(((Keyword.DOCUMENT_HEADING, Nil), Nil)), None),
+      (SUBPARAGRAPH, Some(((Keyword.DOCUMENT_HEADING, Nil), Nil)), None),
       (TEXT, Some(((Keyword.DOCUMENT_BODY, Nil), Nil)), None),
       (TXT, Some(((Keyword.DOCUMENT_BODY, Nil), Nil)), None),
       (TEXT_RAW, Some(((Keyword.DOCUMENT_RAW, Nil), Nil)), None),
@@ -115,6 +120,8 @@
         command(SECTION) |
         command(SUBSECTION) |
         command(SUBSUBSECTION) |
+        command(PARAGRAPH) |
+        command(SUBPARAGRAPH) |
         command(TEXT) |
         command(TXT) |
         command(TEXT_RAW)) ~
--- a/src/Pure/Thy/thy_output.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Thy/thy_output.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -23,9 +23,8 @@
       theory -> theory
   val boolean: string -> bool
   val integer: string -> int
-  val eval_antiq: Toplevel.state -> Antiquote.antiq -> string
-  val report_text: Input.source -> unit
-  val check_text: Input.source -> Toplevel.state -> unit
+  val eval_antiquote: Toplevel.state -> Antiquote.text_antiquote -> string
+  val output_text: Toplevel.state -> {markdown: bool} -> Input.source -> string
   val present_thy: theory -> (Toplevel.transition * Toplevel.state) list -> Token.T list -> Buffer.T
   val pretty_text: Proof.context -> string -> Pretty.T
   val pretty_term: Proof.context -> term -> Pretty.T
@@ -37,7 +36,7 @@
   val output: Proof.context -> Pretty.T list -> string
   val verbatim_text: Proof.context -> string -> string
   val old_header_command: Input.source -> Toplevel.transition -> Toplevel.transition
-  val document_command: (xstring * Position.T) option * Input.source ->
+  val document_command: {markdown: bool} -> (xstring * Position.T) option * Input.source ->
     Toplevel.transition -> Toplevel.transition
 end;
 
@@ -160,48 +159,62 @@
 end;
 
 
-(* eval_antiq *)
+(* eval antiquote *)
 
-fun eval_antiq state ((ss, {range = (pos, _), ...}): Antiquote.antiq) =
-  let
-    val keywords =
-      (case try Toplevel.presentation_context_of state of
-        SOME ctxt => Thy_Header.get_keywords' ctxt
-      | NONE =>
-          error ("Unknown context -- cannot expand document antiquotations" ^
-            Position.here pos));
+fun eval_antiquote _ (Antiquote.Text ss) = Symbol_Pos.content ss
+  | eval_antiquote state (Antiquote.Antiq (ss, {range = (pos, _), ...})) =
+      let
+        val keywords =
+          (case try Toplevel.presentation_context_of state of
+            SOME ctxt => Thy_Header.get_keywords' ctxt
+          | NONE =>
+              error ("Unknown context -- cannot expand document antiquotations" ^
+                Position.here pos));
 
-    val (opts, src) = Token.read_antiq keywords antiq (ss, pos);
-    fun cmd ctxt = wrap ctxt (fn () => command src state ctxt) ();
+        val (opts, src) = Token.read_antiq keywords antiq (ss, pos);
+        fun cmd ctxt = wrap ctxt (fn () => command src state ctxt) ();
 
-    val preview_ctxt = fold option opts (Toplevel.presentation_context_of state);
-    val print_ctxt = Context_Position.set_visible false preview_ctxt;
-    val _ = cmd preview_ctxt;
-    val print_modes = space_explode "," (Config.get print_ctxt modes) @ Latex.modes;
-  in Print_Mode.with_modes print_modes (fn () => cmd print_ctxt) () end;
+        val preview_ctxt = fold option opts (Toplevel.presentation_context_of state);
+        val print_ctxt = Context_Position.set_visible false preview_ctxt;
+        val _ = cmd preview_ctxt;
+        val print_modes = space_explode "," (Config.get print_ctxt modes) @ Latex.modes;
+      in Print_Mode.with_modes print_modes (fn () => cmd print_ctxt) () end;
 
 
-(* check_text *)
+(* output text *)
 
-fun eval_antiquote state source =
+fun output_text state {markdown} source =
   let
-    fun words (Antiquote.Text ss) = [(#1 (Symbol_Pos.range ss), Markup.words)]
-      | words (Antiquote.Antiq _) = [];
+    val pos = Input.pos_of source;
+    val _ = Position.report pos (Markup.language_document (Input.is_delimited source));
+    val syms = Input.source_explode source;
 
-    fun expand (Antiquote.Text ss) = Symbol_Pos.content ss
-      | expand (Antiquote.Antiq antiq) = eval_antiq state antiq;
+    val output_antiquote = eval_antiquote state #> Symbol.explode #> Latex.output_ctrl_symbols;
+    val output_antiquotes = map output_antiquote #> implode;
+
+    fun output_line line =
+      (if Markdown.line_is_item line then "\\item " else "") ^
+        output_antiquotes (Markdown.line_content line);
 
-    val ants = Antiquote.read source;
-    val _ = Position.reports (maps words ants);
-  in implode (map expand ants) end;
-
-fun report_text source =
-  Position.report (Input.pos_of source) (Markup.language_document (Input.is_delimited source));
-
-fun check_text source state =
- (report_text source;
-  if Toplevel.is_skipped_proof state then ()
-  else ignore (eval_antiquote state source));
+    fun output_blocks blocks = space_implode "\n\n" (map output_block blocks)
+    and output_block (Markdown.Paragraph lines) = cat_lines (map output_line lines)
+      | output_block (Markdown.List {kind, body, ...}) =
+          Latex.environment (Markdown.print_kind kind) (output_blocks body);
+  in
+    if Toplevel.is_skipped_proof state then ""
+    else if markdown andalso exists (Markdown.is_control o Symbol_Pos.symbol) syms
+    then
+      let
+        val ants = Antiquote.read' pos syms;
+        val blocks = Markdown.read_antiquotes ants;
+        val _ = Position.reports (Markdown.reports blocks);
+      in output_blocks blocks end
+    else
+      let
+        val ants = Antiquote.read' pos (Symbol_Pos.trim_blanks syms);
+        val _ = Position.reports (Markdown.text_reports ants);
+      in output_antiquotes ants end
+  end;
 
 
 
@@ -216,16 +229,8 @@
   | Basic_Token of Token.T
   | Markup_Token of string * Input.source
   | Markup_Env_Token of string * Input.source
-  | Verbatim_Token of Input.source;
+  | Raw_Token of Input.source;
 
-fun output_token state =
-  let val eval = eval_antiquote state in
-    fn No_Token => ""
-     | Basic_Token tok => Latex.output_basic tok
-     | Markup_Token (cmd, source) => Latex.output_markup cmd (eval source)
-     | Markup_Env_Token (cmd, source) => Latex.output_markup_env cmd (eval source)
-     | Verbatim_Token source => Latex.output_verbatim (eval source)
-  end;
 
 fun basic_token pred (Basic_Token tok) = pred tok
   | basic_token _ _ = false;
@@ -236,6 +241,20 @@
 val newline_token = basic_token Token.is_newline;
 
 
+(* output token *)
+
+fun output_token state tok =
+  (case tok of
+    No_Token => ""
+  | Basic_Token tok => Latex.output_token tok
+  | Markup_Token (cmd, source) =>
+      "%\n\\isamarkup" ^ cmd ^ "{" ^ output_text state {markdown = false} source ^ "%\n}\n"
+  | Markup_Env_Token (cmd, source) =>
+      Latex.environment ("isamarkup" ^ cmd) (output_text state {markdown = true} source)
+  | Raw_Token source =>
+      "%\n" ^ output_text state {markdown = true} source ^ "\n");
+
+
 (* command spans *)
 
 type command = string * Position.T * string list;   (*name, position, tags*)
@@ -342,6 +361,9 @@
 
 local
 
+val markup_true = "\\isamarkuptrue%\n";
+val markup_false = "\\isamarkupfalse%\n";
+
 val space_proper =
   Scan.one Token.is_blank -- Scan.many Token.is_comment -- Scan.one Token.is_proper;
 
@@ -393,7 +415,7 @@
       >> (fn ((cmd_mod, cmd), tags) =>
         map (fn tok => (NONE, (Basic_Token tok, ("", d)))) cmd_mod @
           [(SOME (Token.content_of cmd, Token.pos_of cmd, tags),
-            (Basic_Token cmd, (Latex.markup_false, d)))]));
+            (Basic_Token cmd, (markup_false, d)))]));
 
     val cmt = Scan.peek (fn d =>
       Parse.$$$ "--" |-- Parse.!!!! (improper |-- Parse.document_source) >>
@@ -404,9 +426,9 @@
 
     val tokens =
       (ignored ||
-        markup Keyword.is_document_heading Markup_Token Latex.markup_true ||
-        markup Keyword.is_document_body Markup_Env_Token Latex.markup_true ||
-        markup Keyword.is_document_raw (Verbatim_Token o #2) "") >> single ||
+        markup Keyword.is_document_heading Markup_Token markup_true ||
+        markup Keyword.is_document_body Markup_Env_Token markup_true ||
+        markup Keyword.is_document_raw (Raw_Token o #2) "") >> single ||
       command ||
       (cmt || other) >> single;
 
@@ -487,11 +509,11 @@
 
 (* basic pretty printing *)
 
-fun tweak_line ctxt s =
-  if Config.get ctxt display then s else Symbol.strip_blanks s;
+fun perhaps_trim ctxt =
+  not (Config.get ctxt display) ? Symbol.trim_blanks;
 
 fun pretty_text ctxt =
-  Pretty.chunks o map Pretty.str o map (tweak_line ctxt) o split_lines;
+  Pretty.chunks o map Pretty.str o map (perhaps_trim ctxt) o split_lines;
 
 fun pretty_text_report ctxt source =
  (Context_Position.report ctxt (Input.pos_of source)
@@ -563,7 +585,7 @@
   |> (if Config.get ctxt display then
         map (Pretty.indent (Config.get ctxt indent) #> string_of_margin ctxt #> Output.output)
         #> space_implode "\\isasep\\isanewline%\n"
-        #> enclose "\\begin{isabelle}%\n" "%\n\\end{isabelle}"
+        #> Latex.environment "isabelle"
       else
         map ((if Config.get ctxt break then string_of_margin ctxt else Pretty.str_of) #>
           Output.output)
@@ -654,8 +676,7 @@
 
 fun verbatim_text ctxt =
   if Config.get ctxt display then
-    Latex.output_ascii #>
-    enclose "\\begin{isabellett}%\n" "%\n\\end{isabellett}"
+    Latex.output_ascii #> Latex.environment "isabellett"
   else
     split_lines #>
     map (Latex.output_ascii #> enclose "\\isatt{" "}") #>
@@ -713,15 +734,15 @@
   Toplevel.keep (fn state =>
     if Toplevel.is_toplevel state then
      (legacy_feature "Obsolete 'header' command -- use 'chapter', 'section' etc. instead";
-      check_text txt state)
+      ignore (output_text state {markdown = false} txt))
     else raise Toplevel.UNDEF);
 
-fun document_command (loc, txt) =
+fun document_command markdown (loc, txt) =
   Toplevel.keep (fn state =>
     (case loc of
-      NONE => check_text txt state
+      NONE => ignore (output_text state markdown txt)
     | SOME (_, pos) =>
         error ("Illegal target specification -- not a theory context" ^ Position.here pos))) o
-  Toplevel.present_local_theory loc (check_text txt);
+  Toplevel.present_local_theory loc (fn state => ignore (output_text state markdown txt));
 
 end;
--- a/src/Pure/Tools/rail.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Tools/rail.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -63,7 +63,7 @@
 fun reports_of_token (Token ((pos, _), (String, _))) = [(pos, Markup.inner_string)]
   | reports_of_token (Token ((pos, _), (Keyword, x))) =
       map (pair pos) (the_list (Symtab.lookup keywords x) @ Completion.suppress_abbrevs x)
-  | reports_of_token (Token (_, (Antiq antiq, _))) = Antiquote.antiq_reports antiq
+  | reports_of_token (Token (_, (Antiq antiq, _))) = Antiquote.antiq_reports [Antiquote.Antiq antiq]
   | reports_of_token _ = [];
 
 
@@ -315,7 +315,7 @@
 
 fun output_rules state rules =
   let
-    val output_antiq = Thy_Output.eval_antiq state;
+    val output_antiq = Thy_Output.eval_antiquote state o Antiquote.Antiq;
     fun output_text b s =
       Output.output s
       |> b ? enclose "\\isakeyword{" "}"
@@ -354,11 +354,7 @@
         output "" rail' ^
         "\\rail@end\n"
       end;
-  in
-    "\\begin{railoutput}\n" ^
-    implode (map output_rule rules) ^
-    "\\end{railoutput}\n"
-  end;
+  in Latex.environment "railoutput" (implode (map output_rule rules)) end;
 
 in
 
--- a/src/Pure/Tools/update_header.scala	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/Tools/update_header.scala	Sat Oct 17 21:42:18 2015 +0200
@@ -25,15 +25,19 @@
 
   /* command line entry point */
 
+  private val headings =
+    Set("chapter", "section", "subsection", "subsubsection", "paragraph", "subparagraph")
+
   def main(args: Array[String])
   {
     Command_Line.tool0 {
       args.toList match {
         case section :: files =>
-          if (!Set("chapter", "section", "subsection", "subsubsection").contains(section))
+          if (!headings.contains(section))
             error("Bad heading command: " + quote(section))
           files.foreach(file => update_header(section, Path.explode(file)))
-        case _ => error("Bad arguments:\n" + cat_lines(args))
+        case _ =>
+            error("Bad arguments:\n" + cat_lines(args))
       }
     }
   }
--- a/src/Pure/pure_syn.ML	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Pure/pure_syn.ML	Sat Oct 17 21:42:18 2015 +0200
@@ -19,31 +19,39 @@
 
 val _ =
   Outer_Syntax.command ("chapter", @{here}) "chapter heading"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
 
 val _ =
   Outer_Syntax.command ("section", @{here}) "section heading"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
 
 val _ =
   Outer_Syntax.command ("subsection", @{here}) "subsection heading"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
 
 val _ =
   Outer_Syntax.command ("subsubsection", @{here}) "subsubsection heading"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
+
+val _ =
+  Outer_Syntax.command ("paragraph", @{here}) "paragraph heading"
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
+
+val _ =
+  Outer_Syntax.command ("subparagraph", @{here}) "subparagraph heading"
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = false});
 
 val _ =
   Outer_Syntax.command ("text", @{here}) "formal comment (primary style)"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = true});
 
 val _ =
   Outer_Syntax.command ("txt", @{here}) "formal comment (secondary style)"
-    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command);
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = true});
 
 val _ =
-  Outer_Syntax.command ("text_raw", @{here}) "raw LaTeX text"
-    (Parse.document_source >> (fn s => Toplevel.keep (fn _ => Thy_Output.report_text s)));
+  Outer_Syntax.command ("text_raw", @{here}) "LaTeX text (without surrounding environment)"
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = true});
 
 val _ =
   Outer_Syntax.command ("theory", @{here}) "begin theory"
--- a/src/Tools/jEdit/etc/options	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Tools/jEdit/etc/options	Sat Oct 17 21:42:18 2015 +0200
@@ -133,6 +133,11 @@
 option inner_comment_color : string = "CC0000FF"
 option dynamic_color : string = "7BA428FF"
 
+option markdown_item_color1 : string = "DAFEDAFF"
+option markdown_item_color2 : string = "FFF0CCFF"
+option markdown_item_color3 : string = "E7E7FFFF"
+option markdown_item_color4 : string = "FFE0F0FF"
+
 
 section "Icons"
 
--- a/src/Tools/jEdit/src/rendering.scala	Sat Oct 17 13:18:43 2015 +0200
+++ b/src/Tools/jEdit/src/rendering.scala	Sat Oct 17 21:42:18 2015 +0200
@@ -155,7 +155,8 @@
     Markup.Elements(Markup.EXPRESSION, Markup.CITATION, Markup.LANGUAGE, Markup.ML_TYPING,
       Markup.TOKEN_RANGE, Markup.ENTITY, Markup.PATH, Markup.URL, Markup.SORTING,
       Markup.TYPING, Markup.FREE, Markup.SKOLEM, Markup.BOUND,
-      Markup.VAR, Markup.TFREE, Markup.TVAR, Markup.ML_BREAKPOINT)
+      Markup.VAR, Markup.TFREE, Markup.TVAR, Markup.ML_BREAKPOINT,
+      Markup.MARKDOWN_PARAGRAPH, Markup.Markdown_List.name)
 
   private val hyperlink_elements =
     Markup.Elements(Markup.ENTITY, Markup.PATH, Markup.POSITION, Markup.CITATION, Markup.URL)
@@ -182,7 +183,8 @@
 
   private val tooltip_elements =
     Markup.Elements(Markup.LANGUAGE, Markup.TIMING, Markup.ENTITY, Markup.SORTING,
-      Markup.TYPING, Markup.ML_TYPING, Markup.ML_BREAKPOINT, Markup.PATH, Markup.URL) ++
+      Markup.TYPING, Markup.ML_TYPING, Markup.ML_BREAKPOINT, Markup.PATH, Markup.URL,
+      Markup.MARKDOWN_PARAGRAPH, Markup.Markdown_List.name) ++
     Markup.Elements(tooltip_descriptions.keySet)
 
   private val gutter_elements =
@@ -204,7 +206,7 @@
       Markup.STATE_MESSAGE + Markup.INFORMATION_MESSAGE +
       Markup.TRACING_MESSAGE + Markup.WARNING_MESSAGE +
       Markup.LEGACY_MESSAGE + Markup.ERROR_MESSAGE +
-      Markup.BAD + Markup.INTENSIFY ++ active_elements
+      Markup.BAD + Markup.INTENSIFY + Markup.Markdown_Item.name ++ active_elements
 
   private val foreground_elements =
     Markup.Elements(Markup.STRING, Markup.ALT_STRING, Markup.VERBATIM,
@@ -282,6 +284,11 @@
   val inner_comment_color = color_value("inner_comment_color")
   val dynamic_color = color_value("dynamic_color")
 
+  val markdown_item_color1 = color_value("markdown_item_color1")
+  val markdown_item_color2 = color_value("markdown_item_color2")
+  val markdown_item_color3 = color_value("markdown_item_color3")
+  val markdown_item_color4 = color_value("markdown_item_color4")
+
 
   /* completion */
 
@@ -514,6 +521,7 @@
         {
           case (Text.Info(r, (t1, info)), Text.Info(_, XML.Elem(Markup.Timing(t2), _))) =>
             Some(Text.Info(r, (t1 + t2, info)))
+
           case (prev, Text.Info(r, XML.Elem(Markup.Entity(kind, name), _))) =>
             val kind1 = Word.implode(Word.explode('_', kind))
             val txt1 =
@@ -525,19 +533,24 @@
                 "\n" + t.message
               else ""
             Some(add(prev, r, (true, XML.Text(txt1 + txt2))))
+
           case (prev, Text.Info(r, XML.Elem(Markup.Path(name), _))) =>
             val file = jedit_file(name)
             val text =
               if (name == file) "file " + quote(file)
               else "path " + quote(name) + "\nfile " + quote(file)
             Some(add(prev, r, (true, XML.Text(text))))
+
           case (prev, Text.Info(r, XML.Elem(Markup.Url(name), _))) =>
             Some(add(prev, r, (true, XML.Text("URL " + quote(name)))))
+
           case (prev, Text.Info(r, XML.Elem(Markup(name, _), body)))
           if name == Markup.SORTING || name == Markup.TYPING =>
             Some(add(prev, r, (true, pretty_typing("::", body))))
+
           case (prev, Text.Info(r, XML.Elem(Markup(Markup.ML_TYPING, _), body))) =>
             Some(add(prev, r, (false, pretty_typing("ML:", body))))
+
           case (prev, Text.Info(r, Protocol.ML_Breakpoint(breakpoint))) =>
             val text =
               if (Debugger.breakpoint_state(breakpoint)) "breakpoint (enabled)"
@@ -545,6 +558,12 @@
             Some(add(prev, r, (true, XML.Text(text))))
           case (prev, Text.Info(r, XML.Elem(Markup.Language(language, _, _, _), _))) =>
             Some(add(prev, r, (true, XML.Text("language: " + language))))
+
+          case (prev, Text.Info(r, XML.Elem(Markup(Markup.MARKDOWN_PARAGRAPH, _), _))) =>
+            Some(add(prev, r, (true, XML.Text("Markdown: paragraph"))))
+          case (prev, Text.Info(r, XML.Elem(Markup.Markdown_List(kind), _))) =>
+            Some(add(prev, r, (true, XML.Text("Markdown: " + kind))))
+
           case (prev, Text.Info(r, XML.Elem(Markup(name, _), _))) =>
             Rendering.tooltip_descriptions.get(name).
               map(descr => add(prev, r, (true, XML.Text(descr))))
@@ -679,6 +698,15 @@
                   Some((Nil, Some(bad_color)))
                 case (_, Text.Info(_, XML.Elem(Markup(Markup.INTENSIFY, _), _))) =>
                   Some((Nil, Some(intensify_color)))
+                case (_, Text.Info(_, XML.Elem(Markup.Markdown_Item(depth), _))) =>
+                  val color =
+                    depth match {
+                      case 1 => markdown_item_color1
+                      case 2 => markdown_item_color2
+                      case 3 => markdown_item_color3
+                      case _ => markdown_item_color4
+                    }
+                  Some((Nil, Some(color)))
                 case (acc, Text.Info(_, Protocol.Dialog(_, serial, result))) =>
                   command_states.collectFirst(
                     { case st if st.results.defined(serial) => st.results.get(serial).get }) match