author | wenzelm |
Tue, 22 Jan 2019 19:36:17 +0100 | |
changeset 69719 | 331ef175a112 |
parent 68845 | 3b2daa7bf9f4 |
child 69859 | e18ba60a1cf8 |
permissions | -rw-r--r-- |
63604 | 1 |
/*  Title:      Pure/Isar/document_structure.scala
    Author:     Makarius

Overall document structure.
*/
|
6 |
||
7 |
package isabelle


import scala.collection.mutable
import scala.annotation.tailrec
|
12 |
||
13 |
||
14 |
object Document_Structure |
|
15 |
{ |
|
63610 | 16 |
/** general structure **/

/* A document is a tree of blocks and atoms, measured in characters of source text. */
sealed abstract class Document { def length: Int }

/* Named block with its own source text and nested sub-documents; its length
   is the total length of the body (the block's own text is not counted). */
case class Block(name: String, text: String, body: List[Document]) extends Document
{ val length: Int = body.foldLeft(0)(_ + _.length) }  // foldLeft: "/:" is deprecated since Scala 2.13

/* Leaf node covering a source span of the given length. */
case class Atom(length: Int) extends Document
63605 | 22 |
|
68840 | 23 |
/* A proper theory command: its keyword kind is a "theory" kind,
   but not a theory_end (e.g. not "end"). */
private def is_theory_command(keywords: Keyword.Keywords, command: Command): Boolean =
{
  def proper_theory_kind(kind: String): Boolean =
    Keyword.theory(kind) && !Keyword.theory_end(kind)
  command.span.is_kind(keywords, proper_theory_kind, false)
}
63605 | 26 |
|
63604 | 27 |
|
63606 | 28 |
|
63610 | 29 |
/** context blocks **/ |
63606 | 30 |
|
31 |
/* Parse the source text into a forest of documents, nested according to
   context blocks: commands with explicit "begin" ... "end" structure, and
   plain theory commands, whose implicit block is closed by the next
   structural command. */
def parse_blocks(
  syntax: Outer_Syntax,
  node_name: Document.Node.Name,
  text: CharSequence): List[Document] =
{
  // a theory command without begin/end block structure of its own
  def is_plain_theory(command: Command): Boolean =
    is_theory_command(syntax.keywords, command) &&
    !command.span.is_begin && !command.span.is_end


  /* stack operations */

  def buffer(): mutable.ListBuffer[Document] = new mutable.ListBuffer[Document]

  // invariant: non-empty; the bottom entry collects the top-level documents
  var stack: List[(Command, mutable.ListBuffer[Document])] =
    List((Command.empty, buffer()))

  // open a new block for the given command
  // (explicit ": Unit =" instead of deprecated procedure syntax)
  def open(command: Command): Unit = { stack = (command, buffer()) :: stack }

  // close the innermost open block; result: was anything closed?
  def close(): Boolean =
    stack match {
      case (command, body) :: (_, body2) :: _ =>
        body2 += Block(command.span.name, command.source, body.toList)
        stack = stack.tail
        true
      case _ =>
        false
    }

  // a plain-theory block is implicitly closed before the next structural command
  def flush(): Unit = { if (is_plain_theory(stack.head._1)) close() }

  def result(): List[Document] =
  {
    while (close()) { }
    stack.head._2.toList
  }

  def add(command: Command): Unit =
  {
    if (command.span.is_begin || is_plain_theory(command)) { flush(); open(command) }
    else if (command.span.is_end) { flush(); close() }

    stack.head._2 += Atom(command.source.length)
  }


  /* result structure */

  val spans = syntax.parse_spans(text)
  spans.foreach(span => add(Command(Document_ID.none, node_name, Command.no_blobs, span)))
  result()
}
|
83 |
||
84 |
||
85 |
||
63610 | 86 |
/** section headings **/ |
87 |
||
88 |
/* An item of document content: contributes source text and, optionally,
   a sectioning heading of some level. */
trait Item
{
  def heading_level: Option[Int] = None
  def source: String = ""
  def name: String = ""
}

/* Trivial item: no name, no source, no heading. */
object No_Item extends Item
63604 | 96 |
|
63610 | 97 |
/* Accumulate section structure from a flat sequence of items, nesting
   blocks according to their heading level.
   NOTE(review): the keywords parameter is not used within this class —
   presumably kept for interface uniformity; confirm against callers. */
class Sections(keywords: Keyword.Keywords)
{
  private def buffer(): mutable.ListBuffer[Document] = new mutable.ListBuffer[Document]

  // stack of open sections: (nesting level, heading item, collected body);
  // invariant: non-empty; the bottom entry collects the top-level documents
  private var stack: List[(Int, Item, mutable.ListBuffer[Document])] =
    List((0, No_Item, buffer()))

  // close all open sections whose level satisfies the predicate
  // (explicit ": Unit =" instead of deprecated procedure syntax)
  @tailrec private def close(level: Int => Boolean): Unit =
  {
    stack match {
      case (lev, item, body) :: (_, _, body2) :: _ if level(lev) =>
        body2 += Block(item.name, item.source, body.toList)
        stack = stack.tail
        close(level)
      case _ =>
    }
  }

  def result(): List[Document] =
  {
    close(_ => true)
    stack.head._3.toList
  }

  def add(item: Item): Unit =
  {
    item.heading_level match {
      case Some(i) =>
        // a heading closes all sections at the same or a deeper level
        close(_ > i)
        stack = (i + 1, item, buffer()) :: stack
      case None =>
    }
    stack.head._3 += Atom(item.source.length)
  }
}
63604 | 132 |
|
133 |
||
63610 | 134 |
/* outer syntax sections */ |
135 |
||
136 |
/* Sectioning item backed by an outer-syntax command. */
private class Command_Item(keywords: Keyword.Keywords, command: Command) extends Item
{
  override def name: String = command.span.name
  override def source: String = command.source

  /* Heading level: 0..5 for the formal sectioning commands
     (chapter .. subparagraph), 6 for any other theory command. */
  override def heading_level: Option[Int] =
  {
    if (name == Thy_Header.CHAPTER) Some(0)
    else if (name == Thy_Header.SECTION) Some(1)
    else if (name == Thy_Header.SUBSECTION) Some(2)
    else if (name == Thy_Header.SUBSUBSECTION) Some(3)
    else if (name == Thy_Header.PARAGRAPH) Some(4)
    else if (name == Thy_Header.SUBPARAGRAPH) Some(5)
    else if (is_theory_command(keywords, command)) Some(6)
    else None
  }
}
|
154 |
||
155 |
/* Parse the source text into section structure, based on the formal
   sectioning commands of the outer syntax. */
def parse_sections(
  syntax: Outer_Syntax,
  node_name: Document.Node.Name,
  text: CharSequence): List[Document] =
{
  val sections = new Sections(syntax.keywords)
  syntax.parse_spans(text).foreach(span =>
    sections.add(
      new Command_Item(syntax.keywords,
        Command(Document_ID.none, node_name, Command.no_blobs, span))))
  sections.result()
}
|
169 |
||
63604 | 170 |
|
63610 | 171 |
/* ML sections */ |
172 |
||
173 |
/* Sectioning item backed by a single ML token, with an externally
   determined heading level. */
private class ML_Item(token: ML_Lex.Token, level: Option[Int]) extends Item
{
  override def heading_level: Option[Int] = level
  override def source: String = token.source
}
|
178 |
||
179 |
/* Parse ML (or SML) source text into section structure, where sections
   are indicated by stand-alone comment lines whose bracket decoration
   determines the heading level: "(**** ... ****)" is level 0, down to
   "(* ... *)" at level 3. */
def parse_ml_sections(SML: Boolean, text: CharSequence): List[Document] =
{
  // heading level of a comment, by its bracket decoration; only comments
  // containing at least one letter or digit count as headings
  def comment_level(s: String): Option[Int] =
    if (Codepoint.iterator(s).exists(c => Character.isLetter(c) || Character.isDigit(c))) {
      if (s.startsWith("(**** ") && s.endsWith(" ****)")) Some(0)
      else if (s.startsWith("(*** ") && s.endsWith(" ***)")) Some(1)
      else if (s.startsWith("(** ") && s.endsWith(" **)")) Some(2)
      else if (s.startsWith("(* ") && s.endsWith(" *)")) Some(3)
      else None
    }
    else None

  val sections = new Sections(Keyword.Keywords.empty)
  val newline = new ML_Item(ML_Lex.Token(ML_Lex.Kind.SPACE, "\n"), None)

  var context: Scan.Line_Context = Scan.Finished
  for (line <- Library.separated_chunks(_ == '\n', text)) {
    val (tokens, next_context) = ML_Lex.tokenize_line(SML, line, context)

    // a heading line consists of exactly one comment token (besides space)
    val level =
      tokens.filterNot(_.is_space) match {
        case List(tok) if tok.is_comment => comment_level(tok.source)
        case _ => None
      }

    // headings are only recognized outside of multi-line lexical contexts
    val heading_ok =
      level.isDefined && context == Scan.Finished && next_context == Scan.Finished

    for (tok <- tokens) {
      sections.add(new ML_Item(tok, if (heading_ok && tok.is_comment) level else None))
    }

    sections.add(newline)
    context = next_context
  }
  sections.result()
}
212 |
} |