/*  Title:      Pure/ML/ml_lex.scala
    Author:     Makarius

Lexical syntax for SML.
*/

package isabelle


import scala.collection.mutable
import scala.util.parsing.input.{Reader, CharSequenceReader}


object ML_Lex
{
  /** keywords **/

  val keywords: Set[String] =
    Set("#", "(", ")", ",", "->", "...", ":", ":>", ";", "=", "=>",
      "[", "]", "_", "{", "|", "}", "abstype", "and", "andalso", "as",
      "case", "datatype", "do", "else", "end", "eqtype", "exception",
      "fn", "fun", "functor", "handle", "if", "in", "include",
      "infix", "infixr", "let", "local", "nonfix", "of", "op", "open",
      "orelse", "raise", "rec", "sharing", "sig", "signature",
      "struct", "structure", "then", "type", "val", "where", "while",
      "with", "withtype")

  val keywords2: Set[String] =
    Set("case", "do", "else", "end", "if", "in", "let", "local", "of",
      "sig", "struct", "then", "while", "with")

  val keywords3: Set[String] =
    Set("handle", "open", "raise")

  private val lexicon: Scan.Lexicon = Scan.Lexicon(keywords.toList: _*)


  /** tokens **/

  object Kind extends Enumeration
  {
    val KEYWORD = Value("keyword")
    val IDENT = Value("identifier")
    val LONG_IDENT = Value("long identifier")
    val TYPE_VAR = Value("type variable")
    val WORD = Value("word")
    val INT = Value("integer")
    val REAL = Value("real")
    val CHAR = Value("character")
    val STRING = Value("quoted string")
    val SPACE = Value("white space")
    val COMMENT = Value("comment text")
    val ERROR = Value("bad input")
  }

  sealed case class Token(val kind: Kind.Value, val source: String)
  {
    def is_keyword: Boolean = kind == Kind.KEYWORD
    def is_delimiter: Boolean = is_keyword && !Symbol.is_ascii_identifier(source)
  }
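
  /* Note: is_delimiter distinguishes symbolic keywords from alphanumeric
     ones, e.g. Token(Kind.KEYWORD, "=>").is_delimiter is true, while
     Token(Kind.KEYWORD, "andalso").is_delimiter is false. */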


  /** parsers **/

  case object ML_String extends Scan.Line_Context

  private object Parsers extends Scan.Parsers
  {
    /* string material */

    private val blanks = many(character(Symbol.is_ascii_blank))
    private val blanks1 = many1(character(Symbol.is_ascii_blank))

    private val gap = "\\" ~ blanks1 ~ "\\" ^^ { case x ~ y ~ z => x + y + z }
    private val gap_start = "\\" ~ blanks ~ """\z""".r ^^ { case x ~ y ~ _ => x + y }
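
    /* Note: an SML string gap is "\", whitespace, "\" and is ignored inside a
       string literal, so "a\   \b" denotes "ab".  gap_start matches a gap cut
       off by the end of the current line (the """\z""".r regex anchors at end
       of input), which is what lets string literals continue across lines in
       the line-oriented parsers below. */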

    private val escape =
      one(character("\"\\abtnvfr".contains(_))) |
      "^" ~ one(character(c => '@' <= c && c <= '_')) ^^ { case x ~ y => x + y } |
      repeated(character(Symbol.is_ascii_digit), 3, 3)

    private val str =
      one(Symbol.is_symbolic) |
      one(character(c => c != '"' && c != '\\' && ' ' <= c && c <= '~')) |
      "\\" ~ escape ^^ { case x ~ y => x + y }

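    /* Note: the escape alternatives cover what SML allows after a backslash:
       a single character as in \n or \", a control character such as \^C
       (with the character in the range '@' to '_'), or exactly three decimal
       digits as in \097. */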

    /* ML char -- without gaps */

    private val ml_char: Parser[Token] =
      "#\"" ~ str ~ "\"" ^^ { case x ~ y ~ z => Token(Kind.CHAR, x + y + z) }

    private val recover_ml_char: Parser[String] =
      "#\"" ~ opt(str) ^^ { case x ~ Some(y) => x + y case x ~ None => x }


    /* ML string */

    private val ml_string_body: Parser[String] =
      rep(gap | str) ^^ (_.mkString)

    private val recover_ml_string: Parser[String] =
      "\"" ~ ml_string_body ^^ { case x ~ y => x + y }

    private val ml_string: Parser[Token] =
      "\"" ~ ml_string_body ~ "\"" ^^ { case x ~ y ~ z => Token(Kind.STRING, x + y + z) }

    private def ml_string_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
    {
      def result(x: String, c: Scan.Line_Context) = (Token(Kind.STRING, x), c)

      ctxt match {
        case Scan.Finished =>
          "\"" ~ ml_string_body ~ ("\"" | gap_start) ^^
            { case x ~ y ~ z => result(x + y + z, if (z == "\"") Scan.Finished else ML_String) }
        case ML_String =>
          blanks ~ opt_term("\\" ~ ml_string_body ~ ("\"" | gap_start)) ^^
            { case x ~ Some(y ~ z ~ w) =>
                result(x + y + z + w, if (w == "\"") Scan.Finished else ML_String)
              case x ~ None => result(x, ML_String) }
        case _ => failure("")
      }
    }
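
    /* Note on line-oriented scanning: a string literal cut off at end of line
       by the start of a gap, as in the line
         val s = "abc\
       yields a STRING token plus context ML_String; the continuation line
         \def"
       closes the gap and the literal, returning the context to Scan.Finished. */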
|
    /* ML comment */

    private val ml_comment: Parser[Token] =
      comment ^^ (x => Token(Kind.COMMENT, x))

    private def ml_comment_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
      comment_line(ctxt) ^^ { case (x, c) => (Token(Kind.COMMENT, x), c) }


    /* delimited token */

    private def delimited_token: Parser[Token] =
      ml_char | (ml_string | ml_comment)

    private val recover_delimited: Parser[Token] =
      (recover_ml_char | (recover_ml_string | recover_comment)) ^^ (x => Token(Kind.ERROR, x))


    private def other_token: Parser[Token] =
    {
      /* identifiers */

      val letdigs = many(character(Symbol.is_ascii_letdig))

      val alphanumeric =
        one(character(Symbol.is_ascii_letter)) ~ letdigs ^^ { case x ~ y => x + y }

      val symbolic = many1(character("!#$%&*+-/:<=>?@\\^`|~".contains(_)))

      val ident = (alphanumeric | symbolic) ^^ (x => Token(Kind.IDENT, x))

      val long_ident =
        rep1(alphanumeric ~ "." ^^ { case x ~ y => x + y }) ~
          (alphanumeric | (symbolic | "=")) ^^
          { case x ~ y => Token(Kind.LONG_IDENT, x.mkString + y) }

      val type_var = "'" ~ letdigs ^^ { case x ~ y => Token(Kind.TYPE_VAR, x + y) }


      /* numerals */

      val dec = many1(character(Symbol.is_ascii_digit))
      val hex = many1(character(Symbol.is_ascii_hex))
      val sign = opt("~") ^^ { case Some(x) => x case None => "" }
      val decint = sign ~ dec ^^ { case x ~ y => x + y }
      val exp = ("E" | "e") ~ decint ^^ { case x ~ y => x + y }

      val word =
        ("0wx" ~ hex ^^ { case x ~ y => x + y } | "0w" ~ dec ^^ { case x ~ y => x + y }) ^^
          (x => Token(Kind.WORD, x))

      val int =
        sign ~ ("0x" ~ hex ^^ { case x ~ y => x + y } | dec) ^^
          { case x ~ y => Token(Kind.INT, x + y) }

      val real =
        (decint ~ "." ~ dec ~ (opt(exp) ^^ { case Some(x) => x case None => "" }) ^^
          { case x ~ y ~ z ~ w => x + y + z + w } |
         decint ~ exp ^^ { case x ~ y => x + y }) ^^ (x => Token(Kind.REAL, x))


      /* main */

      val space = blanks1 ^^ (x => Token(Kind.SPACE, x))

      val keyword = literal(lexicon) ^^ (x => Token(Kind.KEYWORD, x))

      val bad = one(_ => true) ^^ (x => Token(Kind.ERROR, x))

      space | (recover_delimited |
        (((word | (real | (int | (long_ident | (ident | type_var))))) ||| keyword) | bad))
    }
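
    /* Note: ||| is the longest-match alternative of the Scala parser
       combinators, so e.g. "funny" scans as one identifier rather than the
       keyword "fun" followed by "ny"; on a tie the keyword alternative wins,
       so "fun" alone is still a keyword. */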
|
    /* token */

    def token: Parser[Token] = delimited_token | other_token

    def token_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
    {
      val other = (ml_char | other_token) ^^ (x => (x, Scan.Finished))

      ml_string_line(ctxt) | (ml_comment_line(ctxt) | other)
    }
  }


  /* tokenize */

  def tokenize(input: CharSequence): List[Token] =
  {
    Parsers.parseAll(Parsers.rep(Parsers.token), new CharSequenceReader(input)) match {
      case Parsers.Success(tokens, _) => tokens
      case _ => error("Unexpected failure of tokenizing input:\n" + input.toString)
    }
  }
|
  def tokenize_line(input: CharSequence, context: Scan.Line_Context)
    : (List[Token], Scan.Line_Context) =
  {
    var in: Reader[Char] = new CharSequenceReader(input)
    val toks = new mutable.ListBuffer[Token]
    var ctxt = context
    while (!in.atEnd) {
      Parsers.parse(Parsers.token_line(ctxt), in) match {
        case Parsers.Success((x, c), rest) => { toks += x; ctxt = c; in = rest }
        case Parsers.NoSuccess(_, rest) =>
          error("Unexpected failure of tokenizing input:\n" + rest.source.toString)
      }
    }
    (toks.toList, ctxt)
  }
}
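
/* Example usage (a sketch; assumes the surrounding Isabelle/Scala environment
   with Scan and Symbol in scope, as used above):

     ML_Lex.tokenize("val x = 1")
     //  List(keyword "val", space, identifier "x", space, keyword "=",
     //       space, integer "1")

     val (toks1, ctxt1) = ML_Lex.tokenize_line("val s = \"abc\\", Scan.Finished)
     // ctxt1 == ML_Lex.ML_String: the string literal continues on the next line
     val (toks2, ctxt2) = ML_Lex.tokenize_line("  \\xyz\"", ctxt1)
     // ctxt2 == Scan.Finished: the literal is closed on this line
*/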