# HG changeset patch
# User wenzelm
# Date 1407795810 -7200
# Node ID 7fc36b4c7cce6023c783031fcab1b70fd55d2818
# Parent 020df63dd0a95f40c957f4946b047ce43f5bc6b7
tuned signature;

diff -r 020df63dd0a9 -r 7fc36b4c7cce src/Pure/Isar/outer_syntax.scala
--- a/src/Pure/Isar/outer_syntax.scala	Tue Aug 12 00:17:02 2014 +0200
+++ b/src/Pure/Isar/outer_syntax.scala	Tue Aug 12 00:23:30 2014 +0200
@@ -124,18 +124,16 @@
 
   /* token language */
 
-  def scan(input: Reader[Char]): List[Token] =
+  def scan(input: CharSequence): List[Token] =
   {
+    var in: Reader[Char] = new CharSequenceReader(input)
     Token.Parsers.parseAll(
-      Token.Parsers.rep(Token.Parsers.token(lexicon, is_command)), input) match {
+      Token.Parsers.rep(Token.Parsers.token(lexicon, is_command)), in) match {
       case Token.Parsers.Success(tokens, _) => tokens
-      case _ => error("Unexpected failure of tokenizing input:\n" + input.source.toString)
+      case _ => error("Unexpected failure of tokenizing input:\n" + input.toString)
     }
   }
 
-  def scan(input: CharSequence): List[Token] =
-    scan(new CharSequenceReader(input))
-
   def scan_line(input: CharSequence, context: Scan.Line_Context): (List[Token], Scan.Line_Context) =
   {
     var in: Reader[Char] = new CharSequenceReader(input)