# HG changeset patch
# User wenzelm
# Date 1413906591 -7200
# Node ID 83b0f633190eda8290c5542ec11b6158908ba76a
# Parent  8f92f17d8781f4e15b8c49be1e44e953746c3f49
some structure matching, based on line token iterators;

diff -r 8f92f17d8781 -r 83b0f633190e src/Pure/PIDE/text.scala
--- a/src/Pure/PIDE/text.scala	Tue Oct 21 15:21:44 2014 +0200
+++ b/src/Pure/PIDE/text.scala	Tue Oct 21 17:49:51 2014 +0200
@@ -51,6 +51,8 @@
     def is_singularity: Boolean = start == stop
     def inflate_singularity: Range = if (is_singularity) Range(start, start + 1) else this
 
+    def touches(i: Offset): Boolean = start <= i && i <= stop
+
     def contains(i: Offset): Boolean = start == i || start < i && i < stop
     def contains(that: Range): Boolean = this.contains(that.start) && that.stop <= this.stop
     def overlaps(that: Range): Boolean = this.contains(that.start) || that.contains(this.start)
diff -r 8f92f17d8781 -r 83b0f633190e src/Tools/jEdit/src/structure_matching.scala
--- a/src/Tools/jEdit/src/structure_matching.scala	Tue Oct 21 15:21:44 2014 +0200
+++ b/src/Tools/jEdit/src/structure_matching.scala	Tue Oct 21 17:49:51 2014 +0200
@@ -16,23 +16,41 @@
 {
   object Isabelle_Matcher extends StructureMatcher
   {
-    def getMatch(text_area: TextArea): StructureMatcher.Match =
+    def find_pair(text_area: TextArea): Option[(Text.Range, Text.Range)] =
     {
       val buffer = text_area.getBuffer
       val caret_line = text_area.getCaretLine
+      val caret = text_area.getCaretPosition
 
       PIDE.session.recent_syntax match {
         case syntax: Outer_Syntax if syntax != Outer_Syntax.empty =>
-          Token_Markup.buffer_line_tokens(syntax, buffer, caret_line) match {
-            case Some(tokens) =>
-              // FIXME
-              null
-            case None => null
+          Token_Markup.line_token_iterator(syntax, buffer, caret_line).
+            find({ case (tok, r) => r.touches(caret) })
+          match {
+            case Some((tok, range1)) if (syntax.command_kind(tok, Keyword.qed_global)) =>
+              Token_Markup.line_token_reverse_iterator(syntax, buffer, caret_line).
+                dropWhile({ case (_, r) => caret <= r.stop }).
+                find({ case (tok, _) => syntax.command_kind(tok, Keyword.theory) })
+              match {
+                case Some((tok, range2)) if syntax.command_kind(tok, Keyword.theory_goal) =>
+                  Some((range1, range2))
+                case _ => None
+              }
+            case _ => None
           }
-        case _ => null
+        case _ => None
       }
     }
 
+    def getMatch(text_area: TextArea): StructureMatcher.Match =
+      find_pair(text_area) match {
+        case Some((_, range)) =>
+          val line = text_area.getBuffer.getLineOfOffset(range.start)
+          new StructureMatcher.Match(Structure_Matching.Isabelle_Matcher,
+            line, range.start, line, range.stop)
+        case None => null
+      }
+
     def selectMatch(text_area: TextArea)
     {
       // FIXME
diff -r 8f92f17d8781 -r 83b0f633190e src/Tools/jEdit/src/token_markup.scala
--- a/src/Tools/jEdit/src/token_markup.scala	Tue Oct 21 15:21:44 2014 +0200
+++ b/src/Tools/jEdit/src/token_markup.scala	Tue Oct 21 17:49:51 2014 +0200
@@ -210,8 +210,10 @@
       case _ => Line_Context.init
     }
 
-  def buffer_line_tokens(syntax: Outer_Syntax, buffer: JEditBuffer, line: Int)
-    : Option[List[Token]] =
+
+  /* line tokens */
+
+  def try_line_tokens(syntax: Outer_Syntax, buffer: JEditBuffer, line: Int): Option[List[Token]] =
   {
     val line_context =
       if (line == 0) Line_Context.init
@@ -222,6 +224,28 @@
     } yield syntax.scan_line(text, ctxt)._1
   }
 
+  def line_token_iterator(
+    syntax: Outer_Syntax, buffer: JEditBuffer, start_line: Int): Iterator[(Token, Text.Range)] =
+    for {
+      line <- ((start_line max 0) until buffer.getLineCount).iterator
+      tokens <- try_line_tokens(syntax, buffer, line).iterator
+      starts =
+        tokens.iterator.scanLeft(buffer.getLineStartOffset(line))(
+          (i, tok) => i + tok.source.length)
+      (tok, i) <- tokens.iterator zip starts
+    } yield (tok, Text.Range(i, i + tok.source.length))
+
+  def line_token_reverse_iterator(
+    syntax: Outer_Syntax, buffer: JEditBuffer, start_line: Int): Iterator[(Token, Text.Range)] =
+    for {
+      line <- Range.inclusive(start_line min (buffer.getLineCount - 1), 0, -1).iterator
+      tokens <- try_line_tokens(syntax, buffer, line).iterator
+      stops =
+        tokens.reverseIterator.scanLeft(buffer.getLineEndOffset(line) min buffer.getLength)(
+          (i, tok) => i - tok.source.length)
+      (tok, i) <- tokens.reverseIterator zip stops
+    } yield (tok, Text.Range(i - tok.source.length, i))
+
 
   /* token marker */
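
Illustrative sketch, not part of the patch: line_token_iterator above pairs each token with its Text.Range by scanning the accumulated source lengths from the line's start offset. The same idea reduced to plain strings (tokens and lineStart are made-up sample data standing in for buffer tokens and buffer.getLineStartOffset):

object LineTokenRangeSketch {
  def main(args: Array[String]): Unit = {
    // hypothetical tokens of one buffer line, modelled as plain strings
    val tokens = List("lemma", " ", "foo:", " ", "\"A ==> A\"")
    val lineStart = 100  // stand-in for buffer.getLineStartOffset(line)

    // scanLeft accumulates the start offset of each token ...
    val starts = tokens.iterator.scanLeft(lineStart)((i, tok) => i + tok.length)
    // ... and zipping tokens with their start offsets gives (token, range) pairs,
    // analogous to (tok, Text.Range(i, i + tok.source.length)) in the patch
    for ((tok, i) <- tokens.iterator zip starts)
      println(s"[$i, ${i + tok.length})  " + tok)
  }
}

line_token_reverse_iterator works the same way in the other direction, scanning backwards from the line's end offset and subtracting token lengths.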