/*  Title:      Pure/Thy/bibtex.scala
    Author:     Makarius

BibTeX support.
*/

package isabelle


import java.io.{File => JFile}

import scala.collection.mutable
import scala.util.parsing.combinator.RegexParsers
import scala.util.parsing.input.Reader


object Bibtex {
  /** file format **/

  def is_bibtex(name: String): Boolean = name.endsWith(".bib")

  class File_Format extends isabelle.File_Format {
    val format_name: String = "bibtex"
    val file_ext: String = "bib"

    override def theory_suffix: String = "bibtex_file"
    override def theory_content(name: String): String =
      """theory "bib" imports Pure begin bibtex_file """ +
        Outer_Syntax.quote_string(name) + """ end"""

    override def html_document(snapshot: Document.Snapshot): Option[Presentation.HTML_Document] = {
      val name = snapshot.node_name
      if (detect(name.node)) {
        val title = "Bibliography " + quote(snapshot.node_name.path.file_name)
        val content =
          Isabelle_System.with_tmp_file("bib", "bib") { bib =>
            File.write(bib, snapshot.node.source)
            Bibtex.html_output(List(bib), style = "unsort", title = title)
          }
        Some(Presentation.HTML_Document(title, content))
      }
      else None
    }
  }
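
  /* Illustrative sketch (comment added for exposition, not part of the original
     interface): for a hypothetical file name "root.bib", theory_content above
     would produce the wrapper theory

       theory "bib" imports Pure begin bibtex_file "root.bib" end

     assuming Outer_Syntax.quote_string renders the name as a plain quoted string. */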


  /** bibtex errors **/

  def bibtex_errors(dir: Path, root_name: String): List[String] = {
    val log_path = dir + Path.explode(root_name).ext("blg")
    if (log_path.is_file) {
      val Error1 = """^(I couldn't open database file .+)$""".r
      val Error2 = """^(I found no .+)$""".r
      val Error3 = """^(.+)---line (\d+) of file (.+)""".r
      Line.logical_lines(File.read(log_path)).flatMap(line =>
        line match {
          case Error1(msg) => Some("Bibtex error: " + msg)
          case Error2(msg) => Some("Bibtex error: " + msg)
          case Error3(msg, Value.Int(l), file) =>
            val path = File.standard_path(file)
            if (Path.is_wellformed(path)) {
              val pos = Position.Line_File(l, (dir + Path.explode(path)).canonical.implode)
              Some("Bibtex error" + Position.here(pos) + ":\n  " + msg)
            }
            else None
          case _ => None
        })
    }
    else Nil
  }
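
  /* Illustrative sketch (hypothetical log line, not from the original file): a
     .blg line such as

       I was expecting a `,' or a `}'---line 3 of file root.bib

     matches Error3 above and is reported as a "Bibtex error" message carrying a
     Position for line 3 of the resolved root.bib path. */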


  /** check database **/

  def check_database(database: String): (List[(String, Position.T)], List[(String, Position.T)]) = {
    val chunks = parse(Line.normalize(database))
    var chunk_pos = Map.empty[String, Position.T]
    val tokens = new mutable.ListBuffer[(Token, Position.T)]
    var line = 1
    var offset = 1

    def make_pos(length: Int): Position.T =
      Position.Offset(offset) ::: Position.End_Offset(offset + length) ::: Position.Line(line)

    def advance_pos(tok: Token): Unit = {
      for (s <- Symbol.iterator(tok.source)) {
        if (Symbol.is_newline(s)) line += 1
        offset += 1
      }
    }

    def get_line_pos(l: Int): Position.T =
      if (0 < l && l <= tokens.length) tokens(l - 1)._2 else Position.none

    for (chunk <- chunks) {
      val name = chunk.name
      if (name != "" && !chunk_pos.isDefinedAt(name)) {
        chunk_pos += (name -> make_pos(chunk.heading_length))
      }
      for (tok <- chunk.tokens) {
        tokens += (tok.copy(source = tok.source.replace("\n", " ")) -> make_pos(tok.source.length))
        advance_pos(tok)
      }
    }

    Isabelle_System.with_tmp_dir("bibtex") { tmp_dir =>
      File.write(tmp_dir + Path.explode("root.bib"),
        tokens.iterator.map(p => p._1.source).mkString("", "\n", "\n"))
      File.write(tmp_dir + Path.explode("root.aux"),
        "\\bibstyle{plain}\n\\bibdata{root}\n\\citation{*}")
      Isabelle_System.bash("\"$ISABELLE_BIBTEX\" root", cwd = tmp_dir.file)

      val Error = """^(.*)---line (\d+) of file root.bib$""".r
      val Warning = """^Warning--(.+)$""".r
      val Warning_Line = """--line (\d+) of file root.bib$""".r
      val Warning_in_Chunk = """^Warning--(.+) in (.+)$""".r

      val log_file = tmp_dir + Path.explode("root.blg")
      val lines = if (log_file.is_file) Line.logical_lines(File.read(log_file)) else Nil

      val (errors, warnings) =
        if (lines.isEmpty) (Nil, Nil)
        else {
          lines.zip(lines.tail ::: List("")).flatMap(
            {
              case (Error(msg, Value.Int(l)), _) =>
                Some((true, (msg, get_line_pos(l))))
              case (Warning_in_Chunk(msg, name), _) if chunk_pos.isDefinedAt(name) =>
                Some((false, (Word.capitalize(msg + " in entry " + quote(name)), chunk_pos(name))))
              case (Warning(msg), Warning_Line(Value.Int(l))) =>
                Some((false, (Word.capitalize(msg), get_line_pos(l))))
              case (Warning(msg), _) =>
                Some((false, (Word.capitalize(msg), Position.none)))
              case _ => None
            }
          ).partition(_._1)
        }
      (errors.map(_._2), warnings.map(_._2))
    }
  }

  object Check_Database extends Scala.Fun_String("bibtex_check_database") {
    val here = Scala_Project.here
    def apply(database: String): String = {
      import XML.Encode._
      YXML.string_of_body(pair(list(pair(string, properties)), list(pair(string, properties)))(
        check_database(database)))
    }
  }
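
  /* Illustrative usage sketch (hypothetical entry, not from the original file):

       val (errors, warnings) =
         Bibtex.check_database("@Article{key, author = {A}, title = {T}}")

     runs "$ISABELLE_BIBTEX" on a temporary copy of the text; for this entry the
     result would typically contain no errors but warnings such as
     Missing journal in entry "key", each paired with a position into the source. */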


  /** document model **/

  /* entries */

  def entries(text: String): List[Text.Info[String]] = {
    val result = new mutable.ListBuffer[Text.Info[String]]
    var offset = 0
    for (chunk <- Bibtex.parse(text)) {
      val end_offset = offset + chunk.source.length
      if (chunk.name != "" && !chunk.is_command)
        result += Text.Info(Text.Range(offset, end_offset), chunk.name)
      offset = end_offset
    }
    result.toList
  }
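
  /* Illustrative sketch (hypothetical input, not from the original file):

       entries("@Misc{foo, title = {Bar}} @string{abc = {X}}")

     would yield one Text.Info covering the range of the @Misc chunk with name
     "foo"; the @string chunk is a command and is therefore skipped. */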

  def entries_iterator[A, B <: Document.Model](
    models: Map[A, B]
  ): Iterator[Text.Info[(String, B)]] = {
    for {
      (_, model) <- models.iterator
      info <- model.bibtex_entries.iterator
    } yield info.map((_, model))
  }


  /* completion */

  def completion[A, B <: Document.Model](
    history: Completion.History,
    rendering: Rendering,
    caret: Text.Offset,
    models: Map[A, B]
  ): Option[Completion.Result] = {
    for {
      Text.Info(r, name) <- rendering.citations(rendering.before_caret_range(caret)).headOption
      name1 <- Completion.clean_name(name)

      original <- rendering.get_text(r)
      original1 <- Completion.clean_name(Library.perhaps_unquote(original))

      entries =
        (for {
          Text.Info(_, (entry, _)) <- entries_iterator(models)
          if entry.toLowerCase.containsSlice(name1.toLowerCase) && entry != original1
        } yield entry).toList
      if entries.nonEmpty

      items =
        entries.sorted.map({
          case entry =>
            val full_name = Long_Name.qualify(Markup.CITATION, entry)
            val description = List(entry, "(BibTeX entry)")
            val replacement = quote(entry)
            Completion.Item(r, original, full_name, description, replacement, 0, false)
        }).sorted(history.ordering).take(rendering.options.int("completion_limit"))
    } yield Completion.Result(r, original, false, items)
  }


  /** content **/

  private val months = List(
    "jan",
    "feb",
    "mar",
    "apr",
    "may",
    "jun",
    "jul",
    "aug",
    "sep",
    "oct",
    "nov",
    "dec")
  def is_month(s: String): Boolean = months.contains(s.toLowerCase)

  private val commands = List("preamble", "string")
  def is_command(s: String): Boolean = commands.contains(s.toLowerCase)

  sealed case class Entry(
    kind: String,
    required: List[String],
    optional_crossref: List[String],
    optional_other: List[String]
  ) {
    val optional_standard: List[String] = List("url", "doi", "ee")

    def is_required(s: String): Boolean = required.contains(s.toLowerCase)
    def is_optional(s: String): Boolean =
      optional_crossref.contains(s.toLowerCase) ||
      optional_other.contains(s.toLowerCase) ||
      optional_standard.contains(s.toLowerCase)

    def fields: List[String] =
      required ::: optional_crossref ::: optional_other ::: optional_standard

    def template: String =
      "@" + kind + "{,\n" + fields.map(x => "  " + x + " = {},\n").mkString + "}\n"
  }
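
  /* Illustrative sketch (not part of the original file): for the "Article" entry
     defined below, template renders the required, crossref-optional, other-optional
     and standard fields in order:

       @Article{,
         author = {},
         title = {},
         journal = {},
         year = {},
         volume = {},
         number = {},
         pages = {},
         month = {},
         note = {},
         url = {},
         doi = {},
         ee = {},
       }
  */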

  val known_entries: List[Entry] =
    List(
      Entry("Article",
        List("author", "title"),
        List("journal", "year"),
        List("volume", "number", "pages", "month", "note")),
      Entry("InProceedings",
        List("author", "title"),
        List("booktitle", "year"),
        List("editor", "volume", "number", "series", "pages", "month", "address",
          "organization", "publisher", "note")),
      Entry("InCollection",
        List("author", "title", "booktitle"),
        List("publisher", "year"),
        List("editor", "volume", "number", "series", "type", "chapter", "pages",
          "edition", "month", "address", "note")),
      Entry("InBook",
        List("author", "editor", "title", "chapter"),
        List("publisher", "year"),
        List("volume", "number", "series", "type", "address", "edition", "month", "pages", "note")),
      Entry("Proceedings",
        List("title", "year"),
        List(),
        List("booktitle", "editor", "volume", "number", "series", "address", "month",
          "organization", "publisher", "note")),
      Entry("Book",
        List("author", "editor", "title"),
        List("publisher", "year"),
        List("volume", "number", "series", "address", "edition", "month", "note")),
      Entry("Booklet",
        List("title"),
        List(),
        List("author", "howpublished", "address", "month", "year", "note")),
      Entry("PhdThesis",
        List("author", "title", "school", "year"),
        List(),
        List("type", "address", "month", "note")),
      Entry("MastersThesis",
        List("author", "title", "school", "year"),
        List(),
        List("type", "address", "month", "note")),
      Entry("TechReport",
        List("author", "title", "institution", "year"),
        List(),
        List("type", "number", "address", "month", "note")),
      Entry("Manual",
        List("title"),
        List(),
        List("author", "organization", "address", "edition", "month", "year", "note")),
      Entry("Unpublished",
        List("author", "title", "note"),
        List(),
        List("month", "year")),
      Entry("Misc",
        List(),
        List(),
        List("author", "title", "howpublished", "month", "year", "note")))

  def get_entry(kind: String): Option[Entry] =
    known_entries.find(entry => entry.kind.toLowerCase == kind.toLowerCase)

  def is_entry(kind: String): Boolean = get_entry(kind).isDefined
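
  /* Illustrative sketch (not part of the original file): lookup is case-insensitive,
     so get_entry("article") and get_entry("ARTICLE") both return the "Article" entry
     above, while is_entry("comment") is false ("@comment" is handled by the parser
     as ignored text instead). */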


  /** tokens and chunks **/

  object Token {
    object Kind extends Enumeration {
      val COMMAND = Value("command")
      val ENTRY = Value("entry")
      val KEYWORD = Value("keyword")
      val NAT = Value("natural number")
      val STRING = Value("string")
      val NAME = Value("name")
      val IDENT = Value("identifier")
      val SPACE = Value("white space")
      val COMMENT = Value("ignored text")
      val ERROR = Value("bad input")
    }
  }

  sealed case class Token(kind: Token.Kind.Value, source: String) {
    def is_kind: Boolean =
      kind == Token.Kind.COMMAND ||
      kind == Token.Kind.ENTRY ||
      kind == Token.Kind.IDENT
    def is_name: Boolean =
      kind == Token.Kind.NAME ||
      kind == Token.Kind.IDENT
    def is_ignored: Boolean =
      kind == Token.Kind.SPACE ||
      kind == Token.Kind.COMMENT
    def is_malformed: Boolean =
      kind == Token.Kind.ERROR
    def is_open: Boolean =
      kind == Token.Kind.KEYWORD && (source == "{" || source == "(")
  }

  case class Chunk(kind: String, tokens: List[Token]) {
    val source = tokens.map(_.source).mkString

    private val content: Option[List[Token]] =
      tokens match {
        case Token(Token.Kind.KEYWORD, "@") :: body if body.nonEmpty =>
          (body.init.filterNot(_.is_ignored), body.last) match {
            case (tok :: Token(Token.Kind.KEYWORD, "{") :: toks, Token(Token.Kind.KEYWORD, "}"))
            if tok.is_kind => Some(toks)

            case (tok :: Token(Token.Kind.KEYWORD, "(") :: toks, Token(Token.Kind.KEYWORD, ")"))
            if tok.is_kind => Some(toks)

            case _ => None
          }
        case _ => None
      }

    def name: String =
      content match {
        case Some(tok :: _) if tok.is_name => tok.source
        case _ => ""
      }

    def heading_length: Int =
      if (name == "") 1
      else {
        tokens.takeWhile(tok => !tok.is_open).foldLeft(0) { case (n, tok) => n + tok.source.length }
      }

    def is_ignored: Boolean = kind == "" && tokens.forall(_.is_ignored)
    def is_malformed: Boolean = kind == "" || tokens.exists(_.is_malformed)
    def is_command: Boolean = Bibtex.is_command(kind) && name != "" && content.isDefined
    def is_entry: Boolean = Bibtex.is_entry(kind) && name != "" && content.isDefined
  }
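
  /* Illustrative sketch (hypothetical input, not from the original file): parsing
     "@Book{isa, title = {X}}" yields a single Chunk with kind "Book", whose tokens
     are roughly

       KEYWORD "@", ENTRY "Book", KEYWORD "{", NAME "isa", KEYWORD ",",
       SPACE " ", IDENT "title", SPACE " ", KEYWORD "=", SPACE " ",
       STRING "{X}", KEYWORD "}"

     so name == "isa", is_entry holds, and heading_length covers just "@Book". */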


  /** parsing **/

  // context of partial line-oriented scans
  abstract class Line_Context
  case object Ignored extends Line_Context
  case object At extends Line_Context
  case class Item_Start(kind: String) extends Line_Context
  case class Item_Open(kind: String, end: String) extends Line_Context
  case class Item(kind: String, end: String, delim: Delimited) extends Line_Context

  case class Delimited(quoted: Boolean, depth: Int)
  val Closed = Delimited(false, 0)

  private def token(kind: Token.Kind.Value)(source: String): Token = Token(kind, source)
  private def keyword(source: String): Token = Token(Token.Kind.KEYWORD, source)


  // See also https://ctan.org/tex-archive/biblio/bibtex/base/bibtex.web
  // module @<Scan for and process a \.{.bib} command or database entry@>.

  object Parsers extends RegexParsers {
    /* white space and comments */

    override val whiteSpace = "".r

    private val space = """[ \t\n\r]+""".r ^^ token(Token.Kind.SPACE)
    private val spaces = rep(space)


    /* ignored text */

    private val ignored: Parser[Chunk] =
      rep1("""(?i)([^@]+|@[ \t]*comment)""".r) ^^ {
        case ss => Chunk("", List(Token(Token.Kind.COMMENT, ss.mkString))) }

    private def ignored_line: Parser[(Chunk, Line_Context)] =
      ignored ^^ { case a => (a, Ignored) }


    /* delimited string: outermost "..." or {...} and body with balanced {...} */

    // see also bibtex.web: scan_a_field_token_and_eat_white, scan_balanced_braces
    private def delimited_depth(delim: Delimited): Parser[(String, Delimited)] =
      new Parser[(String, Delimited)] {
        require(if (delim.quoted) delim.depth > 0 else delim.depth >= 0,
          "bad delimiter depth")

        def apply(in: Input) = {
          val start = in.offset
          val end = in.source.length

          var i = start
          var q = delim.quoted
          var d = delim.depth
          var finished = false
          while (!finished && i < end) {
            val c = in.source.charAt(i)

            if (c == '"' && d == 0) { i += 1; d = 1; q = true }
            else if (c == '"' && d == 1 && q) {
              i += 1; d = 0; q = false; finished = true
            }
            else if (c == '{') { i += 1; d += 1 }
            else if (c == '}') {
              if (d == 1 && !q || d > 1) { i += 1; d -= 1; if (d == 0) finished = true }
              else { i = start; finished = true }
            }
            else if (d > 0) i += 1
            else finished = true
          }
          if (i == start) Failure("bad input", in)
          else {
            val s = in.source.subSequence(start, i).toString
            Success((s, Delimited(q, d)), in.drop(i - start))
          }
        }
      }.named("delimited_depth")

    private def delimited: Parser[Token] =
      delimited_depth(Closed) ^?
        { case (s, delim) if delim == Closed => Token(Token.Kind.STRING, s) }

    private def recover_delimited: Parser[Token] =
      """["{][^@]*""".r ^^ token(Token.Kind.ERROR)

    def delimited_line(ctxt: Item): Parser[(Chunk, Line_Context)] =
      delimited_depth(ctxt.delim) ^^ { case (s, delim1) =>
        (Chunk(ctxt.kind, List(Token(Token.Kind.STRING, s))), ctxt.copy(delim = delim1)) } |
      recover_delimited ^^ { case a => (Chunk(ctxt.kind, List(a)), Ignored) }


    /* other tokens */

    private val at = "@" ^^ keyword

    private val nat = "[0-9]+".r ^^ token(Token.Kind.NAT)

    private val name = """[\x21-\x7f&&[^"#%'(),={}]]+""".r ^^ token(Token.Kind.NAME)

    private val identifier =
      """[\x21-\x7f&&[^"#%'(),={}0-9]][\x21-\x7f&&[^"#%'(),={}]]*""".r

    private val ident = identifier ^^ token(Token.Kind.IDENT)

    val other_token = "[=#,]".r ^^ keyword | (nat | (ident | space))


    /* body */

    private val body =
      delimited | (recover_delimited | other_token)

    private def body_line(ctxt: Item) =
      if (ctxt.delim.depth > 0)
        delimited_line(ctxt)
      else
        delimited_line(ctxt) |
        other_token ^^ { case a => (Chunk(ctxt.kind, List(a)), ctxt) } |
        ctxt.end ^^ { case a => (Chunk(ctxt.kind, List(keyword(a))), Ignored) }


    /* items: command or entry */

    private val item_kind =
      identifier ^^ { case a =>
        val kind =
          if (is_command(a)) Token.Kind.COMMAND
          else if (is_entry(a)) Token.Kind.ENTRY
          else Token.Kind.IDENT
        Token(kind, a)
      }

    private val item_begin =
      "{" ^^ { case a => ("}", keyword(a)) } |
      "(" ^^ { case a => (")", keyword(a)) }

    private def item_name(kind: String) =
      kind.toLowerCase match {
        case "preamble" => failure("")
        case "string" => identifier ^^ token(Token.Kind.NAME)
        case _ => name
      }

    private val item_start =
      at ~ spaces ~ item_kind ~ spaces ^^
        { case a ~ b ~ c ~ d => (c.source, List(a) ::: b ::: List(c) ::: d) }

    private val item: Parser[Chunk] =
      (item_start ~ item_begin ~ spaces) into
        { case (kind, a) ~ ((end, b)) ~ c =>
            opt(item_name(kind)) ~ rep(body) ~ opt(end ^^ keyword) ^^ {
              case d ~ e ~ f => Chunk(kind, a ::: List(b) ::: c ::: d.toList ::: e ::: f.toList) } }

    private val recover_item: Parser[Chunk] =
      at ~ "[^@]*".r ^^ { case a ~ b => Chunk("", List(a, Token(Token.Kind.ERROR, b))) }


    /* chunks */

    val chunk: Parser[Chunk] = ignored | (item | recover_item)

    def chunk_line(ctxt: Line_Context): Parser[(Chunk, Line_Context)] = {
      ctxt match {
        case Ignored =>
          ignored_line |
          at ^^ { case a => (Chunk("", List(a)), At) }

        case At =>
          space ^^ { case a => (Chunk("", List(a)), ctxt) } |
          item_kind ^^ { case a => (Chunk(a.source, List(a)), Item_Start(a.source)) } |
          recover_item ^^ { case a => (a, Ignored) } |
          ignored_line

        case Item_Start(kind) =>
          space ^^ { case a => (Chunk(kind, List(a)), ctxt) } |
          item_begin ^^ { case (end, a) => (Chunk(kind, List(a)), Item_Open(kind, end)) } |
          recover_item ^^ { case a => (a, Ignored) } |
          ignored_line

        case Item_Open(kind, end) =>
          space ^^ { case a => (Chunk(kind, List(a)), ctxt) } |
          item_name(kind) ^^ { case a => (Chunk(kind, List(a)), Item(kind, end, Closed)) } |
          body_line(Item(kind, end, Closed)) |
          ignored_line

        case item_ctxt: Item =>
          body_line(item_ctxt) |
          ignored_line

        case _ => failure("")
      }
    }
  }


  /* parse */

  def parse(input: CharSequence): List[Chunk] =
    Parsers.parseAll(Parsers.rep(Parsers.chunk), Scan.char_reader(input)) match {
      case Parsers.Success(result, _) => result
      case _ => error("Unexpected failure to parse input:\n" + input.toString)
    }

  def parse_line(input: CharSequence, context: Line_Context): (List[Chunk], Line_Context) = {
    var in: Reader[Char] = Scan.char_reader(input)
    val chunks = new mutable.ListBuffer[Chunk]
    var ctxt = context
    while (!in.atEnd) {
      val result = Parsers.parse(Parsers.chunk_line(ctxt), in)
      (result : @unchecked) match {
        case Parsers.Success((x, c), rest) => chunks += x; ctxt = c; in = rest
        case Parsers.NoSuccess(_, rest) =>
          error("Unexpected failure to parse input:\n" + rest.source.toString)
      }
    }
    (chunks.toList, ctxt)
  }
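
  /* Illustrative sketch (hypothetical input, not from the original file):

       Bibtex.parse("preamble text @Misc{m, note = {n}}")

     returns two chunks: an ignored-text chunk for "preamble text " and an entry
     chunk named "m".  parse_line is the incremental variant used for line-oriented
     highlighting, threading a Line_Context value between consecutive lines. */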


  /** HTML output **/

  private val output_styles =
    List(
      "" -> "html-n",
      "plain" -> "html-n",
      "alpha" -> "html-a",
      "named" -> "html-n",
      "paragraph" -> "html-n",
      "unsort" -> "html-u",
      "unsortlist" -> "html-u")

  def html_output(bib: List[Path],
    title: String = "Bibliography",
    body: Boolean = false,
    citations: List[String] = List("*"),
    style: String = "",
    chronological: Boolean = false
  ): String = {
    Isabelle_System.with_tmp_dir("bibtex") { tmp_dir =>
      /* database files */

      val bib_files = bib.map(_.drop_ext)
      val bib_names = {
        val names0 = bib_files.map(_.file_name)
        if (Library.duplicates(names0).isEmpty) names0
        else names0.zipWithIndex.map({ case (name, i) => (i + 1).toString + "-" + name })
      }

      for ((a, b) <- bib_files zip bib_names) {
        Isabelle_System.copy_file(a.ext("bib"), tmp_dir + Path.basic(b).ext("bib"))
      }


      /* style file */

      val bst =
        output_styles.toMap.get(style) match {
          case Some(base) => base + (if (chronological) "c" else "") + ".bst"
          case None =>
            error("Bad style for bibtex HTML output: " + quote(style) +
              "\n(expected: " + commas_quote(output_styles.map(_._1)) + ")")
        }
      Isabelle_System.copy_file(Path.explode("$BIB2XHTML_HOME/bst") + Path.explode(bst), tmp_dir)


      /* result */

      val in_file = Path.explode("bib.aux")
      val out_file = Path.explode("bib.html")

      File.write(tmp_dir + in_file,
        bib_names.mkString("\\bibdata{", ",", "}\n") +
        citations.map(cite => "\\citation{" + cite + "}\n").mkString)

      Isabelle_System.bash(
        "\"$BIB2XHTML_HOME/main/bib2xhtml.pl\" -B \"$ISABELLE_BIBTEX\"" +
          " -u -s " + Bash.string(proper_string(style) getOrElse "empty") +
          (if (chronological) " -c" else "") +
          (if (title != "") " -h " + Bash.string(title) + " " else "") +
          " " + File.bash_path(in_file) + " " + File.bash_path(out_file),
        cwd = tmp_dir.file).check

      val html = File.read(tmp_dir + out_file)

      if (body) {
        cat_lines(
          split_lines(html).
            dropWhile(line => !line.startsWith("<!-- BEGIN BIBLIOGRAPHY")).reverse.
            dropWhile(line => !line.startsWith("<!-- END BIBLIOGRAPHY")).reverse)
      }
      else html
    }
  }
}