author | wenzelm |
Mon, 07 Oct 2019 10:51:20 +0200 | |
changeset 70795 | a90e40118874 |
parent 70794 | da647a0c8313 |
child 70796 | 2739631ac368 |
permissions | -rw-r--r-- |
69012 | 1 |
/*  Title:      Pure/PIDE/headless.scala
    Author:     Makarius

Headless PIDE session and resources from file-system.
*/

package isabelle
|
8 |
||
9 |
||
67925 | 10 |
import java.io.{File => JFile} |
11 |
||
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
12 |
import scala.annotation.tailrec |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
13 |
import scala.collection.mutable |
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
14 |
|
67925 | 15 |
|
69012 | 16 |
object Headless |
67054 | 17 |
{ |
69012 | 18 |
/** session **/ |
67061
2efa25302f34
synchronous session start (similar to isabelle.vscode.Server);
wenzelm
parents:
67059
diff
changeset
|
19 |
|
68916 | 20 |
/* Snapshot of the given node, asserted to agree with the expected
   document version (i.e. a "stable" snapshot). */
private def stable_snapshot(
  state: Document.State, version: Document.Version, name: Document.Node.Name): Document.Snapshot =
{
  val result = state.snapshot(name)
  assert(version.id == result.version.id)
  result
}
|
27 |
||
69013 | 28 |
/* Result of use_theories: final document state/version with per-node status,
   both for all processed nodes and for the already committed ones. */
class Use_Theories_Result private[Headless](
  val state: Document.State,
  val version: Document.Version,
  val nodes: List[(Document.Node.Name, Document_Status.Node_Status)],
  val nodes_committed: List[(Document.Node.Name, Document_Status.Node_Status)])
{
  /* nodes that have not been committed */
  def nodes_pending: List[(Document.Node.Name, Document_Status.Node_Status)] =
  {
    val committed = nodes_committed.iterator.map(_._1).toSet
    nodes.filterNot(p => committed(p._1))
  }

  def snapshot(name: Document.Node.Name): Document.Snapshot =
    stable_snapshot(state, version, name)

  /* overall success: every node status is ok */
  def ok: Boolean =
    (nodes.iterator ++ nodes_committed.iterator).forall(_._2.ok)
}
46 |
||
70774
64751a7abfa6
clarified share_common_data: after finished checkpoint, before next edits;
wenzelm
parents:
70772
diff
changeset
|
47 |
/* One loading step: theories to load next, paired with the
   share_common_data flag passed on to resources.load_theories. */
private type Load = (List[Document.Node.Name], Boolean)
private val no_load: Load = (Nil, false)
64751a7abfa6
clarified share_common_data: after finished checkpoint, before next edits;
wenzelm
parents:
70772
diff
changeset
|
49 |
|
70765 | 50 |
/* State machine for staged theory loading: checkpoint targets are loaded
   one at a time, the remaining theories in bulk (optionally bounded by
   "limit" to reduce resource requirements). */
private sealed abstract class Load_State
{
  /* Determine the next Load action and the successor state, given the
     dependency graph and a predicate for already finished theories. */
  def next(
    limit: Int,
    dep_graph: Document.Node.Name.Graph[Unit],
    finished: Document.Node.Name => Boolean): (Load, Load_State) =
  {
    /* unfinished maximal nodes, sorted with the deepest ones first */
    def make_pending(maximals: List[Document.Node.Name]): List[Document.Node.Name] =
    {
      val pending = maximals.filterNot(finished)
      if (pending.isEmpty || pending.tail.isEmpty) pending
      else {
        val depth = dep_graph.node_depth(_ => 1)
        pending.sortBy(node => - depth(node))
      }
    }

    /* restart loading via a fresh Load_Init over the given checkpoints */
    def load_checkpoints(checkpoints: List[Document.Node.Name]): (Load, Load_State) =
      Load_Init(checkpoints).next(limit, dep_graph, finished)

    /* load the requirements of the pending theories, split into stages
       when a positive limit is given */
    def load_requirements(
      pending: List[Document.Node.Name],
      checkpoints: List[Document.Node.Name] = Nil,
      share_common_data: Boolean = false): (Load, Load_State) =
    {
      if (pending.isEmpty) load_checkpoints(checkpoints)
      else if (limit == 0) {
        val requirements = dep_graph.all_preds(pending).reverse
        ((requirements, share_common_data), Load_Bulk(pending, Nil, checkpoints))
      }
      else {
        val reachable = dep_graph.reachable_limit(limit, _ => 1, dep_graph.imm_preds, pending)
        val (pending1, pending2) = pending.partition(reachable)
        val requirements = dep_graph.all_preds(pending1).reverse
        ((requirements, share_common_data), Load_Bulk(pending1, pending2, checkpoints))
      }
    }

    val result: (Load, Load_State) =
      this match {
        case Load_Init(Nil) =>
          val pending = make_pending(dep_graph.maximals)
          if (pending.isEmpty) (no_load, Load_Finished)
          else load_requirements(pending)
        case Load_Init(target :: checkpoints) =>
          /* load the first checkpoint target on its own, without sharing */
          val requirements = dep_graph.all_preds(List(target)).reverse
          ((requirements, false), Load_Target(target, checkpoints))
        case Load_Target(pending, checkpoints) if finished(pending) =>
          /* checkpoint finished: continue with its successors, excluding
             the subgraphs of the remaining checkpoints */
          val dep_graph1 =
            if (checkpoints.isEmpty) dep_graph
            else dep_graph.exclude(dep_graph.all_succs(checkpoints).toSet)
          val dep_graph2 =
            dep_graph1.restrict(dep_graph.all_succs(List(pending)).toSet)
          val pending2 = make_pending(dep_graph.maximals.filter(dep_graph2.defined))
          load_requirements(pending2, checkpoints = checkpoints, share_common_data = true)
        case Load_Bulk(pending, remaining, checkpoints) if pending.forall(finished) =>
          load_requirements(remaining, checkpoints = checkpoints)
        case state => (no_load, state)
      }

    /* theories that are already finished are never re-loaded */
    val ((load_theories, share_common_data), state1) = result
    ((load_theories.filterNot(finished), share_common_data), state1)
  }
}
70765 | 114 |
/* Load_State cases: initial checkpoints, a single pending checkpoint
   target, a bulk stage with remainder, and the terminal state. */
private case class Load_Init(checkpoints: List[Document.Node.Name]) extends Load_State
private case class Load_Target(
  pending: Document.Node.Name, checkpoints: List[Document.Node.Name]) extends Load_State
private case class Load_Bulk(
  pending: List[Document.Node.Name],
  remaining: List[Document.Node.Name],
  checkpoints: List[Document.Node.Name]) extends Load_State
private case object Load_Finished extends Load_State
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
122 |
|
69520 | 123 |
class Session private[Headless]( |
124 |
session_name: String, |
|
125 |
_session_options: => Options, |
|
126 |
override val resources: Resources) extends isabelle.Session(_session_options, resources) |
|
127 |
{ |
|
128 |
session => |
|
68694
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
129 |
|
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
130 |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
131 |
/* theory that is already part of the (static) session base */
private def loaded_theory(name: Document.Node.Name): Boolean =
  resources.session_base.loaded_theory(name.theory)
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
133 |
|
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
134 |
|
69520 | 135 |
/* options */ |
136 |
||
70787 | 137 |
/* session options ("headless_*"), looked up dynamically per access */
override def consolidate_delay: Time = session_options.seconds("headless_consolidate_delay")
override def prune_delay: Time = session_options.seconds("headless_prune_delay")

def default_check_delay: Time = session_options.seconds("headless_check_delay")
def default_check_limit: Int = session_options.int("headless_check_limit")
def default_nodes_status_delay: Time = session_options.seconds("headless_nodes_status_delay")
def default_watchdog_timeout: Time = session_options.seconds("headless_watchdog_timeout")
def default_commit_cleanup_delay: Time = session_options.seconds("headless_commit_cleanup_delay")
|
67063 | 145 |
|
70772
030a6baa5cb2
support headless_load_limit for more scalable load process;
wenzelm
parents:
70770
diff
changeset
|
146 |
/* option "headless_load_limit": bound per loading stage, 0 = unbounded */
def load_limit: Int =
{
  val limit = session_options.int("headless_load_limit")
  if (limit == 0) Int.MaxValue else limit
}
030a6baa5cb2
support headless_load_limit for more scalable load process;
wenzelm
parents:
70770
diff
changeset
|
151 |
|
68922 | 152 |
|
153 |
/* temporary directory */ |
|
154 |
||
67925 | 155 |
/* temporary directory of this session, used as default master directory */
val tmp_dir: JFile = Isabelle_System.tmp_dir("server_session")
val tmp_dir_name: String = File.path(tmp_dir).implode

def master_directory(master_dir: String): String =
  proper_string(master_dir).getOrElse(tmp_dir_name)
|
160 |
||
67945 | 161 |
override def toString: String = session_name |
162 |
||
67925 | 163 |
/* Stop the session; the temporary directory is removed in any case,
   even if super.stop() fails. */
override def stop(): Process_Result =
{
  try { super.stop() }
  finally { Isabelle_System.rm_tree(tmp_dir) }
}
|
168 |
||
67936 | 169 |
|
170 |
/* theories */ |
|
171 |
||
68914 | 172 |
/* Internal state of use_theories: dependency graph, load state machine,
   watchdog bookkeeping, already committed nodes, and the optional final
   result. Immutable: all operations return an updated copy. */
private sealed case class Use_Theories_State(
  dep_graph: Document.Node.Name.Graph[Unit],
  load_state: Load_State,
  watchdog_timeout: Time,
  commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit],
  last_update: Time = Time.now(),
  nodes_status: Document_Status.Nodes_Status = Document_Status.Nodes_Status.empty,
  already_committed: Map[Document.Node.Name, Document_Status.Node_Status] = Map.empty,
  result: Option[Exn.Result[Use_Theories_Result]] = None)
{
  /* record fresh node status and reset the watchdog timer */
  def update(new_nodes_status: Document_Status.Nodes_Status): Use_Theories_State =
    copy(last_update = Time.now(), nodes_status = new_nodes_status)

  /* true if no progress for longer than a positive watchdog_timeout */
  def watchdog: Boolean =
    watchdog_timeout > Time.zero && Time.now() - last_update > watchdog_timeout

  def finished_result: Boolean = result.isDefined

  def join_result: Option[(Exn.Result[Use_Theories_Result], Use_Theories_State)] =
    if (finished_result) Some((result.get, this)) else None

  /* force an Interrupt result, unless a result is already present */
  def cancel_result: Use_Theories_State =
    if (finished_result) this else copy(result = Some(Exn.Exn(Exn.Interrupt())))

  /* Determine the frontier of already committed theories that may be
     removed from the dependency graph (in topological order), together
     with the correspondingly pruned state. */
  def clean_theories: (List[Document.Node.Name], Use_Theories_State) =
  {
    @tailrec def frontier(base: List[Document.Node.Name], front: Set[Document.Node.Name])
      : Set[Document.Node.Name] =
    {
      /* a node joins the frontier once all its successors are in it */
      val add = base.filter(name => dep_graph.imm_succs(name).forall(front))
      if (add.isEmpty) front
      else {
        val preds = add.map(dep_graph.imm_preds)
        val base1 = preds.tail.foldLeft(preds.head)(_ ++ _).toList.filter(already_committed.keySet)
        frontier(base1, front ++ add)
      }
    }

    if (already_committed.isEmpty) (Nil, this)
    else {
      /* start from committed nodes without successors */
      val base =
        (for {
          (name, (_, (_, succs))) <- dep_graph.iterator
          if succs.isEmpty && already_committed.isDefinedAt(name)
        } yield name).toList
      val clean = frontier(base, Set.empty)
      if (clean.isEmpty) (Nil, this)
      else {
        (dep_graph.topological_order.filter(clean),
          copy(dep_graph = dep_graph.exclude(clean)))
      }
    }
  }

  /* Commit newly consolidated theories, detect overall termination, and
     produce the next Load action plus updated state. */
  def check(state: Document.State, version: Document.Version, beyond_limit: Boolean)
    : ((List[Document.Node.Name], Boolean), Use_Theories_State) =
  {
    /* invoke the commit function on consolidated nodes whose imports are
       already committed (or belong to the loaded session base) */
    val already_committed1 =
      commit match {
        case None => already_committed
        case Some(commit_fn) =>
          dep_graph.topological_order.foldLeft(already_committed) {
            case (committed, name) =>
              def parents_committed: Boolean =
                version.nodes(name).header.imports.forall(parent =>
                  loaded_theory(parent) || committed.isDefinedAt(parent))
              if (!committed.isDefinedAt(name) && parents_committed &&
                  state.node_consolidated(version, name))
              {
                val snapshot = stable_snapshot(state, version, name)
                val status = Document_Status.Node_Status.make(state, version, name)
                commit_fn(snapshot, status)
                committed + (name -> status)
              }
              else committed
          }
      }

    def finished_theory(name: Document.Node.Name): Boolean =
      loaded_theory(name) ||
      (if (commit.isDefined) already_committed1.isDefinedAt(name)
       else state.node_consolidated(version, name))

    /* final result: forced via beyond_limit/watchdog, or all nodes done */
    val result1 =
      if (!finished_result &&
        (beyond_limit || watchdog ||
          dep_graph.keys_iterator.forall(name =>
            finished_theory(name) || nodes_status.quasi_consolidated(name))))
      {
        val nodes =
          (for {
            name <- dep_graph.keys_iterator
            if !loaded_theory(name)
          } yield (name -> Document_Status.Node_Status.make(state, version, name))).toList
        val nodes_committed =
          (for {
            name <- dep_graph.keys_iterator
            status <- already_committed1.get(name)
          } yield (name -> status)).toList
        Some(Exn.Res(new Use_Theories_Result(state, version, nodes, nodes_committed)))
      }
      else result

    val (load, load_state1) = load_state.next(load_limit, dep_graph, finished_theory(_))

    (load,
      copy(already_committed = already_committed1, result = result1, load_state = load_state1))
  }
}
281 |
||
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
282 |
def use_theories( |
67940
b4e80f062fbf
clarified signature -- eliminated somewhat pointless positions;
wenzelm
parents:
67939
diff
changeset
|
283 |
theories: List[String], |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
284 |
qualifier: String = Sessions.DRAFT, |
67881 | 285 |
master_dir: String = "", |
69920 | 286 |
unicode_symbols: Boolean = false, |
68943 | 287 |
check_delay: Time = default_check_delay, |
69520 | 288 |
check_limit: Int = default_check_limit, |
68947 | 289 |
watchdog_timeout: Time = default_watchdog_timeout, |
68943 | 290 |
nodes_status_delay: Time = default_nodes_status_delay, |
69458 | 291 |
id: UUID.T = UUID.random(), |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
292 |
checkpoints: Set[Document.Node.Name] = Set.empty, |
68916 | 293 |
// commit: must not block, must not fail |
294 |
commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit] = None, |
|
68981 | 295 |
commit_cleanup_delay: Time = default_commit_cleanup_delay, |
69013 | 296 |
progress: Progress = No_Progress): Use_Theories_Result = |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
297 |
{ |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
298 |
val dependencies = |
68894
1dbdad1b57a5
more robust: load_theories after consumer is installed;
wenzelm
parents:
68888
diff
changeset
|
299 |
{ |
1dbdad1b57a5
more robust: load_theories after consumer is installed;
wenzelm
parents:
68888
diff
changeset
|
300 |
val import_names = |
68923 | 301 |
theories.map(thy => |
302 |
resources.import_name(qualifier, master_directory(master_dir), thy) -> Position.none) |
|
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
303 |
resources.dependencies(import_names, progress = progress).check_errors |
68894
1dbdad1b57a5
more robust: load_theories after consumer is installed;
wenzelm
parents:
68888
diff
changeset
|
304 |
} |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
305 |
val dep_theories = dependencies.theories |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
306 |
val dep_theories_set = dep_theories.toSet |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
307 |
val dep_files = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
308 |
dependencies.loaded_files(false).flatMap(_._2). |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
309 |
map(path => Document.Node.Name(resources.append("", path))) |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
310 |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
311 |
val use_theories_state = |
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
312 |
{ |
70765 | 313 |
val load_state = |
314 |
Load_Init( |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
315 |
if (checkpoints.isEmpty) Nil |
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
316 |
else dependencies.theory_graph.topological_order.filter(checkpoints(_))) |
70697 | 317 |
Synchronized( |
70765 | 318 |
Use_Theories_State(dependencies.theory_graph, load_state, watchdog_timeout, commit)) |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
319 |
} |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
320 |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
321 |
def check_state(beyond_limit: Boolean = false) |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
322 |
{ |
70775
97d3485028b6
more sequential access to Session.manager.global_state: avoid minor divergence of tip version;
wenzelm
parents:
70774
diff
changeset
|
323 |
val state = session.get_state() |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
324 |
for (version <- state.stable_tip_version) { |
70774
64751a7abfa6
clarified share_common_data: after finished checkpoint, before next edits;
wenzelm
parents:
70772
diff
changeset
|
325 |
val (load_theories, share_common_data) = |
64751a7abfa6
clarified share_common_data: after finished checkpoint, before next edits;
wenzelm
parents:
70772
diff
changeset
|
326 |
use_theories_state.change_result(_.check(state, version, beyond_limit)) |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
327 |
if (load_theories.nonEmpty) { |
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
328 |
resources.load_theories( |
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
329 |
session, id, load_theories, dep_files, unicode_symbols, share_common_data, progress) |
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
330 |
} |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
331 |
} |
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
332 |
} |
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
333 |
|
67894
fee080c4045f
more robust check_state loop, even without session activity (e.g. idempotent use_theories);
wenzelm
parents:
67893
diff
changeset
|
334 |
val check_progress = |
68694
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
335 |
{ |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
336 |
var check_count = 0 |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
337 |
Event_Timer.request(Time.now(), repeat = Some(check_delay)) |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
338 |
{ |
70644
b23a6dfcfd57
clarified state variable: avoid extra mutability via Promise;
wenzelm
parents:
70640
diff
changeset
|
339 |
if (progress.stopped) use_theories_state.change(_.cancel_result) |
68694
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
340 |
else { |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
341 |
check_count += 1 |
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
342 |
check_state(check_limit > 0 && check_count > check_limit) |
68694
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
343 |
} |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
344 |
} |
03e104be99af
added check_delay / check_limit for more robust treatment of structurally broken theory sources (or genuine non-termination);
wenzelm
parents:
68365
diff
changeset
|
345 |
} |
67894
fee080c4045f
more robust check_state loop, even without session activity (e.g. idempotent use_theories);
wenzelm
parents:
67893
diff
changeset
|
346 |
|
68906 | 347 |
val consumer = |
348 |
{ |
|
349 |
val delay_nodes_status = |
|
350 |
Standard_Thread.delay_first(nodes_status_delay max Time.zero) { |
|
69818
60d0ee8f2ddb
more robust: avoid potentially unrelated snapshot for the sake of is_suppressed;
wenzelm
parents:
69817
diff
changeset
|
351 |
progress.nodes_status(use_theories_state.value.nodes_status) |
68906 | 352 |
} |
68770
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
353 |
|
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
354 |
val delay_commit_clean = |
68981 | 355 |
Standard_Thread.delay_first(commit_cleanup_delay max Time.zero) { |
70763
5fae55752c70
tuned messages (again) -- avoid confusion wrt. total remaining size;
wenzelm
parents:
70710
diff
changeset
|
356 |
val clean_theories = use_theories_state.change_result(_.clean_theories) |
70702 | 357 |
if (clean_theories.nonEmpty) { |
70770 | 358 |
progress.echo("Removing " + clean_theories.length + " theories ...") |
70702 | 359 |
resources.clean_theories(session, id, clean_theories) |
360 |
} |
|
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
361 |
} |
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
362 |
|
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
363 |
Session.Consumer[Session.Commands_Changed](getClass.getName) { |
68330 | 364 |
case changed => |
68770
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
365 |
if (changed.nodes.exists(dep_theories_set)) { |
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
366 |
val snapshot = session.snapshot() |
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
367 |
val state = snapshot.state |
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
368 |
val version = snapshot.version |
add44e2b8cb0
optional notification of nodes_status (via progress);
wenzelm
parents:
68758
diff
changeset
|
369 |
|
68957 | 370 |
val theory_progress = |
68914 | 371 |
use_theories_state.change_result(st => |
68903 | 372 |
{ |
68883
3653b3ad729e
clarified Thy_Resources.Session.use_theories: "terminated" node status is sufficient;
wenzelm
parents:
68771
diff
changeset
|
373 |
val domain = |
68914 | 374 |
if (st.nodes_status.is_empty) dep_theories_set |
68883
3653b3ad729e
clarified Thy_Resources.Session.use_theories: "terminated" node status is sufficient;
wenzelm
parents:
68771
diff
changeset
|
375 |
else changed.nodes.iterator.filter(dep_theories_set).toSet |
68899 | 376 |
|
68903 | 377 |
val (nodes_status_changed, nodes_status1) = |
69255
800b1ce96fce
more general support for Isabelle/PIDE file formats -- less hardwired Bibtex operations;
wenzelm
parents:
69035
diff
changeset
|
378 |
st.nodes_status.update(resources, state, version, |
68903 | 379 |
domain = Some(domain), trim = changed.assignment) |
68899 | 380 |
|
68903 | 381 |
if (nodes_status_delay >= Time.zero && nodes_status_changed) { |
68883
3653b3ad729e
clarified Thy_Resources.Session.use_theories: "terminated" node status is sufficient;
wenzelm
parents:
68771
diff
changeset
|
382 |
delay_nodes_status.invoke |
68899 | 383 |
} |
68883
3653b3ad729e
clarified Thy_Resources.Session.use_theories: "terminated" node status is sufficient;
wenzelm
parents:
68771
diff
changeset
|
384 |
|
68957 | 385 |
val theory_progress = |
68905 | 386 |
(for { |
69818
60d0ee8f2ddb
more robust: avoid potentially unrelated snapshot for the sake of is_suppressed;
wenzelm
parents:
69817
diff
changeset
|
387 |
(name, node_status) <- nodes_status1.present.iterator |
68959 | 388 |
if changed.nodes.contains(name) && !st.already_committed.isDefinedAt(name) |
68962 | 389 |
p1 = node_status.percentage |
390 |
if p1 > 0 && Some(p1) != st.nodes_status.get(name).map(_.percentage) |
|
391 |
} yield Progress.Theory(name.theory, percentage = Some(p1))).toList |
|
68903 | 392 |
|
68957 | 393 |
(theory_progress, st.update(nodes_status1)) |
68903 | 394 |
}) |
68330 | 395 |
|
68957 | 396 |
theory_progress.foreach(progress.theory(_)) |
68903 | 397 |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
398 |
check_state() |
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
399 |
|
68981 | 400 |
if (commit.isDefined && commit_cleanup_delay > Time.zero) { |
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
401 |
if (use_theories_state.value.finished_result) |
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
402 |
delay_commit_clean.revoke |
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
403 |
else delay_commit_clean.invoke |
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
404 |
} |
68330 | 405 |
} |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
406 |
} |
68906 | 407 |
} |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
408 |
|
67892 | 409 |
try { |
410 |
session.commands_changed += consumer |
|
70653
f7c5b30fc432
load theories in stages, to reduce ML heap requirements;
wenzelm
parents:
70649
diff
changeset
|
411 |
check_state() |
70644
b23a6dfcfd57
clarified state variable: avoid extra mutability via Promise;
wenzelm
parents:
70640
diff
changeset
|
412 |
use_theories_state.guarded_access(_.join_result) |
67894
fee080c4045f
more robust check_state loop, even without session activity (e.g. idempotent use_theories);
wenzelm
parents:
67893
diff
changeset
|
413 |
check_progress.cancel |
67892 | 414 |
} |
415 |
finally { |
|
68907 | 416 |
session.commands_changed -= consumer |
67892 | 417 |
resources.unload_theories(session, id, dep_theories) |
418 |
} |
|
67884
43af581d7d8e
unload_theories after consolidation -- reset node_required;
wenzelm
parents:
67883
diff
changeset
|
419 |
|
70644
b23a6dfcfd57
clarified state variable: avoid extra mutability via Promise;
wenzelm
parents:
70640
diff
changeset
|
420 |
Exn.release(use_theories_state.guarded_access(_.join_result)) |
67064
fb487246ef4f
synchronous use_theories, based on consolidated_state;
wenzelm
parents:
67063
diff
changeset
|
421 |
} |
67936 | 422 |
|
67939 | 423 |
def purge_theories( |
68915 | 424 |
theories: List[String], |
425 |
qualifier: String = Sessions.DRAFT, |
|
426 |
master_dir: String = "", |
|
427 |
all: Boolean = false): (List[Document.Node.Name], List[Document.Node.Name]) = |
|
428 |
{ |
|
68923 | 429 |
val nodes = |
430 |
if (all) None |
|
431 |
else Some(theories.map(resources.import_name(qualifier, master_directory(master_dir), _))) |
|
68915 | 432 |
resources.purge_theories(session, nodes) |
433 |
} |
|
67063 | 434 |
} |
435 |
||
67061
2efa25302f34
synchronous session start (similar to isabelle.vscode.Server);
wenzelm
parents:
67059
diff
changeset
|
436 |
|
67054 | 437 |
|
69012 | 438 |
/** resources **/ |
68922 | 439 |
|
69012 | 440 |
object Resources |
441 |
{ |
|
69536 | 442 |
def apply(base_info: Sessions.Base_Info, log: Logger = No_Logger): Resources = |
443 |
new Resources(base_info, log = log) |
|
444 |
||
445 |
def make( |
|
446 |
options: Options, |
|
447 |
session_name: String, |
|
448 |
session_dirs: List[Path] = Nil, |
|
449 |
include_sessions: List[String] = Nil, |
|
450 |
progress: Progress = No_Progress, |
|
451 |
log: Logger = No_Logger): Resources = |
|
452 |
{ |
|
453 |
val base_info = |
|
454 |
Sessions.base_info(options, session_name, dirs = session_dirs, |
|
455 |
include_sessions = include_sessions, progress = progress) |
|
456 |
apply(base_info, log = log) |
|
457 |
} |
|
458 |
||
69012 | 459 |
final class Theory private[Headless]( |
460 |
val node_name: Document.Node.Name, |
|
461 |
val node_header: Document.Node.Header, |
|
462 |
val text: String, |
|
463 |
val node_required: Boolean) |
|
68922 | 464 |
{ |
69012 | 465 |
override def toString: String = node_name.toString |
68922 | 466 |
|
69012 | 467 |
def node_perspective: Document.Node.Perspective_Text = |
468 |
Document.Node.Perspective(node_required, Text.Perspective.empty, Document.Node.Overlays.empty) |
|
68922 | 469 |
|
69012 | 470 |
def make_edits(text_edits: List[Text.Edit]): List[Document.Edit_Text] = |
471 |
List(node_name -> Document.Node.Deps(node_header), |
|
472 |
node_name -> Document.Node.Edits(text_edits), |
|
473 |
node_name -> node_perspective) |
|
68922 | 474 |
|
69012 | 475 |
def node_edits(old: Option[Theory]): List[Document.Edit_Text] = |
476 |
{ |
|
477 |
val (text_edits, old_required) = |
|
478 |
if (old.isEmpty) (Text.Edit.inserts(0, text), false) |
|
479 |
else (Text.Edit.replace(0, old.get.text, text), old.get.node_required) |
|
67887
a4d5342898b1
unload_theories: actually observe required state;
wenzelm
parents:
67885
diff
changeset
|
480 |
|
69012 | 481 |
if (text_edits.isEmpty && node_required == old_required) Nil |
482 |
else make_edits(text_edits) |
|
483 |
} |
|
67887
a4d5342898b1
unload_theories: actually observe required state;
wenzelm
parents:
67885
diff
changeset
|
484 |
|
69012 | 485 |
def purge_edits: List[Document.Edit_Text] = |
486 |
make_edits(Text.Edit.removes(0, text)) |
|
67936 | 487 |
|
69012 | 488 |
def required(required: Boolean): Theory = |
489 |
if (required == node_required) this |
|
490 |
else new Theory(node_name, node_header, text, required) |
|
67936 | 491 |
} |
492 |
||
69012 | 493 |
sealed case class State( |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
494 |
blobs: Map[Document.Node.Name, Document.Blob] = Map.empty, |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
495 |
theories: Map[Document.Node.Name, Theory] = Map.empty, |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
496 |
required: Multi_Map[Document.Node.Name, UUID.T] = Multi_Map.empty) |
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
497 |
{ |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
498 |
/* blobs */ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
499 |
|
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
500 |
def doc_blobs: Document.Blobs = Document.Blobs(blobs) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
501 |
|
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
502 |
def update_blobs(names: List[Document.Node.Name]): (Document.Blobs, State) = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
503 |
{ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
504 |
val new_blobs = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
505 |
names.flatMap(name => |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
506 |
{ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
507 |
val bytes = Bytes.read(name.path) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
508 |
def new_blob: Document.Blob = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
509 |
{ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
510 |
val text = bytes.text |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
511 |
Document.Blob(bytes, text, Symbol.Text_Chunk(text), changed = true) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
512 |
} |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
513 |
blobs.get(name) match { |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
514 |
case Some(blob) => if (blob.bytes == bytes) None else Some(name -> new_blob) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
515 |
case None => Some(name -> new_blob) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
516 |
} |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
517 |
}) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
518 |
val blobs1 = (blobs /: new_blobs)(_ + _) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
519 |
val blobs2 = (blobs /: new_blobs)({ case (map, (a, b)) => map + (a -> b.unchanged) }) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
520 |
(Document.Blobs(blobs1), copy(blobs = blobs2)) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
521 |
} |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
522 |
|
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
523 |
def blob_edits(name: Document.Node.Name, old_blob: Option[Document.Blob]) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
524 |
: List[Document.Edit_Text] = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
525 |
{ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
526 |
val blob = blobs.getOrElse(name, error("Missing blob " + quote(name.toString))) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
527 |
val text_edits = |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
528 |
old_blob match { |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
529 |
case None => List(Text.Edit.insert(0, blob.source)) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
530 |
case Some(blob0) => Text.Edit.replace(0, blob0.source, blob.source) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
531 |
} |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
532 |
if (text_edits.isEmpty) Nil |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
533 |
else List(name -> Document.Node.Blob(blob), name -> Document.Node.Edits(text_edits)) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
534 |
} |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
535 |
|
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
536 |
|
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
537 |
/* theories */ |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
538 |
|
70674
29bb1ebb188f
clarified signature: proper Document.Node.Ordering conforming to equality (e.g. required in situations where theory names are ambiguous due to overlapping session directories);
wenzelm
parents:
70657
diff
changeset
|
539 |
lazy val theory_graph: Document.Node.Name.Graph[Unit] = |
29bb1ebb188f
clarified signature: proper Document.Node.Ordering conforming to equality (e.g. required in situations where theory names are ambiguous due to overlapping session directories);
wenzelm
parents:
70657
diff
changeset
|
540 |
Document.Node.Name.make_graph( |
69012 | 541 |
for ((name, theory) <- theories.toList) |
70647 | 542 |
yield ((name, ()), theory.node_header.imports.filter(theories.isDefinedAt(_)))) |
67056 | 543 |
|
69012 | 544 |
def is_required(name: Document.Node.Name): Boolean = required.isDefinedAt(name) |
545 |
||
69458 | 546 |
def insert_required(id: UUID.T, names: List[Document.Node.Name]): State = |
69012 | 547 |
copy(required = (required /: names)(_.insert(_, id))) |
548 |
||
69458 | 549 |
def remove_required(id: UUID.T, names: List[Document.Node.Name]): State = |
69012 | 550 |
copy(required = (required /: names)(_.remove(_, id))) |
68958 | 551 |
|
69012 | 552 |
def update_theories(update: List[(Document.Node.Name, Theory)]): State = |
553 |
copy(theories = |
|
554 |
(theories /: update)({ case (thys, (name, thy)) => |
|
555 |
thys.get(name) match { |
|
556 |
case Some(thy1) if thy1 == thy => thys |
|
557 |
case _ => thys + (name -> thy) |
|
558 |
} |
|
559 |
})) |
|
560 |
||
561 |
def remove_theories(remove: List[Document.Node.Name]): State = |
|
67893 | 562 |
{ |
69012 | 563 |
require(remove.forall(name => !is_required(name))) |
564 |
copy(theories = theories -- remove) |
|
565 |
} |
|
566 |
||
70649 | 567 |
def unload_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name]) |
70783 | 568 |
: (List[Document.Edit_Text], State) = |
69012 | 569 |
{ |
70649 | 570 |
val st1 = remove_required(id, theories) |
67893 | 571 |
val theory_edits = |
69012 | 572 |
for { |
70649 | 573 |
node_name <- theories |
69012 | 574 |
theory <- st1.theories.get(node_name) |
575 |
} |
|
67893 | 576 |
yield { |
577 |
val theory1 = theory.required(st1.is_required(node_name)) |
|
69012 | 578 |
val edits = theory1.node_edits(Some(theory)) |
67893 | 579 |
(edits, (node_name, theory1)) |
580 |
} |
|
70783 | 581 |
(theory_edits.flatMap(_._1), st1.update_theories(theory_edits.map(_._2))) |
69012 | 582 |
} |
583 |
||
70782 | 584 |
def purge_theories(session: Session, nodes: Option[List[Document.Node.Name]]) |
70783 | 585 |
: ((List[Document.Node.Name], List[Document.Node.Name], List[Document.Edit_Text]), State) = |
69012 | 586 |
{ |
587 |
val all_nodes = theory_graph.topological_order |
|
70782 | 588 |
val purge = nodes.getOrElse(all_nodes).filterNot(is_required(_)).toSet |
69012 | 589 |
|
590 |
val retain = theory_graph.all_preds(all_nodes.filterNot(purge)).toSet |
|
591 |
val (retained, purged) = all_nodes.partition(retain) |
|
70783 | 592 |
val purge_edits = purged.flatMap(name => theories(name).purge_edits) |
69012 | 593 |
|
70783 | 594 |
((purged, retained, purge_edits), remove_theories(purged)) |
69012 | 595 |
} |
596 |
} |
|
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
597 |
} |
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
598 |
|
69536 | 599 |
class Resources private[Headless]( |
600 |
val session_base_info: Sessions.Base_Info, |
|
601 |
log: Logger = No_Logger) |
|
70683
8c7706b053c7
find theory files via session structure: much faster Prover IDE startup;
wenzelm
parents:
70674
diff
changeset
|
602 |
extends isabelle.Resources( |
8c7706b053c7
find theory files via session structure: much faster Prover IDE startup;
wenzelm
parents:
70674
diff
changeset
|
603 |
session_base_info.sessions_structure, session_base_info.check_base, log = log) |
67884
43af581d7d8e
unload_theories after consolidation -- reset node_required;
wenzelm
parents:
67883
diff
changeset
|
604 |
{ |
69012 | 605 |
resources => |
606 |
||
69538
faf547d2834c
clarified signature, notably cascade of dump_options, deps, resources, session;
wenzelm
parents:
69536
diff
changeset
|
607 |
def options: Options = session_base_info.options |
faf547d2834c
clarified signature, notably cascade of dump_options, deps, resources, session;
wenzelm
parents:
69536
diff
changeset
|
608 |
|
69536 | 609 |
|
610 |
/* session */ |
|
611 |
||
612 |
def start_session(print_mode: List[String] = Nil, progress: Progress = No_Progress): Session = |
|
613 |
{ |
|
614 |
val session = new Session(session_base_info.session, options, resources) |
|
615 |
||
616 |
val session_error = Future.promise[String] |
|
617 |
var session_phase: Session.Consumer[Session.Phase] = null |
|
618 |
session_phase = |
|
619 |
Session.Consumer(getClass.getName) { |
|
620 |
case Session.Ready => |
|
621 |
session.phase_changed -= session_phase |
|
622 |
session_error.fulfill("") |
|
623 |
case Session.Terminated(result) if !result.ok => |
|
624 |
session.phase_changed -= session_phase |
|
625 |
session_error.fulfill("Session start failed: return code " + result.rc) |
|
626 |
case _ => |
|
627 |
} |
|
628 |
session.phase_changed += session_phase |
|
629 |
||
630 |
progress.echo("Starting session " + session_base_info.session + " ...") |
|
631 |
Isabelle_Process.start(session, options, |
|
632 |
logic = session_base_info.session, dirs = session_base_info.dirs, modes = print_mode) |
|
633 |
||
634 |
session_error.join match { |
|
635 |
case "" => session |
|
636 |
case msg => session.stop(); error(msg) |
|
637 |
} |
|
638 |
} |
|
639 |
||
640 |
||
641 |
/* theories */ |
|
642 |
||
69012 | 643 |
private val state = Synchronized(Resources.State()) |
644 |
||
645 |
def load_theories( |
|
646 |
session: Session, |
|
69458 | 647 |
id: UUID.T, |
70649 | 648 |
theories: List[Document.Node.Name], |
649 |
files: List[Document.Node.Name], |
|
69920 | 650 |
unicode_symbols: Boolean, |
70625
1ae987cc052f
support for share_common_data after define_command and before actual update: this affects string particles of command tokens;
wenzelm
parents:
69920
diff
changeset
|
651 |
share_common_data: Boolean, |
69012 | 652 |
progress: Progress) |
653 |
{ |
|
654 |
val loaded_theories = |
|
70649 | 655 |
for (node_name <- theories) |
69012 | 656 |
yield { |
657 |
val path = node_name.path |
|
658 |
if (!node_name.is_theory) error("Not a theory file: " + path) |
|
659 |
||
660 |
progress.expose_interrupt() |
|
69920 | 661 |
val text0 = File.read(path) |
662 |
val text = if (unicode_symbols) Symbol.decode(text0) else text0 |
|
69012 | 663 |
val node_header = resources.check_thy_reader(node_name, Scan.char_reader(text)) |
664 |
new Resources.Theory(node_name, node_header, text, true) |
|
68936
90c08c7bab9c
continuously clean frontier of already committed theories: much less resource requirements;
wenzelm
parents:
68935
diff
changeset
|
665 |
} |
69012 | 666 |
|
667 |
val loaded = loaded_theories.length |
|
668 |
if (loaded > 1) progress.echo("Loading " + loaded + " theories ...") |
|
669 |
||
670 |
state.change(st => |
|
671 |
{ |
|
70649 | 672 |
val (doc_blobs1, st1) = st.insert_required(id, theories).update_blobs(files) |
69012 | 673 |
val theory_edits = |
674 |
for (theory <- loaded_theories) |
|
675 |
yield { |
|
676 |
val node_name = theory.node_name |
|
677 |
val theory1 = theory.required(st1.is_required(node_name)) |
|
678 |
val edits = theory1.node_edits(st1.theories.get(node_name)) |
|
679 |
(edits, (node_name, theory1)) |
|
680 |
} |
|
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
681 |
val file_edits = |
70649 | 682 |
for { node_name <- files if doc_blobs1.changed(node_name) } |
69562
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
683 |
yield st1.blob_edits(node_name, st.blobs.get(node_name)) |
636b3c03a61a
include loaded_files as doc_blobs (without purging);
wenzelm
parents:
69538
diff
changeset
|
684 |
|
70625
1ae987cc052f
support for share_common_data after define_command and before actual update: this affects string particles of command tokens;
wenzelm
parents:
69920
diff
changeset
|
685 |
session.update(doc_blobs1, theory_edits.flatMap(_._1) ::: file_edits.flatten, |
1ae987cc052f
support for share_common_data after define_command and before actual update: this affects string particles of command tokens;
wenzelm
parents:
69920
diff
changeset
|
686 |
share_common_data = share_common_data) |
69012 | 687 |
st1.update_theories(theory_edits.map(_._2)) |
688 |
}) |
|
689 |
} |
|
67936 | 690 |
|
70649 | 691 |
def unload_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name]) |
69012 | 692 |
{ |
70783 | 693 |
state.change(st => |
694 |
{ |
|
695 |
val (edits, st1) = st.unload_theories(session, id, theories) |
|
696 |
session.update(st.doc_blobs, edits) |
|
697 |
st1 |
|
698 |
}) |
|
69012 | 699 |
} |
700 |
||
70698 | 701 |
def clean_theories(session: Session, id: UUID.T, theories: List[Document.Node.Name]) |
69012 | 702 |
{ |
703 |
state.change(st => |
|
70783 | 704 |
{ |
705 |
val (edits1, st1) = st.unload_theories(session, id, theories) |
|
706 |
val ((_, _, edits2), st2) = st1.purge_theories(session, None) |
|
707 |
session.update(st.doc_blobs, edits1 ::: edits2) |
|
708 |
st2 |
|
709 |
}) |
|
69012 | 710 |
} |
711 |
||
712 |
def purge_theories(session: Session, nodes: Option[List[Document.Node.Name]]) |
|
713 |
: (List[Document.Node.Name], List[Document.Node.Name]) = |
|
714 |
{ |
|
70783 | 715 |
state.change_result(st => |
716 |
{ |
|
717 |
val ((purged, retained, _), st1) = st.purge_theories(session, nodes) |
|
718 |
((purged, retained), st1) |
|
719 |
}) |
|
69012 | 720 |
} |
67936 | 721 |
} |
67054 | 722 |
} |