--- a/src/Pure/PIDE/headless.scala Mon Oct 07 10:51:20 2019 +0200
+++ b/src/Pure/PIDE/headless.scala Mon Oct 07 11:35:43 2019 +0200
@@ -44,15 +44,12 @@
(nodes.iterator ++ nodes_committed.iterator).forall({ case (_, st) => st.ok })
}

- private type Load = (List[Document.Node.Name], Boolean)
- private val no_load: Load = (Nil, false)
-
private sealed abstract class Load_State
{
def next(
limit: Int,
dep_graph: Document.Node.Name.Graph[Unit],
- finished: Document.Node.Name => Boolean): (Load, Load_State) =
+ finished: Document.Node.Name => Boolean): (List[Document.Node.Name], Load_State) =
{
def make_pending(maximals: List[Document.Node.Name]): List[Document.Node.Name] =
{
@@ -64,60 +61,40 @@
}
}

- def load_checkpoints(checkpoints: List[Document.Node.Name]): (Load, Load_State) =
- Load_Init(checkpoints).next(limit, dep_graph, finished)
-
- def load_requirements(
- pending: List[Document.Node.Name],
- checkpoints: List[Document.Node.Name] = Nil,
- share_common_data: Boolean = false): (Load, Load_State) =
+ def load_requirements(pending: List[Document.Node.Name])
+ : (List[Document.Node.Name], Load_State) =
{
- if (pending.isEmpty) load_checkpoints(checkpoints)
+ if (pending.isEmpty) (Nil, Load_Finished)
else if (limit == 0) {
val requirements = dep_graph.all_preds(pending).reverse
- ((requirements, share_common_data), Load_Bulk(pending, Nil, checkpoints))
+ (requirements, Load_Bulk(pending, Nil))
}
else {
val reachable = dep_graph.reachable_limit(limit, _ => 1, dep_graph.imm_preds, pending)
val (pending1, pending2) = pending.partition(reachable)
val requirements = dep_graph.all_preds(pending1).reverse
- ((requirements, share_common_data), Load_Bulk(pending1, pending2, checkpoints))
+ (requirements, Load_Bulk(pending1, pending2))
}
}

- val result: (Load, Load_State) =
+ val (load_theories, st1) =
this match {
- case Load_Init(Nil) =>
+ case Load_Init =>
val pending = make_pending(dep_graph.maximals)
- if (pending.isEmpty) (no_load, Load_Finished)
+ if (pending.isEmpty) (Nil, Load_Finished)
else load_requirements(pending)
- case Load_Init(target :: checkpoints) =>
- val requirements = dep_graph.all_preds(List(target)).reverse
- ((requirements, false), Load_Target(target, checkpoints))
- case Load_Target(pending, checkpoints) if finished(pending) =>
- val dep_graph1 =
- if (checkpoints.isEmpty) dep_graph
- else dep_graph.exclude(dep_graph.all_succs(checkpoints).toSet)
- val dep_graph2 =
- dep_graph1.restrict(dep_graph.all_succs(List(pending)).toSet)
- val pending2 = make_pending(dep_graph.maximals.filter(dep_graph2.defined))
- load_requirements(pending2, checkpoints = checkpoints, share_common_data = true)
- case Load_Bulk(pending, remaining, checkpoints) if pending.forall(finished) =>
- load_requirements(remaining, checkpoints = checkpoints)
- case st => (no_load, st)
+ case Load_Bulk(pending, remaining) if pending.forall(finished) =>
+ load_requirements(remaining)
+ case st => (Nil, st)
}
- val ((load_theories, share_common_data), st1) = result
- ((load_theories.filterNot(finished), share_common_data), st1)
+ (load_theories.filterNot(finished), st1)
}
}

- private case class Load_Init(checkpoints: List[Document.Node.Name]) extends Load_State
- private case class Load_Target(
- pending: Document.Node.Name, checkpoints: List[Document.Node.Name]) extends Load_State
+ private case object Load_Init extends Load_State
private case class Load_Bulk(
pending: List[Document.Node.Name],
- remaining: List[Document.Node.Name],
- checkpoints: List[Document.Node.Name]) extends Load_State
+ remaining: List[Document.Node.Name]) extends Load_State
private case object Load_Finished extends Load_State

class Session private[Headless](
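
Note on the hunk above: with checkpoints gone, the loading state machine collapses to three states: Load_Init (nothing dispatched yet), Load_Bulk (one batch in flight, the rest of the frontier kept as remaining), and Load_Finished. The following is a minimal standalone sketch of that machine, not the Isabelle API: it assumes plain String names and a Map of direct imports in place of Document.Node.Name.Graph, covers only the limit == 0 path (no batching), and omits make_pending's depth sorting; requirements is an illustrative stand-in for dep_graph.all_preds(...).reverse.

    object Load_State_Sketch
    {
      type Name = String

      sealed abstract class Load_State
      case object Load_Init extends Load_State
      case class Load_Bulk(pending: List[Name], remaining: List[Name]) extends Load_State
      case object Load_Finished extends Load_State

      /* transitive imports of names, imports-first
         (stand-in for dep_graph.all_preds(names).reverse) */
      def requirements(imports: Map[Name, List[Name]], names: List[Name]): List[Name] =
      {
        def visit(seen_acc: (Set[Name], List[Name]), name: Name): (Set[Name], List[Name]) =
        {
          val (seen, acc) = seen_acc
          if (seen(name)) seen_acc
          else {
            val (seen1, acc1) =
              imports.getOrElse(name, Nil).foldLeft((seen + name, acc))(visit)
            (seen1, name :: acc1)
          }
        }
        names.foldLeft((Set.empty[Name], List.empty[Name]))(visit)._2.reverse
      }

      /* one step: theories to dispatch now, plus the successor state */
      def next(
        state: Load_State,
        maximals: List[Name],
        imports: Map[Name, List[Name]],
        finished: Name => Boolean): (List[Name], Load_State) =
      {
        val (load_theories, st1) =
          state match {
            case Load_Init =>
              val pending = maximals.filterNot(finished)
              if (pending.isEmpty) (Nil, Load_Finished)
              else (requirements(imports, pending), Load_Bulk(pending, Nil))
            case Load_Bulk(pending, remaining) if pending.forall(finished) =>
              if (remaining.isEmpty) (Nil, Load_Finished)
              else (requirements(imports, remaining), Load_Bulk(remaining, Nil))
            case st => (Nil, st)
          }
        (load_theories.filterNot(finished), st1)
      }

      def main(args: Array[String]): Unit =
      {
        val imports = Map("B" -> List("A"), "C" -> List("B"))
        println(next(Load_Init, List("C"), imports, Set.empty[Name]))
        // (List(A, B, C),Load_Bulk(List(C),List()))
      }
    }
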
@@ -224,7 +201,7 @@
}

def check(state: Document.State, version: Document.Version, beyond_limit: Boolean)
- : ((List[Document.Node.Name], Boolean), Use_Theories_State) =
+ : (List[Document.Node.Name], Use_Theories_State) =
{
val already_committed1 =
commit match {
@@ -272,9 +249,10 @@
}
else result

- val (load, load_state1) = load_state.next(load_limit, dep_graph, finished_theory(_))
+ val (load_theories, load_state1) =
+ load_state.next(load_limit, dep_graph, finished_theory(_))

- (load,
+ (load_theories,
copy(already_committed = already_committed1, result = result1, load_state = load_state1))
}
}
@@ -289,7 +267,6 @@
watchdog_timeout: Time = default_watchdog_timeout,
nodes_status_delay: Time = default_nodes_status_delay,
id: UUID.T = UUID.random(),
- checkpoints: Set[Document.Node.Name] = Set.empty,
// commit: must not block, must not fail
commit: Option[(Document.Snapshot, Document_Status.Node_Status) => Unit] = None,
commit_cleanup_delay: Time = default_commit_cleanup_delay,
@@ -309,24 +286,16 @@
map(path => Document.Node.Name(resources.append("", path)))

val use_theories_state =
- {
- val load_state =
- Load_Init(
- if (checkpoints.isEmpty) Nil
- else dependencies.theory_graph.topological_order.filter(checkpoints(_)))
Synchronized(
- Use_Theories_State(dependencies.theory_graph, load_state, watchdog_timeout, commit))
- }
+ Use_Theories_State(dependencies.theory_graph, Load_Init, watchdog_timeout, commit))

def check_state(beyond_limit: Boolean = false)
{
val state = session.get_state()
for (version <- state.stable_tip_version) {
- val (load_theories, share_common_data) =
- use_theories_state.change_result(_.check(state, version, beyond_limit))
+ val load_theories = use_theories_state.change_result(_.check(state, version, beyond_limit))
if (load_theories.nonEmpty) {
- resources.load_theories(
- session, id, load_theories, dep_files, unicode_symbols, share_common_data, progress)
+ resources.load_theories(session, id, load_theories, dep_files, unicode_symbols, progress)
}
}
}
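
Note: use_theories_state.change_result(_.check(state, version, beyond_limit)) now hands back a plain theory list while atomically installing the successor Use_Theories_State. Below is a simplified stand-in for isabelle.Synchronized (Pure/Concurrent/synchronized.scala), just enough to show the change_result contract relied on here; the real class adds guarded-access machinery on top.

    // Simplified stand-in: a plain monitor, not the real isabelle.Synchronized.
    final class Synchronized[A](init: A)
    {
      private var state: A = init

      def value: A = synchronized { state }

      // f yields a caller-visible result plus the successor state;
      // both are committed in one atomic step
      def change_result[B](f: A => (B, A)): B = synchronized {
        val (result, state1) = f(state)
        state = state1
        result
      }
    }

Since check returns exactly such a (result, successor-state) pair, concurrent check_state invocations can never observe the load list and the state update separately.
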
@@ -648,7 +617,6 @@
theories: List[Document.Node.Name],
files: List[Document.Node.Name],
unicode_symbols: Boolean,
- share_common_data: Boolean,
progress: Progress)
{
val loaded_theories =
@@ -682,8 +650,7 @@
for { node_name <- files if doc_blobs1.changed(node_name) }
yield st1.blob_edits(node_name, st.blobs.get(node_name))

- session.update(doc_blobs1, theory_edits.flatMap(_._1) ::: file_edits.flatten,
- share_common_data = share_common_data)
+ session.update(doc_blobs1, theory_edits.flatMap(_._1) ::: file_edits.flatten)

st1.update_theories(theory_edits.map(_._2))
})
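
Note on the shapes in the final session.update call: theory_edits pairs each theory's document edits with its updated per-theory entry (hence flatMap(_._1) here and map(_._2) for update_theories), while file_edits carries one edit list per changed blob. A toy illustration with placeholder types — Edit and Entry are hypothetical stand-ins, not Isabelle types:

    object Edit_Shapes_Sketch
    {
      type Edit = String   // hypothetical stand-in for Document.Edit_Text
      type Entry = String  // hypothetical stand-in for the per-theory state entry

      def main(args: Array[String]): Unit =
      {
        val theory_edits: List[(List[Edit], Entry)] =
          List((List("edit A.thy"), "A"), (List("edit B.thy"), "B"))
        val file_edits: List[List[Edit]] = List(List("edit blob1"))

        // the same aggregation as the session.update argument above
        val edits: List[Edit] = theory_edits.flatMap(_._1) ::: file_edits.flatten
        println(edits)  // List(edit A.thy, edit B.thy, edit blob1)
      }
    }
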
}