changeset 79314 de58e518ed61
author    wenzelm
date      Tue, 19 Dec 2023 23:06:26 +0100
parent    79297 963570d120b2 (diff)
parent    79313 3b03af5125de (current diff)
child     79315 140c7fac037a

merged
--- a/etc/options	Tue Dec 19 22:56:32 2023 +0100
+++ b/etc/options	Tue Dec 19 23:06:26 2023 +0100
@@ -213,6 +213,12 @@
 option build_cluster_identifier : string = "build_cluster"
   -- "ISABELLE_IDENTIFIER for remote build cluster sessions"
 
+option build_schedule_outdated_delay : real = 300.0
+  -- "minimum time for which a generated schedule stays valid"
+
+option build_schedule_outdated_limit : int = 20
+  -- "maximum number of built sessions for which a schedule stays valid"
+
 
 section "Editor Session"
 
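The two options above drive the reworked Schedule.is_outdated check in src/Pure/Tools/build_schedule.scala further down in this changeset: a non-empty schedule is only regenerated once more than build_schedule_outdated_limit sessions have finished and more than build_schedule_outdated_delay seconds have elapsed. A minimal standalone sketch of how the two thresholds combine (hypothetical names, default values copied from the options above, not the Isabelle API):

// Sketch only: mirrors the guard in Schedule.is_outdated, with plain values
// in place of Isabelle's Options and Time types.
object OutdatedCheck {
  val outdatedDelaySeconds = 300.0   // build_schedule_outdated_delay
  val outdatedLimit = 20             // build_schedule_outdated_limit

  // an empty schedule is always outdated; otherwise both thresholds must be exceeded
  def isOutdated(scheduleEmpty: Boolean, sessionsBuilt: Int, elapsedSeconds: Double): Boolean =
    scheduleEmpty ||
      (sessionsBuilt > outdatedLimit && elapsedSeconds > outdatedDelaySeconds)

  def main(args: Array[String]): Unit = {
    println(isOutdated(scheduleEmpty = false, sessionsBuilt = 25, elapsedSeconds = 120.0))  // false
    println(isOutdated(scheduleEmpty = false, sessionsBuilt = 25, elapsedSeconds = 400.0))  // true
  }
}
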
--- a/src/HOL/Int.thy	Tue Dec 19 22:56:32 2023 +0100
+++ b/src/HOL/Int.thy	Tue Dec 19 23:06:26 2023 +0100
@@ -25,10 +25,6 @@
   show "transp intrel" by (auto simp: transp_def)
 qed
 
-lemma eq_Abs_Integ [case_names Abs_Integ, cases type: int]:
-  "(\<And>x y. z = Abs_Integ (x, y) \<Longrightarrow> P) \<Longrightarrow> P"
-  by (induct z) auto
-
 
 subsection \<open>Integers form a commutative ring\<close>
 
--- a/src/Pure/System/benchmark.scala	Tue Dec 19 22:56:32 2023 +0100
+++ b/src/Pure/System/benchmark.scala	Tue Dec 19 23:06:26 2023 +0100
@@ -8,6 +8,9 @@
 
 
 object Benchmark {
+  /* ZF-Constructible as representative benchmark session: short build time, small set of requirements */
+
+  val benchmark_session = "ZF-Constructible"
 
   def benchmark_command(
     host: Build_Cluster.Host,
@@ -19,28 +22,84 @@
       Options.Spec.bash_strings(options, bg = true)
   }
 
+  def benchmark_requirements(options: Options, progress: Progress = new Progress()): Unit = {
+    val res =
+      Build.build(
+        options.string("build_engine") = Build.Default_Engine().name,
+        selection = Sessions.Selection(requirements = true, sessions = List(benchmark_session)),
+        progress = progress, build_heap = true)
+    if (!res.ok) error("Failed building requirements")
+  }
+
   def benchmark(options: Options, progress: Progress = new Progress()): Unit = {
     val hostname = options.string("build_hostname")
     val store = Store(options)
 
     using(store.open_server()) { server =>
-      val db = store.open_build_database(path = Host.private_data.database, server = server)
+      using_optional(store.maybe_open_database_server(server = server)) { database_server =>
+        val db = store.open_build_database(path = Host.private_data.database, server = server)
+
+        progress.echo("Starting benchmark...")
+        val selection = Sessions.Selection(sessions = List(benchmark_session))
+        val full_sessions = Sessions.load_structure(options.int("threads") = 1)
+
+        val build_context =
+          Build.Context(store, new Build.Default_Engine,
+            Sessions.deps(full_sessions.selection(selection)).check_errors)
+
+        val sessions = Build_Process.Sessions.empty.init(build_context, database_server, progress)
+        val session = sessions(benchmark_session)
 
-      progress.echo("Starting benchmark...")
-      val start = Time.now()
+        val heaps = session.ancestors.map(store.output_heap)
+        ML_Heap.restore(database_server, heaps, cache = store.cache.compress)
+
+        val local_options =
+          options
+            .bool.update("build_database_server", false)
+            .bool.update("build_database", false)
 
-      // TODO proper benchmark
-      def fib(n: Long): Long = if (n < 2) 1 else fib(n - 2) + fib(n - 1)
-      val result = fib(42)
+        benchmark_requirements(local_options, progress)
+        ML_Heap.restore(database_server, heaps, cache = store.cache.compress)
+
+        def get_shasum(session_name: String): SHA1.Shasum = {
+          val ancestor_shasums = sessions(session_name).ancestors.map(get_shasum)
+
+          val input_shasum =
+            if (ancestor_shasums.isEmpty) ML_Process.bootstrap_shasum()
+            else SHA1.flat_shasum(ancestor_shasums)
 
-      val stop = Time.now()
-      val timing = stop - start
+          store.check_output(
+            database_server, session_name,
+            session_options = build_context.sessions_structure(session_name).options,
+            sources_shasum = sessions(session_name).sources_shasum,
+            input_shasum = input_shasum,
+            fresh_build = false,
+            store_heap = false)._2
+        }
+
+        val deps = Sessions.deps(full_sessions.selection(selection)).check_errors
+        val background = deps.background(benchmark_session)
+        val input_shasum = get_shasum(benchmark_session)
+        val node_info = Host.Node_Info(hostname, None, Nil)
+
+        val local_build_context = build_context.copy(store = Store(local_options))
 
-      val score = Time.seconds(100).ms.toDouble / (1 + timing.ms)
-      progress.echo(
-        "Finished benchmark in " + timing.message + ". Score: " + String.format("%.2f", score))
+        val build =
+          Build_Job.start_session(local_build_context, session, progress, No_Logger, server,
+            background, session.sources_shasum, input_shasum, node_info, false)
 
-      Host.write_info(db, Host.Info.gather(hostname, score = Some(score)))
+        val timing =
+          build.join match {
+            case Some(result) if result.process_result.ok => result.process_result.timing
+            case _ => error("Failed to build benchmark session")
+          }
+
+        val score = Time.seconds(1000).ms.toDouble / (1 + timing.elapsed.ms)
+        progress.echo(
+          "Finished benchmark in " + timing.message + ". Score: " + String.format("%.2f", score))
+
+        Host.write_info(db, Host.Info.gather(hostname, score = Some(score)))
+      }
     }
   }
 
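The new benchmark above replaces the synthetic fib(42) run with a real build of the ZF-Constructible session and derives the host score from its elapsed build time: Time.seconds(1000).ms / (1 + elapsed ms), so faster hosts receive higher scores. A minimal standalone sketch of that arithmetic (hypothetical names; only the formula is taken from the diff):

// Sketch only: the scoring arithmetic, with a plain Long in place of
// Isabelle's Time type (Time.seconds(1000).ms == 1000000).
object BenchmarkScore {
  def score(elapsedMs: Long): Double = 1000000.0 / (1 + elapsedMs)

  def main(args: Array[String]): Unit = {
    // e.g. a 100-second build of the benchmark session scores about 10.00
    println(f"${score(100000)}%.2f")
  }
}
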
--- a/src/Pure/Tools/build_schedule.scala	Tue Dec 19 22:56:32 2023 +0100
+++ b/src/Pure/Tools/build_schedule.scala	Tue Dec 19 23:06:26 2023 +0100
@@ -459,8 +459,13 @@
 
     def num_built(state: Build_Process.State): Int = graph.keys.count(state.results.contains)
     def elapsed(): Time = Time.now() - start.time
-    def is_outdated(state: Build_Process.State, time_limit: Time, built_limit: Int): Boolean =
-      graph.is_empty || (elapsed() > time_limit && num_built(state) > built_limit)
+    def is_empty: Boolean = graph.is_empty
+    def is_outdated(options: Options, state: Build_Process.State): Boolean =
+      if (is_empty) true
+      else {
+        num_built(state) > options.int("build_schedule_outdated_limit") &&
+          elapsed() > options.seconds("build_schedule_outdated_delay")
+      }
 
     def next(hostname: String, state: Build_Process.State): List[String] =
       for {
@@ -799,29 +804,16 @@
   }
 
 
-  /* process for scheduled build */
+  /* master and slave processes for scheduled build */
 
-  abstract class Scheduled_Build_Process(
+  class Scheduled_Build_Process(
     build_context: Build.Context,
     build_progress: Progress,
     server: SSH.Server,
   ) extends Build_Process(build_context, build_progress, server) {
-    protected val start_date = Date.now()
-
-    def init_scheduler(timing_data: Timing_Data): Scheduler
-
     /* global resources with common close() operation */
 
-    private final lazy val _log_store: Build_Log.Store = Build_Log.store(build_options)
-    private final lazy val _log_database: SQL.Database =
-      try {
-        val db = _log_store.open_database(server = this.server)
-        _log_store.init_database(db)
-        db
-      }
-      catch { case exn: Throwable => close(); throw exn }
-
-    private val _build_database: Option[SQL.Database] =
+    protected final lazy val _build_database: Option[SQL.Database] =
       try {
         for (db <- store.maybe_open_build_database(server = server)) yield {
           if (build_context.master) {
@@ -838,15 +830,14 @@
       catch { case exn: Throwable => close(); throw exn }
 
     override def close(): Unit = {
+      Option(_build_database).flatten.foreach(_.close())
       super.close()
-      Option(_log_database).foreach(_.close())
-      Option(_build_database).flatten.foreach(_.close())
     }
 
 
     /* global state: internal var vs. external database */
 
-    private var _schedule = Schedule.init(build_uuid)
+    protected var _schedule = Schedule.init(build_uuid)
 
     override protected def synchronized_database[A](label: String)(body: => A): A =
       super.synchronized_database(label) {
@@ -864,16 +855,63 @@
       }
 
 
+    /* build process */
+
+    override def next_node_info(state: Build_Process.State, session_name: String): Node_Info =
+      _schedule.graph.get_node(session_name).node_info
+
+    override def next_jobs(state: Build_Process.State): List[String] =
+      if (progress.stopped || _schedule.is_empty) Nil else _schedule.next(hostname, state)
+  }
+
+  abstract class Scheduler_Build_Process(
+    build_context: Build.Context,
+    build_progress: Progress,
+    server: SSH.Server,
+  ) extends Scheduled_Build_Process(build_context, build_progress, server) {
+    require(build_context.master)
+
+    protected val start_date = Date.now()
+
+    def init_scheduler(timing_data: Timing_Data): Scheduler
+
+
+    /* global resources with common close() operation */
+
+    private final lazy val _log_store: Build_Log.Store = Build_Log.store(build_options)
+    private final lazy val _log_database: SQL.Database =
+      try {
+        val db = _log_store.open_database(server = this.server)
+        _log_store.init_database(db)
+        db
+      }
+      catch { case exn: Throwable => close(); throw exn }
+
+    override def close(): Unit = {
+      Option(_log_database).foreach(_.close())
+      super.close()
+    }
+
+
     /* previous results via build log */
 
     override def open_build_cluster(): Build_Cluster = {
       val build_cluster = super.open_build_cluster()
       build_cluster.init()
-      if (build_context.master && build_context.max_jobs > 0) {
+
+      Benchmark.benchmark_requirements(build_options)
+
+      if (build_context.max_jobs > 0) {
         val benchmark_options = build_options.string("build_hostname") = hostname
         Benchmark.benchmark(benchmark_options, progress)
       }
       build_cluster.benchmark()
+
+      for (db <- _build_database)
+        Build_Process.private_data.transaction_lock(db, label = "Scheduler_Build_Process.init") {
+          Build_Process.private_data.clean_build(db)
+        }
+
       build_cluster
     }
 
@@ -944,9 +982,6 @@
 
     /* build process */
 
-    override def next_node_info(state: Build_Process.State, session_name: String): Node_Info =
-      _schedule.graph.get_node(session_name).node_info
-
     def is_current(state: Build_Process.State, session_name: String): Boolean =
       state.ancestor_results(session_name) match {
         case Some(ancestor_results) if ancestor_results.forall(_.current) =>
@@ -971,10 +1006,8 @@
     }
 
     override def next_jobs(state: Build_Process.State): List[String] =
-      if (!progress.stopped && !_schedule.is_outdated(state, Time.minutes(3), 10))
-        _schedule.next(hostname, state)
-      else if (!build_context.master) Nil
-      else if (progress.stopped) state.next_ready.map(_.name)
+      if (progress.stopped) state.next_ready.map(_.name)
+      else if (!_schedule.is_outdated(build_options, state)) _schedule.next(hostname, state)
       else {
         val current = state.next_ready.filter(task => is_current(state, task.name))
         if (current.nonEmpty) current.map(_.name)
@@ -983,7 +1016,7 @@
 
           val new_schedule = scheduler.build_schedule(state).update(state)
           val schedule =
-            if (_schedule.graph.is_empty) new_schedule
+            if (_schedule.is_empty) new_schedule
             else List(_schedule.update(state), new_schedule).minBy(_.end)(Date.Ordering)
 
           val elapsed = Time.now() - start
@@ -998,7 +1031,7 @@
 
     override def run(): Build.Results = {
       val results = super.run()
-      if (build_context.master) write_build_log(results, snapshot().results)
+      write_build_log(results, snapshot().results)
       results
     }
   }
@@ -1041,7 +1074,8 @@
             val build_uuid = res.string(Schedules.build_uuid)
             val generator = res.string(Schedules.generator)
             val start = res.date(Schedules.start)
-            Schedule(build_uuid, generator, start, Graph.empty)
+            val serial = res.long(Schedules.serial)
+            Schedule(build_uuid, generator, start, Graph.empty, serial)
           })
 
       for (schedule <- schedules.sortBy(_.start)(Date.Ordering)) yield {
@@ -1207,7 +1241,8 @@
       progress: Progress,
       server: SSH.Server
     ): Build_Process =
-      new Scheduled_Build_Process(context, progress, server) {
+      if (!context.master) new Scheduled_Build_Process(context, progress, server)
+      else new Scheduler_Build_Process(context, progress, server) {
         def init_scheduler(timing_data: Timing_Data): Scheduler = scheduler(timing_data, context)
       }
   }
@@ -1532,7 +1567,7 @@
           numa_shuffling = isabelle.Host.numa_check(progress, numa_shuffling),
           build_hosts = build_hosts.toList)
 
-      if (!schedule.graph.is_empty && output_file.nonEmpty)
+      if (!schedule.is_empty && output_file.nonEmpty)
         write_schedule_graphic(schedule, output_file.get)
     })
 }
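
Taken together, the build_schedule.scala changes split the former abstract Scheduled_Build_Process into a concrete slave class that merely follows the schedule held in the build database and a master-only Scheduler_Build_Process that owns the build log database, benchmarks the hosts, and regenerates the schedule once it is outdated. A minimal sketch of that shape (hypothetical classes, not the Isabelle API):

// Sketch only: slave nodes read the current schedule; the master may also replace it.
class ScheduledProcess(val host: String) {
  protected var schedule: List[String] = Nil      // next sessions for this node

  // every node (master or slave) picks its next job from the current schedule
  def nextJob(): Option[String] = schedule.headOption
}

class SchedulerProcess(host: String) extends ScheduledProcess(host) {
  // master-only: replace an outdated schedule with a freshly generated one
  def regenerate(fresh: List[String]): Unit = { schedule = fresh }
}

object SchedulingDemo {
  def main(args: Array[String]): Unit = {
    val master = new SchedulerProcess("node0")
    val slave = new ScheduledProcess("node1")
    master.regenerate(List("HOL", "HOL-Library"))
    println(master.nextJob())   // Some(HOL)
    println(slave.nextJob())    // None: a slave waits until a schedule is published
  }
}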