(*  Title:      Pure/Concurrent/future.ML
    Author:     Makarius

Future values, see also
http://www4.in.tum.de/~wenzelm/papers/parallel-isabelle.pdf

Notes:

  * Futures are similar to delayed evaluation, i.e. delay/force is
    generalized to fork/join (and variants).  The idea is to model
    parallel value-oriented computations, but *not* communicating
    processes.

  * Futures are grouped; failure of one group member causes the whole
    group to be interrupted eventually.  Groups are block-structured.

  * Forked futures are evaluated spontaneously by a farm of worker
    threads in the background; join resynchronizes the computation and
    delivers results (values or exceptions).

  * The pool of worker threads is limited, usually in correlation with
    the number of physical cores on the machine.  Note that allocation
    of runtime resources is distorted either if workers yield CPU time
    (e.g. via system sleep or wait operations), or if non-worker
    threads contend for significant runtime resources independently.
*)
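
(*Illustrative usage sketch of the interface below; compute_a and compute_b
  are hypothetical functions of type unit -> int, not part of this file:

    val x = Future.fork compute_a;              (*evaluated by a worker thread*)
    val y = Future.map (fn a => a + 1) x;       (*runs in a nested group of x*)
    val z = Future.fork_deps [x, y] compute_b;  (*scheduled after x and y*)
    val rs = Future.join_results [y, z];        (*Exn.result list: values or exceptions*)
    val _ = Future.cancel x;                    (*interrupts x's group eventually*)
*)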

signature FUTURE =
sig
  val enabled: unit -> bool
  type task = Task_Queue.task
  type group = Task_Queue.group
  val is_worker: unit -> bool
  val worker_group: unit -> Task_Queue.group option
  type 'a future
  val task_of: 'a future -> task
  val group_of: 'a future -> group
  val peek: 'a future -> 'a Exn.result option
  val is_finished: 'a future -> bool
  val value: 'a -> 'a future
  val fork: (unit -> 'a) -> 'a future
  val fork_group: group -> (unit -> 'a) -> 'a future
  val fork_deps: 'b future list -> (unit -> 'a) -> 'a future
  val fork_pri: int -> (unit -> 'a) -> 'a future
  val join_results: 'a future list -> 'a Exn.result list
  val join_result: 'a future -> 'a Exn.result
  val join: 'a future -> 'a
  val map: ('a -> 'b) -> 'a future -> 'b future
  val interruptible_task: ('a -> 'b) -> 'a -> 'b
  val cancel_group: group -> unit
  val cancel: 'a future -> unit
  val shutdown: unit -> unit
end;

structure Future: FUTURE =
struct

(** future values **)

fun enabled () =
  Multithreading.enabled () andalso
    not (Multithreading.self_critical ());


(* identifiers *)

type task = Task_Queue.task;
type group = Task_Queue.group;

local
  val tag = Universal.tag () : (string * task * group) option Universal.tag;
in

fun thread_data () = the_default NONE (Thread.getLocal tag);

fun setmp_thread_data data f x =
  Library.setmp_thread_data tag (thread_data ()) (SOME data) f x;

end;

val is_worker = is_some o thread_data;
val worker_group = Option.map #3 o thread_data;


(* datatype future *)

datatype 'a future = Future of
 {task: task,
  group: group,
  result: 'a Exn.result option ref};

fun task_of (Future {task, ...}) = task;
fun group_of (Future {group, ...}) = group;

fun peek (Future {result, ...}) = ! result;
fun is_finished x = is_some (peek x);

fun value x = Future
 {task = Task_Queue.new_task 0,
  group = Task_Queue.new_group NONE,
  result = ref (SOME (Exn.Result x))};



(** scheduling **)

(* global state *)

val queue = ref Task_Queue.empty;
val next = ref 0;
val workers = ref ([]: (Thread.thread * bool) list);
val scheduler = ref (NONE: Thread.thread option);
val excessive = ref 0;
val canceled = ref ([]: Task_Queue.group list);
val do_shutdown = ref false;


(* synchronization *)

val scheduler_event = ConditionVar.conditionVar ();
val work_available = ConditionVar.conditionVar ();
val work_finished = ConditionVar.conditionVar ();

local
  val lock = Mutex.mutex ();
in

fun SYNCHRONIZED name = SimpleThread.synchronized name lock;

fun wait cond = (*requires SYNCHRONIZED*)
  ConditionVar.wait (cond, lock) handle Exn.Interrupt => ();

fun wait_interruptible cond timeout = (*requires SYNCHRONIZED*)
  interruptible (fn () =>
    ignore (ConditionVar.waitUntil (cond, lock, Time.+ (Time.now (), timeout)))) ();

fun signal cond = (*requires SYNCHRONIZED*)
  ConditionVar.signal cond;

fun broadcast cond = (*requires SYNCHRONIZED*)
  ConditionVar.broadcast cond;

fun broadcast_work () = (*requires SYNCHRONIZED*)
 (ConditionVar.broadcast work_available;
  ConditionVar.broadcast work_finished);

end;
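
(*Note: the global scheduler state above (queue, workers, canceled, excessive,
  do_shutdown, scheduler) is only changed while holding the single lock of
  SYNCHRONIZED; functions marked "requires SYNCHRONIZED" rely on that, and the
  condition variables scheduler_event, work_available, work_finished are
  waited on and signalled under the same lock.*)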


(* execute future jobs *)

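(*future_job produces a result slot together with a job closure: the job
  evaluates e under restricted interrupts and stores the outcome; an exception
  marks the whole group as canceled, so later jobs of that group are invoked
  with ok = false and merely record Exn.Interrupt instead of running.*)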
fun future_job group (e: unit -> 'a) =
  let
    val result = ref (NONE: 'a Exn.result option);
    fun job ok =
      let
        val res =
          if ok then
            Exn.capture (fn () =>
              (Thread.testInterrupt ();
                Multithreading.with_attributes Multithreading.restricted_interrupts
                  (fn _ => fn () => e ())) ()) ()
          else Exn.Exn Exn.Interrupt;
        val _ = result := SOME res;
      in
        (case res of
          Exn.Exn exn => (Task_Queue.cancel_group group exn; false)
        | Exn.Result _ => true)
      end;
  in (result, job) end;

fun do_cancel group = (*requires SYNCHRONIZED*)
 (change canceled (insert Task_Queue.eq_group group); broadcast scheduler_event);

fun execute name (task, group, jobs) =
  let
    val valid = not (Task_Queue.is_canceled group);
    val ok = setmp_thread_data (name, task, group) (fn () =>
      fold (fn job => fn ok => job valid andalso ok) jobs true) ();
    val _ = SYNCHRONIZED "finish" (fn () =>
      let
        val maximal = change_result queue (Task_Queue.finish task);
        val _ =
          if ok then ()
          else if Task_Queue.cancel (! queue) group then ()
          else do_cancel group;
        val _ = broadcast work_finished;
        val _ = if maximal then () else broadcast work_available;
      in () end);
  in () end;


(* worker activity *)

fun count_active () = (*requires SYNCHRONIZED*)
  fold (fn (_, active) => fn i => if active then i + 1 else i) (! workers) 0;

fun change_active active = (*requires SYNCHRONIZED*)
  change workers (AList.update Thread.equal (Thread.self (), active));


(* worker threads *)

fun worker_wait cond = (*requires SYNCHRONIZED*)
  (change_active false; wait cond; change_active true);

fun worker_next () = (*requires SYNCHRONIZED*)
  if ! excessive > 0 then
    (dec excessive;
     change workers (filter_out (fn (thread, _) => Thread.equal (thread, Thread.self ())));
     broadcast scheduler_event;
     NONE)
  else if count_active () > Multithreading.max_threads_value () then
    (worker_wait scheduler_event; worker_next ())
  else
    (case change_result queue (Task_Queue.dequeue (Thread.self ())) of
      NONE => (worker_wait work_available; worker_next ())
    | some => some);

fun worker_loop name =
  (case SYNCHRONIZED name (fn () => worker_next ()) of
    NONE => ()
  | SOME work => (execute name work; worker_loop name));

fun worker_start name = (*requires SYNCHRONIZED*)
  change workers (cons (SimpleThread.fork false (fn () =>
    (broadcast scheduler_event; worker_loop name)), true));


(* scheduler *)

val last_status = ref Time.zeroTime;
val next_status = Time.fromMilliseconds 500;
val next_round = Time.fromMilliseconds 50;

fun scheduler_next () = (*requires SYNCHRONIZED*)
  let
    (*queue and worker status*)
    val _ =
      let val now = Time.now () in
        if Time.> (Time.+ (! last_status, next_status), now) then ()
        else
         (last_status := now; Multithreading.tracing 1 (fn () =>
            let
              val {ready, pending, running} = Task_Queue.status (! queue);
              val total = length (! workers);
              val active = count_active ();
            in
              "SCHEDULE: " ^
                string_of_int ready ^ " ready, " ^
                string_of_int pending ^ " pending, " ^
                string_of_int running ^ " running; " ^
                string_of_int total ^ " workers, " ^
                string_of_int active ^ " active"
            end))
      end;

    (*worker threads*)
    val _ =
      if forall (Thread.isActive o #1) (! workers) then ()
      else
        (case List.partition (Thread.isActive o #1) (! workers) of
          (_, []) => ()
        | (alive, dead) =>
            (workers := alive; Multithreading.tracing 0 (fn () =>
              "SCHEDULE: disposed " ^ string_of_int (length dead) ^ " dead worker threads")));

    val m = if ! do_shutdown then 0 else Multithreading.max_threads_value ();
    val mm = (m * 3) div 2;
    val l = length (! workers);
    val _ = excessive := l - mm;
    val _ =
      if mm > l then
        funpow (mm - l) (fn () => worker_start ("worker " ^ string_of_int (inc next))) ()
      else ();

    (*canceled groups*)
    val _ =
      if null (! canceled) then ()
      else (change canceled (filter_out (Task_Queue.cancel (! queue))); broadcast_work ());

    (*delay loop*)
    val _ = wait_interruptible scheduler_event next_round
      handle Exn.Interrupt =>
        (Multithreading.tracing 1 (fn () => "Interrupt");
          List.app do_cancel (Task_Queue.cancel_all (! queue)));

    (*shutdown*)
    val _ = if Task_Queue.is_empty (! queue) then do_shutdown := true else ();
    val continue = not (! do_shutdown andalso null (! workers));
    val _ = if continue then () else scheduler := NONE;
    val _ = broadcast scheduler_event;
  in continue end;

fun scheduler_loop () =
  while SYNCHRONIZED "scheduler" (fn () => scheduler_next ()) do ();

fun scheduler_active () = (*requires SYNCHRONIZED*)
  (case ! scheduler of NONE => false | SOME thread => Thread.isActive thread);

fun scheduler_check () = (*requires SYNCHRONIZED*)
 (do_shutdown := false;
  if scheduler_active () then ()
  else scheduler := SOME (SimpleThread.fork false scheduler_loop));



(** futures **)

(* fork *)

fun fork_future opt_group deps pri e =
  let
    val group =
      (case opt_group of
        SOME group => group
      | NONE => Task_Queue.new_group (worker_group ()));
    val (result, job) = future_job group e;
    val task = SYNCHRONIZED "enqueue" (fn () =>
      let
        val (task, minimal) = change_result queue (Task_Queue.enqueue group deps pri job);
        val _ = if minimal then signal work_available else ();
        val _ = scheduler_check ();
      in task end);
  in Future {task = task, group = group, result = result} end;

fun fork e = fork_future NONE [] 0 e;
fun fork_group group e = fork_future (SOME group) [] 0 e;
fun fork_deps deps e = fork_future NONE (map task_of deps) 0 e;
fun fork_pri pri e = fork_future NONE [] pri e;


(* join *)

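(*Note: a worker thread that joins executes outstanding dependencies itself
  (join_work) instead of blocking, so the bounded worker pool cannot deadlock
  on mutually dependent joins; non-worker threads passively wait for
  work_finished (join_wait).*)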
local

fun get_result x =
  (case peek x of
    NONE => Exn.Exn (SYS_ERROR "unfinished future")
  | SOME (Exn.Exn Exn.Interrupt) =>
      Exn.Exn (Exn.EXCEPTIONS (Exn.flatten_list (Task_Queue.group_status (group_of x))))
  | SOME res => res);

fun join_wait x =
  if SYNCHRONIZED "join_wait" (fn () =>
    is_finished x orelse (wait work_finished; false))
  then () else join_wait x;

fun join_next deps = (*requires SYNCHRONIZED*)
  if null deps then NONE
  else
    (case change_result queue (Task_Queue.dequeue_towards (Thread.self ()) deps) of
      (NONE, []) => NONE
    | (NONE, deps') => (worker_wait work_finished; join_next deps')
    | (SOME work, deps') => SOME (work, deps'));

fun join_work deps =
  (case SYNCHRONIZED "join" (fn () => join_next deps) of
    NONE => ()
  | SOME (work, deps') => (execute "join" work; join_work deps'));

in

fun join_results xs =
  if forall is_finished xs then map get_result xs
  else if Multithreading.self_critical () then
    error "Cannot join future values within critical section"
  else uninterruptible (fn _ => fn () =>
     (if is_worker ()
      then join_work (map task_of xs)
      else List.app join_wait xs;
      map get_result xs)) ();

end;

fun join_result x = singleton join_results x;
fun join x = Exn.release (join_result x);


(* map *)

fun map_future f x =
  let
    val task = task_of x;
    val group = Task_Queue.new_group (SOME (group_of x));
    val (result, job) = future_job group (fn () => f (join x));

    val extended = SYNCHRONIZED "extend" (fn () =>
      (case Task_Queue.extend task job (! queue) of
        SOME queue' => (queue := queue'; true)
      | NONE => false));
  in
    if extended then Future {task = task, group = group, result = result}
    else fork_future (SOME group) [task] (Task_Queue.pri_of_task task) (fn () => f (join x))
  end;


(* cancellation *)

fun interruptible_task f x =
  if Multithreading.available then
   (Thread.testInterrupt ();
    Multithreading.with_attributes
      (if is_worker ()
       then Multithreading.restricted_interrupts
       else Multithreading.regular_interrupts)
      (fn _ => fn x => f x) x)
  else interruptible f x;

(*cancel: present and future group members will be interrupted eventually*)
fun cancel_group group = SYNCHRONIZED "cancel" (fn () => do_cancel group);
fun cancel x = cancel_group (group_of x);
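
(*Illustrative sketch of explicit group management; "jobs" is a hypothetical
  list of unit -> 'a functions, not from this file:

    val group = Task_Queue.new_group (Future.worker_group ());
    val futures = map (Future.fork_group group) jobs;
    val _ = Future.cancel_group group;    (*interrupts all members eventually*)
*)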


(* shutdown *)

fun shutdown () =
  if Multithreading.available then
    SYNCHRONIZED "shutdown" (fn () =>
     while scheduler_active () do
      (wait scheduler_event; broadcast_work ()))
  else ();


(*final declarations of this structure!*)
val map = map_future;

end;

type 'a future = 'a Future.future;