// src/rt/rust_scheduler.cpp

#include <stdarg.h>
#include <cassert>
#include <pthread.h>
#include "rust_internal.h"
#include "globals.h"

#ifndef _WIN32
pthread_key_t rust_scheduler::task_key;
#else
DWORD rust_scheduler::task_key;
#endif

bool rust_scheduler::tls_initialized = false;

rust_scheduler::rust_scheduler(rust_kernel *kernel,
                               rust_srv *srv,
                               int id) :
    ref_count(1),
    interrupt_flag(0),
    _log(srv, this),
    log_lvl(log_debug),
    srv(srv),
    // TODO: calculate a per-scheduler name.
    name("main"),
    newborn_tasks(this, "newborn"),
    running_tasks(this, "running"),
    blocked_tasks(this, "blocked"),
    dead_tasks(this, "dead"),
    cache(this),
    kernel(kernel),
    id(id),
    min_stack_size(kernel->env->min_stack_size),
    env(kernel->env)
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    isaac_init(this, &rctx);
#ifndef __WIN32__
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);
    pthread_attr_setdetachstate(&attr, true);
#endif

    if (!tls_initialized)
        init_tls();
}

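// Tear down the scheduler: any task still sitting on one of the queues is
// deleted outright, and the pthread attributes set up in the constructor
// are released on non-Windows builds.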
rust_scheduler::~rust_scheduler() {
    DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);

    newborn_tasks.delete_all();
    running_tasks.delete_all();
    blocked_tasks.delete_all();
    dead_tasks.delete_all();
#ifndef __WIN32__
    pthread_attr_destroy(&attr);
#endif
}

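// Swap from the scheduler's C context into `task` and run it until it
// yields or blocks, at which point the swap returns here. The scheduler
// lock is released across the swap and reacquired once the task hands
// control back.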
void
rust_scheduler::activate(rust_task *task) {
    task->ctx.next = &c_context;
    DLOG(this, task, "descheduling...");
    lock.unlock();
    task->ctx.swap(c_context);
    lock.lock();
    DLOG(this, task, "task has returned");
}

void
rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    _log.trace_ln(task, level, buf);
    va_end(args);
}

void
rust_scheduler::fail() {
    log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
        name, (uintptr_t)this);
    I(this, kernel->rval == 0);
    kernel->rval = PROC_FAIL_CODE;
    kernel->fail();
}

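// Kill every running and blocked task on this scheduler. Used when the
// runtime is already failing everything: tasks are unsupervised first so
// their deaths do not propagate further failures back into the kernel.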
void
rust_scheduler::kill_all_tasks() {
    I(this, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    for (size_t i = 0; i < running_tasks.length(); i++) {
        // We don't want the failure of these tasks to propagate back
        // to the kernel again since we're already failing everything
        running_tasks[i]->unsupervise();
        running_tasks[i]->kill();
    }

    for (size_t i = 0; i < blocked_tasks.length(); i++) {
        blocked_tasks[i]->unsupervise();
        blocked_tasks[i]->kill();
    }
}

size_t
rust_scheduler::number_of_live_tasks() {
    return running_tasks.length() + blocked_tasks.length();
}

/**
 * Delete any dead tasks.
 */
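// The lock juggling below is deliberate: the dead list is snapshotted with
// the scheduler lock held, the per-task checks and the final deref run with
// it released (so a sched->lock then task->lock ordering is never taken),
// and the list itself is only mutated once the scheduler lock is reacquired.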
void
rust_scheduler::reap_dead_tasks(int id) {
    I(this, lock.lock_held_by_current_thread());
    if (dead_tasks.length() == 0) {
        return;
    }

    // First make a copy of the dead_task list with the lock held
    size_t dead_tasks_len = dead_tasks.length();
    rust_task **dead_tasks_copy = (rust_task**)
        srv->malloc(sizeof(rust_task*) * dead_tasks_len);
    for (size_t i = 0; i < dead_tasks_len; ++i) {
        rust_task *task = dead_tasks[i];
        dead_tasks_copy[i] = task;
    }

    // Now drop the lock and futz with the tasks. This avoids establishing
    // a sched->lock then task->lock locking order, which would be devastating
    // to performance.
    lock.unlock();

    for (size_t i = 0; i < dead_tasks_len; ++i) {
        rust_task *task = dead_tasks_copy[i];
        task->lock.lock();
        // Make sure this task isn't still running somewhere else...
        if (task->can_schedule(id)) {
            DLOG(this, task,
                "deleting unreferenced dead task %s @0x%" PRIxPTR,
                task->name, (uintptr_t)task);
            task->lock.unlock();
        } else {
            task->lock.unlock();
            dead_tasks_copy[i] = NULL;
        }
    }

    // Now grab the lock again and remove the tasks that were truly dead
    lock.lock();

    for (size_t i = 0; i < dead_tasks_len; ++i) {
        rust_task *task = dead_tasks_copy[i];
        if (task) {
            dead_tasks.remove(task);
        }
    }

    // Now unlock again because we have to actually free the dead tasks,
    // and that may end up wanting to lock the task and sched locks
    // again (via target->send)
    lock.unlock();

    for (size_t i = 0; i < dead_tasks_len; ++i) {
        rust_task *task = dead_tasks_copy[i];
        if (task) {
            task->deref();
            sync::decrement(kernel->live_tasks);
            kernel->wakeup_schedulers();
        }
    }
    srv->free(dead_tasks_copy);

    lock.lock();
}

/**
 * Schedules a running task for execution. Only running tasks can be
 * activated.  Blocked tasks have to be unblocked before they can be
 * activated.
 *
 * Returns NULL if no tasks can be scheduled.
 */
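// Scanning starts at a random index so that, when several tasks are
// runnable, no single one is persistently favored over the others.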
rust_task *
rust_scheduler::schedule_task(int id) {
    I(this, this);
    // FIXME: in the face of failing tasks, this is not always right.
    // I(this, n_live_tasks() > 0);
    if (running_tasks.length() > 0) {
        size_t k = isaac_rand(&rctx);
        // Look around for a runnable task, starting at k.
        for (size_t j = 0; j < running_tasks.length(); ++j) {
            size_t i = (j + k) % running_tasks.length();
            if (running_tasks[i]->can_schedule(id)) {
                return (rust_task *)running_tasks[i];
            }
        }
    }
    return NULL;
}

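// Debug helper: dump the running, blocked and dead queues. Does nothing
// unless log_rt_task is at least log_debug.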
void
rust_scheduler::log_state() {
    if (log_rt_task < log_debug) return;

    if (!running_tasks.is_empty()) {
        log(NULL, log_debug, "running tasks:");
        for (size_t i = 0; i < running_tasks.length(); i++) {
            log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR
                " remaining: %" PRId64 " us",
                running_tasks[i]->name,
                running_tasks[i],
                running_tasks[i]->yield_timer.remaining_us());
        }
    }

    if (!blocked_tasks.is_empty()) {
        log(NULL, log_debug, "blocked tasks:");
        for (size_t i = 0; i < blocked_tasks.length(); i++) {
            log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR ", blocked on: 0x%"
                PRIxPTR " '%s'",
                blocked_tasks[i]->name, blocked_tasks[i],
                blocked_tasks[i]->cond, blocked_tasks[i]->cond_name);
        }
    }

    if (!dead_tasks.is_empty()) {
        log(NULL, log_debug, "dead tasks:");
        for (size_t i = 0; i < dead_tasks.length(); i++) {
            log(NULL, log_debug, "\t task: %s 0x%" PRIxPTR,
                dead_tasks[i]->name, dead_tasks[i]);
        }
    }
}

/**
 * Starts the main scheduler loop which performs task scheduling for this
 * domain.
 *
 * Returns once no more tasks can be scheduled and all task ref_counts
 * drop to zero.
 */
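// When schedule_task() finds nothing runnable, the loop parks on the lock
// with a short timed wait instead of spinning, reaps any dead tasks, and
// retries until kernel->live_tasks drops to zero.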
void
rust_scheduler::start_main_loop() {
    lock.lock();

    // Make sure someone is watching, to pull us out of infinite loops.
    //
    // FIXME: time-based interruption is not presently working; worked
    // in rustboot and has been completely broken in rustc.
    //
    // rust_timer timer(this);

    DLOG(this, dom, "started domain loop %d", id);

    while (kernel->live_tasks > 0) {
        A(this, kernel->is_deadlocked() == false, "deadlock");

        DLOG(this, dom, "worker %d, number_of_live_tasks = %d, total = %d",
             id, number_of_live_tasks(), kernel->live_tasks);

        rust_task *scheduled_task = schedule_task(id);

        if (scheduled_task == NULL) {
            log_state();
            DLOG(this, task,
                 "all tasks are blocked, scheduler id %d yielding ...",
                 id);
            lock.timed_wait(10);
            reap_dead_tasks(id);
            DLOG(this, task,
                 "scheduler %d resuming ...", id);
            continue;
        }

        I(this, scheduled_task->running());

        DLOG(this, task,
             "activating task %s 0x%" PRIxPTR
             ", sp=0x%" PRIxPTR
             ", state: %s",
             scheduled_task->name,
             (uintptr_t)scheduled_task,
             scheduled_task->user.rust_sp,
             scheduled_task->state->name);

        place_task_in_tls(scheduled_task);

        interrupt_flag = 0;

        DLOG(this, task,
             "Running task %p on worker %d",
             scheduled_task, id);
        scheduled_task->running_on = id;
        activate(scheduled_task);
        scheduled_task->running_on = -1;

        DLOG(this, task,
             "returned from task %s @0x%" PRIxPTR
             " in state '%s', sp=0x%" PRIxPTR ", worker id=%d",
             scheduled_task->name,
             (uintptr_t)scheduled_task,
             scheduled_task->state->name,
             scheduled_task->user.rust_sp,
             id);

        reap_dead_tasks(id);
    }

    DLOG(this, dom,
         "terminated scheduler loop, reaping dead tasks ...");

    while (dead_tasks.length() > 0) {
        DLOG(this, dom,
             "waiting for %d dead tasks to become dereferenced, "
             "scheduler yielding ...",
             dead_tasks.length());
        log_state();
        lock.unlock();
        sync::yield();
        lock.lock();
        reap_dead_tasks(id);
    }

    DLOG(this, dom, "finished main-loop %d", id);

    lock.unlock();
}

rust_crate_cache *
rust_scheduler::get_cache() {
    return &cache;
}

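// Allocate a new task through the kernel, pin it wherever its spawner is
// pinned (if it has one), register it on the newborn queue under the
// scheduler lock, and bump the kernel-wide live task count.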
rust_task *
rust_scheduler::create_task(rust_task *spawner, const char *name) {
    rust_task *task =
        new (this->kernel, "rust_task")
        rust_task (this, &newborn_tasks, spawner, name);
    DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
                        task, spawner ? spawner->name : "null", name);
    if (spawner) {
        task->pin(spawner->pinned_on);
    }

    {
        scoped_lock with(lock);
        newborn_tasks.append(task);
    }

    sync::increment(kernel->live_tasks);

    return task;
}

void rust_scheduler::run() {
    this->start_main_loop();
}

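// Thread-local storage for the task currently running on this thread:
// POSIX builds use a pthread key, Windows builds use a TLS slot obtained
// with TlsAlloc.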
#ifndef _WIN32
void
rust_scheduler::init_tls() {
    int result = pthread_key_create(&task_key, NULL);
    assert(!result && "Couldn't create the TLS key!");
    tls_initialized = true;
}

void
rust_scheduler::place_task_in_tls(rust_task *task) {
    int result = pthread_setspecific(task_key, task);
    assert(!result && "Couldn't place the task in TLS!");
    task->record_stack_limit();
}

rust_task *
rust_scheduler::get_task() {
    if (!tls_initialized)
        return NULL;
    rust_task *task = reinterpret_cast<rust_task *>
        (pthread_getspecific(task_key));
    assert(task && "Couldn't get the task from TLS!");
    return task;
}
#else
void
rust_scheduler::init_tls() {
    task_key = TlsAlloc();
    assert(task_key != TLS_OUT_OF_INDEXES && "Couldn't create the TLS key!");
    tls_initialized = true;
}

void
rust_scheduler::place_task_in_tls(rust_task *task) {
    BOOL result = TlsSetValue(task_key, task);
    assert(result && "Couldn't place the task in TLS!");
    task->record_stack_limit();
}

rust_task *
rust_scheduler::get_task() {
    if (!tls_initialized)
        return NULL;
    rust_task *task = reinterpret_cast<rust_task *>(TlsGetValue(task_key));
    assert(task && "Couldn't get the task from TLS!");
    return task;
}
#endif

//
// Local Variables:
// mode: C++
// fill-column: 70;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//