1 // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 #include "rust_kernel.h"
14 #include "rust_util.h"
15 #include "rust_scheduler.h"
16 #include "rust_sched_launcher.h"
20 KLOG(this, kern, __VA_ARGS__)
21 #define KLOG_ERR_(field, ...) \
22 KLOG_LVL(this, field, log_err, __VA_ARGS__)
// Kernel constructor: initializes task/exit bookkeeping, then builds the two
// built-in schedulers -- the single-threaded "osmain" scheduler (driven
// manually from the platform's main thread via osmain_driver) and the primary
// multi-threaded scheduler sized by env->num_sched_threads.
// NOTE(review): this excerpt elides lines (the original numbering skips), so
// additional member initializers and the tail of the body are not visible.
24 rust_kernel::rust_kernel(rust_env *env) :
// max_task_id starts one below INIT_TASK_ID because generate_task_id uses
// an increment-then-read atomic (see inline comment).
26 max_task_id(INIT_TASK_ID-1), // sync_add_and_fetch increments first
30 already_exiting(false),
35 at_exit_started(false),
38 // Create the single threaded scheduler that will run on the platform's
// The manual launcher factory exposes a driver so the caller (run(), below)
// can pump the osmain scheduler's loop itself; allow_exit=false here.
40 rust_manual_sched_launcher_factory *osmain_launchfac =
41 new rust_manual_sched_launcher_factory();
42 osmain_scheduler = create_scheduler(osmain_launchfac, 1, false);
43 osmain_driver = osmain_launchfac->get_driver();
45 // Create the primary scheduler
46 rust_thread_sched_launcher_factory *main_launchfac =
47 new rust_thread_sched_launcher_factory();
48 main_scheduler = create_scheduler(main_launchfac,
49 env->num_sched_threads,
// Format a printf-style message into a local buffer and emit it through the
// kernel logger at the given level.
// NOTE(review): the return-type line, `buf` declaration, and the
// va_start/va_end bracketing are elided in this excerpt -- confirm against
// the full file before editing.
56 rust_kernel::log(uint32_t level, char const *fmt, ...) {
60 vsnprintf(buf, sizeof(buf), fmt, args);
61 _log.trace_ln(NULL, level, buf);
// Log a fatal, printf-style message at the highest severity (level 0).
// NOTE(review): elided lines presumably declare `buf`, set up `args`, and
// abort/exit after logging -- verify in the full file.
66 rust_kernel::fatal(char const *fmt, ...) {
70 vsnprintf(buf, sizeof(buf), fmt, args);
71 _log.trace_ln(NULL, (uint32_t)0, buf);
// Allocate `size` bytes from the exchange heap. The `tag` parameter is not
// used by this implementation (visible here); it presumably exists for
// debug/trace builds -- confirm against callers.
77 rust_kernel::malloc(size_t size, const char *tag) {
78 return exchange_alloc.malloc(size);
// Resize an exchange-heap allocation; forwards directly to the exchange
// allocator.
82 rust_kernel::realloc(void *mem, size_t size) {
83 return exchange_alloc.realloc(mem, size);
// Release an exchange-heap allocation; forwards directly to the exchange
// allocator.
86 void rust_kernel::free(void *mem) {
87 exchange_alloc.free(mem);
// Convenience overload: create a thread-backed scheduler with `num_threads`
// threads that is allowed to exit on its own (allow_exit = true).
91 rust_kernel::create_scheduler(size_t num_threads) {
92 rust_thread_sched_launcher_factory *launchfac =
93 new rust_thread_sched_launcher_factory();
94 return create_scheduler(launchfac, num_threads, true);
// Core scheduler-creation path: under sched_lock, assign a fresh scheduler
// id, construct the scheduler, register it in sched_table, and start its
// task threads.
// NOTE(review): this excerpt elides lines -- the generation of `id`, the
// trailing constructor arguments after `killed,`, and the closing `*/` of
// the commented-out block below are all in elided lines.
98 rust_kernel::create_scheduler(rust_sched_launcher_factory *launchfac,
99 size_t num_threads, bool allow_exit) {
101 rust_scheduler *sched;
103 scoped_lock with(sched_lock);
// Dead (commented-out) code: previously, reaching 2 / 1 live schedulers
// pinned the main and osmain schedulers alive by calling disallow_exit().
105 /*if (sched_table.size() == 2) {
106 // The main and OS main schedulers may not exit while there are
108 KLOG_("Disallowing main scheduler to exit");
109 rust_scheduler *main_sched =
110 get_scheduler_by_id_nolock(main_scheduler);
111 assert(main_sched != NULL);
112 main_sched->disallow_exit();
114 if (sched_table.size() == 1) {
115 KLOG_("Disallowing osmain scheduler to exit");
116 rust_scheduler *osmain_sched =
117 get_scheduler_by_id_nolock(osmain_scheduler);
118 assert(osmain_sched != NULL);
119 osmain_sched->disallow_exit();
// Scheduler ids are monotonically increasing; hitting INTPTR_MAX is fatal.
123 assert(id != INTPTR_MAX && "Hit the maximum scheduler id");
124 sched = new (this, "rust_scheduler")
125 rust_scheduler(this, num_threads, id, allow_exit, killed,
// insert(...).second is false if the id already existed -- must be new.
127 bool is_new = sched_table
128 .insert(std::pair<rust_sched_id,
129 rust_scheduler*>(id, sched)).second;
130 assert(is_new && "Reusing a sched id?");
132 sched->start_task_threads();
// Thread-safe lookup: take sched_lock, then delegate to the _nolock variant.
137 rust_kernel::get_scheduler_by_id(rust_sched_id id) {
138 scoped_lock with(sched_lock);
139 return get_scheduler_by_id_nolock(id);
// Look up a scheduler in sched_table; caller must already hold sched_lock
// (enforced by must_have_lock()).
// NOTE(review): the return statements (found / not-found) are in elided
// lines; presumably returns the scheduler or NULL.
143 rust_kernel::get_scheduler_by_id_nolock(rust_sched_id id) {
147 sched_lock.must_have_lock();
148 sched_map::iterator iter = sched_table.find(id);
149 if (iter != sched_table.end()) {
// Queue a terminated scheduler's id for the reaper: wait_for_schedulers()
// drains join_list, joins the threads, and deletes the scheduler.
157 rust_kernel::release_scheduler_id(rust_sched_id id) {
158 scoped_lock with(sched_lock);
159 join_list.push_back(id);
164 Called by rust_sched_reaper to join every terminating scheduler thread,
165 so that we can be sure they have completely exited before the process exits.
166 If we don't join them then we can see valgrind errors due to un-freed pthread
stacks and other per-thread resources.
// Reaper loop: while schedulers remain registered, drain join_list --
// removing each dead scheduler from sched_table and joining its task
// threads so the process doesn't exit with live pthreads.
// NOTE(review): this excerpt elides lines -- the deletion of `sched`, the
// closing `*/` of the commented-out block, and whatever the final
// `if (!sched_table.empty())` guards (presumably a condvar wait) are not
// visible; confirm against the full file.
170 rust_kernel::wait_for_schedulers()
172 scoped_lock with(sched_lock);
173 while (!sched_table.empty()) {
174 while (!join_list.empty()) {
// Pop ids LIFO; order doesn't matter, every listed scheduler gets joined.
175 rust_sched_id id = join_list.back();
176 KLOG_("Deleting scheduler %d", id);
177 join_list.pop_back();
178 sched_map::iterator iter = sched_table.find(id);
179 assert(iter != sched_table.end());
180 rust_scheduler *sched = iter->second;
181 sched_table.erase(iter);
182 sched->join_task_threads();
// Dead (commented-out) code: old logic that released the main/osmain
// schedulers' exit permission as the table drained to 2 / 1 entries.
184 /*if (sched_table.size() == 2) {
185 KLOG_("Allowing main scheduler to exit");
186 // It's only the main schedulers left. Tell them to exit
187 rust_scheduler *main_sched =
188 get_scheduler_by_id_nolock(main_scheduler);
189 assert(main_sched != NULL);
190 main_sched->allow_exit();
192 if (sched_table.size() == 1) {
193 KLOG_("Allowing osmain scheduler to exit");
194 rust_scheduler *osmain_sched =
195 get_scheduler_by_id_nolock(osmain_scheduler);
196 assert(osmain_sched != NULL);
197 osmain_sched->allow_exit();
200 if (!sched_table.empty()) {
206 /* Called on the main thread to run the osmain scheduler to completion,
207 then wait for schedulers to exit */
// NOTE(review): the signature line (presumably `int rust_kernel::run()`) and
// the tail of the body (presumably wait_for_schedulers() + return of the
// exit status) are elided in this excerpt.
210 assert(osmain_driver != NULL);
// Blocks pumping the single-threaded osmain scheduler until it completes.
211 osmain_driver->start_main_loop();
// Abort the runtime after a task failure: record the failure exit status,
// then kill all tasks on every scheduler. Schedulers are snapshotted into a
// local vector so kill_all_tasks runs without holding sched_lock.
// NOTE(review): elided lines include the #if __WIN32__ branch body, the
// setting of the "doomed" flag mentioned at old line 235, and any refcount
// manipulation implied by "Refcount to ensure they stay alive".
217 rust_kernel::fail() {
218 // FIXME (#908): On windows we're getting "Application has
219 // requested the Runtime to terminate it in an unusual way" when
220 // trying to shutdown cleanly.
221 set_exit_status(PROC_FAIL_CODE);
222 #if defined(__WIN32__)
225 // I think this only needs to be done by one task ever; as it is,
226 // multiple tasks invoking kill_all might get here. Currently libcore
227 // ensures only one task will ever invoke it, but this would really be
228 // fine either way, so I'm leaving it as it is. -- bblum
230 // Copy the list of schedulers so that we don't hold the lock while
231 // running kill_all_tasks. Refcount to ensure they stay alive.
232 std::vector<rust_scheduler*> scheds;
234 scoped_lock with(sched_lock);
235 // All schedulers created after this flag is set will be doomed.
237 for (sched_map::iterator iter = sched_table.begin();
238 iter != sched_table.end(); iter++) {
240 scheds.push_back(iter->second);
// Lock is released here (scoped_lock scope ended in an elided line) before
// the kill loop below -- presumably; confirm against the full file.
244 for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
245 iter != scheds.end(); iter++) {
246 (*iter)->kill_all_tasks();
// Atomically hand out the next task id. increment() is fetch-after-add,
// matching the INIT_TASK_ID-1 seed in the constructor; overflowing to
// INTPTR_MAX is fatal.
252 rust_kernel::generate_task_id() {
253 rust_task_id id = sync::increment(max_task_id);
254 assert(id != INTPTR_MAX && "Hit the maximum task id");
// Record the process exit status under rval_lock. A prior failure status
// (PROC_FAIL_CODE) is sticky: once failed, later codes are ignored.
// NOTE(review): the assignment `rval = code;` is in an elided line.
259 rust_kernel::set_exit_status(int code) {
260 scoped_lock with(rval_lock);
261 // If we've already failed then that's the code we're going to use
262 if (rval != PROC_FAIL_CODE) {
// Atomically bump the count of live non-weak tasks (tasks that keep the
// runtime alive) and log the new value.
268 rust_kernel::inc_live_count() {
269 uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
270 KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
// Atomically drop the live non-weak task count. When it reaches zero the
// runtime can begin shutting down.
// NOTE(review): the body of the `== 0` branch (presumably begin_shutdown())
// is in an elided line -- confirm against the full file.
274 rust_kernel::dec_live_count() {
275 uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
276 KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
277 if (new_non_weak_tasks == 0) {
// During shutdown: under sched_lock, tell both built-in schedulers (main,
// then osmain) that they may exit once idle.
283 rust_kernel::allow_scheduler_exit() {
284 scoped_lock with(sched_lock);
286 KLOG_("Allowing main scheduler to exit");
287 // It's only the main schedulers left. Tell them to exit
288 rust_scheduler *main_sched =
289 get_scheduler_by_id_nolock(main_scheduler);
290 assert(main_sched != NULL);
291 main_sched->allow_exit();
293 KLOG_("Allowing osmain scheduler to exit");
294 rust_scheduler *osmain_sched =
295 get_scheduler_by_id_nolock(osmain_scheduler);
296 assert(osmain_sched != NULL);
297 osmain_sched->allow_exit();
// Start runtime shutdown: run registered at-exit functions, then let the
// built-in schedulers exit. Guarded by already_exiting so repeated calls
// (see FIXME below) are no-ops.
// NOTE(review): the early-return inside `if (already_exiting)` and the end
// of the sched_lock scope are in elided lines.
301 rust_kernel::begin_shutdown() {
303 scoped_lock with(sched_lock);
304 // FIXME #4410: This shouldn't be necessary, but because of
305 // unweaken_task this may end up getting called multiple times.
306 if (already_exiting) {
309 already_exiting = true;
313 run_exit_functions();
314 allow_scheduler_exit();
// Register a function to run at shutdown: remember the (single) runner that
// will invoke them and append the closure to at_exit_fns. Must not be
// called after shutdown has started running exit functions.
318 rust_kernel::register_exit_function(spawn_fn runner, fn_env_pair *f) {
319 scoped_lock with(at_exit_lock);
321 assert(!at_exit_started && "registering at_exit function after exit");
// Ideally all callers would pass the same runner; see FIXME below for why
// that can't be asserted.
323 if (at_exit_runner) {
324 // FIXME #2912 Would be very nice to assert this but we can't because
325 // of the way coretest works (the test case ends up using its own
327 //assert(runner == at_exit_runner
328 // && "there can be only one at_exit_runner");
331 at_exit_runner = runner;
332 at_exit_fns.push_back(f);
// Run the registered at-exit functions exactly once: spawn an "at_exit"
// task on the main scheduler that executes at_exit_runner over the
// collected at_exit_fns vector.
// NOTE(review): elided lines include the early return when no runner was
// registered, the declaration of `task` and `final_exit_fns`, and the tail
// of the function.
336 rust_kernel::run_exit_functions() {
340 scoped_lock with(at_exit_lock);
342 assert(!at_exit_started && "running exit functions twice?");
344 at_exit_started = true;
346 if (at_exit_runner == NULL) {
350 rust_scheduler *sched = get_scheduler_by_id(main_sched_id());
352 task = sched->create_task(NULL, "at_exit");
// Hand the runner a contiguous view of the registered functions.
354 final_exit_fns.count = at_exit_fns.size();
355 final_exit_fns.start = at_exit_fns.data();
358 task->start(at_exit_runner, NULL, &final_exit_fns);
365 // indent-tabs-mode: nil
367 // buffer-file-coding-system: utf-8-unix