#include "rust_internal.h"

#include "vg/valgrind.h"
#include "vg/memcheck.h"

#include <algorithm>

#ifndef __WIN32__
// Needed for ::backtrace and backtrace_symbols_fd below.
#include <execinfo.h>
#endif

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions.
// FIXME: We want this to be 128 but need to slim the red zone calls down.
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
// Matches the other platforms; RED_ZONE_SIZE refers to it on Win64 below.
#define RZ_WIN_64   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)

#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif

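// Every platform/arch pair above must yield a RED_ZONE_SIZE. This guard is
// a defensive addition (not part of the original ladder): it turns a silent
// miss on an unhandled platform into a compile error instead of a confusing
// failure at link or run time.
#ifndef RED_ZONE_SIZE
# error "unsupported platform: no RED_ZONE_SIZE defined"
#endif
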
// A value that goes at the end of the stack and must not be touched
const uint8_t stack_canary[] = {0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD};

size_t g_custom_min_stack_size = 0;

static size_t
get_min_stk_size(size_t default_size) {
    if (g_custom_min_stack_size != 0) {
        return g_custom_min_stack_size;
    } else {
        return default_size;
    }
}

static size_t
get_next_stk_size(rust_scheduler *sched, rust_task *task,
                  size_t min, size_t current, size_t requested) {
    LOG(task, mem, "calculating new stack size for 0x%" PRIxPTR, task);
    LOG(task, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(task, mem, "next stack size: %" PRIdPTR, sz);
    I(sched, requested <= sz);
    return sz;
}

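// Worked example of the policy above (illustrative numbers): with
// min = 0x1000, current = 0x1000 and requested = 0x500, doubling wins:
//
//     sz   = max(0x1000, 0x500)        == 0x1000
//     next = min(0x100000, 0x1000 * 2) == 0x2000
//     result: max(0x1000, 0x2000)      == 0x2000
//
// A huge frame still gets through: requested = 0x200000 yields
// max(0x200000, 0x100000) == 0x200000, i.e. the 1MiB cap only limits the
// doubling, never the caller's actual requirement.
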
// Task stack segments. Heap allocated and chained together.

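// For reference, the segment header assumed throughout this file. The real
// definition lives in the runtime headers (rust_internal.h); this sketch
// only names the fields used below and is not authoritative:
//
//     struct stk_seg {
//         stk_seg *prev;           // spare segment saved for reuse
//         stk_seg *next;           // segment this one grew out of
//         uintptr_t end;           // address one past the usable top
//         unsigned int valgrind_id;
//         uint8_t data[];          // canary, then stack space
//     };
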
static void
config_valgrind_stack(stk_seg *stk) {
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                stk->end);
#ifndef NVALGRIND
    // Establish that the stack is accessible. This must be done when reusing
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data + sizeof(stack_canary),
                                sz - sizeof(stack_canary));
#endif
}

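// The valgrind client requests used here, for reference (declared in
// vg/valgrind.h and vg/memcheck.h; signatures abbreviated):
//
//     unsigned VALGRIND_STACK_REGISTER(start, end);  // returns an id
//     VALGRIND_STACK_DEREGISTER(id);
//     VALGRIND_MAKE_MEM_UNDEFINED(addr, len);  // addressable, uninitialized
//
// All of these are effectively no-ops when not running under valgrind.
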
static void
unconfig_valgrind_stack(stk_seg *stk) {
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}

static void
free_stk(rust_task *task, stk_seg *stk) {
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}

static void
add_stack_canary(stk_seg *stk) {
    memcpy(stk->data, stack_canary, sizeof(stack_canary));
    assert(sizeof(stack_canary) == 16
           && "Stack canary was not the expected size");
}

static void
check_stack_canary(stk_seg *stk) {
    assert(!memcmp(stk->data, stack_canary, sizeof(stack_canary))
           && "Somebody killed the canary");
}

static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
{
    LOG(task, mem, "creating new stack for task %" PRIxPTR, task);
    if (task->stk) {
        check_stack_canary(task->stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = get_min_stk_size(sched->min_stack_size);

    // Try to reuse an existing stack segment
    if (task->stk != NULL && task->stk->prev != NULL) {
        size_t prev_sz = (size_t)(task->stk->prev->end
                                  - (uintptr_t)&task->stk->prev->data[0]
                                  - RED_ZONE_SIZE);
        if (min_sz <= prev_sz && requested_sz <= prev_sz) {
            LOG(task, mem, "reusing existing stack");
            task->stk = task->stk->prev;
            A(sched, task->stk->prev == NULL, "Bogus stack ptr");
            config_valgrind_stack(task->stk);
            return task->stk;
        } else {
            LOG(task, mem, "existing stack is not big enough");
            free_stk(task, task->stk->prev);
            task->stk->prev = NULL;
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (task->stk != NULL) {
        current_sz = (size_t)(task->stk->end
                              - (uintptr_t)&task->stk->data[0]
                              - RED_ZONE_SIZE);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stk_size(sched, task, min_sz,
                                           current_sz, requested_sz);

    size_t sz = sizeof(stk_seg) + rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    add_stack_canary(stk);
    stk->prev = NULL;
    stk->next = task->stk;
    stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
    LOGPTR(task->sched, "stk end", stk->end);

    task->stk = stk;
    config_valgrind_stack(task->stk);
    return stk;
}

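// Layout of the allocation new_stk produces (addresses increase to the
// right; the stack itself grows down, from end toward data):
//
//     [ stk_seg header | canary | red zone ... | usable stack ... ]
//     ^ stk             ^ stk->data            ^ limit             ^ stk->end
//
// where the recorded limit sits RED_ZONE_SIZE + LIMIT_OFFSET bytes above
// data (see record_stack_limit below).
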
static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");
    check_stack_canary(stk);

    task->stk = stk->next;

    bool delete_stack = false;
    if (task->stk != NULL) {
        // Don't actually delete this stack. Save it to reuse later,
        // preventing the pathological case where we repeatedly reallocate
        // the stack for the next frame.
        task->stk->prev = stk;
    } else {
        // This is the last stack, delete it.
        delete_stack = true;
    }

    // Delete the stack saved on the previous pop, if any; we only ever
    // keep one segment in reserve.
    if (stk->prev != NULL) {
        free_stk(task, stk->prev);
        stk->prev = NULL;
    }

    unconfig_valgrind_stack(stk);
    if (delete_stack) {
        free_stk(task, stk);
    }
}

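// Shape of the chain this maintains (illustrative): after popping segment C
// while an older segment B remains,
//
//     task->stk -> B --next--> A (older segments, if any)
//                  B --prev--> C (kept in reserve for the next grow)
//
// so at most one popped segment is retained, and the next new_stk call can
// reuse C without a fresh allocation.
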
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    sched(sched),
    kernel(sched->kernel),
    name(name),
    state(state),
    supervisor(spawner),
    local_region(&sched->srv->local_region),
    propagate_failure(true)
{
    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

    assert((void*)this == (void*)&user);

    user.notify_enabled = 0;

    stk = new_stk(sched, this, 0);
    user.rust_sp = stk->end;
    if (supervisor) {
        supervisor->ref();
    }
}

rust_task::~rust_task()
{
    I(sched, !sched->lock.lock_held_by_current_thread());
    I(sched, port_table.is_empty());
    DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    if (supervisor) {
        supervisor->deref();
    }

    kernel->release_task_id(user.id);

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(sched, ref_count == 0); // ||
    //   (ref_count == 1 && this == sched->root_task));

    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    while (stk != NULL) {
        del_stk(this, stk);
    }
}

struct rust_closure {
    type_desc *td;
    // ... see trans_closure.rs for full description ...
};

struct rust_boxed_closure {
    intptr_t ref_count;
    rust_closure closure;
};

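// Sketch of the spawn_args block referenced below. The real definition
// appears elsewhere in the runtime; the field set here is inferred from the
// uses in cleanup_task and task_start_wrapper and is an assumption, not
// authoritative:
//
//     struct spawn_args {
//         rust_task *task;
//         uintptr_t envptr;   // rust_boxed_closure*, possibly 0
//         uintptr_t argptr;   // argument block for the task fn
//         void (*CDECL f)(int *, uintptr_t, uintptr_t);
//     };
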
struct cleanup_args {
    spawn_args *spargs;
    bool failed;
};

// This runs on the Rust stack
static void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool failed = args->failed;
    rust_task *task = a->task;

    rust_boxed_closure *boxed_env = (rust_boxed_closure*)a->envptr;
    if (boxed_env) {
        // Free the environment.
        rust_closure *env = &boxed_env->closure;
        env->td->drop_glue(NULL, NULL, &env->td, env);
        env->td->free_glue(NULL, NULL, &env->td, env);
    }

    task->die();

    if (task->killed && !failed) {
        LOG(task, task, "Task killed during termination");
        failed = true;
    }

    task->notify(!failed);

    if (failed) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->sched, false, "Shouldn't happen");
#endif
    }
}

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool failed = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        A(task->sched, ex == task,
          "Expected this task to be thrown for unwinding");
        failed = true;
    }

    cleanup_args ca = {a, failed};

    // The cleanup work needs lots of stack
    task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}

void
rust_task::start(spawn_fn spawnee_fn,
                 uintptr_t env, uintptr_t arg)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR, spawnee_fn, env);

    I(sched, stk->data != NULL);

    char *sp = (char *)user.rust_sp;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = env;
    a->argptr = arg;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

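// What the new task's stack looks like at the moment of ctx.call
// (illustrative; the stack grows downward):
//
//     stk->end / user.rust_sp
//         | spawn_args { task, envptr, argptr, f }   <- a == sp
//         | ... frames of task_start_wrapper onward ...
//         v toward the recorded stack limit
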
void rust_task::start()
{
    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}

// Only run this on the rust stack
void
rust_task::yield(size_t time_in_us, bool *killed) {
    if (this->killed) {
        *killed = true;
    }

    yield_timer.reset_us(time_in_us);

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (this->killed) {
        *killed = true;
    }
}

void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        fail_parent();
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.
    unblock();

    sched->lock.signal();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
#ifndef __WIN32__
    throw this;
#else
    die();
    conclude_failure();
    // FIXME: Need unwinding on windows. This will end up aborting
    sched->fail();
#endif
}

void
rust_task::conclude_failure() {
    fail_parent();
}

void
rust_task::fail_parent() {
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
        sched->fail();
}

void
rust_task::unsupervise()
{
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " disconnecting from supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->deref();
    }
    supervisor = NULL;
    propagate_failure = false;
}

frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

bool
rust_task::running()
{
    return state == &sched->running_tasks;
}

bool
rust_task::blocked()
{
    return state == &sched->blocked_tasks;
}

bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead()
{
    return state == &sched->dead_tasks;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz, bool is_gc)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p, bool is_gc)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
    bool unlock = false;
    if(!sched->lock.lock_held_by_current_thread()) {
        unlock = true;
        sched->lock.lock();
    }
    DLOG(sched, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, src->name, dst->name, state->name);
    I(sched, state == src);
    src->remove(this);
    dst->append(this);
    state = dst;
    if (unlock)
        sched->lock.unlock();
}

void
rust_task::block(rust_cond *on, const char* name) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(sched, cond == NULL, "Cannot block an already blocked task.");
    A(sched, on != NULL, "Cannot block on a NULL object.");

    transition(&sched->running_tasks, &sched->blocked_tasks);
    cond = on;
    cond_name = name;
}

void
rust_task::wakeup(rust_cond *from) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    A(sched, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");

    transition(&sched->blocked_tasks, &sched->running_tasks);
    I(sched, cond == from);
    cond = NULL;
    cond_name = "none";

    sched->lock.signal();
}

void
rust_task::die() {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    transition(&sched->running_tasks, &sched->dead_tasks);
    sched->lock.signal();
}

void
rust_task::unblock() {
    if (blocked()) {
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
    }
}

rust_crate_cache *
rust_task::get_crate_cache()
{
    if (!cache) {
        DLOG(sched, task, "fetching cache for current crate");
        cache = sched->get_cache();
        cache->ref();
    }
    return cache;
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

bool rust_task::can_schedule(int id)
{
    return yield_timer.has_timed_out() &&
        running_on == -1 &&
        (pinned_on == -1 || pinned_on == id);
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

void rust_task::pin() {
    I(this->sched, running_on != -1);
    pinned_on = running_on;
}

void rust_task::pin(int id) {
    I(this->sched, running_on == -1);
    pinned_on = id;
}

void rust_task::unpin() {
    pinned_on = -1;
}

rust_port_id rust_task::register_port(rust_port *port) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    rust_port_id id = next_port_id++;
    port_table.put(id, port);
    return id;
}

void rust_task::release_port(rust_port_id id) {
    I(sched, lock.lock_held_by_current_thread());
    port_table.remove(id);
}

rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}

// Temporary routine to allow boxes on one task's shared heap to be reparented
// to another.
const type_desc *
rust_task::release_alloc(void *alloc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) != local_allocs.end());
    const type_desc *tydesc = local_allocs[alloc];
    local_allocs.erase(alloc);

    local_region.release_alloc(alloc);

    lock.unlock();
    return tydesc;
}

// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
void
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) == local_allocs.end());
    local_allocs[alloc] = tydesc;
    local_region.claim_alloc(alloc);

    lock.unlock();
}

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if(user.notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(user.notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(user.notify_chan.port);
            if (target_port) {
                task_notification msg;
                msg.id = user.id;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}

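// Sketch of the notification payload assumed above (the real definition
// lives in the runtime headers; fields inferred from the uses here, so
// treat this as an assumption):
//
//     struct task_notification {
//         rust_task_id id;     // the terminating task, user.id
//         task_result result;  // tr_success or tr_failure
//     };
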
extern "C" CDECL void
record_sp(void *limit);

void *
rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
    A(sched, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
      "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk_seg->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);
    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}

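// align_down is defined elsewhere in the runtime (rust_util.h in this
// tree); for reference it behaves like the following sketch, rounding a
// pointer down to a 16-byte boundary so the copied argument block meets
// the platform ABI's stack-alignment requirement:
//
//     template <typename T>
//     static inline T align_down(T sp) {
//         // No platform we care about needs more than 16-byte alignment.
//         return (T)((uintptr_t)sp & ~(16 - 1));
//     }
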
void
rust_task::del_stack() {
    del_stk(this, stk);
    record_stack_limit();
}

void
rust_task::record_stack_limit() {
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(sched,
      (uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}

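// Illustration of the prolog check the comment above describes (schematic;
// exact codegen varies). For a frame smaller than LIMIT_OFFSET the compiler
// emits, in effect:
//
//     cmp %esp, <recorded limit>   ; no frame-size subtraction
//     jb  __morestack              ; grow if below the limit
//
// so up to LIMIT_OFFSET bytes can be consumed with no explicit size check,
// which is exactly the slack record_sp is handed above.
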
extern "C" uintptr_t get_sp();

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which stack segment we
are currently running on, delete the others, and record the stack limit
(which was not restored when unwinding through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        del_stk(this, stk);
        A(sched, stk != NULL, "Failed to find the current stack");
    }
    record_stack_limit();
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    return sp_in_stk_seg(get_sp(), stk);
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//