#include "rust_internal.h"
#include "vg/valgrind.h"
#include "vg/memcheck.h"
// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)
#define RED_ZONE_SIZE RZ_LINUX_32
#define RED_ZONE_SIZE RZ_LINUX_64
#define RED_ZONE_SIZE RZ_MAC_32
#define RED_ZONE_SIZE RZ_MAC_64
#define RED_ZONE_SIZE RZ_WIN_32
#define RED_ZONE_SIZE RZ_WIN_64
#define RED_ZONE_SIZE RZ_BSD_32
#define RED_ZONE_SIZE RZ_BSD_64
// A value that goes at the end of the stack and must not be touched
const uint8_t stack_canary[] = {0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD};
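// The canary occupies the first 16 bytes of each segment's data area, i.e.
// the very bottom of the downward-growing stack; add_stack_canary writes it
// when a segment is created and check_stack_canary verifies it whenever a
// segment is switched or freed.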
size_t g_custom_min_stack_size = 0;

static size_t
get_min_stk_size(size_t default_size) {
    if (g_custom_min_stack_size != 0) {
        return g_custom_min_stack_size;
    }
    return default_size;
}
static size_t
get_next_stk_size(rust_scheduler *sched, rust_task *task,
                  size_t min, size_t current, size_t requested) {
    LOG(task, mem, "calculating new stack size for 0x%" PRIxPTR, task);
    LOG(task, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);
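    // For example, with min = 0x1000, current = 0x1000 and requested = 0x200:
    // sz = max(0x1000, 0x200) = 0x1000, next = min(1MB, 0x2000) = 0x2000,
    // so the new segment will be 0x2000 bytes (doubling wins until the 1MB
    // cap is reached).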

    LOG(task, mem, "next stack size: %" PRIdPTR, sz);
    I(sched, requested <= sz);
    return sz;
}
// Task stack segments. Heap allocated and chained together.
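// Each task points at its newest segment via task->stk; a segment's ->next
// links to the older segment beneath it, and ->prev may cache one
// already-popped segment so it can be reused instead of reallocated
// (see new_stk/del_stk below).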
static void
config_valgrind_stack(stk_seg *stk) {
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                (void*)stk->end);
    // Establish that the stack is accessible. This must be done when reusing
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data + sizeof(stack_canary),
                                sz - sizeof(stack_canary));
}
static void
unconfig_valgrind_stack(stk_seg *stk) {
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}
static void
free_stk(rust_task *task, stk_seg *stk) {
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}
static void
add_stack_canary(stk_seg *stk) {
    memcpy(stk->data, stack_canary, sizeof(stack_canary));
    assert(sizeof(stack_canary) == 16 && "Stack canary was not the expected size");
}
static void
check_stack_canary(stk_seg *stk) {
    assert(!memcmp(stk->data, stack_canary, sizeof(stack_canary))
           && "Somebody killed the canary");
}
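// A task that overruns its segment ends up trampling the canary at the very
// bottom of the data area (just above the stk_seg header), so the checks
// above catch the overrun the next time the segment is switched or freed.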
static stk_seg *
new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
{
    LOG(task, mem, "creating new stack for task %" PRIxPTR, task);
    if (task->stk) {
        check_stack_canary(task->stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = get_min_stk_size(sched->min_stack_size);

    // Try to reuse an existing stack segment
    if (task->stk != NULL && task->stk->prev != NULL) {
        size_t prev_sz = (size_t)(task->stk->prev->end
                                  - (uintptr_t)&task->stk->prev->data[0]
                                  - RED_ZONE_SIZE);
        if (min_sz <= prev_sz && requested_sz <= prev_sz) {
            LOG(task, mem, "reusing existing stack");
            task->stk = task->stk->prev;
            A(sched, task->stk->prev == NULL, "Bogus stack ptr");
            config_valgrind_stack(task->stk);
            return task->stk;
        } else {
            LOG(task, mem, "existing stack is not big enough");
            free_stk(task, task->stk->prev);
            task->stk->prev = NULL;
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (task->stk != NULL) {
        current_sz = (size_t)(task->stk->end
                              - (uintptr_t)&task->stk->data[0]
                              - RED_ZONE_SIZE);
    }

    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stk_size(sched, task, min_sz,
                                           current_sz, requested_sz);

    size_t sz = sizeof(stk_seg) + rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    add_stack_canary(stk);
    stk->next = task->stk;
    stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
    LOGPTR(task->sched, "stk end", stk->end);
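    // Segment layout, low to high addresses:
    //   [stk_seg header][canary (16 bytes)][rest of red zone]
    //   [rust_stk_sz bytes of ordinary stack], with stk->end one past the
    // top. The stack pointer starts at stk->end and grows down toward the
    // red zone (see record_stack_limit below).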

    task->stk = stk;
    config_valgrind_stack(task->stk);
    return stk;
}
static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");
    check_stack_canary(stk);

    task->stk = stk->next;

    bool delete_stack = false;
    if (task->stk != NULL) {
        // Don't actually delete this stack. Save it to reuse later,
        // preventing the pathological case where we repeatedly reallocate
        // the stack for the next frame.
        task->stk->prev = stk;
    } else {
        // This is the last stack, delete it.
        delete_stack = true;
    }

    // Delete the previous previous stack
    if (stk->prev != NULL) {
        free_stk(task, stk->prev);
        stk->prev = NULL;
    }

    unconfig_valgrind_stack(stk);
    if (delete_stack) {
        free_stk(task, stk);
    }
}
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    kernel(sched->kernel),
    local_region(&sched->srv->local_region),
    propagate_failure(true),

    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

    assert((void*)this == (void*)&user);
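    // The user-visible task struct must live at offset 0 of rust_task so the
    // two can be cast to one another; the assert above checks that.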
    user.notify_enabled = 0;

    stk = new_stk(sched, this, 0);
    user.rust_sp = stk->end;
rust_task::~rust_task()
{
    I(sched, !sched->lock.lock_held_by_current_thread());
    I(sched, port_table.is_empty());
    DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    kernel->release_task_id(user.id);

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(sched, ref_count == 0); // ||
    //   (ref_count == 1 && this == sched->root_task));

    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    while (stk != NULL) {
        del_stk(this, stk);
    }
}
struct spawn_args {
    rust_task *task;
    uintptr_t a3;
    uintptr_t a4;
    void (*CDECL f)(int *, uintptr_t, uintptr_t);
};

struct rust_closure_env {
    intptr_t ref_count;
    type_desc *td;
};

struct cleanup_args {
    spawn_args *spargs;
    bool failed;
};
static void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool failed = args->failed;
    rust_task *task = a->task;

    rust_closure_env* env = (rust_closure_env*)a->a3;
    // free the environment.
    I(task->sched, 1 == env->ref_count); // the ref count better be 1
    //env->td->drop_glue(NULL, task, NULL, env->td->first_param, env);
    //env->td->free_glue(NULL, task, NULL, env->td->first_param, env);

    if (task->killed && !failed) {
        LOG(task, task, "Task killed during termination");
        failed = true;
    }

    task->notify(!failed);

    if (failed) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->sched, false, "Shouldn't happen");
#endif
    }
}
// This runs on the Rust stack
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;
    int rval;

    bool failed = false;
    try {
        a->f(&rval, a->a3, a->a4);
    } catch (rust_task *ex) {
        A(task->sched, ex == task,
          "Expected this task to be thrown for unwinding");
        failed = true;
    }

    cleanup_args ca = {a, failed};

    // The cleanup work needs lots of stack
    task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);
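    // Hand control back to the scheduler context; the task's work and
    // cleanup are finished at this point.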
    task->ctx.next->swap(task->ctx);
}
void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args, uintptr_t env)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with args 0x%" PRIxPTR, spawnee_fn, args);

    I(sched, stk->data != NULL);

    char *sp = (char *)user.rust_sp;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    void **f = (void **)&a->f;
    *f = (void *)spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);
void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args)
{
    start(spawnee_fn, args, 0);
}
void rust_task::start()
{
    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}
// Only run this on the rust stack
void
rust_task::yield(size_t time_in_us, bool *killed) {
    yield_timer.reset_us(time_in_us);

    // Return to the scheduler.
    ctx.next->swap(ctx);
}
void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.
    unblock();

    sched->lock.signal();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}
void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
    // FIXME: Need unwinding on windows. This will end up aborting
void
rust_task::conclude_failure() {
void
rust_task::fail_parent() {
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
void
rust_task::unsupervise()
{
    DLOG(sched, task,
         "task %s @0x%" PRIxPTR
         " disconnecting from supervisor %s @0x%" PRIxPTR,
         name, this, supervisor->name, supervisor);
    supervisor = NULL;
    propagate_failure = false;
}
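// Reads the frame_glue_fns pointer that is stored one word below the given
// frame pointer (this layout is assumed by the code below, not defined in
// this file).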
frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}
bool
rust_task::running() { return state == &sched->running_tasks; }

bool
rust_task::blocked() { return state == &sched->blocked_tasks; }

bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead() { return state == &sched->dead_tasks; }
void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz, bool is_gc)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p, bool is_gc)
{
    local_region.free(p);
}
void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
    bool unlock = false;
    if(!sched->lock.lock_held_by_current_thread()) {
        unlock = true;
        sched->lock.lock();
    }
    DLOG(sched, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, src->name, dst->name, state->name);
    I(sched, state == src);
    state = dst;
    if (unlock)
        sched->lock.unlock();
}
void
rust_task::block(rust_cond *on, const char* name) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(sched, cond == NULL, "Cannot block an already blocked task.");
    A(sched, on != NULL, "Cannot block on a NULL object.");

    transition(&sched->running_tasks, &sched->blocked_tasks);
    cond = on;
}
void
rust_task::wakeup(rust_cond *from) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    A(sched, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");

    transition(&sched->blocked_tasks, &sched->running_tasks);
    I(sched, cond == from);
    cond = NULL;

    sched->lock.signal();
}
void
rust_task::die() {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    transition(&sched->running_tasks, &sched->dead_tasks);
    sched->lock.signal();
}
void
rust_task::unblock() {
    if (blocked())
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
}
rust_task::get_crate_cache()
{
    DLOG(sched, task, "fetching cache for current crate");
    cache = sched->get_cache();
    return cache;
}
void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
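    // Skip the innermost frame (this function itself) and write the
    // symbolized trace to stderr (fd 2).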
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
}
bool rust_task::can_schedule(int id)
{
    return yield_timer.has_timed_out() &&
        (pinned_on == -1 || pinned_on == id);
}
void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}
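// Task pinning: pinned_on holds the id of the scheduler thread this task
// must run on, or -1 when the task may be scheduled anywhere (see
// can_schedule above).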
void rust_task::pin() {
    I(this->sched, running_on != -1);
    pinned_on = running_on;
}

void rust_task::pin(int id) {
    I(this->sched, running_on == -1);
    pinned_on = id;
}

void rust_task::unpin() {
    pinned_on = -1;
}
rust_port_id rust_task::register_port(rust_port *port) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    rust_port_id id = next_port_id++;
    port_table.put(id, port);
    return id;
}
void rust_task::release_port(rust_port_id id) {
    I(sched, lock.lock_held_by_current_thread());
    port_table.remove(id);
}
rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}
// Temporary routine to allow boxes on one task's shared heap to be
// reparented to another task.
rust_task::release_alloc(void *alloc) {
    I(sched, !lock.lock_held_by_current_thread());

    assert(local_allocs.find(alloc) != local_allocs.end());
    const type_desc *tydesc = local_allocs[alloc];
    local_allocs.erase(alloc);

    local_region.release_alloc(alloc);
// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
    I(sched, !lock.lock_held_by_current_thread());

    assert(local_allocs.find(alloc) == local_allocs.end());
    local_allocs[alloc] = tydesc;
    local_region.claim_alloc(alloc);
void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if(user.notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(user.notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(user.notify_chan.port);
            if (target_port) {
                task_notification msg;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}
extern "C" CDECL void
record_sp(void *limit);
void
rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
    A(sched, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
      "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk_seg->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);
    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
}
void
rust_task::del_stack() {
    del_stk(this, stk);
    record_stack_limit();
}
void
rust_task::record_stack_limit() {
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(sched,
      (uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
extern "C" uintptr_t get_sp();
static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // bounds.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}
/*
Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others,
and record the stack limit (which was not restored when unwinding
through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        del_stk(this, stk);
        A(sched, stk != NULL, "Failed to find the current stack");
    }
    record_stack_limit();
}
/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    return sp_in_stk_seg(get_sp(), stk);
}
void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}
// indent-tabs-mode: nil
// buffer-file-coding-system: utf-8-unix