#include "rust_internal.h"

#include "vg/valgrind.h"
#include "vg/memcheck.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
// RZ_WIN_64 is referenced below but was not defined here; the value is
// assumed to match RZ_WIN_32.
#define RZ_WIN_64   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)

// Select the red zone size for the target platform and word size.
#if defined(__linux__) && defined(__i386__)
#define RED_ZONE_SIZE RZ_LINUX_32
#elif defined(__linux__) && defined(__x86_64__)
#define RED_ZONE_SIZE RZ_LINUX_64
#elif defined(__APPLE__) && defined(__i386__)
#define RED_ZONE_SIZE RZ_MAC_32
#elif defined(__APPLE__) && defined(__x86_64__)
#define RED_ZONE_SIZE RZ_MAC_64
#elif defined(__WIN32__) && defined(__i386__)
#define RED_ZONE_SIZE RZ_WIN_32
#elif defined(__WIN32__) && defined(__x86_64__)
#define RED_ZONE_SIZE RZ_WIN_64
#elif defined(__FreeBSD__) && defined(__i386__)
#define RED_ZONE_SIZE RZ_BSD_32
#elif defined(__FreeBSD__) && defined(__x86_64__)
#define RED_ZONE_SIZE RZ_BSD_64
#endif
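
// Worked example of the red-zone arithmetic (illustrative figures, not
// from the original source): on 64-bit Linux RED_ZONE_SIZE is 2 KB, so a
// segment serving a 16 KB Rust stack is allocated as
// sizeof(stk_seg) + 16384 + 2048 bytes, the trailing 2 KB being the
// scratch space for the rt, compiler and dynamic linker described above.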

// A value that goes at the end of the stack and must not be touched
const uint8_t stack_canary[] = {0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD,
                                0xAB, 0xCD, 0xAB, 0xCD};

size_t g_custom_min_stack_size = 0;

static size_t
get_min_stk_size(size_t default_size) {
    if (g_custom_min_stack_size != 0) {
        return g_custom_min_stack_size;
    } else {
        return default_size;
    }
}

static size_t
get_next_stk_size(rust_scheduler *sched, rust_task *task,
                  size_t min, size_t current, size_t requested) {
    LOG(task, mem, "calculating new stack size for 0x%" PRIxPTR, task);
    LOG(task, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame
    size_t sz = std::max(min, requested);

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(task, mem, "next stack size: %" PRIdPTR, sz);
    I(sched, requested <= sz);
    return sz;
}
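
// A worked example of the sizing policy above (numbers are illustrative,
// not from the original source): with min = 4096, current = 4096 and
// requested = 100, sz = max(4096, 100) = 4096 and next = min(1MB, 8192)
// = 8192, so the new segment gets 8192 bytes. The doubling term keeps
// growing until current * 2 reaches the 1MB cap, after which only `min`
// or `requested` can push the size higher.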

// Task stack segments. Heap allocated and chained together.

static void
config_valgrind_stack(stk_seg *stk) {
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                stk->end);

    // Establish that the stack is accessible. This must be done when reusing
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data + sizeof(stack_canary),
                                sz - sizeof(stack_canary));
}
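
// (Editorial inference, not original text: the canary bytes at the base
// of the segment are excluded from the range so their valgrind state is
// left untouched, while everything above them becomes addressable but
// undefined, as a freshly allocated stack would be.)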

static void
unconfig_valgrind_stack(stk_seg *stk) {
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}

static void
free_stk(rust_task *task, stk_seg *stk) {
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}

static void
add_stack_canary(stk_seg *stk) {
    memcpy(stk->data, stack_canary, sizeof(stack_canary));
    assert(sizeof(stack_canary) == 16 && "Stack canary was not the expected size");
}

static void
check_stack_canary(stk_seg *stk) {
    assert(!memcmp(stk->data, stack_canary, sizeof(stack_canary))
           && "Somebody killed the canary");
}

static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
{
    LOG(task, mem, "creating new stack for task %" PRIxPTR, task);
    if (task->stk) {
        check_stack_canary(task->stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = get_min_stk_size(sched->min_stack_size);

    // Try to reuse an existing stack segment
    if (task->stk != NULL && task->stk->prev != NULL) {
        size_t prev_sz = (size_t)(task->stk->prev->end
                                  - (uintptr_t)&task->stk->prev->data[0]
                                  - RED_ZONE_SIZE);
        if (min_sz <= prev_sz && requested_sz <= prev_sz) {
            LOG(task, mem, "reusing existing stack");
            task->stk = task->stk->prev;
            A(sched, task->stk->prev == NULL, "Bogus stack ptr");
            config_valgrind_stack(task->stk);
            return task->stk;
        } else {
            LOG(task, mem, "existing stack is not big enough");
            free_stk(task, task->stk->prev);
            task->stk->prev = NULL;
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (task->stk != NULL) {
        current_sz = (size_t)(task->stk->end
                              - (uintptr_t)&task->stk->data[0]
                              - RED_ZONE_SIZE);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stk_size(sched, task, min_sz,
                                           current_sz, requested_sz);

    size_t sz = sizeof(stk_seg) + rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    add_stack_canary(stk);

    stk->next = task->stk;
    stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
    LOGPTR(task->sched, "stk end", stk->end);

    task->stk = stk;
    config_valgrind_stack(task->stk);
    return stk;
}

static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");
    check_stack_canary(stk);

    task->stk = stk->next;

    bool delete_stack = false;
    if (task->stk != NULL) {
        // Don't actually delete this stack. Save it to reuse later,
        // preventing the pathological case where we repeatedly reallocate
        // the stack for the next frame.
        task->stk->prev = stk;
    } else {
        // This is the last stack, delete it.
        delete_stack = true;
    }

    // Delete the previous previous stack
    if (stk->prev != NULL) {
        free_stk(task, stk->prev);
        stk->prev = NULL;
    }

    unconfig_valgrind_stack(stk);
    if (delete_stack) {
        free_stk(task, stk);
    }
}
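
// An illustrative scenario (not in the original source) for the caching
// above: a frame sitting near the boundary of its segment that calls a
// function needing a fresh segment in a loop would otherwise malloc and
// free a segment on every iteration. With the spare kept in prev, each
// of those calls takes the "reusing existing stack" path in new_stk.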

rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    // ...
    kernel(sched->kernel),
    // ...
    local_region(&sched->srv->local_region),
    // ...
    propagate_failure(true),
    // ...
{
    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);

    assert((void*)this == (void*)&user);

    user.notify_enabled = 0;

    stk = new_stk(sched, this, 0);
    user.rust_sp = stk->end;
    // ...
}

rust_task::~rust_task()
{
    I(sched, !sched->lock.lock_held_by_current_thread());
    I(sched, port_table.is_empty());
    DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    // ...

    kernel->release_task_id(user.id);

    /* FIXME: tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    I(sched, ref_count == 0); // ||
    //        (ref_count == 1 && this == sched->root_task));

    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    while (stk != NULL) {
        del_stk(this, stk);
    }
}

struct spawn_args {
    rust_task *task;
    rust_opaque_closure *envptr;
    void *argptr;
    spawn_fn f;
};

struct cleanup_args {
    spawn_args *spargs;
    bool failed;
};

void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool failed = args->failed;
    rust_task *task = a->task;

    // ...

    if (task->killed && !failed) {
        LOG(task, task, "Task killed during termination");
        failed = true;
    }

    task->notify(!failed);

    if (failed) {
#ifndef __WIN32__
        task->conclude_failure();
#else
        A(task->sched, false, "Shouldn't happen");
#endif
    }
}

extern "C" void upcall_shared_free(void* ptr);

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool failed = false;
    try {
        // The first argument is the return pointer; as the task fn
        // must have void return type, we can safely pass 0.
        a->f(0, a->envptr, a->argptr);
    } catch (rust_task *ex) {
        A(task->sched, ex == task,
          "Expected this task to be thrown for unwinding");
        failed = true;
    }

    rust_opaque_closure* env = a->envptr;
    if (env) {
        // free the environment.
        const type_desc *td = env->td;
        LOG(task, task, "Freeing env %p with td %p", env, td);
        td->drop_glue(NULL, NULL, td->first_param, env);
        upcall_shared_free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, failed};
    task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}

void
rust_task::start(spawn_fn spawnee_fn,
                 rust_opaque_closure *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
        spawnee_fn, envptr, argptr);

    I(sched, stk->data != NULL);

    char *sp = (char *)user.rust_sp;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = envptr;
    a->argptr = argptr;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void rust_task::start()
{
    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}

// Only run this on the rust stack
void
rust_task::yield(size_t time_in_us, bool *killed) {
    if (this->killed) {
        *killed = true;
    }

    yield_timer.reset_us(time_in_us);

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (this->killed) {
        *killed = true;
    }
}

void
rust_task::kill() {
    if (dead()) {
        // Task is already dead, can't kill what's already dead.
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.
    unblock();

    sched->lock.signal();

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
    // run_on_resume(rust_unwind_glue);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
    // Unblock the task so it can unwind.
    unblock();
#ifndef __WIN32__
    throw this;
#else
    // FIXME: Need unwinding on windows. This will end up aborting
    die();
    conclude_failure();
#endif
}

void
rust_task::conclude_failure() {
    // ...
}

void
rust_task::fail_parent() {
    if (supervisor) {
        DLOG(sched, task,
             "task %s @0x%" PRIxPTR
             " propagating failure to supervisor %s @0x%" PRIxPTR,
             name, this, supervisor->name, supervisor);
        supervisor->kill();
    }
    // FIXME: implement unwinding again.
    if (NULL == supervisor && propagate_failure)
        sched->fail();
}

void
rust_task::unsupervise()
{
    DLOG(sched, task,
         "task %s @0x%" PRIxPTR
         " disconnecting from supervisor %s @0x%" PRIxPTR,
         name, this, supervisor->name, supervisor);
    supervisor = NULL;
    propagate_failure = false;
}

frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}

bool
rust_task::running()
{
    return state == &sched->running_tasks;
}

bool
rust_task::blocked()
{
    return state == &sched->blocked_tasks;
}

bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead()
{
    return state == &sched->dead_tasks;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz, bool is_gc)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p, bool is_gc)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
    bool unlock = false;
    if(!sched->lock.lock_held_by_current_thread()) {
        unlock = true;
        sched->lock.lock();
    }
    DLOG(sched, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, src->name, dst->name, state->name);
    I(sched, state == src);
    src->remove(this);
    dst->append(this);
    state = dst;
    if(unlock)
        sched->lock.unlock();
}

void
rust_task::block(rust_cond *on, const char* name) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    A(sched, cond == NULL, "Cannot block an already blocked task.");
    A(sched, on != NULL, "Cannot block on a NULL object.");

    transition(&sched->running_tasks, &sched->blocked_tasks);
    cond = on;
    cond_name = name;
}

void
rust_task::wakeup(rust_cond *from) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    A(sched, cond != NULL, "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");

    transition(&sched->blocked_tasks, &sched->running_tasks);
    I(sched, cond == from);
    cond = NULL;
    cond_name = "none";

    sched->lock.signal();
}

void
rust_task::die() {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    transition(&sched->running_tasks, &sched->dead_tasks);
    sched->lock.signal();
}

void
rust_task::unblock() {
    if (blocked())
        // FIXME: What if another thread unblocks the task between when
        // we checked and here?
        wakeup(cond);
}

rust_crate_cache *
rust_task::get_crate_cache()
{
    if (!cache) {
        DLOG(sched, task, "fetching cache for current crate");
        cache = sched->get_cache();
    }
    return cache;
}

void
rust_task::backtrace() {
    if (!log_rt_backtrace) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

bool rust_task::can_schedule(int id)
{
    return yield_timer.has_timed_out() &&
        running_on == -1 &&
        (pinned_on == -1 || pinned_on == id);
}

void *
rust_task::calloc(size_t size, const char *tag) {
    return local_region.calloc(size, tag);
}

void rust_task::pin() {
    I(this->sched, running_on != -1);
    pinned_on = running_on;
}

void rust_task::pin(int id) {
    I(this->sched, running_on == -1);
    pinned_on = id;
}

void rust_task::unpin() {
    pinned_on = -1;
}

rust_port_id rust_task::register_port(rust_port *port) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);

    rust_port_id id = next_port_id++;
    port_table.put(id, port);
    return id;
}

void rust_task::release_port(rust_port_id id) {
    I(sched, lock.lock_held_by_current_thread());
    port_table.remove(id);
}

rust_port *rust_task::get_port_by_id(rust_port_id id) {
    I(sched, !lock.lock_held_by_current_thread());
    scoped_lock with(lock);
    rust_port *port = NULL;
    port_table.get(id, &port);
    if (port) {
        port->ref();
    }
    return port;
}

// Temporary routine to allow boxes on one task's shared heap to be reparented
// to another.
const type_desc *
rust_task::release_alloc(void *alloc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) != local_allocs.end());
    const type_desc *tydesc = local_allocs[alloc];
    local_allocs.erase(alloc);

    local_region.release_alloc(alloc);

    lock.unlock();
    return tydesc;
}

// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
void
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
    I(sched, !lock.lock_held_by_current_thread());
    lock.lock();

    assert(local_allocs.find(alloc) == local_allocs.end());
    local_allocs[alloc] = tydesc;
    local_region.claim_alloc(alloc);

    lock.unlock();
}

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code
    if(user.notify_enabled) {
        rust_task *target_task = kernel->get_task_by_id(user.notify_chan.task);
        if (target_task) {
            rust_port *target_port =
                target_task->get_port_by_id(user.notify_chan.port);
            if(target_port) {
                task_notification msg;
                msg.id = user.id;
                msg.result = !success ? tr_failure : tr_success;

                target_port->send(&msg);
                scoped_lock with(target_task->lock);
                target_port->deref();
            }
            target_task->deref();
        }
    }
}

extern "C" CDECL void
record_sp(void *limit);

void *
rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
    A(sched, stk_seg->end - (uintptr_t)stk_seg->data >= stk_sz + args_sz,
      "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk_seg->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);
    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}
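
// Sketch of the resulting layout (hypothetical numbers, and assuming
// align_down rounds to a 16-byte boundary): with args_sz = 32, new_sp
// lands at stk_seg->end - 32 rounded down for alignment, the bytes at
// [new_sp, new_sp + 32) hold the copied argument block, and the callee
// starts with its stack pointer at new_sp, growing downward from there.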

void
rust_task::del_stack() {
    del_stk(this, stk);
    record_stack_limit();
}

void
rust_task::record_stack_limit() {
    // The function prolog compares the amount of stack needed to the end of
    // the stack. As an optimization, when the frame size is less than 256
    // bytes, it will simply compare %esp to the stack limit instead of
    // subtracting the frame size. As a result we need our stack limit to
    // account for those 256 bytes.
    const unsigned LIMIT_OFFSET = 256;
    A(sched,
      (uintptr_t)stk->end - RED_ZONE_SIZE
      - (uintptr_t)stk->data >= LIMIT_OFFSET,
      "Stack size must be greater than LIMIT_OFFSET");
    record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
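
// Numeric sketch of the limit (assuming the 2 KB Linux red zone): with
// the segment's data at address D, the recorded limit is D + 2048 + 256.
// A frame smaller than 256 bytes whose prolog only compares %esp against
// the limit can never drop %esp below D + 2048, so the red zone stays
// free for the runtime's own small functions.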

extern "C" uintptr_t get_sp();

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others,
and record the stack limit (which was not restored when unwinding
through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    while (!sp_in_stk_seg(sp, stk)) {
        del_stk(this, stk);
        A(sched, stk != NULL, "Failed to find the current stack");
    }
    record_stack_limit();
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    return sp_in_stk_seg(get_sp(), stk);
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//