4 These are runtime functions that the compiler knows about and generates
5 calls to. They are called on the Rust stack and, in most cases, immediately
11 #include "rust_internal.h"
12 #include "rust_scheduler.h"
13 #include "rust_unwind.h"
14 #include "rust_upcall.h"
18 // This is called to ensure we've set up our rust stacks
19 // correctly. Strategically placed at entry to upcalls because they begin on
20 // the rust stack and happen frequently enough to catch most stack changes,
21 // including at the beginning of all landing pads.
22 // FIXME: Enable this for windows
// Stack-alignment self-check, wired up only on the listed platforms
// (see FIXME above re: Windows).
23 #if defined __linux__ || defined __APPLE__ || defined __FreeBSD__
// Forward declaration carrying aligned(16). NOTE(review): the return-type /
// storage-class line is elided in this view -- confirm against the full file.
25 check_stack_alignment() __attribute__ ((aligned (16)));
// Empty body: reaching the call at a correct alignment is the check itself.
// NOTE(review): presumably a non-trivial (asm) body exists on some
// configurations -- confirm.
27 static void check_stack_alignment() { }
// Run upcall body F with argument record A on the scheduler's C stack.
30 #define UPCALL_SWITCH_STACK(A, F) call_upcall_on_c_stack((void*)A, (void*)F)
// Common trampoline behind UPCALL_SWITCH_STACK: verify stack alignment,
// then invoke fn_ptr(args) on the scheduler's dedicated C-stack context.
// NOTE(review): the return-type line and closing brace are elided in this
// gapped view.
33 call_upcall_on_c_stack(void *args, void *fn_ptr) {
34 check_stack_alignment();
35 rust_task *task = rust_scheduler::get_task();
36 rust_scheduler *sched = task->sched;
37 sched->c_context.call_shim_on_c_stack(args, fn_ptr);
// Records a stack limit; defined elsewhere (presumably in asm -- confirm).
40 extern "C" void record_sp(void *limit);
42 /**********************************************************************
43 * Switches to the C-stack and invokes |fn_ptr|, passing |args| as argument.
44 * This is used by the C compiler to call native functions and by other
45 * upcalls to switch to the C stack. The return value is passed through a
46 * field in the args parameter. This upcall is specifically for switching
47 * to the shim functions generated by rustc.
// Entry point used by rustc-generated shims to run native code on the C
// stack. Visible flow: fetch the task, run the shim on sched->c_context,
// assert no exception escaped, then re-fetch the task and restore its
// stack limit. NOTE(review): several lines (record_sp call, the guard
// around the assert) are elided in this view.
50 upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
51 rust_task *task = rust_scheduler::get_task();
53 // FIXME (1226) - The shim functions generated by rustc contain the
54 // morestack prologue, so we need to let them know they have enough
58 rust_scheduler *sched = task->sched;
60 sched->c_context.call_shim_on_c_stack(args, fn_ptr);
// Runtime assert macro: native code must not unwind into the runtime.
62 A(sched, false, "Native code threw an exception");
// Re-read the task pointer: it may change across the call above.
65 task = rust_scheduler::get_task();
66 task->record_stack_limit();
69 /**********************************************************************/
// C-stack half of upcall_fail: logs the failing expression and its source
// location. NOTE(review): the line that actually fails the task is elided
// in this view -- confirm against the full file.
78 upcall_s_fail(s_fail_args *args) {
79 rust_task *task = rust_scheduler::get_task();
80 LOG_UPCALL_ENTRY(task);
81 LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR,
82 args->expr, args->file, args->line);
// Rust-stack entry point: pack the arguments and switch to the C stack.
// (The file/line parameter lines are elided in this view.)
87 upcall_fail(char const *expr,
90 s_fail_args args = {expr,file,line};
91 UPCALL_SWITCH_STACK(&args, upcall_s_fail);
94 /**********************************************************************
95 * Allocate an object in the task-local heap.
// Argument record for the malloc upcall; field list elided in this view.
98 struct s_malloc_args {
// C-stack half: allocate nbytes in the task-local heap (tagged with the
// type descriptor td), zero it, and register it for allocation tracking.
104 extern "C" CDECL void
105 upcall_s_malloc(s_malloc_args *args) {
106 rust_task *task = rust_scheduler::get_task();
107 LOG_UPCALL_ENTRY(task);
110 "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
111 args->nbytes, args->td);
116 // TODO: Maybe use dladdr here to find a more useful name for the
119 void *p = task->malloc(args->nbytes, "tdesc", args->td);
// Zero-fill the fresh allocation before handing it out.
120 memset(p, '\0', args->nbytes);
// Record the allocation and its type descriptor in the task's live map.
122 task->local_allocs[p] = args->td;
123 debug::maybe_track_origin(task, p);
126 "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
127 args->nbytes, args->td, (uintptr_t)p);
// Result travels back through the args record.
128 args->retval = (uintptr_t) p;
// Rust-stack entry point: returns the new allocation's address.
131 extern "C" CDECL uintptr_t
132 upcall_malloc(size_t nbytes, type_desc *td) {
133 s_malloc_args args = {0, nbytes, td};
134 UPCALL_SWITCH_STACK(&args, upcall_s_malloc);
138 /**********************************************************************
139 * Called whenever an object in the task-local heap is freed.
// C-stack half: unregister ptr from the task's live-allocation map, then
// release it via task->free, forwarding the is_gc flag.
147 extern "C" CDECL void
148 upcall_s_free(s_free_args *args) {
149 rust_task *task = rust_scheduler::get_task();
150 LOG_UPCALL_ENTRY(task);
152 rust_scheduler *sched = task->sched;
154 "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
155 (uintptr_t)args->ptr, args->is_gc);
// Drop tracking entries before the memory itself is freed.
157 task->local_allocs.erase(args->ptr);
158 debug::maybe_untrack_origin(task, args->ptr);
160 task->free(args->ptr, (bool) args->is_gc);
// Rust-stack entry point.
163 extern "C" CDECL void
164 upcall_free(void* ptr, uintptr_t is_gc) {
165 s_free_args args = {ptr, is_gc};
166 UPCALL_SWITCH_STACK(&args, upcall_s_free);
169 /**********************************************************************
170 * Allocate an object in the exchange heap.
// Argument record; field list elided in this view.
173 struct s_shared_malloc_args {
// C-stack half: allocate nbytes on the kernel (exchange) heap and zero it.
// Note: unlike task-local malloc, no local_allocs tracking entry is made.
179 extern "C" CDECL void
180 upcall_s_shared_malloc(s_shared_malloc_args *args) {
181 rust_task *task = rust_scheduler::get_task();
182 LOG_UPCALL_ENTRY(task);
185 "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
186 args->nbytes, args->td);
187 void *p = task->kernel->malloc(args->nbytes, "shared malloc");
188 memset(p, '\0', args->nbytes);
190 "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
192 args->nbytes, args->td, (uintptr_t)p);
193 args->retval = (uintptr_t) p;
// Rust-stack entry point: returns the new exchange-heap address.
196 extern "C" CDECL uintptr_t
197 upcall_shared_malloc(size_t nbytes, type_desc *td) {
198 s_shared_malloc_args args = {0, nbytes, td};
199 UPCALL_SWITCH_STACK(&args, upcall_s_shared_malloc);
203 /**********************************************************************
204 * Called whenever an object in the exchange heap is freed.
// Argument record for the shared-free upcall; fields elided in this view.
207 struct s_shared_free_args {
// C-stack half: release an exchange-heap block via the kernel allocator.
211 extern "C" CDECL void
212 upcall_s_shared_free(s_shared_free_args *args) {
213 rust_task *task = rust_scheduler::get_task();
214 LOG_UPCALL_ENTRY(task);
216 rust_scheduler *sched = task->sched;
218 "upcall shared_free(0x%" PRIxPTR")",
219 (uintptr_t)args->ptr);
220 task->kernel->free(args->ptr);
// Rust-stack entry point.
223 extern "C" CDECL void
224 upcall_shared_free(void* ptr) {
225 s_shared_free_args args = {ptr};
226 UPCALL_SWITCH_STACK(&args, upcall_s_shared_free);
229 /**********************************************************************
230 * Called to deep copy a type descriptor onto the exchange heap.
231 * Used when sending closures. It's possible that we should have
232 * a central hashtable to avoid copying and re-copying the same
// Argument record: input td plus an out-param (res) for the copy.
236 struct s_create_shared_type_desc_args {
// C-stack half: deep-copy a type descriptor onto the exchange heap.
// Allocates the descriptor plus an inline array of n_params+? pointer
// slots, then recurses to copy each referenced child descriptor.
241 void upcall_s_create_shared_type_desc(s_create_shared_type_desc_args *args)
243 rust_task *task = rust_scheduler::get_task();
244 LOG_UPCALL_ENTRY(task);
246 // Copy the main part of the type descriptor:
247 const type_desc *td = args->td;
248 int n_params = td->n_params;
249 size_t sz = sizeof(type_desc) + sizeof(type_desc*) * n_params;
250 args->res = (type_desc*) task->kernel->malloc(sz, "create_shared_type_desc");
251 memcpy(args->res, td, sizeof(type_desc));
253 // Recursively copy any referenced descriptors:
// NOTE(review): the branch structure here is elided in this view; the
// first_param = NULL assignment presumably belongs to a no-params arm.
255 args->res->first_param = NULL;
// descs[0] points back at the copy itself; children start at descs[1].
257 args->res->first_param = &args->res->descs[1];
258 args->res->descs[0] = args->res;
259 for (int i = 0; i < n_params; i++) {
260 s_create_shared_type_desc_args rec_args = {
261 td->first_param[i], 0
// Recurse directly (already on the C stack -- no stack switch needed).
263 upcall_s_create_shared_type_desc(&rec_args);
264 args->res->first_param[i] = rec_args.res;
// Rust-stack entry point: returns the exchange-heap copy.
269 extern "C" CDECL type_desc *
270 upcall_create_shared_type_desc(type_desc *td) {
271 s_create_shared_type_desc_args args = { td, 0 };
272 UPCALL_SWITCH_STACK(&args, upcall_s_create_shared_type_desc);
276 /**********************************************************************
277 * Called to deep free a type descriptor from the exchange heap.
// C-stack half: recursively free a descriptor tree produced by
// upcall_create_shared_type_desc -- children first, then the node itself.
280 void upcall_s_free_shared_type_desc(type_desc *td)
282 rust_task *task = rust_scheduler::get_task();
283 LOG_UPCALL_ENTRY(task);
285 // Recursively free any referenced descriptors:
286 for (unsigned i = 0; i < td->n_params; i++) {
287 upcall_s_free_shared_type_desc((type_desc*) td->first_param[i]);
290 task->kernel->free(td);
// Rust-stack entry point; td itself is passed as the args pointer, so no
// wrapper struct is needed here.
293 extern "C" CDECL void
294 upcall_free_shared_type_desc(type_desc *td) {
295 UPCALL_SWITCH_STACK(td, upcall_s_free_shared_type_desc);
298 /**********************************************************************
299 * Called to intern a task-local type descriptor into the hashtable
300 * associated with each scheduler.
// Argument record; the leading fields (retval/size/align/n_descs) are
// elided in this view.
303 struct s_get_type_desc_args {
308 type_desc const **descs;
309 uintptr_t n_obj_params;
// C-stack half: intern / look up a type descriptor in the task's crate
// cache, keyed by size, align and the parameter descriptors.
312 extern "C" CDECL void
313 upcall_s_get_type_desc(s_get_type_desc_args *args) {
314 rust_task *task = rust_scheduler::get_task();
315 LOG_UPCALL_ENTRY(task);
317 LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
318 ", align=%" PRIdPTR ", %" PRIdPTR " descs", args->size, args->align,
320 rust_crate_cache *cache = task->get_crate_cache();
321 type_desc *td = cache->get_type_desc(args->size, args->align, args->n_descs,
322 args->descs, args->n_obj_params);
323 LOG(task, cache, "returning tydesc 0x%" PRIxPTR, td);
// Rust-stack entry point. curr_crate is accepted but unused (legacy ABI);
// the size/align/n_descs parameter lines are elided in this view.
327 extern "C" CDECL type_desc *
328 upcall_get_type_desc(void *curr_crate, // ignored, legacy compat.
332 type_desc const **descs,
333 uintptr_t n_obj_params) {
334 s_get_type_desc_args args = {0,size,align,n_descs,descs,n_obj_params};
335 UPCALL_SWITCH_STACK(&args, upcall_s_get_type_desc);
339 /**********************************************************************/
// Argument record; fields (vp, new_sz) elided in this view.
341 struct s_vec_grow_args {
// C-stack half: ensure *vp has capacity for new_sz bytes, then set fill.
346 extern "C" CDECL void
347 upcall_s_vec_grow(s_vec_grow_args *args) {
348 rust_task *task = rust_scheduler::get_task();
349 LOG_UPCALL_ENTRY(task);
350 reserve_vec(task, args->vp, args->new_sz);
// Dereference vp only after reserve_vec, which may reallocate the vector.
351 (*args->vp)->fill = args->new_sz;
// Rust-stack entry point.
354 extern "C" CDECL void
355 upcall_vec_grow(rust_vec** vp, size_t new_sz) {
356 s_vec_grow_args args = {vp, new_sz};
357 UPCALL_SWITCH_STACK(&args, upcall_s_vec_grow);
360 // Copy elements from one vector to another,
361 // dealing with reference counts
363 copy_elements(rust_task *task, type_desc *elem_t,
364 void *pdst, void *psrc, size_t n) {
365 char *dst = (char *)pdst, *src = (char *)psrc;
366 memmove(dst, src, n);
368 // increment the refcount of each element of the vector
369 if (elem_t->take_glue) {
370 glue_fn *take_glue = elem_t->take_glue;
371 size_t elem_size = elem_t->size;
372 const type_desc **tydescs = elem_t->first_param;
373 for (char *p = dst; p < dst+n; p += elem_size) {
374 take_glue(NULL, NULL, tydescs, p);
379 /**********************************************************************/
// Argument record; fields (vp, elt_ty, elt) elided in this view.
381 struct s_vec_push_args {
// Append one element of elt_ty->size bytes to *vp, growing as needed.
387 extern "C" CDECL void
388 upcall_s_vec_push(s_vec_push_args *args) {
389 rust_task *task = rust_scheduler::get_task();
390 LOG_UPCALL_ENTRY(task);
391 size_t new_sz = (*args->vp)->fill + args->elt_ty->size;
392 reserve_vec(task, args->vp, new_sz);
// Re-read *vp: reserve_vec may have reallocated the vector.
393 rust_vec* v = *args->vp;
394 copy_elements(task, args->elt_ty, &v->data[0] + v->fill,
395 args->elt, args->elt_ty->size);
396 v->fill += args->elt_ty->size;
// Rust-stack entry point. Deliberately does NOT switch stacks (see the
// FIXME below); runs the push in place, then validates the stack canary.
399 extern "C" CDECL void
400 upcall_vec_push(rust_vec** vp, type_desc* elt_ty, void* elt) {
401 // FIXME: Switching stacks here causes crashes, probably
402 // because this upcall calls take glue
403 s_vec_push_args args = {vp, elt_ty, elt};
404 upcall_s_vec_push(&args);
406 // Do the stack check to make sure this op, on the Rust stack, is behaving
407 rust_task *task = rust_scheduler::get_task();
408 task->check_stack_canary();
411 /**********************************************************************
412 * Returns a token that can be used to deallocate all of the allocated space
413 * space in the dynamic stack.
// Argument record carrying the returned mark token.
416 struct s_dynastack_mark_args {
// C-stack half: capture a mark on the task's dynamic stack; the token can
// later be used to pop everything allocated after it (see header comment).
420 extern "C" CDECL void
421 upcall_s_dynastack_mark(s_dynastack_mark_args *args) {
422 args->retval = rust_scheduler::get_task()->dynastack.mark();
// Rust-stack entry point: returns the mark token.
425 extern "C" CDECL void *
426 upcall_dynastack_mark() {
427 s_dynastack_mark_args args = {0};
428 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_mark);
432 /**********************************************************************
433 * Allocates space in the dynamic stack and returns it.
435 * FIXME: Deprecated since dynamic stacks need to be self-describing for GC.
// Argument record; fields (retval, sz) elided in this view.
438 struct s_dynastack_alloc_args {
// C-stack half: allocate sz bytes on the dynamic stack, untyped (NULL td).
// NOTE(review): the ternary's condition line is elided in this view --
// presumably `sz != 0` guards the alloc, yielding NULL otherwise; confirm.
443 extern "C" CDECL void
444 upcall_s_dynastack_alloc(s_dynastack_alloc_args *args) {
445 size_t sz = args->sz;
447 rust_scheduler::get_task()->dynastack.alloc(sz, NULL) : NULL;
// Rust-stack entry point: returns the allocated address.
450 extern "C" CDECL void *
451 upcall_dynastack_alloc(size_t sz) {
452 s_dynastack_alloc_args args = {0, sz};
453 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_alloc);
457 /**********************************************************************
458 * Allocates space associated with a type descriptor in the dynamic stack and
// Argument record; fields (retval, sz, ty) elided in this view.
462 struct s_dynastack_alloc_2_args {
// C-stack half: like upcall_s_dynastack_alloc but records the type
// descriptor with the allocation (see the section header above).
// NOTE(review): the ternary's condition line is elided in this view.
468 extern "C" CDECL void
469 upcall_s_dynastack_alloc_2(s_dynastack_alloc_2_args *args) {
470 size_t sz = args->sz;
471 type_desc *ty = args->ty;
473 rust_scheduler::get_task()->dynastack.alloc(sz, ty) : NULL;
// Rust-stack entry point.
476 extern "C" CDECL void *
477 upcall_dynastack_alloc_2(size_t sz, type_desc *ty) {
478 s_dynastack_alloc_2_args args = {0, sz, ty};
479 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_alloc_2);
// Argument record for the dynastack free upcall.
483 struct s_dynastack_free_args {
// C-stack half: release space at ptr from the task's dynamic stack.
487 extern "C" CDECL void
488 upcall_s_dynastack_free(s_dynastack_free_args *args) {
489 return rust_scheduler::get_task()->dynastack.free(args->ptr);
492 /** Frees space in the dynamic stack. */
493 extern "C" CDECL void
494 upcall_dynastack_free(void *ptr) {
495 s_dynastack_free_args args = {ptr};
496 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_free);
// The C++ ABI personality routine (from libstdc++/libsupc++), declared
// here so the wrapper below can forward to it.
499 extern "C" _Unwind_Reason_Code
500 __gxx_personality_v0(int version,
501 _Unwind_Action actions,
502 uint64_t exception_class,
503 _Unwind_Exception *ue_header,
504 _Unwind_Context *context);
// Marshals the personality arguments plus the return code across the
// stack switch. NOTE(review): the `int version` field line is elided in
// this view.
506 struct s_rust_personality_args {
507 _Unwind_Reason_Code retval;
509 _Unwind_Action actions;
510 uint64_t exception_class;
511 _Unwind_Exception *ue_header;
512 _Unwind_Context *context;
// C-stack half: forward all five arguments to the C++ personality routine
// and stash its result. NOTE(review): some argument-forwarding lines are
// elided in this view.
516 upcall_s_rust_personality(s_rust_personality_args *args) {
517 args->retval = __gxx_personality_v0(args->version,
519 args->exception_class,
525 The exception handling personality function. It figures
526 out what to do with each landing pad. Just a stack-switching
527 wrapper around the C++ personality function.
529 extern "C" _Unwind_Reason_Code
530 upcall_rust_personality(int version,
531 _Unwind_Action actions,
532 uint64_t exception_class,
533 _Unwind_Exception *ue_header,
534 _Unwind_Context *context) {
535 s_rust_personality_args args = {(_Unwind_Reason_Code)0,
536 version, actions, exception_class,
538 rust_task *task = rust_scheduler::get_task();
540 // The personality function is run on the stack of the
541 // last function that threw or landed, which is going
542 // to sometimes be the C stack. If we're on the Rust stack
543 // then switch to the C stack.
// Switch only when actually on the Rust stack; otherwise call directly
// (we are already on a C stack).
545 if (task->on_rust_stack()) {
546 UPCALL_SWITCH_STACK(&args, upcall_s_rust_personality);
548 upcall_s_rust_personality(&args);
// shape_cmp_type compares two values of the given type; implemented by the
// shape machinery elsewhere. NOTE(review): the return-type line of this
// declaration is elided in this view.
554 shape_cmp_type(int8_t *result, const type_desc *tydesc,
555 const type_desc **subtydescs, uint8_t *data_0,
556 uint8_t *data_1, uint8_t cmp_type);
// Argument record; several field lines are elided in this view.
558 struct s_cmp_type_args {
560 const type_desc *tydesc;
561 const type_desc **subtydescs;
// C-stack half: forward everything to shape_cmp_type.
568 upcall_s_cmp_type(s_cmp_type_args *args) {
569 shape_cmp_type(args->result, args->tydesc, args->subtydescs,
570 args->data_0, args->data_1, args->cmp_type);
// Rust-stack entry point: the comparison outcome is written through
// *result by shape_cmp_type.
574 upcall_cmp_type(int8_t *result, const type_desc *tydesc,
575 const type_desc **subtydescs, uint8_t *data_0,
576 uint8_t *data_1, uint8_t cmp_type) {
577 s_cmp_type_args args = {result, tydesc, subtydescs, data_0, data_1, cmp_type};
578 UPCALL_SWITCH_STACK(&args, upcall_s_cmp_type);
// shape_log_type logs a value of the given type at the given level;
// implemented by the shape machinery elsewhere.
582 shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level);
// Argument record; the data/level field lines are elided in this view.
584 struct s_log_type_args {
585 const type_desc *tydesc;
// C-stack half: forward to shape_log_type.
591 upcall_s_log_type(s_log_type_args *args) {
592 shape_log_type(args->tydesc, args->data, args->level);
// Rust-stack entry point.
596 upcall_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
597 s_log_type_args args = {tydesc, data, level};
598 UPCALL_SWITCH_STACK(&args, upcall_s_log_type);
// Argument record; field lines (result, stk_sz, args_addr, args_sz) are
// elided in this view.
601 struct s_new_stack_args {
// C-stack half: grow the task's segmented stack via task->new_stack.
// NOTE(review): the trailing arguments of the new_stack call are elided in
// this view -- presumably args_addr/args_sz; confirm.
608 extern "C" CDECL void
609 upcall_s_new_stack(struct s_new_stack_args *args) {
610 rust_task *task = rust_scheduler::get_task();
611 args->result = task->new_stack(args->stk_sz,
// Rust-stack entry point: returns the new stack segment.
616 extern "C" CDECL void *
617 upcall_new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
618 s_new_stack_args args = {NULL, stk_sz, args_addr, args_sz};
619 UPCALL_SWITCH_STACK(&args, upcall_s_new_stack);
// C-stack half: tear down the task's current stack segment.
// NOTE(review): the line performing the actual deletion (presumably
// task->del_stack()) is elided in this view.
623 extern "C" CDECL void
624 upcall_s_del_stack() {
625 rust_task *task = rust_scheduler::get_task();
// Rust-stack entry point; its name line (presumably upcall_del_stack) is
// elided in this view. Passes NULL since no arguments are needed.
629 extern "C" CDECL void
631 UPCALL_SWITCH_STACK(NULL, upcall_s_del_stack);
634 // Landing pads need to call this to insert the
635 // correct limit into TLS.
636 // NB: This must run on the Rust stack because it
637 // needs to acquire the value of the stack pointer
// No stack switch here, per the NB above: delegates straight to the task.
638 extern "C" CDECL void
639 upcall_reset_stack_limit() {
640 rust_task *task = rust_scheduler::get_task();
641 task->reset_stack_limit();
648 // indent-tabs-mode: nil
650 // buffer-file-coding-system: utf-8-unix