These are runtime functions that the compiler knows about and generates
calls to. They are called on the Rust stack and, in most cases, immediately
switch to the C stack.
#include "rust_internal.h"
#include "rust_scheduler.h"
#include "rust_unwind.h"
#include "rust_upcall.h"
// This is called to ensure we've set up our rust stacks
// correctly. Strategically placed at entry to upcalls because they begin on
// the rust stack and happen frequently enough to catch most stack changes,
// including at the beginning of all landing pads.
// FIXME: Enable this for windows
#if defined __linux__ || defined __APPLE__ || defined __FreeBSD__
// Out-of-line implementation (presumably platform asm) — the aligned(16)
// attribute keeps the symbol itself 16-byte aligned.
extern "C" void
check_stack_alignment() __attribute__ ((aligned (16)));
#else
// No-op fallback on platforms where the alignment check is not implemented.
static void check_stack_alignment() { }
#endif
30 #define UPCALL_SWITCH_STACK(A, F) call_upcall_on_c_stack((void*)A, (void*)F)
33 call_upcall_on_c_stack(void *args, void *fn_ptr) {
34 check_stack_alignment();
35 rust_task *task = rust_scheduler::get_task();
36 rust_scheduler *sched = task->sched;
37 sched->c_context.call_shim_on_c_stack(args, fn_ptr);
40 extern "C" void record_sp(void *limit);
42 /**********************************************************************
43 * Switches to the C-stack and invokes |fn_ptr|, passing |args| as argument.
44 * This is used by the C compiler to call native functions and by other
45 * upcalls to switch to the C stack. The return value is passed through a
46 * field in the args parameter. This upcall is specifically for switching
47 * to the shim functions generated by rustc.
50 upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
51 rust_task *task = rust_scheduler::get_task();
53 // FIXME (1226) - The shim functions generated by rustc contain the
54 // morestack prologue, so we need to let them know they have enough
58 rust_scheduler *sched = task->sched;
60 sched->c_context.call_shim_on_c_stack(args, fn_ptr);
62 A(sched, false, "Native code threw an exception");
65 task = rust_scheduler::get_task();
66 task->record_stack_limit();
69 /**********************************************************************/
78 upcall_s_fail(s_fail_args *args) {
79 rust_task *task = rust_scheduler::get_task();
80 LOG_UPCALL_ENTRY(task);
81 LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR,
82 args->expr, args->file, args->line);
87 upcall_fail(char const *expr,
90 s_fail_args args = {expr,file,line};
91 UPCALL_SWITCH_STACK(&args, upcall_s_fail);
94 /**********************************************************************
95 * Allocate an object in the task-local heap.
98 struct s_malloc_args {
104 extern "C" CDECL void
105 upcall_s_malloc(s_malloc_args *args) {
106 rust_task *task = rust_scheduler::get_task();
107 LOG_UPCALL_ENTRY(task);
110 "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
111 args->nbytes, args->td);
116 // TODO: Maybe use dladdr here to find a more useful name for the
119 void *p = task->malloc(args->nbytes, "tdesc", args->td);
120 memset(p, '\0', args->nbytes);
122 task->local_allocs[p] = args->td;
123 debug::maybe_track_origin(task, p);
126 "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
127 args->nbytes, args->td, (uintptr_t)p);
128 args->retval = (uintptr_t) p;
131 extern "C" CDECL uintptr_t
132 upcall_malloc(size_t nbytes, type_desc *td) {
133 s_malloc_args args = {0, nbytes, td};
134 UPCALL_SWITCH_STACK(&args, upcall_s_malloc);
138 /**********************************************************************
139 * Called whenever an object in the task-local heap is freed.
147 extern "C" CDECL void
148 upcall_s_free(s_free_args *args) {
149 rust_task *task = rust_scheduler::get_task();
150 LOG_UPCALL_ENTRY(task);
152 rust_scheduler *sched = task->sched;
154 "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
155 (uintptr_t)args->ptr, args->is_gc);
157 task->local_allocs.erase(args->ptr);
158 debug::maybe_untrack_origin(task, args->ptr);
160 task->free(args->ptr, (bool) args->is_gc);
163 extern "C" CDECL void
164 upcall_free(void* ptr, uintptr_t is_gc) {
165 s_free_args args = {ptr, is_gc};
166 UPCALL_SWITCH_STACK(&args, upcall_s_free);
169 /**********************************************************************
170 * Allocate an object in the exchange heap.
173 struct s_shared_malloc_args {
179 extern "C" CDECL void
180 upcall_s_shared_malloc(s_shared_malloc_args *args) {
181 rust_task *task = rust_scheduler::get_task();
182 LOG_UPCALL_ENTRY(task);
185 "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
186 args->nbytes, args->td);
187 void *p = task->kernel->malloc(args->nbytes, "shared malloc");
188 memset(p, '\0', args->nbytes);
190 "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
192 args->nbytes, args->td, (uintptr_t)p);
193 args->retval = (uintptr_t) p;
196 extern "C" CDECL uintptr_t
197 upcall_shared_malloc(size_t nbytes, type_desc *td) {
198 s_shared_malloc_args args = {0, nbytes, td};
199 UPCALL_SWITCH_STACK(&args, upcall_s_shared_malloc);
203 /**********************************************************************
204 * Called whenever an object in the exchange heap is freed.
207 struct s_shared_free_args {
211 extern "C" CDECL void
212 upcall_s_shared_free(s_shared_free_args *args) {
213 rust_task *task = rust_scheduler::get_task();
214 LOG_UPCALL_ENTRY(task);
216 rust_scheduler *sched = task->sched;
218 "upcall shared_free(0x%" PRIxPTR")",
219 (uintptr_t)args->ptr);
220 task->kernel->free(args->ptr);
223 extern "C" CDECL void
224 upcall_shared_free(void* ptr) {
225 s_shared_free_args args = {ptr};
226 UPCALL_SWITCH_STACK(&args, upcall_s_shared_free);
229 /**********************************************************************
230 * Called to deep copy a type descriptor onto the exchange heap.
231 * Used when sending closures. It's possible that we should have
232 * a central hashtable to avoid copying and re-copying the same
236 struct s_create_shared_type_desc_args {
241 void upcall_s_create_shared_type_desc(s_create_shared_type_desc_args *args)
243 rust_task *task = rust_scheduler::get_task();
244 LOG_UPCALL_ENTRY(task);
246 // Copy the main part of the type descriptor:
247 const type_desc *td = args->td;
248 int n_params = td->n_params;
249 size_t sz = sizeof(type_desc) + sizeof(type_desc*) * n_params;
250 args->res = (type_desc*) task->kernel->malloc(sz, "create_shared_type_desc");
251 memcpy(args->res, td, sizeof(type_desc));
253 // Recursively copy any referenced descriptors:
255 args->res->first_param = NULL;
257 args->res->first_param = &args->res->descs[1];
258 args->res->descs[0] = args->res;
259 for (int i = 0; i < n_params; i++) {
260 s_create_shared_type_desc_args rec_args = {
261 td->first_param[i], 0
263 upcall_s_create_shared_type_desc(&rec_args);
264 args->res->first_param[i] = rec_args.res;
269 extern "C" CDECL type_desc *
270 upcall_create_shared_type_desc(type_desc *td) {
271 s_create_shared_type_desc_args args = { td, 0 };
272 UPCALL_SWITCH_STACK(&args, upcall_s_create_shared_type_desc);
276 /**********************************************************************
277 * Called to deep free a type descriptor from the exchange heap.
280 void upcall_s_free_shared_type_desc(type_desc *td)
282 rust_task *task = rust_scheduler::get_task();
283 LOG_UPCALL_ENTRY(task);
285 // Recursively free any referenced descriptors:
286 for (unsigned i = 0; i < td->n_params; i++) {
287 upcall_s_free_shared_type_desc((type_desc*) td->first_param[i]);
290 task->kernel->free(td);
293 extern "C" CDECL void
294 upcall_free_shared_type_desc(type_desc *td) {
295 UPCALL_SWITCH_STACK(td, upcall_s_free_shared_type_desc);
298 /**********************************************************************
299 * Called to intern a task-local type descriptor into the hashtable
300 * associated with each scheduler.
303 struct s_get_type_desc_args {
308 type_desc const **descs;
309 uintptr_t n_obj_params;
312 extern "C" CDECL void
313 upcall_s_get_type_desc(s_get_type_desc_args *args) {
314 rust_task *task = rust_scheduler::get_task();
315 LOG_UPCALL_ENTRY(task);
317 LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
318 ", align=%" PRIdPTR ", %" PRIdPTR " descs", args->size, args->align,
320 rust_crate_cache *cache = task->get_crate_cache();
321 type_desc *td = cache->get_type_desc(args->size, args->align, args->n_descs,
322 args->descs, args->n_obj_params);
323 LOG(task, cache, "returning tydesc 0x%" PRIxPTR, td);
327 extern "C" CDECL type_desc *
328 upcall_get_type_desc(void *curr_crate, // ignored, legacy compat.
332 type_desc const **descs,
333 uintptr_t n_obj_params) {
334 s_get_type_desc_args args = {0,size,align,n_descs,descs,n_obj_params};
335 UPCALL_SWITCH_STACK(&args, upcall_s_get_type_desc);
339 /**********************************************************************
340 * Called to get a heap-allocated dict. These are interned and kept
341 * around indefinitely
344 struct s_intern_dict_args {
350 extern "C" CDECL void
351 upcall_s_intern_dict(s_intern_dict_args *args) {
352 rust_task *task = rust_scheduler::get_task();
353 LOG_UPCALL_ENTRY(task);
354 rust_crate_cache *cache = task->get_crate_cache();
355 args->res = cache->get_dict(args->n_fields, args->dict);
358 extern "C" CDECL void**
359 upcall_intern_dict(size_t n_fields, void** dict) {
360 s_intern_dict_args args = {n_fields, dict, 0 };
361 UPCALL_SWITCH_STACK(&args, upcall_s_intern_dict);
365 /**********************************************************************/
367 struct s_vec_grow_args {
372 extern "C" CDECL void
373 upcall_s_vec_grow(s_vec_grow_args *args) {
374 rust_task *task = rust_scheduler::get_task();
375 LOG_UPCALL_ENTRY(task);
376 reserve_vec(task, args->vp, args->new_sz);
377 (*args->vp)->fill = args->new_sz;
380 extern "C" CDECL void
381 upcall_vec_grow(rust_vec** vp, size_t new_sz) {
382 s_vec_grow_args args = {vp, new_sz};
383 UPCALL_SWITCH_STACK(&args, upcall_s_vec_grow);
386 // Copy elements from one vector to another,
387 // dealing with reference counts
389 copy_elements(rust_task *task, type_desc *elem_t,
390 void *pdst, void *psrc, size_t n) {
391 char *dst = (char *)pdst, *src = (char *)psrc;
392 memmove(dst, src, n);
394 // increment the refcount of each element of the vector
395 if (elem_t->take_glue) {
396 glue_fn *take_glue = elem_t->take_glue;
397 size_t elem_size = elem_t->size;
398 const type_desc **tydescs = elem_t->first_param;
399 for (char *p = dst; p < dst+n; p += elem_size) {
400 take_glue(NULL, NULL, tydescs, p);
405 /**********************************************************************/
407 struct s_vec_push_args {
413 extern "C" CDECL void
414 upcall_s_vec_push(s_vec_push_args *args) {
415 rust_task *task = rust_scheduler::get_task();
416 LOG_UPCALL_ENTRY(task);
417 size_t new_sz = (*args->vp)->fill + args->elt_ty->size;
418 reserve_vec(task, args->vp, new_sz);
419 rust_vec* v = *args->vp;
420 copy_elements(task, args->elt_ty, &v->data[0] + v->fill,
421 args->elt, args->elt_ty->size);
422 v->fill += args->elt_ty->size;
425 extern "C" CDECL void
426 upcall_vec_push(rust_vec** vp, type_desc* elt_ty, void* elt) {
427 // FIXME: Switching stacks here causes crashes, probably
428 // because this upcall calls take glue
429 s_vec_push_args args = {vp, elt_ty, elt};
430 upcall_s_vec_push(&args);
432 // Do the stack check to make sure this op, on the Rust stack, is behaving
433 rust_task *task = rust_scheduler::get_task();
434 task->check_stack_canary();
437 /**********************************************************************
438 * Returns a token that can be used to deallocate all of the allocated space
439 * space in the dynamic stack.
442 struct s_dynastack_mark_args {
446 extern "C" CDECL void
447 upcall_s_dynastack_mark(s_dynastack_mark_args *args) {
448 args->retval = rust_scheduler::get_task()->dynastack.mark();
451 extern "C" CDECL void *
452 upcall_dynastack_mark() {
453 s_dynastack_mark_args args = {0};
454 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_mark);
458 /**********************************************************************
459 * Allocates space in the dynamic stack and returns it.
461 * FIXME: Deprecated since dynamic stacks need to be self-describing for GC.
464 struct s_dynastack_alloc_args {
469 extern "C" CDECL void
470 upcall_s_dynastack_alloc(s_dynastack_alloc_args *args) {
471 size_t sz = args->sz;
473 rust_scheduler::get_task()->dynastack.alloc(sz, NULL) : NULL;
476 extern "C" CDECL void *
477 upcall_dynastack_alloc(size_t sz) {
478 s_dynastack_alloc_args args = {0, sz};
479 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_alloc);
483 /**********************************************************************
484 * Allocates space associated with a type descriptor in the dynamic stack and
488 struct s_dynastack_alloc_2_args {
494 extern "C" CDECL void
495 upcall_s_dynastack_alloc_2(s_dynastack_alloc_2_args *args) {
496 size_t sz = args->sz;
497 type_desc *ty = args->ty;
499 rust_scheduler::get_task()->dynastack.alloc(sz, ty) : NULL;
502 extern "C" CDECL void *
503 upcall_dynastack_alloc_2(size_t sz, type_desc *ty) {
504 s_dynastack_alloc_2_args args = {0, sz, ty};
505 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_alloc_2);
509 struct s_dynastack_free_args {
513 extern "C" CDECL void
514 upcall_s_dynastack_free(s_dynastack_free_args *args) {
515 return rust_scheduler::get_task()->dynastack.free(args->ptr);
518 /** Frees space in the dynamic stack. */
519 extern "C" CDECL void
520 upcall_dynastack_free(void *ptr) {
521 s_dynastack_free_args args = {ptr};
522 UPCALL_SWITCH_STACK(&args, upcall_s_dynastack_free);
525 extern "C" _Unwind_Reason_Code
526 __gxx_personality_v0(int version,
527 _Unwind_Action actions,
528 uint64_t exception_class,
529 _Unwind_Exception *ue_header,
530 _Unwind_Context *context);
532 struct s_rust_personality_args {
533 _Unwind_Reason_Code retval;
535 _Unwind_Action actions;
536 uint64_t exception_class;
537 _Unwind_Exception *ue_header;
538 _Unwind_Context *context;
542 upcall_s_rust_personality(s_rust_personality_args *args) {
543 args->retval = __gxx_personality_v0(args->version,
545 args->exception_class,
551 The exception handling personality function. It figures
552 out what to do with each landing pad. Just a stack-switching
553 wrapper around the C++ personality function.
555 extern "C" _Unwind_Reason_Code
556 upcall_rust_personality(int version,
557 _Unwind_Action actions,
558 uint64_t exception_class,
559 _Unwind_Exception *ue_header,
560 _Unwind_Context *context) {
561 s_rust_personality_args args = {(_Unwind_Reason_Code)0,
562 version, actions, exception_class,
564 rust_task *task = rust_scheduler::get_task();
566 // The personality function is run on the stack of the
567 // last function that threw or landed, which is going
568 // to sometimes be the C stack. If we're on the Rust stack
569 // then switch to the C stack.
571 if (task->on_rust_stack()) {
572 UPCALL_SWITCH_STACK(&args, upcall_s_rust_personality);
574 upcall_s_rust_personality(&args);
580 shape_cmp_type(int8_t *result, const type_desc *tydesc,
581 const type_desc **subtydescs, uint8_t *data_0,
582 uint8_t *data_1, uint8_t cmp_type);
584 struct s_cmp_type_args {
586 const type_desc *tydesc;
587 const type_desc **subtydescs;
594 upcall_s_cmp_type(s_cmp_type_args *args) {
595 shape_cmp_type(args->result, args->tydesc, args->subtydescs,
596 args->data_0, args->data_1, args->cmp_type);
600 upcall_cmp_type(int8_t *result, const type_desc *tydesc,
601 const type_desc **subtydescs, uint8_t *data_0,
602 uint8_t *data_1, uint8_t cmp_type) {
603 s_cmp_type_args args = {result, tydesc, subtydescs, data_0, data_1, cmp_type};
604 UPCALL_SWITCH_STACK(&args, upcall_s_cmp_type);
608 shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level);
610 struct s_log_type_args {
611 const type_desc *tydesc;
617 upcall_s_log_type(s_log_type_args *args) {
618 shape_log_type(args->tydesc, args->data, args->level);
622 upcall_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
623 s_log_type_args args = {tydesc, data, level};
624 UPCALL_SWITCH_STACK(&args, upcall_s_log_type);
627 struct s_new_stack_args {
634 extern "C" CDECL void
635 upcall_s_new_stack(struct s_new_stack_args *args) {
636 rust_task *task = rust_scheduler::get_task();
637 args->result = task->new_stack(args->stk_sz,
642 extern "C" CDECL void *
643 upcall_new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
644 s_new_stack_args args = {NULL, stk_sz, args_addr, args_sz};
645 UPCALL_SWITCH_STACK(&args, upcall_s_new_stack);
649 extern "C" CDECL void
650 upcall_s_del_stack() {
651 rust_task *task = rust_scheduler::get_task();
655 extern "C" CDECL void
657 UPCALL_SWITCH_STACK(NULL, upcall_s_del_stack);
660 // Landing pads need to call this to insert the
661 // correct limit into TLS.
662 // NB: This must run on the Rust stack because it
663 // needs to acquire the value of the stack pointer
664 extern "C" CDECL void
665 upcall_reset_stack_limit() {
666 rust_task *task = rust_scheduler::get_task();
667 task->reset_stack_limit();
// indent-tabs-mode: nil
// buffer-file-coding-system: utf-8-unix