1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 use back::{link, abi};
13 use lib::llvm::{Pointer, ValueRef};
15 use middle::trans::base::*;
16 use middle::trans::cabi;
17 use middle::trans::cabi_x86;
18 use middle::trans::cabi_x86_64;
19 use middle::trans::cabi_arm;
20 use middle::trans::cabi_mips;
21 use middle::trans::build::*;
22 use middle::trans::callee::*;
23 use middle::trans::common::*;
24 use middle::trans::datum::*;
25 use middle::trans::expr::Ignore;
26 use middle::trans::machine::llsize_of;
27 use middle::trans::glue;
28 use middle::trans::machine;
29 use middle::trans::type_of::*;
30 use middle::trans::type_of;
32 use middle::ty::FnSig;
33 use util::ppaux::ty_to_str;
37 use syntax::codemap::span;
38 use syntax::{ast, ast_util};
39 use syntax::{attr, ast_map};
41 use syntax::parse::token::special_idents;
42 use syntax::parse::token;
43 use syntax::abi::{X86, X86_64, Arm, Mips};
44 use syntax::abi::{RustIntrinsic, Rust, Stdcall, Fastcall,
46 use middle::trans::type_::Type;
// Selects the platform-specific C ABI classifier for the compilation
// target's architecture; only x86 needs the crate context.
// NOTE(review): lines are elided in this chunk — the match's closing
// brace (and any catch-all arm, if one existed) is not visible here.
48 fn abi_info(ccx: @mut CrateContext) -> @cabi::ABIInfo {
49 return match ccx.sess.targ_cfg.arch {
50 X86 => cabi_x86::abi_info(ccx),
51 X86_64 => cabi_x86_64::abi_info(),
52 Arm => cabi_arm::abi_info(),
53 Mips => cabi_mips::abi_info(),
// Computes the symbol name used to link a foreign item: honours a
// `#[link_name = "..."]` attribute when present, otherwise falls back
// to the item's own identifier.
// NOTE(review): the `Some(...)` arm and closing braces are elided from
// this chunk.
57 pub fn link_name(ccx: &CrateContext, i: &ast::foreign_item) -> @str {
58 match attr::first_attr_value_str_by_name(i.attrs, "link_name") {
59 None => ccx.sess.str_of(i.ident),
67 /// LLVM types that will appear on the foreign function
70 /// True if there is a return value (not bottom, not unit)
73 /// Type of the struct we will use to shuttle values back and forth.
74 /// This is always derived from the llsig.
77 /// Type of the shim function itself.
80 /// Adapter object for handling native ABI rules (trust me, you
81 /// don't want to know).
85 struct LlvmSignature {
// Lowers a Rust `FnSig` into the LLVM argument/return types a foreign
// function actually uses (no mode handling, no by-pointer aggregates).
// NOTE(review): the struct-literal result and closing braces are elided
// from this chunk; `sret` is set when the output is not immediate.
91 fn foreign_signature(ccx: &mut CrateContext, fn_sig: &ty::FnSig)
94 * The ForeignSignature is the LLVM types of the arguments/return type
95 * of a function. Note that these LLVM types are not quite the same
96 * as the LLVM types would be for a native Rust function because foreign
97 * functions just plain ignore modes. They also don't pass aggregate
98 * values by pointer like we do.
101 let llarg_tys = fn_sig.inputs.map(|arg_ty| type_of(ccx, *arg_ty));
102 let llret_ty = type_of::type_of(ccx, fn_sig.output);
104 llarg_tys: llarg_tys,
106 sret: !ty::type_is_immediate(ccx.tcx, fn_sig.output),
// Builds the ShimTypes bundle for a foreign fn `id`: its LLVM signature,
// the struct type used to shuttle args + a return-slot pointer between
// wrapper and shim, whether a real return value exists, the ABI-computed
// fn type, and the shim function's own type.
// NOTE(review): the trailing struct-literal fields and closing braces
// are elided from this chunk.
110 fn shim_types(ccx: @mut CrateContext, id: ast::node_id) -> ShimTypes {
111 let fn_sig = match ty::get(ty::node_id_to_type(ccx.tcx, id)).sty {
112 ty::ty_bare_fn(ref fn_ty) => fn_ty.sig.clone(),
// Only bare fns can be foreign items; anything else is a compiler bug.
113 _ => ccx.sess.bug("c_arg_and_ret_lltys called on non-function type")
115 let llsig = foreign_signature(ccx, &fn_sig);
// Bundle layout: all argument types followed by a pointer to the
// return slot (see the big comment further down describing W and S).
116 let bundle_ty = Type::struct_(llsig.llarg_tys + &[llsig.llret_ty.ptr_to()], false);
// A return is "defined" unless the output type is bottom or unit.
117 let ret_def = !ty::type_is_bot(fn_sig.output) &&
118 !ty::type_is_nil(fn_sig.output);
119 let fn_ty = abi_info(ccx).compute_info(llsig.llarg_tys, llsig.llret_ty, ret_def);
124 bundle_ty: bundle_ty,
// The shim takes only a pointer to the bundle and returns void.
125 shim_fn_ty: Type::func([bundle_ty.ptr_to()], &Type::void()),
// Callback used by `build_shim_fn_` to unpack the argument bundle into
// the list of values passed to the base foreign function.
130 type shim_arg_builder<'self> =
131 &'self fn(bcx: block, tys: &ShimTypes,
132 llargbundle: ValueRef) -> ~[ValueRef];
// Callback used by `build_shim_fn_` to store the foreign call's return
// value back through the bundle. NOTE(review): the rest of the alias's
// parameter list is elided from this chunk.
134 type shim_ret_builder<'self> =
135 &'self fn(bcx: block, tys: &ShimTypes,
136 llargbundle: ValueRef,
// Shared machinery for emitting a shim function S: declares the shim,
// unpacks the arg bundle via `arg_builder`, calls the base function
// `llbasefn` with calling convention `cc`, and stores the result via
// `ret_builder`. NOTE(review): some parameters, the epilogue after the
// `fcx.llreturn` match, and closing braces are elided from this chunk.
139 fn build_shim_fn_(ccx: @mut CrateContext,
143 cc: lib::llvm::CallConv,
144 arg_builder: shim_arg_builder,
145 ret_builder: shim_ret_builder)
147 let llshimfn = decl_internal_cdecl_fn(
148 ccx.llmod, shim_name, tys.shim_fn_ty);
150 // Declare the body of the shim function:
151 let fcx = new_fn_ctxt(ccx, ~[], llshimfn, tys.fn_sig.output, None);
152 let bcx = fcx.entry_bcx.get();
// The shim's sole parameter is the pointer to the argument bundle.
154 let llargbundle = get_param(llshimfn, 0u);
155 let llargvals = arg_builder(bcx, tys, llargbundle);
157 // Create the call itself and store the return value:
158 let llretval = CallWithConv(bcx, llbasefn, llargvals, cc);
160 ret_builder(bcx, tys, llargbundle, llretval);
162 // Don't finish up the function in the usual way, because this doesn't
163 // follow the normal Rust calling conventions.
164 let ret_cx = match fcx.llreturn {
165 Some(llreturn) => raw_block(fcx, false, llreturn),
// Callback used by `build_wrap_fn_` to copy the wrapper's incoming
// parameters into the argument bundle. NOTE(review): middle parameters
// of both aliases are elided from this chunk.
174 type wrap_arg_builder<'self> = &'self fn(bcx: block,
177 llargbundle: ValueRef);
// Callback used by `build_wrap_fn_` to move the result out of the
// argument bundle after the shim upcall returns.
179 type wrap_ret_builder<'self> = &'self fn(bcx: block,
181 llargbundle: ValueRef);
// Shared machinery for emitting a wrapper function W: allocates the
// argument bundle on the stack, fills it via `arg_builder`, invokes the
// shim through `shim_upcall` (which switches stacks), and extracts the
// result via `ret_builder`. When `needs_c_return` is set the wrapper
// must return by value per the C ABI rather than through a Rust-style
// out-pointer. NOTE(review): some parameters, the `None` arm of the
// `fcx.llreturn` match, and closing braces are elided from this chunk.
183 fn build_wrap_fn_(ccx: @mut CrateContext,
187 shim_upcall: ValueRef,
188 needs_c_return: bool,
189 arg_builder: wrap_arg_builder,
190 ret_builder: wrap_ret_builder) {
191 let _icx = push_ctxt("foreign::build_wrap_fn_");
192 let fcx = new_fn_ctxt(ccx, ~[], llwrapfn, tys.fn_sig.output, None);
193 let bcx = fcx.entry_bcx.get();
195 // Patch up the return type if it's not immediate and we're returning via
197 if needs_c_return && !ty::type_is_immediate(ccx.tcx, tys.fn_sig.output) {
198 let lloutputtype = type_of::type_of(fcx.ccx, tys.fn_sig.output);
// Redirect the return slot to a local alloca so we can load and
// return it by value at the end.
199 fcx.llretptr = Some(alloca(bcx, lloutputtype, ""));
202 // Allocate the struct and write the arguments into it.
203 let llargbundle = alloca(bcx, tys.bundle_ty, "__llargbundle");
204 arg_builder(bcx, tys, llwrapfn, llargbundle);
206 // Create call itself.
// The upcall takes untyped (i8*) pointers to the shim fn and bundle.
207 let llshimfnptr = PointerCast(bcx, llshimfn, Type::i8p());
208 let llrawargbundle = PointerCast(bcx, llargbundle, Type::i8p());
209 Call(bcx, shim_upcall, [llrawargbundle, llshimfnptr]);
210 ret_builder(bcx, tys, llargbundle);
212 // Then return according to the C ABI.
213 let return_context = match fcx.llreturn {
214 Some(llreturn) => raw_block(fcx, false, llreturn),
// Inspect the wrapper's declared LLVM return type to decide between
// `ret void` and returning the value loaded from the return slot.
218 let llfunctiontype = val_ty(llwrapfn);
219 let llfunctiontype = llfunctiontype.element_type();
220 let return_type = llfunctiontype.return_type();
221 if return_type.kind() == ::lib::llvm::Void {
222 // XXX: This might be wrong if there are any functions for which
223 // the C ABI specifies a void output pointer and the Rust ABI
225 RetVoid(return_context);
227 // Cast if we have to...
228 // XXX: This is ugly.
229 let llretptr = BitCast(return_context, fcx.llretptr.get(), return_type.ptr_to());
230 Ret(return_context, Load(return_context, llretptr));
235 // For each foreign function F, we generate a wrapper function W and a shim
236 // function S that all work together. The wrapper function W is the function
237 // that other rust code actually invokes. Its job is to marshall the
238 // arguments into a struct. It then uses a small bit of assembly to switch
239 // over to the C stack and invoke the shim function. The shim function S then
240 // unpacks the arguments from the struct and invokes the actual function F
241 // according to its specified calling convention.
243 // Example: Given a foreign c-stack function F(x: X, y: Y) -> Z,
244 // we generate a wrapper function W that looks like:
246 // void W(Z* dest, void *env, X x, Y y) {
247 // struct { X x; Y y; Z *z; } args = { x, y, z };
248 // call_on_c_stack_shim(S, &args);
251 // The shim function S then looks something like:
253 // void S(struct { X x; Y y; Z *z; } *args) {
254 // *args->z = F(args->x, args->y);
257 // However, if the return type of F is dynamically sized or of aggregate type,
258 // the shim function looks like:
260 // void S(struct { X x; Y y; Z *z; } *args) {
261 // F(args->z, args->x, args->y);
264 // Note: on i386, the layout of the args struct is generally the same
265 // as the desired layout of the arguments on the C stack. Therefore,
266 // we could use upcall_alloc_c_stack() to allocate the `args`
267 // structure and switch the stack pointer appropriately to avoid a
268 // round of copies. (In fact, the shim function itself is
269 // unnecessary). We used to do this, in fact, and will perhaps do so
// Translates an `extern { ... }` block: picks the ABI for the target
// architecture, then emits a wrapper+shim pair (or an error) for each
// foreign fn according to its calling convention, and records symbol
// names for foreign statics. NOTE(review): this chunk elides the match
// arms that distinguish the ABIs (Stdcall/Fastcall/C/etc.), several
// error paths, and closing braces — the surviving `build_foreign_fn`
// calls below belong to different, now-invisible arms.
271 pub fn trans_foreign_mod(ccx: @mut CrateContext,
272 path: &ast_map::path,
273 foreign_mod: &ast::foreign_mod) {
274 let _icx = push_ctxt("foreign::trans_foreign_mod");
276 let arch = ccx.sess.targ_cfg.arch;
277 let abi = match foreign_mod.abis.for_arch(arch) {
280 fmt!("No suitable ABI for target architecture \
282 ast_map::path_to_str(*path,
289 for foreign_mod.items.iter().advance |&foreign_item| {
290 match foreign_item.node {
291 ast::foreign_item_fn(*) => {
292 let id = foreign_item.id;
295 // Intrinsics are emitted by monomorphic fn
299 // FIXME(#3678) Implement linking to foreign fns with Rust ABI
301 fmt!("Foreign functions with Rust ABI"));
// Presumably the Stdcall arm — uses the x86 stdcall convention.
305 build_foreign_fn(ccx, id, foreign_item,
306 lib::llvm::X86StdcallCallConv);
// Presumably the Fastcall arm.
310 build_foreign_fn(ccx, id, foreign_item,
311 lib::llvm::X86FastcallCallConv);
315 // FIXME(#3678) should really be more specific
316 build_foreign_fn(ccx, id, foreign_item,
317 lib::llvm::CCallConv);
321 // FIXME(#3678) should really be more specific
322 build_foreign_fn(ccx, id, foreign_item,
323 lib::llvm::CCallConv);
327 build_foreign_fn(ccx, id, foreign_item,
328 lib::llvm::CCallConv);
332 ast::foreign_item_static(*) => {
// Foreign statics need no code; just record the linkage symbol.
333 let ident = token::ident_to_str(&foreign_item.ident);
334 ccx.item_symbols.insert(foreign_item.id, /* bad */ident.to_owned());
// Emits the code for one foreign fn. Three strategies, chosen by
// attribute: `#[rust_stack]` calls the C fn directly on the Rust stack,
// `#[fast_ffi]` calls it directly with a fixed stack segment, and the
// default builds the shim + stack-switching wrapper pair.
// NOTE(review): a parameter, part of the `build_direct_fn` call, and
// closing braces are elided from this chunk.
339 fn build_foreign_fn(ccx: @mut CrateContext,
341 foreign_item: @ast::foreign_item,
342 cc: lib::llvm::CallConv) {
343 let llwrapfn = get_item_val(ccx, id);
344 let tys = shim_types(ccx, id);
345 if attr::contains_name(foreign_item.attrs, "rust_stack") {
346 build_direct_fn(ccx, llwrapfn, foreign_item,
348 } else if attr::contains_name(foreign_item.attrs, "fast_ffi") {
349 build_fast_ffi_fn(ccx, llwrapfn, foreign_item, &tys, cc);
351 let llshimfn = build_shim_fn(ccx, foreign_item, &tys, cc);
352 build_wrap_fn(ccx, &tys, llshimfn, llwrapfn);
// Builds the shim S for calling *into* foreign code: unpacks the arg
// bundle per the foreign ABI and stores F's result back through the
// bundle's return-slot pointer. NOTE(review): parameters, the
// `build_shim_ret` argument list, the final `build_shim_fn_` call, and
// closing braces are elided from this chunk.
356 fn build_shim_fn(ccx: @mut CrateContext,
357 foreign_item: &ast::foreign_item,
359 cc: lib::llvm::CallConv)
363 * Build S, from comment above:
365 * void S(struct { X x; Y y; Z *z; } *args) {
366 * F(args->z, args->x, args->y);
370 let _icx = push_ctxt("foreign::build_shim_fn");
// Arg builder: let the ABI adapter pull the call arguments out of the
// bundle in whatever form the foreign convention requires.
372 fn build_args(bcx: block, tys: &ShimTypes, llargbundle: ValueRef)
374 let _icx = push_ctxt("foreign::shim::build_args");
375 tys.fn_ty.build_shim_args(bcx, tys.llsig.llarg_tys, llargbundle)
// Ret builder: let the ABI adapter write the raw return value back.
378 fn build_ret(bcx: block,
380 llargbundle: ValueRef,
381 llretval: ValueRef) {
382 let _icx = push_ctxt("foreign::shim::build_ret");
383 tys.fn_ty.build_shim_ret(bcx,
// Name the shim after the linked symbol so it is recognizable in IR.
390 let lname = link_name(ccx, foreign_item);
391 let llbasefn = base_fn(ccx, lname, tys, cc);
392 // Name the shim function
393 let shim_name = fmt!("%s__c_stack_shim", lname);
// Declares the LLVM prototype for the base foreign function F under
// symbol `lname` with calling convention `cc`; the ABI adapter supplies
// the lowered fn type. NOTE(review): two parameters and closing braces
// are elided from this chunk.
403 fn base_fn(ccx: &CrateContext,
406 cc: lib::llvm::CallConv)
408 // Declare the "prototype" for the base function F:
409 do tys.fn_ty.decl_fn |fnty| {
410 decl_fn(ccx.llmod, lname, cc, fnty)
414 // FIXME (#2535): this is very shaky and probably gets ABIs wrong all
// `#[rust_stack]` path: call the foreign fn directly on the Rust stack
// with no shim/wrapper, forwarding parameters one-to-one and storing a
// non-unit result into the Rust return slot. NOTE(review): parameters
// and the function epilogue/closing braces are elided from this chunk.
416 fn build_direct_fn(ccx: @mut CrateContext,
418 item: &ast::foreign_item,
420 cc: lib::llvm::CallConv) {
421 debug!("build_direct_fn(%s)", link_name(ccx, item));
423 let fcx = new_fn_ctxt(ccx, ~[], decl, tys.fn_sig.output, None);
424 let bcx = fcx.entry_bcx.get();
425 let llbasefn = base_fn(ccx, link_name(ccx, item), tys, cc);
426 let ty = ty::lookup_item_type(ccx.tcx,
427 ast_util::local_def(item.id)).ty;
428 let ret_ty = ty::ty_fn_ret(ty);
// Forward each incoming Rust parameter positionally to the C call.
429 let args = vec::from_fn(ty::ty_fn_args(ty).len(), |i| {
430 get_param(decl, fcx.arg_pos(i))
432 let retval = Call(bcx, llbasefn, args);
433 if !ty::type_is_nil(ret_ty) && !ty::type_is_bot(ret_ty) {
434 Store(bcx, retval, fcx.llretptr.get());
439 // FIXME (#2535): this is very shaky and probably gets ABIs wrong all
// `#[fast_ffi]` path: like `build_direct_fn`, but marks the wrapper
// no-inline and with a fixed stack segment so the direct C call is safe
// without the stack-switching upcall. NOTE(review): parameters and the
// epilogue/closing braces are elided from this chunk.
441 fn build_fast_ffi_fn(ccx: @mut CrateContext,
443 item: &ast::foreign_item,
445 cc: lib::llvm::CallConv) {
446 debug!("build_fast_ffi_fn(%s)", link_name(ccx, item));
448 let fcx = new_fn_ctxt(ccx, ~[], decl, tys.fn_sig.output, None);
449 let bcx = fcx.entry_bcx.get();
450 let llbasefn = base_fn(ccx, link_name(ccx, item), tys, cc);
// Must not be inlined into callers that lack the fixed-stack marker.
451 set_no_inline(fcx.llfn);
452 set_fixed_stack_segment(fcx.llfn);
453 let ty = ty::lookup_item_type(ccx.tcx,
454 ast_util::local_def(item.id)).ty;
455 let ret_ty = ty::ty_fn_ret(ty);
456 let args = vec::from_fn(ty::ty_fn_args(ty).len(), |i| {
457 get_param(decl, fcx.arg_pos(i))
459 let retval = Call(bcx, llbasefn, args);
460 if !ty::type_is_nil(ret_ty) && !ty::type_is_bot(ret_ty) {
461 Store(bcx, retval, fcx.llretptr.get());
// Builds the wrapper W for calling *into* foreign code: marshals the
// Rust-side parameters into the arg bundle, invokes the shim via the
// `call_shim_on_c_stack` upcall, then copies the result from the bundle
// back to the Rust return pointer. NOTE(review): parameters, the
// `build_wrap_fn_` call site, and closing braces are elided from this
// chunk.
466 fn build_wrap_fn(ccx: @mut CrateContext,
469 llwrapfn: ValueRef) {
472 * Build W, from comment above:
474 * void W(Z* dest, void *env, X x, Y y) {
475 * struct { X x; Y y; Z *z; } args = { x, y, z };
476 * call_on_c_stack_shim(S, &args);
479 * One thing we have to be very careful of is to
480 * account for the Rust modes.
483 let _icx = push_ctxt("foreign::build_wrap_fn");
489 ccx.upcalls.call_shim_on_c_stack,
// Arg builder: copy each incoming Rust parameter into bundle slot i,
// dereferencing parameters that Rust passes indirectly; the return
// slot pointer goes in the final slot n.
494 fn build_args(bcx: block,
497 llargbundle: ValueRef) {
498 let _icx = push_ctxt("foreign::wrap::build_args");
500 let n = tys.llsig.llarg_tys.len();
501 for uint::range(0, n) |i| {
502 let arg_i = bcx.fcx.arg_pos(i);
503 let mut llargval = get_param(llwrapfn, arg_i);
505 // In some cases, Rust will pass a pointer which the
506 // native C type doesn't have. In that case, just
507 // load the value from the pointer.
508 if type_of::arg_is_indirect(ccx, &tys.fn_sig.inputs[i]) {
509 llargval = Load(bcx, llargval);
512 store_inbounds(bcx, llargval, llargbundle, [0u, i]);
515 for bcx.fcx.llretptr.iter().advance |&retptr| {
516 store_inbounds(bcx, retptr, llargbundle, [0u, n]);
// Ret builder: after the shim returns, load the value written through
// the bundle's return-slot pointer and store it to the Rust retptr.
520 fn build_ret(bcx: block,
521 shim_types: &ShimTypes,
522 llargbundle: ValueRef) {
523 let _icx = push_ctxt("foreign::wrap::build_ret");
524 let arg_count = shim_types.fn_sig.inputs.len();
525 for bcx.fcx.llretptr.iter().advance |&retptr| {
526 let llretptr = load_inbounds(bcx, llargbundle, [0, arg_count]);
527 Store(bcx, Load(bcx, llretptr), retptr);
// Translates a call to a compiler intrinsic by emitting its body
// directly as LLVM IR: atomics, memcpy/memset, size/align queries,
// transmute, move_val, type descriptors, float/integer LLVM intrinsics,
// and a few runtime hooks. Dispatch is by the intrinsic's name string.
// NOTE(review): this chunk is heavily elided — many match arms, their
// name patterns, and most closing braces are missing, so the arms below
// are fragments of a larger `match name { ... }`.
533 pub fn trans_intrinsic(ccx: @mut CrateContext,
535 item: &ast::foreign_item,
537 substs: @param_substs,
538 attributes: &[ast::Attribute],
539 ref_id: Option<ast::node_id>) {
540 debug!("trans_intrinsic(item.ident=%s)", ccx.sess.str_of(item.ident));
// Helper: forward up to 4 params straight to a named LLVM intrinsic
// and return its result.
542 fn simple_llvm_intrinsic(bcx: block, name: &'static str, num_args: uint) {
543 assert!(num_args <= 4);
544 let mut args = [0 as ValueRef, ..4];
545 let first_real_arg = bcx.fcx.arg_pos(0u);
546 for uint::range(0, num_args) |i| {
547 args[i] = get_param(bcx.fcx.llfn, first_real_arg + i);
549 let llfn = bcx.ccx().intrinsics.get_copy(&name);
550 Ret(bcx, Call(bcx, llfn, args.slice(0, num_args)));
// Helper: emit llvm.memcpy/memmove with size = count * sizeof(T),
// using a 32- or 64-bit length per `sizebits`.
553 fn memcpy_intrinsic(bcx: block, name: &'static str, tp_ty: ty::t, sizebits: u8) {
555 let lltp_ty = type_of::type_of(ccx, tp_ty);
556 let align = C_i32(machine::llalign_of_min(ccx, lltp_ty) as i32);
557 let size = match sizebits {
558 32 => C_i32(machine::llsize_of_real(ccx, lltp_ty) as i32),
559 64 => C_i64(machine::llsize_of_real(ccx, lltp_ty) as i64),
560 _ => ccx.sess.fatal("Invalid value for sizebits")
563 let decl = bcx.fcx.llfn;
564 let first_real_arg = bcx.fcx.arg_pos(0u);
565 let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p());
566 let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p());
567 let count = get_param(decl, first_real_arg + 2);
568 let volatile = C_i1(false);
569 let llfn = bcx.ccx().intrinsics.get_copy(&name);
570 Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, volatile]);
// Helper: emit llvm.memset analogously (dst, byte value, length).
574 fn memset_intrinsic(bcx: block, name: &'static str, tp_ty: ty::t, sizebits: u8) {
576 let lltp_ty = type_of::type_of(ccx, tp_ty);
577 let align = C_i32(machine::llalign_of_min(ccx, lltp_ty) as i32);
578 let size = match sizebits {
579 32 => C_i32(machine::llsize_of_real(ccx, lltp_ty) as i32),
580 64 => C_i64(machine::llsize_of_real(ccx, lltp_ty) as i64),
581 _ => ccx.sess.fatal("Invalid value for sizebits")
584 let decl = bcx.fcx.llfn;
585 let first_real_arg = bcx.fcx.arg_pos(0u);
586 let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p());
587 let val = get_param(decl, first_real_arg + 1);
588 let count = get_param(decl, first_real_arg + 2);
589 let volatile = C_i1(false);
590 let llfn = bcx.ccx().intrinsics.get_copy(&name);
591 Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, volatile]);
// Helper: emit llvm.ctlz/cttz. NOTE(review): the second argument `y`
// (the is-zero-undef flag) is constructed on an elided line.
595 fn count_zeros_intrinsic(bcx: block, name: &'static str) {
596 let x = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(0u));
598 let llfn = bcx.ccx().intrinsics.get_copy(&name);
599 Ret(bcx, Call(bcx, llfn, [x, y]));
602 let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx, item.id));
604 let fcx = new_fn_ctxt_w_id(ccx,
// Intrinsics are tiny; always inline them into callers.
614 set_always_inline(fcx.llfn);
616 // Set the fixed stack segment flag if necessary.
617 if attr::contains_name(attributes, "fixed_stack_segment") {
618 set_fixed_stack_segment(fcx.llfn);
621 let mut bcx = fcx.entry_bcx.get();
622 let first_real_arg = fcx.arg_pos(0u);
624 let nm = ccx.sess.str_of(item.ident);
625 let name = nm.as_slice();
627 // This requires that atomic intrinsics follow a specific naming pattern:
628 // "atomic_<operation>[_<ordering>], and no ordering means SeqCst
629 if name.starts_with("atomic_") {
630 let split : ~[&str] = name.split_iter('_').collect();
631 assert!(split.len() >= 2, "Atomic intrinsic not correct format");
// Map the optional ordering suffix onto an LLVM memory ordering;
// absence of a suffix means sequentially consistent.
632 let order = if split.len() == 2 {
633 lib::llvm::SequentiallyConsistent
636 "relaxed" => lib::llvm::Monotonic,
637 "acq" => lib::llvm::Acquire,
638 "rel" => lib::llvm::Release,
639 "acqrel" => lib::llvm::AcquireRelease,
640 _ => ccx.sess.fatal("Unknown ordering in atomic intrinsic")
// cmpxchg arm: (ptr, expected, new) -> old value.
646 let old = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
647 get_param(decl, first_real_arg + 1u),
648 get_param(decl, first_real_arg + 2u),
// atomic load arm.
653 let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
// atomic store arm: note value is arg 1, pointer is arg 0.
658 AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
659 get_param(decl, first_real_arg),
664 // These are all AtomicRMW ops
665 let atom_op = match op {
666 "xchg" => lib::llvm::Xchg,
667 "xadd" => lib::llvm::Add,
668 "xsub" => lib::llvm::Sub,
669 "and" => lib::llvm::And,
670 "nand" => lib::llvm::Nand,
671 "or" => lib::llvm::Or,
672 "xor" => lib::llvm::Xor,
673 "max" => lib::llvm::Max,
674 "min" => lib::llvm::Min,
675 "umax" => lib::llvm::UMax,
676 "umin" => lib::llvm::UMin,
677 _ => ccx.sess.fatal("Unknown atomic operation")
680 let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
681 get_param(decl, first_real_arg + 1u),
// "size_of" arm (presumably — the pattern line is elided).
693 let tp_ty = substs.tys[0];
694 let lltp_ty = type_of::type_of(ccx, tp_ty);
695 Ret(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty)));
698 // Create a datum reflecting the value being moved.
699 // Use `appropriate_mode` so that the datum is by ref
700 // if the value is non-immediate. Note that, with
701 // intrinsics, there are no argument cleanups to
702 // concern ourselves with.
703 let tp_ty = substs.tys[0];
704 let mode = appropriate_mode(ccx.tcx, tp_ty);
705 let src = Datum {val: get_param(decl, first_real_arg + 1u),
706 ty: tp_ty, mode: mode};
// move_val: drop the existing destination value first.
707 bcx = src.move_to(bcx, DROP_EXISTING,
708 get_param(decl, first_real_arg));
712 // See comments for `"move_val"`.
713 let tp_ty = substs.tys[0];
714 let mode = appropriate_mode(ccx.tcx, tp_ty);
715 let src = Datum {val: get_param(decl, first_real_arg + 1u),
716 ty: tp_ty, mode: mode};
// move_val_init: destination is uninitialized, so plain INIT.
717 bcx = src.move_to(bcx, INIT, get_param(decl, first_real_arg));
// min/pref alignment query arms.
721 let tp_ty = substs.tys[0];
722 let lltp_ty = type_of::type_of(ccx, tp_ty);
723 Ret(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty)));
726 let tp_ty = substs.tys[0];
727 let lltp_ty = type_of::type_of(ccx, tp_ty);
728 Ret(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)));
// get_tydesc arm: return a pointer to T's type descriptor.
731 let tp_ty = substs.tys[0];
732 let static_ti = get_tydesc(ccx, tp_ty);
733 glue::lazily_emit_all_tydesc_glue(ccx, static_ti);
735 // FIXME (#3730): ideally this shouldn't need a cast,
736 // but there's a circularity between translating rust types to llvm
737 // types and having a tydesc type available. So I can't directly access
738 // the llvm type of intrinsic::TyDesc struct.
739 let userland_tydesc_ty = type_of::type_of(ccx, output_type);
740 let td = PointerCast(bcx, static_ti.tydesc, userland_tydesc_ty);
// init arm (presumably): produce a zero value of T.
744 let tp_ty = substs.tys[0];
745 let lltp_ty = type_of::type_of(ccx, tp_ty);
746 match bcx.fcx.llretptr {
747 Some(ptr) => { Store(bcx, C_null(lltp_ty), ptr); RetVoid(bcx); }
748 None if ty::type_is_nil(tp_ty) => RetVoid(bcx),
749 None => Ret(bcx, C_null(lltp_ty)),
753 // Do nothing, this is effectively a no-op
754 let retty = substs.tys[0];
755 if ty::type_is_immediate(ccx.tcx, retty) && !ty::type_is_nil(retty) {
757 Ret(bcx, lib::llvm::llvm::LLVMGetUndef(type_of(ccx, retty).to_ref()));
// transmute arm: sizes must match exactly, then reinterpret bits.
767 let (in_type, out_type) = (substs.tys[0], substs.tys[1]);
768 let llintype = type_of::type_of(ccx, in_type);
769 let llouttype = type_of::type_of(ccx, out_type);
771 let in_type_size = machine::llbitsize_of_real(ccx, llintype);
772 let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
773 if in_type_size != out_type_size {
774 let sp = match ccx.tcx.items.get_copy(&ref_id.get()) {
775 ast_map::node_expr(e) => e.span,
776 _ => fail!("transmute has non-expr arg"),
778 let pluralize = |n| if 1u == n { "" } else { "s" };
779 ccx.sess.span_fatal(sp,
780 fmt!("transmute called on types with \
781 different sizes: %s (%u bit%s) to \
783 ty_to_str(ccx.tcx, in_type),
785 pluralize(in_type_size),
786 ty_to_str(ccx.tcx, out_type),
788 pluralize(out_type_size)));
791 if !ty::type_is_nil(out_type) {
792 let llsrcval = get_param(decl, first_real_arg);
793 if ty::type_is_immediate(ccx.tcx, in_type) {
// Immediate with a retptr: spill through the return slot,
// viewed as the input type.
796 Store(bcx, llsrcval, PointerCast(bcx, llretptr, llintype.ptr_to()));
799 None => match (llintype.kind(), llouttype.kind()) {
800 (Pointer, other) | (other, Pointer) if other != Pointer => {
// Pointer<->non-pointer needs a round-trip through memory;
// BitCast cannot change pointer-ness.
801 let tmp = Alloca(bcx, llouttype, "");
802 Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
803 Ret(bcx, Load(bcx, tmp));
805 _ => Ret(bcx, BitCast(bcx, llsrcval, llouttype))
809 // NB: Do not use a Load and Store here. This causes massive
810 // code bloat when `transmute` is used on large structural
812 let lldestptr = fcx.llretptr.get();
813 let lldestptr = PointerCast(bcx, lldestptr, Type::i8p());
814 let llsrcptr = PointerCast(bcx, llsrcval, Type::i8p());
816 let llsize = llsize_of(ccx, llintype);
817 call_memcpy(bcx, lldestptr, llsrcptr, llsize, 1);
// needs_drop / contains_managed: compile-time type-property queries.
825 let tp_ty = substs.tys[0];
826 Ret(bcx, C_bool(ty::type_needs_drop(ccx.tcx, tp_ty)));
828 "contains_managed" => {
829 let tp_ty = substs.tys[0];
830 Ret(bcx, C_bool(ty::type_contents(ccx.tcx, tp_ty).contains_managed()));
// visit_tydesc arm: invoke the visit glue through the tydesc.
833 let td = get_param(decl, first_real_arg);
834 let visitor = get_param(decl, first_real_arg + 1u);
835 //let llvisitorptr = alloca(bcx, val_ty(visitor));
836 //Store(bcx, visitor, llvisitorptr);
837 let td = PointerCast(bcx, td, ccx.tydesc_type.ptr_to());
838 glue::call_tydesc_glue_full(bcx, visitor, td,
839 abi::tydesc_field_visit_glue, None);
// frame_address arm (presumably): hand llvm.frameaddress's result to
// the provided closure as a *u8.
843 let frameaddress = ccx.intrinsics.get_copy(& &"llvm.frameaddress");
844 let frameaddress_val = Call(bcx, frameaddress, [C_i32(0i32)]);
845 let star_u8 = ty::mk_imm_ptr(
847 ty::mk_mach_uint(ast::ty_u8));
848 let fty = ty::mk_closure(bcx.tcx(), ty::ClosureTy {
849 purity: ast::impure_fn,
850 sigil: ast::BorrowedSigil,
852 region: ty::re_bound(ty::br_anon(0)),
853 bounds: ty::EmptyBuiltinBounds(),
855 bound_lifetime_names: opt_vec::Empty,
856 inputs: ~[ star_u8 ],
860 let datum = Datum {val: get_param(decl, first_real_arg),
861 mode: ByRef(ZeroMem), ty: fty};
862 let arg_vals = ~[frameaddress_val];
863 bcx = trans_call_inner(
864 bcx, None, fty, ty::mk_nil(),
865 |bcx| Callee {bcx: bcx, data: Closure(datum)},
866 ArgVals(arg_vals), Some(Ignore), DontAutorefArg).bcx;
869 "morestack_addr" => {
870 // XXX This is a hack to grab the address of this particular
871 // native function. There should be a general in-language
873 let llfty = type_of_fn(bcx.ccx(), [], ty::mk_nil());
874 let morestack_addr = decl_cdecl_fn(
875 bcx.ccx().llmod, "__morestack", llfty);
876 let morestack_addr = PointerCast(bcx, morestack_addr, Type::nil().ptr_to());
877 Ret(bcx, morestack_addr);
// Bulk table of direct LLVM intrinsic mappings.
879 "memcpy32" => memcpy_intrinsic(bcx, "llvm.memcpy.p0i8.p0i8.i32", substs.tys[0], 32),
880 "memcpy64" => memcpy_intrinsic(bcx, "llvm.memcpy.p0i8.p0i8.i64", substs.tys[0], 64),
881 "memmove32" => memcpy_intrinsic(bcx, "llvm.memmove.p0i8.p0i8.i32", substs.tys[0], 32),
882 "memmove64" => memcpy_intrinsic(bcx, "llvm.memmove.p0i8.p0i8.i64", substs.tys[0], 64),
883 "memset32" => memset_intrinsic(bcx, "llvm.memset.p0i8.i32", substs.tys[0], 32),
884 "memset64" => memset_intrinsic(bcx, "llvm.memset.p0i8.i64", substs.tys[0], 64),
885 "sqrtf32" => simple_llvm_intrinsic(bcx, "llvm.sqrt.f32", 1),
886 "sqrtf64" => simple_llvm_intrinsic(bcx, "llvm.sqrt.f64", 1),
887 "powif32" => simple_llvm_intrinsic(bcx, "llvm.powi.f32", 2),
888 "powif64" => simple_llvm_intrinsic(bcx, "llvm.powi.f64", 2),
889 "sinf32" => simple_llvm_intrinsic(bcx, "llvm.sin.f32", 1),
890 "sinf64" => simple_llvm_intrinsic(bcx, "llvm.sin.f64", 1),
891 "cosf32" => simple_llvm_intrinsic(bcx, "llvm.cos.f32", 1),
892 "cosf64" => simple_llvm_intrinsic(bcx, "llvm.cos.f64", 1),
893 "powf32" => simple_llvm_intrinsic(bcx, "llvm.pow.f32", 2),
894 "powf64" => simple_llvm_intrinsic(bcx, "llvm.pow.f64", 2),
895 "expf32" => simple_llvm_intrinsic(bcx, "llvm.exp.f32", 1),
896 "expf64" => simple_llvm_intrinsic(bcx, "llvm.exp.f64", 1),
897 "exp2f32" => simple_llvm_intrinsic(bcx, "llvm.exp2.f32", 1),
898 "exp2f64" => simple_llvm_intrinsic(bcx, "llvm.exp2.f64", 1),
899 "logf32" => simple_llvm_intrinsic(bcx, "llvm.log.f32", 1),
900 "logf64" => simple_llvm_intrinsic(bcx, "llvm.log.f64", 1),
901 "log10f32" => simple_llvm_intrinsic(bcx, "llvm.log10.f32", 1),
902 "log10f64" => simple_llvm_intrinsic(bcx, "llvm.log10.f64", 1),
903 "log2f32" => simple_llvm_intrinsic(bcx, "llvm.log2.f32", 1),
904 "log2f64" => simple_llvm_intrinsic(bcx, "llvm.log2.f64", 1),
905 "fmaf32" => simple_llvm_intrinsic(bcx, "llvm.fma.f32", 3),
906 "fmaf64" => simple_llvm_intrinsic(bcx, "llvm.fma.f64", 3),
907 "fabsf32" => simple_llvm_intrinsic(bcx, "llvm.fabs.f32", 1),
908 "fabsf64" => simple_llvm_intrinsic(bcx, "llvm.fabs.f64", 1),
909 "floorf32" => simple_llvm_intrinsic(bcx, "llvm.floor.f32", 1),
910 "floorf64" => simple_llvm_intrinsic(bcx, "llvm.floor.f64", 1),
911 "ceilf32" => simple_llvm_intrinsic(bcx, "llvm.ceil.f32", 1),
912 "ceilf64" => simple_llvm_intrinsic(bcx, "llvm.ceil.f64", 1),
913 "truncf32" => simple_llvm_intrinsic(bcx, "llvm.trunc.f32", 1),
914 "truncf64" => simple_llvm_intrinsic(bcx, "llvm.trunc.f64", 1),
915 "ctpop8" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i8", 1),
916 "ctpop16" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i16", 1),
917 "ctpop32" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i32", 1),
918 "ctpop64" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i64", 1),
919 "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
920 "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
921 "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
922 "ctlz64" => count_zeros_intrinsic(bcx, "llvm.ctlz.i64"),
923 "cttz8" => count_zeros_intrinsic(bcx, "llvm.cttz.i8"),
924 "cttz16" => count_zeros_intrinsic(bcx, "llvm.cttz.i16"),
925 "cttz32" => count_zeros_intrinsic(bcx, "llvm.cttz.i32"),
926 "cttz64" => count_zeros_intrinsic(bcx, "llvm.cttz.i64"),
927 "bswap16" => simple_llvm_intrinsic(bcx, "llvm.bswap.i16", 1),
928 "bswap32" => simple_llvm_intrinsic(bcx, "llvm.bswap.i32", 1),
929 "bswap64" => simple_llvm_intrinsic(bcx, "llvm.bswap.i64", 1),
931 // Could we make this an enum rather than a string? does it get
933 ccx.sess.span_bug(item.span, "unknown intrinsic");
940 * Translates a "crust" fn, meaning a Rust fn that can be called
941 * from C code. In this case, we have to perform some adaptation
942 * to (1) switch back to the Rust stack and (2) adapt the C calling
943 * convention to our own.
945 * Example: Given a crust fn F(x: X, y: Y) -> Z, we generate a
946 * Rust function R as normal:
948 * void R(Z* dest, void *env, X x, Y y) {...}
950 * and then we generate a wrapper function W that looks like:
953 * struct { X x; Y y; Z *z; } args = { x, y, z };
954 * call_on_c_stack_shim(S, &args);
957 * Note that the wrapper follows the foreign (typically "C") ABI.
958 * The wrapper is the actual "value" of the foreign fn. Finally,
959 * we generate a shim function S that looks like:
961 * void S(struct { X x; Y y; Z *z; } *args) {
962 * R(args->z, NULL, args->x, args->y);
// Translates an `extern fn` callable *from* C ("crust" fn): emits the
// ordinary Rust ABI function R, a shim S that adapts the bundle to R's
// calling convention (NULL env, by-ref args where Rust expects them),
// and a foreign-ABI wrapper W that bundles C args and upcalls onto the
// Rust stack. NOTE(review): several parameters and call-argument lists
// are elided from this chunk.
965 pub fn trans_foreign_fn(ccx: @mut CrateContext,
971 let _icx = push_ctxt("foreign::build_foreign_fn");
// Emits R, the normal Rust-ABI body, under a mangled internal name
// (the "clownshoe" suffix marks the ABI-adaptation machinery).
// NOTE(review): parameters, the trans call, and return are elided.
973 fn build_rust_fn(ccx: @mut CrateContext,
974 path: &ast_map::path,
979 let _icx = push_ctxt("foreign::foreign::build_rust_fn");
980 let t = ty::node_id_to_type(ccx.tcx, id);
982 let ps = link::mangle_internal_name_by_path(
984 vec::append_one((*path).clone(),
986 special_idents::clownshoe_abi)));
987 let llty = type_of_fn_from_ty(ccx, t);
988 let llfndecl = decl_internal_cdecl_fn(ccx.llmod, ps, llty);
// Emits S for the reverse direction: unpacks the bundle into a Rust
// ABI call to R. NOTE(review): parameters, the arg-collection loop
// header, and the final build_shim_fn_ arguments are elided.
1001 fn build_shim_fn(ccx: @mut CrateContext,
1002 path: ast_map::path,
1008 * Generate the shim S:
1010 * void S(struct { X x; Y y; Z *z; } *args) {
1011 * R(args->z, NULL, &args->x, args->y);
1014 * One complication is that we must adapt to the Rust
1015 * calling convention, which introduces indirection
1016 * in some cases. To demonstrate this, I wrote one of the
1017 * entries above as `&args->x`, because presumably `X` is
1018 * one of those types that is passed by pointer in Rust.
1021 let _icx = push_ctxt("foreign::foreign::build_shim_fn");
1023 fn build_args(bcx: block, tys: &ShimTypes, llargbundle: ValueRef)
1025 let _icx = push_ctxt("foreign::extern::shim::build_args");
1026 let ccx = bcx.ccx();
1027 let mut llargvals = ~[];
1029 let n = tys.fn_sig.inputs.len();
// Non-immediate outputs: Rust ABI takes the return slot as the
// first argument; fetch it from the bundle's last field.
1031 if !ty::type_is_immediate(bcx.tcx(), tys.fn_sig.output) {
1032 let llretptr = load_inbounds(bcx, llargbundle, [0u, n]);
1033 llargvals.push(llretptr);
// Rust closures take an environment pointer; pass NULL for a
// bare extern fn.
1036 let llenvptr = C_null(Type::opaque_box(bcx.ccx()).ptr_to());
1037 llargvals.push(llenvptr);
1039 // Get a pointer to the argument:
1040 let mut llargval = GEPi(bcx, llargbundle, [0u, i]);
1042 if !type_of::arg_is_indirect(ccx, &tys.fn_sig.inputs[i]) {
1043 // If Rust would pass this by value, load the value.
1044 llargval = Load(bcx, llargval);
1047 llargvals.push(llargval);
// Immediate results come back by value; write them through the
// bundle's return-slot pointer. Non-immediate results were already
// written directly via the wired-in return pointer (see NB below).
1053 fn build_ret(bcx: block,
1054 shim_types: &ShimTypes,
1055 llargbundle: ValueRef,
1056 llretval: ValueRef) {
1057 if bcx.fcx.llretptr.is_some() &&
1058 ty::type_is_immediate(bcx.tcx(), shim_types.fn_sig.output) {
1059 // Write the value into the argument bundle.
1060 let arg_count = shim_types.fn_sig.inputs.len();
1061 let llretptr = load_inbounds(bcx,
1064 Store(bcx, llretval, llretptr);
1066 // NB: The return pointer in the Rust ABI function is wired
1067 // directly into the return slot in the shim struct.
1071 let shim_name = link::mangle_internal_name_by_path(
1073 vec::append_one(path, ast_map::path_name(
1074 special_idents::clownshoe_stack_shim
1080 lib::llvm::CCallConv,
// Emits W, the foreign-ABI entry point: delegates to the ABI adapter
// for arg/ret marshalling and upcalls via call_shim_on_rust_stack.
// NOTE(review): parameters and the build_wrap_fn_ argument list are
// elided.
1085 fn build_wrap_fn(ccx: @mut CrateContext,
1091 * Generate the wrapper W:
1094 * struct { X x; Y y; Z *z; } args = { x, y, z };
1095 * call_on_c_stack_shim(S, &args);
1099 let _icx = push_ctxt("foreign::foreign::build_wrap_fn");
1105 ccx.upcalls.call_shim_on_rust_stack,
1110 fn build_args(bcx: block,
1113 llargbundle: ValueRef) {
1114 let _icx = push_ctxt("foreign::foreign::wrap::build_args");
1115 tys.fn_ty.build_wrap_args(bcx,
1121 fn build_ret(bcx: block, tys: &ShimTypes, llargbundle: ValueRef) {
1122 let _icx = push_ctxt("foreign::foreign::wrap::build_ret");
1123 tys.fn_ty.build_wrap_ret(bcx, tys.llsig.llarg_tys, llargbundle);
1127 let tys = shim_types(ccx, id);
1128 // The internal Rust ABI function - runs on the Rust stack
1130 let llrustfn = build_rust_fn(ccx, &path, decl, body, id);
1131 // The internal shim function - runs on the Rust stack
1132 let llshimfn = build_shim_fn(ccx, path, llrustfn, &tys);
1133 // The foreign C function - runs on the C stack
1134 build_wrap_fn(ccx, llshimfn, llwrapfn, &tys)
// Registers (declares) the LLVM value for an extern-from-C fn without
// translating its body: computes shim types and declares the wrapper
// with the C calling convention via the ABI adapter.
// NOTE(review): this block runs past the end of the chunk — the
// register_fn_fuller argument list and closing braces are not visible.
1137 pub fn register_foreign_fn(ccx: @mut CrateContext,
1139 path: ast_map::path,
1140 node_id: ast::node_id,
1141 attrs: &[ast::Attribute])
1143 let _icx = push_ctxt("foreign::register_foreign_fn");
1145 let t = ty::node_id_to_type(ccx.tcx, node_id);
1147 let tys = shim_types(ccx, node_id);
1148 do tys.fn_ty.decl_fn |fnty| {
1149 // XXX(pcwalton): We should not copy the path.
1150 register_fn_fuller(ccx,
1156 lib::llvm::CCallConv,