use crate::llvm::{self, AttributePlace};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::{LayoutLlvmExt, PointerKind};
use crate::value::Value;
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_target::abi::call::ArgType;

use rustc_codegen_ssa::traits::*;

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout;

// Needed for the `as c_uint` casts in `store_fn_arg`, `ptr_to_llvm_type`
// and `get_param` below.
use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

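// Illustrative expansion (not part of the original source): for a flag set
// with `NoAlias` and `ZExt` present, `for_each_kind!(flags, f, ...)` expands
// to a chain of checks of the form
//     if flags.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
// so `f` runs once per attribute kind that is actually set.
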
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                         idx.as_uint(),
                                                         deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
                                                               idx.as_uint(),
                                                               deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
                                               align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                                 idx.as_uint(),
                                                                 deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
                                                       align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

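// For example (illustrative): an `ArgAttributes` with `pointee_size == 4`,
// `pointee_align == Some(4)` and `NonNull` set produces LLVM attributes along
// the lines of `dereferenceable(4) align 4` on the argument; `NonNull` is
// subsumed by `dereferenceable`, and without it `dereferenceable_or_null(4)`
// is emitted instead.
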
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self)
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

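// Illustrative lowerings (not from the original source): an 8-byte integer
// `Reg` lowers to `i64`, a 4-byte float `Reg` to `float`, and a 16-byte
// vector `Reg` to `<16 x i8>`.
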
impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

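// Illustrative example (assumed values, not from the original source): a
// `CastTarget` with no prefix, an `i64` unit and a 12-byte total lowers to
// the LLVM struct `{ i64, i32 }`: one full 8-byte chunk plus a 4-byte
// (`rem_bytes`) trailing integer.
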
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty()
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        // Fetch the next LLVM parameter, advancing the index as we go.
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore(_) => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

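// Note (illustrative): a fat-pointer argument such as `&[u8]` arrives as
// `PassMode::Pair`, so `store_fn_arg` above consumes two consecutive LLVM
// parameters (data pointer and length) via the two `next()` calls.
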
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
    ) {
        ty.store_fn_arg(self, idx, dst)
    }
    fn store_arg_ty(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>
    ) {
        ty.store(self, val, dst)
    }
    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
        ty.memory_ty(self)
    }
}

pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx.mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        LayoutAbi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout)
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }

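    // Illustrative effect (not from the original source): for a method taking
    // `self: Rc<Self>` on a `dyn Trait`, the receiver built above is lowered
    // as a single thin pointer (the layout of `*mut ()`) while keeping its
    // original Rust type, matching the special case described in the comments.
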
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.to_vec()
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let linux_sparc64 = target.target_os == "linux"
                       && target.arch == "sparc64"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

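        // For example (illustrative): a `&i32` parameter is
        // `PointerKind::Frozen`, so the closure above sets `NonNull`,
        // `NoAlias` and `ReadOnly` with a 4-byte `pointee_size` and
        // `pointee_align`; a `&mut i32` (`UniqueBorrowed`) gets `NoAlias`
        // but not `ReadOnly`.
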
        // Store the index of the last argument. This is useful for working with
        // C-compatible variadic arguments.
        let last_arg_idx = if sig.inputs().is_empty() {
            None
        } else {
            Some(sig.inputs().len() - 1)
        };

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore(IgnoreMode::Zst);
                }
            }

            // If this is a C-variadic function, this is not the return value,
            // and there are one or more fixed arguments; ensure that the `VaList`
            // is ignored as an argument.
            if sig.c_variadic {
                match (last_arg_idx, arg_idx) {
                    (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
                        let va_list_did = match cx.tcx.lang_items().va_list() {
                            Some(did) => did,
                            None => bug!("`va_list` lang item required for C-variadic functions"),
                        };
                        match ty.sty {
                            ty::Adt(def, _) if def.did == va_list_did => {
                                // This is the "spoofed" `VaList`. Set the argument's mode
                                // so that it will be ignored.
                                arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
                            },
                            _ => (),
                        }
                    }
                    _ => {}
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).align_to(b.value.align(cx).abi),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().cloned().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            c_variadic: sig.c_variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                           cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

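    // Illustrative effect (not from the original source): under `fixup`, a
    // small aggregate such as `struct Pair(u8, u16)` (4 bytes) is cast to a
    // single `i32` instead of an LLVM aggregate, while anything larger than
    // a pointer is made indirect.
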
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore(IgnoreMode::Zst) => cx.type_void(),
            PassMode::Ignore(IgnoreMode::CVarArgs) =>
                bug!("`va_list` should never be a return type"),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore(_) => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

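    // Illustrative lowering (assumed types, not from the original source):
    // for `fn(&u32) -> bool` under the Rust ABI, `llvm_type` yields roughly
    // `i1 (i32*)`: a `Direct` scalar return and a `Direct` pointer argument.
    // An `Indirect` return would instead prepend an out-pointer parameter
    // and return `void`.
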
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore(_) => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore(_) => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}

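// For example (illustrative): a call returning `std::num::NonZeroU32` gets
// range metadata excluding zero (the wrapping exclusive range `1..0` over
// `i32`) from `apply_attrs_callsite` above, while `bool` returns are skipped
// because their `0..2` range would collapse once the type becomes `i1`.
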
impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new(&self, sig, extra_args)
    }
    fn new_vtable(
        &self,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>]
    ) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new_vtable(&self, sig, extra_args)
    }
    fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
        FnType::of_instance(&self, instance)
    }
}

impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(
        &mut self,
        ty: &FnType<'tcx, Ty<'tcx>>,
        callsite: Self::Value
    ) {
        ty.apply_attrs_callsite(self, callsite)
    }
    fn get_param(&self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}