// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;

use interfaces::{BuilderMethods, CommonMethods, TypeMethods};

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}
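
// For reference, the `for_each_kind!` invocation above expands to one guarded
// call per listed attribute kind, roughly:
//
//     if self.contains(ArgAttribute::ByVal) { f(llvm::Attribute::ByVal) }
//     if self.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
//     // ...and so on for each remaining kind.
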
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(), align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}
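
// As a rough example of the net effect (assuming default options): a `&mut u64`
// parameter ends up carrying `noalias dereferenceable(8)` in the emitted LLVM
// signature, while a shared `&u64` (no `UnsafeCell`) additionally gets `readonly`.
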
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self)
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}
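
// Worked example (illustrative): with no prefix and `rest = { unit: i64, total: 20 bytes }`,
// rest_count = 2 and rem_bytes = 4, so the cast type is `{ i64, i64, i32 }`.
// With total = 16 bytes it simplifies to `[2 x i64]`, and with total <= 8 bytes
// to a plain `i64`.
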
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        let cx = bx.cx();
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
                bx.store(val, cast_dst, self.layout.align);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(cx);
                let scratch_align = cast.align(cx);
                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bx,
                                  bx.pointercast(dst.llval, cx.type_i8p()),
                                  self.layout.align,
                                  bx.pointercast(llscratch, cx.type_i8p()),
                                  scratch_align,
                                  cx.const_usize(self.layout.size.bytes()),
                                  MemFlags::empty());

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
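
    // The memcpy path above produces IR of roughly this shape for an
    // `i64 -> { i32, i32 }` adaptation (value names invented for illustration):
    //
    //     %scratch = alloca i64
    //     store i64 %val, i64* %scratch
    //     call void @llvm.memcpy(i8* %dst, i8* %scratch, i64 8, ...)
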
    fn store_fn_arg(
        &self,
        bx: &Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}
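
// Note on `store_fn_arg`: each call consumes as many LLVM function parameters
// as the PassMode requires and advances `*idx` past them. For example, a
// `PassMode::Pair` argument such as `&str` consumes two consecutive LLVM
// parameters (data pointer and length).
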
pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx.mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        LayoutAbi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout)
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }
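
    // For example (illustrative): for `self: Rc<Self>` with `Self = dyn Trait`,
    // the loop above descends `Rc<dyn Trait>` -> `NonNull<RcBox<dyn Trait>>` ->
    // `*const RcBox<dyn Trait>`, and the receiver is then passed as that pointer
    // type with the thin-pointer layout of `*mut ()`.
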
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.to_vec()
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
                    // with align attributes, and those calls later block optimizations.
                    if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes {
                        attrs.pointee_align = None;
                    }

                    // `Box` pointer parameters never alias because ownership is transferred.
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
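
        // Rough net effect for common parameter types (assuming default options,
        // i.e. without `-Z arg-align-attributes`): `&T` with no `UnsafeCell` gets
        // `noalias readonly` plus a `dereferenceable` hint; `&mut T` gets
        // `noalias dereferenceable(...)`; `Box<T>` gets `noalias` as well.
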
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().cloned().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            variadic: sig.variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                           cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }
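
    // For example (illustrative): under this fixup, a 3-byte aggregate such as
    // `struct Rgb(u8, u8, u8)` (size 3, at most pointer-sized) is cast to a
    // single `i24` immediate, while a 24-byte struct on a 64-bit target is
    // made indirect and passed by reference.
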
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
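
    // For example (illustrative): a Rust `fn(&u64) -> bool` lowers to the LLVM
    // signature `i1 (i64*)`, while a large aggregate return lowers to
    // `void (%Ret*, ...)` with the return slot passed as a leading `sret`
    // pointer parameter.
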
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx.cx());
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}