// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::C_usize;
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;

use traits::BuilderMethods;

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

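// For illustration only: given `flags = NoAlias | ZExt`, the macro above
// expands to a chain of checks of the form
// `if flags.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }`,
// one per listed kind, mapping each set rustc `ArgAttribute` to its LLVM
// counterpart of the same name.
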
trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                         idx.as_uint(),
                                                         deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
                                                               idx.as_uint(),
                                                               deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
                                               align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                                 idx.as_uint(),
                                                                 deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
                                                       align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

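// A concrete illustration (assuming a 64-bit target): a `&u32` argument
// carries `pointee_size = 4` and `NonNull`, so the code above emits
// `dereferenceable(4)` (plus `align 4` when alignment attributes are kept),
// while an `Option<&u32>` argument lacks `NonNull` and gets
// `dereferenceable_or_null(4)` instead.
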
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => Type::ix(cx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(cx),
                    64 => Type::f64(cx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector::<Value>(Type::i8(cx), self.size.bytes())
            }
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return Type::array::<Value>(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(Type::ix(cx, rem_bytes * 8));
        }

        Type::struct_(cx, &args, false)
    }
}

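// Worked example (illustrative, not exhaustive): with no prefix and
// `rest = { unit: i32, total: 12 bytes }`, the method above lowers to
// `[3 x i32]`; with `rest = { unit: i64, total: 20 bytes }` the remainder
// forces a struct, `{ i64, i64, i32 }`, where the trailing i32 covers
// rem_bytes = 4.
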
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Get the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Store a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        let cx = bx.cx;
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
                bx.store(val, cast_dst, self.layout.align);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(cx);
                let scratch_align = cast.align(cx);
                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bx,
                                  bx.pointercast(dst.llval, Type::i8p(cx)),
                                  self.layout.align,
                                  bx.pointercast(llscratch, Type::i8p(cx)),
                                  scratch_align,
                                  C_usize(cx, self.layout.size.bytes()),
                                  MemFlags::empty());

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

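    // Example (illustrative): for an extern "C" function returning a small
    // struct that the ABI casts to i64, `store` above spills the returned i64
    // into the aligned `abi_cast` alloca and memcpys `self.layout.size` bytes
    // into the destination, rather than storing through a pointercast (see
    // the FIXME about when the simpler store would be safe).
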
    fn store_fn_arg(
        &self,
        bx: &Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}

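// Note (illustrative): a `&str` argument is `PassMode::Pair`, so
// `store_fn_arg` above consumes two consecutive LLVM parameters (data pointer
// and length) and advances `idx` by two; an unsized indirect argument likewise
// consumes pointer plus extra data (e.g. a vtable) via `OperandValue::Ref`.
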
pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx.mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        LayoutAbi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout)
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
                    // get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }

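    // Illustrative walk-through: for `self: Rc<Self>` on a `dyn Trait` method,
    // the receiver type is roughly `Rc<dyn Trait>`; descending through its
    // non-ZST fields reaches the built-in pointer `*const RcBox<dyn Trait>`
    // (names approximate), whose layout is then replaced above by the
    // thin-pointer layout of `*mut ()` while keeping the fat-pointer type.
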
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.to_vec()
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

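        // Illustrative: for an `extern "rust-call"` closure invoked as
        // `FnMut(u32, bool)`, the final input is the tuple `(u32, bool)`; it
        // is untupled above so codegen sees two scalar arguments rather than
        // one tuple argument.
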
        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
                    // with align attributes, and those calls later block optimizations.
                    if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes {
                        attrs.pointee_align = None;
                    }

                    // `Box` pointer parameters never alias because ownership is transferred
                    //
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

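        // Illustrative outcomes of the closure above (assuming alignment
        // attributes are enabled): a `&u32` parameter gets NonNull,
        // dereferenceable(4), noalias and readonly; `&mut u32` gets NonNull,
        // dereferenceable(4) and noalias; a `Box<u32>` parameter is
        // UniqueOwned, so it is noalias even when used as a return value.
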
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().cloned().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            variadic: sig.variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means they'd use whatever ABI is
                    // appropriate for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                           cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

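    // Illustrative effect of `fixup` above (assuming 64-bit pointers and the
    // Rust ABI): a `[u8; 6]` argument (Aggregate, 6 bytes <= 8) is cast to an
    // i48 integer; a `[u8; 16]` argument (16 bytes > 8) is made indirect; and
    // a `#[repr(simd)]` vector is made indirect when `simd_types_indirect` is
    // set, so callers and callees agree regardless of target features.
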
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => Type::void(cx),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
                Type::void(cx)
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func::<Value>(&llargument_tys, llreturn_ty)
        } else {
            Type::func::<Value>(&llargument_tys, llreturn_ty)
        }
    }

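    // Illustrative lowering (64-bit target): `fn(&u32, &str) -> bool` has
    // modes Direct, Pair and Direct respectively, so the method above builds
    // roughly `i1 (i32*, i8*, i64)`; an sret return instead prepends a
    // pointer argument and the function returns void.
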
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}
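
// Illustrative: a call returning `char` (an integer scalar with valid range
// 0..=0x10FFFF, not a bool) would get `!range` metadata of [0, 0x110000)
// attached to the callsite above, letting LLVM exploit the restricted range.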