// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
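
//! Handling of function call ABIs: lowering `FnType`/`ArgType` descriptions
//! of signatures into LLVM types and attributes, for both function
//! declarations and call sites.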

use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::C_usize;
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}
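
// For example, `for_each_kind!(flags, f, ByVal, ZExt)` expands to:
//
//     if flags.contains(ArgAttribute::ByVal) { f(llvm::Attribute::ByVal) }
//     if flags.contains(ArgAttribute::ZExt) { f(llvm::Attribute::ZExt) }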

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(),
                                                       align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}
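
// `Reg` and `CastTarget` are target-independent descriptions, computed by the
// ABI code in `rustc_target`, of the register-sized chunks an argument is
// passed in; the impls below lower them to concrete LLVM types.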

pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => Type::ix(cx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(cx),
                    64 => Type::f64(cx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector(Type::i8(cx), self.size.bytes())
            }
        }
    }
}
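
// The lowering above maps, e.g., an 8-byte `RegKind::Integer` to `i64`, a
// 16-byte `RegKind::Vector` to `<16 x i8>`, and floats to `float`/`double`.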

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return Type::array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(Type::ix(cx, rem_bytes * 8));
        }

        Type::struct_(cx, &args, false)
    }
}
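
// Worked example: with an empty prefix, an 8-byte integer `rest.unit`, and a
// `rest.total` of 20 bytes, `rest_count` is 2 and `rem_bytes` is 4, so the
// cast type becomes `{ i64, i64, i32 }`.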

pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>);
    fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>);
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Get the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Store a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) {
        if self.is_ignore() {
            return;
        }
        let cx = bx.cx;
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe; clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
                bx.store(val, cast_dst, self.layout.align);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(cx);
                let scratch_align = cast.align(cx);
                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bx,
                                  bx.pointercast(dst.llval, Type::i8p(cx)),
                                  self.layout.align,
                                  bx.pointercast(llscratch, Type::i8p(cx)),
                                  scratch_align,
                                  C_usize(cx, self.layout.size.bytes()),
                                  MemFlags::empty());

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
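
    // `store_fn_arg` reads this argument's formal LLVM parameter(s) and
    // stores them into `dst`. `PassMode::Pair` and unsized-indirect arguments
    // occupy two consecutive LLVM parameters, so `next()` is called twice
    // for them.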
    fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}

pub trait FnTypeExt<'tcx> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'ll, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, _| {
            ArgType::new(cx.layout_of(ty))
        })
    }

    fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx.mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        LayoutAbi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout)
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // We now have a type like `*mut RcBox<dyn Trait>`; change its layout to that
                // of `*mut ()`, a thin pointer, but keep the same type. This is understood
                // as a special case elsewhere in the compiler.
                let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }
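
    // For example, for a method taking `self: Rc<Self>` on a `dyn Trait`, the
    // "descend newtypes" loop above peels `Rc` -> `NonNull<RcBox<dyn Trait>>`
    // -> `*const RcBox<dyn Trait>`, so the receiver is passed as a thin
    // pointer while its Rust type remains `*const RcBox<dyn Trait>`.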

    fn new_internal(
        cx: &CodegenCx<'ll, 'tcx>,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };
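
        // E.g. for a closure called as `FnMut(u32, u32)`, the "rust-call"
        // signature is roughly `fn(&mut Self, (u32, u32))`; untupling means
        // the ABI sees two `u32` arguments instead of one tuple argument.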

        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
                    // with align attributes, and those calls later block optimizations.
                    if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes {
                        attrs.pointee_align = None;
                    }

                    // `Box` pointer parameters never alias because ownership is transferred.
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
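
        // Roughly: a `&mut T` parameter ends up `nonnull`, `dereferenceable`
        // and `noalias`; a `&T` parameter whose pointee has no `UnsafeCell`
        // gets `noalias` plus `readonly`; and a `Box<T>` is `noalias` even in
        // return position.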

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs,
                                           a,
                                           arg.layout,
                                           Size::ZERO,
                                           false);
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs,
                                           scalar,
                                           arg.layout,
                                           Size::ZERO,
                                           is_return);
                }
            }

            arg
        };
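
        // `arg_of` is applied to the return type with `arg_idx == None` and
        // to each argument with `arg_idx == Some(i)`, matching the
        // `mk_arg_type` callback's convention above.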

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| {
                arg_of(ty, Some(i))
            }).collect(),
            variadic: sig.variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                           cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }
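
    // E.g. under the Rust ABI on x86_64, a 4-byte `struct Pair(u16, u16)` is
    // passed directly as an `i32`, while a 24-byte aggregate (larger than a
    // pointer) is made indirect and passed by reference.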

    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => Type::void(cx),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
                Type::void(cx)
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, llreturn_ty)
        } else {
            Type::func(&llargument_tys, llreturn_ty)
        }
    }
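
    // E.g. a `&[u8]` argument is a scalar pair and so contributes two LLVM
    // parameters (an `i8*` and the `usize` length), while `PassMode::Ignore`
    // arguments contribute none.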

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }
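
    // The same per-argument walk is replayed at each call site below, keeping
    // declaration and call-site attributes in sync; the call-site variant also
    // attaches range metadata to scalar returns and sets the calling
    // convention when it differs from C.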

    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}