1 // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef, AttributePlace};
14 use common::{instance_ty, ty_fn_sig, C_usize};
15 use context::CrateContext;
33 use mir::place::{Alignment, PlaceRef};
34 use mir::operand::OperandValue;
36 use type_of::{LayoutLlvmExt, PointerKind};
38 use rustc::ty::{self, Ty};
39 use rustc::ty::layout::{self, Align, Size, TyLayout};
40 use rustc::ty::layout::{HasDataLayout, LayoutOf};
45 pub use syntax::abi::Abi;
46 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
// NOTE(review): this listing is elided — the embedded original line numbers
// below jump (48, 50, 52, …, 59), so the `pub enum PassMode` header, the
// variant between `Pair` and `Indirect` (the "after casting it" doc comment
// at 56-57 has no visible variant line under it), and the closing brace are
// not visible here. Only comments are added; no code is reconstructed.
//
// What IS visible: an ABI pass-mode enum with `Direct`/`Pair`/`Indirect`
// variants, each carrying `ArgAttributes` describing LLVM attributes.
48 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
50 /// Ignore the argument (useful for empty struct).
52 /// Pass the argument directly.
53 Direct(ArgAttributes),
54 /// Pass a pair's elements directly in two arguments.
55 Pair(ArgAttributes, ArgAttributes),
56 /// Pass the argument after casting it, to either
57 /// a single uniform or a pair of registers.
59 /// Pass the argument indirectly via a hidden pointer.
60 Indirect(ArgAttributes),
// NOTE(review): elided fragment. The `mod attr_impl` wrapper, the `bitflags!`
// invocation line, and several bit constants (whatever occupied bits 0, 5,
// and anything above bit 6 — `for_each_kind!` below names ByVal, SExt, ZExt,
// InReg, so those constants presumably exist on the elided lines) are not
// visible here. Closing braces are also missing from this view.
63 // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
65 pub use self::attr_impl::ArgAttribute;
67 #[allow(non_upper_case_globals)]
70 // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
73 pub struct ArgAttribute: u16 {
75 const NoAlias = 1 << 1;
76 const NoCapture = 1 << 2;
77 const NonNull = 1 << 3;
78 const ReadOnly = 1 << 4;
80 const StructRet = 1 << 6;
// Expands to one `if flags.contains(...) { f(...) }` per listed attribute,
// mapping each local `ArgAttribute` flag onto its `llvm::Attribute` namesake.
87 macro_rules! for_each_kind {
88 ($flags: ident, $f: ident, $($kind: ident),+) => ({
89 $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
// Calls `f` once for every attribute bit set in `self`.
94 fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
95 for_each_kind!(self, f,
96 ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
// NOTE(review): elided fragment — the `pointee_size: Size` field (referenced
// by `apply_llfn`/`apply_callsite` below and initialized at 113), the `impl`
// header, the `new()` signature around lines 108-111, and the `pointee_align`
// initializer are not visible in this listing.
100 /// A compact representation of LLVM attributes (at least those relevant for this module)
101 /// that can be manipulated without interacting with LLVM's Attribute machinery.
102 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
103 pub struct ArgAttributes {
104 regular: ArgAttribute,
106 pointee_align: Option<Align>
// Visible parts of the constructor: starts with an empty flag set and a
// zero pointee size.
112 regular: ArgAttribute::default(),
113 pointee_size: Size::from_bytes(0),
// Builder-style setter: ORs `attr` into the flag set and returns `&mut self`
// so calls can be chained (see `make_indirect` further down).
118 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
119 self.regular = self.regular | attr;
123 pub fn contains(&self, attr: ArgAttribute) -> bool {
124 self.regular.contains(attr)
// Applies these attributes to argument/return slot `idx` of function `llfn`.
// NOTE(review): elided fragment — the `unsafe` block, the `deref != 0` guard
// that presumably sits between 130 and 132, the argument lists of the three
// LLVMRust* calls, and the branch structure around 137 (the "or null" path)
// are not visible; behavior claims here are limited to what is shown.
127 pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
128 let mut regular = self.regular;
130 let deref = self.pointee_size.bytes();
// NonNull + known pointee size becomes LLVM `dereferenceable`; otherwise the
// "...OrNull" form is used (exact condition elided).
132 if regular.contains(ArgAttribute::NonNull) {
133 llvm::LLVMRustAddDereferenceableAttr(llfn,
137 llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
// NonNull is consumed here: `dereferenceable` already implies it.
141 regular -= ArgAttribute::NonNull;
143 if let Some(align) = self.pointee_align {
144 llvm::LLVMRustAddAlignmentAttr(llfn,
// Remaining simple flags are translated one-by-one.
148 regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
// Call-site twin of `apply_llfn`: same attribute translation, but targeting
// a specific call/invoke instruction via the *CallSite* LLVMRust entry points.
// NOTE(review): elided the same way as `apply_llfn` — guards, unsafe block,
// call arguments, and closing braces are missing from this view.
152 pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
153 let mut regular = self.regular;
155 let deref = self.pointee_size.bytes();
157 if regular.contains(ArgAttribute::NonNull) {
158 llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
162 llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
// Drop NonNull — subsumed by the dereferenceable attribute added above.
166 regular -= ArgAttribute::NonNull;
168 if let Some(align) = self.pointee_align {
169 llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
173 regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
// NOTE(review): elided fragment. The `pub enum RegKind { ... }` body under
// the first derive, the `pub struct Reg { kind, size }` body under the
// second derive (fields inferable from the `reg_ctor!` expansion), the
// `impl Reg` headers, the 1-bit / f32-f64 match arms in `align`, and the
// Float/Vector arm headers in `llvm_type` are all missing from this view.
177 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
184 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
// Generates a `Reg` constructor of the given name, kind, and bit-width.
190 macro_rules! reg_ctor {
191 ($name:ident, $kind:ident, $bits:expr) => {
192 pub fn $name() -> Reg {
194 kind: RegKind::$kind,
195 size: Size::from_bits($bits)
202 reg_ctor!(i8, Integer, 8);
203 reg_ctor!(i16, Integer, 16);
204 reg_ctor!(i32, Integer, 32);
205 reg_ctor!(i64, Integer, 64);
207 reg_ctor!(f32, Float, 32);
208 reg_ctor!(f64, Float, 64);
// ABI alignment of this register class, looked up from the target's data
// layout; bit-width ranges pick the matching integer alignment.
212 pub fn align(&self, ccx: &CrateContext) -> Align {
213 let dl = ccx.data_layout();
215 RegKind::Integer => {
216 match self.size.bits() {
218 2...8 => dl.i8_align,
219 9...16 => dl.i16_align,
220 17...32 => dl.i32_align,
221 33...64 => dl.i64_align,
222 65...128 => dl.i128_align,
223 _ => bug!("unsupported integer: {:?}", self)
227 match self.size.bits() {
230 _ => bug!("unsupported float: {:?}", self)
233 RegKind::Vector => dl.vector_align(self.size)
// LLVM type for this register: iN for integers, f32/f64 for floats,
// <size x i8> for vectors.
237 pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
239 RegKind::Integer => Type::ix(ccx, self.size.bits()),
241 match self.size.bits() {
242 32 => Type::f32(ccx),
243 64 => Type::f64(ccx),
244 _ => bug!("unsupported float: {:?}", self)
248 Type::vector(&Type::i8(ccx), self.size.bytes())
// NOTE(review): elided fragment — the `pub struct Uniform { unit, total }`
// declaration, the `From<Reg>` body (presumably `unit` plus `total:
// unit.size` — confirm against upstream), the `align` body, and several
// lines inside `llvm_type` (the single-unit early return at ~287, the
// `rem_bytes == 0` guard before 294, and the `.collect()` after 301) are
// missing from this view. Also fixed a docstring typo ("entirely registers").
254 /// An argument passed entirely in registers with the
255 /// same kind (e.g. HFA / HVA on PPC64 and AArch64).
256 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
260 /// The total size of the argument, which can be:
261 /// * equal to `unit.size` (one scalar/vector)
262 /// * a multiple of `unit.size` (an array of scalar/vectors)
263 /// * if `unit.kind` is `Integer`, the last element
264 /// can be shorter, i.e. `{ i64, i64, i32 }` for
265 /// 64-bit integers with a total size of 20 bytes
269 impl From<Reg> for Uniform {
270 fn from(unit: Reg) -> Uniform {
279 pub fn align(&self, ccx: &CrateContext) -> Align {
// Builds the LLVM representation: a lone unit, an array of units, or — when
// the total isn't a whole multiple of the unit — a struct whose trailing
// member is a shorter integer covering the remainder.
283 pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
284 let llunit = self.unit.llvm_type(ccx);
286 if self.total <= self.unit.size {
290 let count = self.total.bytes() / self.unit.size.bytes();
291 let rem_bytes = self.total.bytes() % self.unit.size.bytes();
294 return Type::array(&llunit, count);
297 // Only integers can be really split further.
298 assert_eq!(self.unit.kind, RegKind::Integer);
300 let args: Vec<_> = (0..count).map(|_| llunit)
301 .chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
304 Type::struct_(ccx, &args, false)
// Extension trait over `TyLayout` used by the per-target cabi_* modules:
// `is_aggregate` classifies the layout ABI, and `homogeneous_aggregate`
// detects HFA/HVA-style layouts (all fields reduce to one register kind).
// NOTE(review): elided fragment — the Scalar Int arm (~331), the Vector
// result construction (~343-346), the union/array count handling (~356-361),
// the recording/mismatch bodies inside the field loop (~377-386), padding
// bookkeeping (~389-395), and the final `Some(...)`/`None` returns after 397
// are not visible; the control flow described above is inferred only from
// the visible arms and should be confirmed against the full source.
308 pub trait LayoutExt<'tcx> {
309 fn is_aggregate(&self) -> bool;
310 fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
313 impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
314 fn is_aggregate(&self) -> bool {
316 layout::Abi::Uninhabited |
317 layout::Abi::Scalar(_) |
318 layout::Abi::Vector => false,
319 layout::Abi::ScalarPair(..) |
320 layout::Abi::Aggregate { .. } => true
324 fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
326 layout::Abi::Uninhabited => None,
328 // The primitive for this algorithm.
329 layout::Abi::Scalar(ref scalar) => {
330 let kind = match scalar.value {
332 layout::Pointer => RegKind::Integer,
334 layout::F64 => RegKind::Float
342 layout::Abi::Vector => {
344 kind: RegKind::Vector,
349 layout::Abi::ScalarPair(..) |
350 layout::Abi::Aggregate { .. } => {
351 let mut total = Size::from_bytes(0);
352 let mut result = None;
354 let is_union = match self.fields {
355 layout::FieldPlacement::Array { count, .. } => {
// Arrays delegate to their element type.
357 return self.field(ccx, 0).homogeneous_aggregate(ccx);
362 layout::FieldPlacement::Union(_) => true,
363 layout::FieldPlacement::Arbitrary { .. } => false
// Walk the fields in order; any gap before a field (non-union) disqualifies.
366 for i in 0..self.fields.count() {
367 if !is_union && total != self.fields.offset(i) {
371 let field = self.field(ccx, i);
372 match (result, field.homogeneous_aggregate(ccx)) {
373 // The field itself must be a homogeneous aggregate.
374 (_, None) => return None,
375 // If this is the first field, record the unit.
376 (None, Some(unit)) => {
379 // For all following fields, the unit must be the same.
380 (Some(prev_unit), Some(unit)) => {
381 if prev_unit != unit {
387 // Keep track of the offset (without padding).
388 let size = field.size;
// Unions overlay fields, so the running total is a max, not a sum.
390 total = cmp::max(total, size);
396 // There needs to be no padding.
397 if total != self.size {
// NOTE(review): elided fragment — the `CastTarget` variant list (the match
// arms show `Uniform(Uniform)` and `Pair(Reg, Reg)`), the `impl CastTarget`
// header, the Pair arm of `align` (440 shows only the aggregate_align
// operand of what is presumably a `max`-style computation), and the struct
// field list after 451 are not visible in this view.
407 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
408 pub enum CastTarget {
413 impl From<Reg> for CastTarget {
414 fn from(unit: Reg) -> CastTarget {
415 CastTarget::Uniform(Uniform::from(unit))
419 impl From<Uniform> for CastTarget {
420 fn from(uniform: Uniform) -> CastTarget {
421 CastTarget::Uniform(uniform)
// Total size of the cast target; for a pair, the second member is placed at
// the first member's aligned end, then the whole is rounded up.
426 pub fn size(&self, ccx: &CrateContext) -> Size {
428 CastTarget::Uniform(u) => u.total,
429 CastTarget::Pair(a, b) => {
430 (a.size.abi_align(a.align(ccx)) + b.size)
431 .abi_align(self.align(ccx))
436 pub fn align(&self, ccx: &CrateContext) -> Align {
438 CastTarget::Uniform(u) => u.align(ccx),
439 CastTarget::Pair(a, b) => {
440 ccx.data_layout().aggregate_align
447 pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
449 CastTarget::Uniform(u) => u.llvm_type(ccx),
450 CastTarget::Pair(a, b) => {
451 Type::struct_(ccx, &[
// NOTE(review): elided fragment — the `mode: PassMode` field of the struct,
// parts of `new`, the early-return body in `store` (after 559), the `false`
// arm of `is_indirect`, the `next()` closure definition in `store_fn_arg`
// (a closure over `idx` is implied by `next()` calls at 621/624 and
// `*idx as c_uint` at 614), and assorted closing braces are not visible.
// Two docstring typos fixed in place ("for an place" → "for a place").
460 /// Information about how to pass an argument to,
461 /// or return a value from, a function, under some ABI.
463 pub struct ArgType<'tcx> {
464 pub layout: TyLayout<'tcx>,
466 /// Dummy argument, which is emitted before the real argument.
467 pub pad: Option<Reg>,
472 impl<'a, 'tcx> ArgType<'tcx> {
// Fresh ArgType defaults to direct passing with no attributes.
473 fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
477 mode: PassMode::Direct(ArgAttributes::new()),
// Switch this argument to indirect (hidden-pointer) passing. The assert
// enforces that no other adjustment has been applied first.
481 pub fn make_indirect(&mut self) {
482 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
484 // Start with fresh attributes for the pointer.
485 let mut attrs = ArgAttributes::new();
487 // For non-immediate arguments the callee gets its own copy of
488 // the value on the stack, so there are no aliases. It's also
489 // program-invisible so can't possibly capture
490 attrs.set(ArgAttribute::NoAlias)
491 .set(ArgAttribute::NoCapture)
492 .set(ArgAttribute::NonNull);
493 attrs.pointee_size = self.layout.size;
494 // FIXME(eddyb) We should be doing this, but at least on
495 // i686-pc-windows-msvc, it results in wrong stack offsets.
496 // attrs.pointee_align = Some(self.layout.align);
498 self.mode = PassMode::Indirect(attrs);
// Indirect passing plus the LLVM `byval` attribute (caller-copied).
501 pub fn make_indirect_byval(&mut self) {
502 self.make_indirect();
504 PassMode::Indirect(ref mut attrs) => {
505 attrs.set(ArgAttribute::ByVal);
// Request sign/zero extension up to `bits` for narrow scalar integers.
511 pub fn extend_integer_width_to(&mut self, bits: u64) {
512 // Only integers have signedness
513 if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
514 if let layout::Int(i, signed) = scalar.value {
515 if i.size().bits() < bits {
516 if let PassMode::Direct(ref mut attrs) = self.mode {
517 attrs.set(if signed {
// Switch to cast passing; like make_indirect, only valid on a pristine
// Direct mode.
528 pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
529 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
530 self.mode = PassMode::Cast(target.into());
533 pub fn pad_with(&mut self, reg: Reg) {
534 self.pad = Some(reg);
537 pub fn is_indirect(&self) -> bool {
539 PassMode::Indirect(_) => true,
544 pub fn is_ignore(&self) -> bool {
545 self.mode == PassMode::Ignore
548 /// Get the LLVM type for a place of the original Rust type of
549 /// this argument/return, i.e. the result of `type_of::type_of`.
550 pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
551 self.layout.llvm_type(ccx)
554 /// Store a direct/indirect value described by this ArgType into a
555 /// place for the original Rust type of this argument/return.
556 /// Can be used for both storing formal arguments into Rust variables
557 /// or results of call/invoke instructions into their destinations.
558 pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
559 if self.is_ignore() {
563 if self.is_indirect() {
564 OperandValue::Ref(val, Alignment::AbiAligned).store(bcx, dst)
565 } else if let PassMode::Cast(cast) = self.mode {
566 // FIXME(eddyb): Figure out when the simpler Store is safe, clang
567 // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
// NOTE(review): hard-coded false — the direct-store fast path below is
// intentionally dead pending the FIXME above.
568 let can_store_through_cast_ptr = false;
569 if can_store_through_cast_ptr {
570 let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to());
571 bcx.store(val, cast_dst, Some(self.layout.align));
573 // The actual return type is a struct, but the ABI
574 // adaptation code has cast it into some scalar type. The
575 // code that follows is the only reliable way I have
576 // found to do a transform like i64 -> {i32,i32}.
577 // Basically we dump the data onto the stack then memcpy it.
579 // Other approaches I tried:
580 // - Casting rust ret pointer to the foreign type and using Store
581 // is (a) unsafe if size of foreign type > size of rust type and
582 // (b) runs afoul of strict aliasing rules, yielding invalid
583 // assembly under -O (specifically, the store gets removed).
584 // - Truncating foreign type to correct integral type and then
585 // bitcasting to the struct type yields invalid cast errors.
587 // We instead thus allocate some scratch space...
588 let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx));
589 let scratch_size = cast.size(ccx);
590 bcx.lifetime_start(llscratch, scratch_size);
592 // ...where we first store the value...
593 bcx.store(val, llscratch, None);
595 // ...and then memcpy it to the intended destination.
596 base::call_memcpy(bcx,
597 bcx.pointercast(dst.llval, Type::i8p(ccx)),
598 bcx.pointercast(llscratch, Type::i8p(ccx)),
599 C_usize(ccx, self.layout.size.bytes()),
600 self.layout.align.min(cast.align(ccx)));
602 bcx.lifetime_end(llscratch, scratch_size);
605 OperandValue::Immediate(val).store(bcx, dst);
// Reads this argument's LLVM formal parameter(s) (advancing `*idx`) and
// stores them into `dst`. Pairs consume two parameters.
609 pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
610 if self.pad.is_some() {
614 let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
619 PassMode::Ignore => {},
620 PassMode::Pair(..) => {
621 OperandValue::Pair(next(), next()).store(bcx, dst);
623 PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
624 self.store(bcx, next(), dst);
// NOTE(review): elided fragment — fields between `ret` (641) and `cconv`
// (645) are missing; `unadjusted` below constructs a `variadic` field
// (line 859), so at least that field exists on the elided lines.
630 /// Metadata describing how the arguments to a native function
631 /// should be passed in order to respect the native ABI.
633 /// I will do my best to describe this structure, but these
634 /// comments are reverse-engineered and may be inaccurate. -NDM
636 pub struct FnType<'tcx> {
637 /// The LLVM types of each argument.
638 pub args: Vec<ArgType<'tcx>>,
640 /// LLVM return type.
641 pub ret: ArgType<'tcx>,
645 pub cconv: llvm::CallConv
// NOTE(review): elided fragment — return-type annotations (~650), the
// `fn_ty` returns after `adjust_for_abi`, the vtable-pointer removal
// between 669 and 671 (671 takes `args[0]`, so the elided code presumably
// drops/ignores the vtable argument — confirm upstream), and the
// `builtin_deref` success arm between 679 and 681 are not visible.
648 impl<'a, 'tcx> FnType<'tcx> {
// Builds the FnType for a monomorphized instance: resolve its fn signature,
// erase late-bound regions, then go through `FnType::new`.
649 pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>)
651 let fn_ty = instance_ty(ccx.tcx(), &instance);
652 let sig = ty_fn_sig(ccx, fn_ty);
653 let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
654 FnType::new(ccx, sig, &[])
// Unadjusted layout + per-target ABI adjustment.
657 pub fn new(ccx: &CrateContext<'a, 'tcx>,
658 sig: ty::FnSig<'tcx>,
659 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
660 let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
661 fn_ty.adjust_for_abi(ccx, sig.abi);
// Variant for virtual calls: the fat `self` pointer (data ptr + vtable ptr
// pair) is reduced to just the data pointer before ABI adjustment.
665 pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
666 sig: ty::FnSig<'tcx>,
667 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
668 let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
669 // Don't pass the vtable, it's not an argument of the virtual fn.
671 let self_arg = &mut fn_ty.args[0];
672 match self_arg.mode {
673 PassMode::Pair(data_ptr, _) => {
674 self_arg.mode = PassMode::Direct(data_ptr);
676 _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg)
679 let pointee = self_arg.layout.ty.builtin_deref(true, ty::NoPreference)
681 bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
// Re-layout `self` as a thin `*mut pointee` (field 0 of the fat pointer).
683 let fat_ptr_ty = ccx.tcx().mk_mut_ptr(pointee);
684 self_arg.layout = ccx.layout_of(fat_ptr_ty).field(ccx, 0);
686 fn_ty.adjust_for_abi(ccx, sig.abi);
// Builds the target-independent FnType: picks the LLVM calling convention,
// untuples "rust-call" arguments, decides ZST-ignoring, and attaches Rust
// pointer/scalar attributes (NonNull/NoAlias/ReadOnly/ZExt...) per argument.
// NOTE(review): heavily elided — the `use self::Abi::*`-style import before
// 696, several match/closure arms (extra_args tuple binding ~727-734,
// rust_abi `false` arm ~748, closure params `offset`/`is_return` ~755-756,
// valid-range wraparound handling ~771-773, `no_alias` application guard
// ~798-802), the `arg_of` tail, and the final struct literal around 855-860
// are all partially missing. Comments below stick to visible lines.
690 pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
691 sig: ty::FnSig<'tcx>,
692 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
693 debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
// Map the (target-adjusted) source ABI to an LLVM calling convention.
696 let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
697 RustIntrinsic | PlatformIntrinsic |
698 Rust | RustCall => llvm::CCallConv,
700 // It's the ABI's job to select this, not us.
701 System => bug!("system abi should be selected elsewhere"),
703 Stdcall => llvm::X86StdcallCallConv,
704 Fastcall => llvm::X86FastcallCallConv,
705 Vectorcall => llvm::X86_VectorCall,
706 Thiscall => llvm::X86_ThisCall,
707 C => llvm::CCallConv,
708 Unadjusted => llvm::CCallConv,
709 Win64 => llvm::X86_64_Win64,
710 SysV64 => llvm::X86_64_SysV,
711 Aapcs => llvm::ArmAapcsCallConv,
712 PtxKernel => llvm::PtxKernel,
713 Msp430Interrupt => llvm::Msp430Intr,
714 X86Interrupt => llvm::X86_Intr,
716 // These API constants ought to be more specific...
717 Cdecl => llvm::CCallConv,
// "rust-call" functions take a trailing tuple; split it into real arguments.
720 let mut inputs = sig.inputs();
721 let extra_args = if sig.abi == RustCall {
722 assert!(!sig.variadic && extra_args.is_empty());
724 match sig.inputs().last().unwrap().sty {
725 ty::TyTuple(ref tupled_arguments, _) => {
726 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
730 bug!("argument to function with \"rust-call\" ABI \
735 assert!(sig.variadic || extra_args.is_empty());
// Targets that do NOT ignore zero-sized arguments (see arg_of below).
739 let target = &ccx.sess().target.target;
740 let win_x64_gnu = target.target_os == "windows"
741 && target.arch == "x86_64"
742 && target.target_env == "gnu";
743 let linux_s390x = target.target_os == "linux"
744 && target.arch == "s390x"
745 && target.target_env == "gnu";
746 let rust_abi = match sig.abi {
747 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
751 // Handle safe Rust thin and fat pointers.
752 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
753 scalar: &layout::Scalar,
754 layout: TyLayout<'tcx>,
757 // Booleans are always an i1 that needs to be zero-extended.
758 if scalar.is_bool() {
759 attrs.set(ArgAttribute::ZExt);
763 // Only pointer types handled below.
764 if scalar.value != layout::Pointer {
// A valid range starting above 0 proves the pointer is never null.
768 if scalar.valid_range.start < scalar.valid_range.end {
769 if scalar.valid_range.start > 0 {
770 attrs.set(ArgAttribute::NonNull);
774 if let Some(pointee) = layout.pointee_info_at(ccx, offset) {
775 if let Some(kind) = pointee.safe {
776 attrs.pointee_size = pointee.size;
777 attrs.pointee_align = Some(pointee.align);
779 // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
780 // with align attributes, and those calls later block optimizations.
782 attrs.pointee_align = None;
785 // `Box` pointer parameters never alias because ownership is transferred
786 // `&mut` pointer parameters never alias other parameters,
787 // or mutable global data
789 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
790 // and can be marked as both `readonly` and `noalias`, as
791 // LLVM's definition of `noalias` is based solely on memory
792 // dependencies rather than pointer equality
793 let no_alias = match kind {
794 PointerKind::Shared => false,
795 PointerKind::UniqueOwned => true,
796 PointerKind::Frozen |
797 PointerKind::UniqueBorrowed => !is_return
800 attrs.set(ArgAttribute::NoAlias);
803 if kind == PointerKind::Frozen && !is_return {
804 attrs.set(ArgAttribute::ReadOnly);
// Per-argument (and return) classification.
810 let arg_of = |ty: Ty<'tcx>, is_return: bool| {
811 let mut arg = ArgType::new(ccx.layout_of(ty));
812 if arg.layout.is_zst() {
813 // For some forsaken reason, x86_64-pc-windows-gnu
814 // doesn't ignore zero-sized struct arguments.
815 // The same is true for s390x-unknown-linux-gnu.
816 if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
817 arg.mode = PassMode::Ignore;
821 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
822 if !is_return && rust_abi {
823 if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
824 let mut a_attrs = ArgAttributes::new();
825 let mut b_attrs = ArgAttributes::new();
826 adjust_for_rust_scalar(&mut a_attrs,
831 adjust_for_rust_scalar(&mut b_attrs,
// The second scalar's offset: first scalar's size rounded up to the
// second's alignment.
834 a.value.size(ccx).abi_align(b.value.align(ccx)),
836 arg.mode = PassMode::Pair(a_attrs, b_attrs);
841 if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
842 if let PassMode::Direct(ref mut attrs) = arg.mode {
843 adjust_for_rust_scalar(attrs,
855 ret: arg_of(sig.output(), true),
856 args: inputs.iter().chain(extra_args.iter()).map(|ty| {
859 variadic: sig.variadic,
// Post-processes the unadjusted FnType for the given ABI: Rust-family ABIs
// get the built-in small-aggregate-as-integer fixup; everything else is
// dispatched to the per-architecture cabi_* module.
// NOTE(review): elided fragment — the `abi: Abi` parameter line (~866), the
// non-aggregate arms after 875, the make_indirect/cast calls that presumably
// sit around 881 and 888-890, the `fixup(arg)` loop body and early `return`
// closing the Rust-ABI branch (~894-899), and the match-arm scaffolding for
// "x86" (~903) are not visible; don't assume exact control flow from this
// listing alone.
864 fn adjust_for_abi(&mut self,
865 ccx: &CrateContext<'a, 'tcx>,
867 if abi == Abi::Unadjusted { return }
869 if abi == Abi::Rust || abi == Abi::RustCall ||
870 abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
871 let fixup = |arg: &mut ArgType<'tcx>| {
872 if arg.is_ignore() { return; }
874 match arg.layout.abi {
875 layout::Abi::Aggregate { .. } => {}
879 let size = arg.layout.size;
// Aggregates wider than a pointer go indirect (body elided).
880 if size > layout::Pointer.size(ccx) {
883 // We want to pass small aggregates as immediates, but using
884 // a LLVM aggregate type for this leads to bad optimizations,
885 // so we pick an appropriately sized integer type instead.
887 kind: RegKind::Integer,
892 fixup(&mut self.ret);
893 for arg in &mut self.args {
// Indirect returns carry LLVM's `sret` marker.
896 if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
897 attrs.set(ArgAttribute::StructRet);
// Non-Rust ABIs: delegate to the target-specific classifier.
902 match &ccx.sess().target.target.arch[..] {
904 let flavor = if abi == Abi::Fastcall {
905 cabi_x86::Flavor::Fastcall
907 cabi_x86::Flavor::General
909 cabi_x86::compute_abi_info(ccx, self, flavor);
911 "x86_64" => if abi == Abi::SysV64 {
912 cabi_x86_64::compute_abi_info(ccx, self);
913 } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
914 cabi_x86_win64::compute_abi_info(self);
916 cabi_x86_64::compute_abi_info(ccx, self);
918 "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
919 "arm" => cabi_arm::compute_abi_info(ccx, self),
920 "mips" => cabi_mips::compute_abi_info(ccx, self),
921 "mips64" => cabi_mips64::compute_abi_info(ccx, self),
922 "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
923 "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
924 "s390x" => cabi_s390x::compute_abi_info(ccx, self),
925 "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
926 "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
927 "msp430" => cabi_msp430::compute_abi_info(self),
928 "sparc" => cabi_sparc::compute_abi_info(ccx, self),
929 "sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
930 "nvptx" => cabi_nvptx::compute_abi_info(self),
931 "nvptx64" => cabi_nvptx64::compute_abi_info(self),
932 "hexagon" => cabi_hexagon::compute_abi_info(self),
933 a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
936 if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
937 attrs.set(ArgAttribute::StructRet);
// Assembles the LLVM function type from the per-argument pass modes.
// An indirect return becomes a leading pointer parameter (sret style);
// Pair arguments expand to two parameters; indirect arguments become
// pointers to the memory type.
// NOTE(review): elided fragment — the Indirect-return arm's `Type::void`
// result (~952-953), the Pair `continue` (~968-969), and the variadic
// condition line before 977 are not visible.
941 pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
942 let mut llargument_tys = Vec::new();
944 let llreturn_ty = match self.ret.mode {
945 PassMode::Ignore => Type::void(ccx),
946 PassMode::Direct(_) | PassMode::Pair(..) => {
947 self.ret.layout.immediate_llvm_type(ccx)
949 PassMode::Cast(cast) => cast.llvm_type(ccx),
950 PassMode::Indirect(_) => {
951 llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
956 for arg in &self.args {
// Padding register, if requested, is emitted before the argument itself.
958 if let Some(ty) = arg.pad {
959 llargument_tys.push(ty.llvm_type(ccx));
962 let llarg_ty = match arg.mode {
963 PassMode::Ignore => continue,
964 PassMode::Direct(_) => arg.layout.immediate_llvm_type(ccx),
965 PassMode::Pair(..) => {
966 llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 0));
967 llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 1));
970 PassMode::Cast(cast) => cast.llvm_type(ccx),
971 PassMode::Indirect(_) => arg.memory_ty(ccx).ptr_to(),
973 llargument_tys.push(llarg_ty);
977 Type::variadic_func(&llargument_tys, &llreturn_ty)
979 Type::func(&llargument_tys, &llreturn_ty)
// Two parallel walkers that attach the computed ArgAttributes either to a
// function declaration (`apply_attrs_llfn`) or to a specific call/invoke
// instruction (`apply_attrs_callsite`). The return value's Direct attrs go
// on the ReturnValue place; everything else (including the sret pointer of
// an Indirect return, and padding slots) goes on successive Argument slots.
// NOTE(review): elided fragment — the `let mut i = 0;` / `i += 1;` counter
// implied by `Argument(i)` in the `apply` closures, the `match arg.mode`
// headers before 1001/1031, the Pair arm bodies (1004-.../1034-... apply
// `a` then `b`), and this method's tail past line 1043 are not visible;
// this view ends mid-method.
983 pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
985 let mut apply = |attrs: &ArgAttributes| {
986 attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
989 match self.ret.mode {
990 PassMode::Direct(ref attrs) => {
991 attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
993 PassMode::Indirect(ref attrs) => apply(attrs),
996 for arg in &self.args {
// Padding slots consume an argument index with empty attributes.
997 if arg.pad.is_some() {
998 apply(&ArgAttributes::new());
1001 PassMode::Ignore => {}
1002 PassMode::Direct(ref attrs) |
1003 PassMode::Indirect(ref attrs) => apply(attrs),
1004 PassMode::Pair(ref a, ref b) => {
// Cast arguments still consume an index, with no extra attributes.
1008 PassMode::Cast(_) => apply(&ArgAttributes::new()),
1013 pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
1015 let mut apply = |attrs: &ArgAttributes| {
1016 attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
1019 match self.ret.mode {
1020 PassMode::Direct(ref attrs) => {
1021 attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
1023 PassMode::Indirect(ref attrs) => apply(attrs),
1026 for arg in &self.args {
1027 if arg.pad.is_some() {
1028 apply(&ArgAttributes::new());
1031 PassMode::Ignore => {}
1032 PassMode::Direct(ref attrs) |
1033 PassMode::Indirect(ref attrs) => apply(attrs),
1034 PassMode::Pair(ref a, ref b) => {
1038 PassMode::Cast(_) => apply(&ArgAttributes::new()),
// Non-default calling conventions must also be stamped on the call site,
// or LLVM treats the call as C convention and miscompiles.
1042 if self.cconv != llvm::CCallConv {
1043 llvm::SetInstructionCallConv(callsite, self.cconv);