1 // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef, AttributePlace};
14 use common::{ty_fn_sig, C_usize};
15 use context::CodegenCx;
34 use mir::place::PlaceRef;
35 use mir::operand::OperandValue;
37 use type_of::{LayoutLlvmExt, PointerKind};
39 use rustc::ty::{self, Ty};
40 use rustc::ty::layout::{self, Align, Size, TyLayout};
41 use rustc::ty::layout::{HasDataLayout, LayoutOf};
46 pub use syntax::abi::Abi;
47 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
// PassMode — how a single argument or return value is lowered at the LLVM level.
// NOTE(review): this excerpt is line-sampled; the enum header (`pub enum PassMode {`),
// the `Ignore` and `Cast(..)` variant declarations, and the closing brace fall on
// lines not visible here — confirm against the full file before editing.
49 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
51 /// Ignore the argument (useful for empty struct).
53 /// Pass the argument directly.
54 Direct(ArgAttributes),
55 /// Pass a pair's elements directly in two arguments.
56 Pair(ArgAttributes, ArgAttributes),
57 /// Pass the argument after casting it, to either
58 /// a single uniform or a pair of registers.
60 /// Pass the argument indirectly via a hidden pointer.
61 Indirect(ArgAttributes),
// ArgAttribute — bitfield of the llvm::Attribute subset usable on arguments,
// kept as plain bits so it can be manipulated without calling into LLVM.
// NOTE(review): sampled excerpt — the `mod attr_impl` / `bitflags!` wrapper lines
// and some flag constants (e.g. bits 0 and 5, presumably ByVal/SExt given the
// list passed to for_each_kind! below) are not visible here.
64 // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
66 pub use self::attr_impl::ArgAttribute;
68 #[allow(non_upper_case_globals)]
71 // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
74 pub struct ArgAttribute: u16 {
76 const NoAlias = 1 << 1;
77 const NoCapture = 1 << 2;
78 const NonNull = 1 << 3;
79 const ReadOnly = 1 << 4;
81 const StructRet = 1 << 6;
// for_each_kind! expands to one `contains` test per listed flag, invoking `$f`
// with the corresponding llvm::Attribute for every flag that is set.
88 macro_rules! for_each_kind {
89 ($flags: ident, $f: ident, $($kind: ident),+) => ({
90 $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
// Applies `f` to every set flag, in the fixed order listed below.
95 fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
96 for_each_kind!(self, f,
97 ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
// ArgAttributes — the `regular` flag bitfield plus pointee size/alignment used
// to emit LLVM `dereferenceable`/`align` attributes without touching LLVM state.
101 /// A compact representation of LLVM attributes (at least those relevant for this module)
102 /// that can be manipulated without interacting with LLVM's Attribute machinery.
103 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
104 pub struct ArgAttributes {
105 regular: ArgAttribute,
// The `pointee_size` field declaration falls on a line not visible in this
// sampled excerpt; `new` below initializes it to 0 bytes.
107 pointee_align: Option<Align>
113 regular: ArgAttribute::default(),
114 pointee_size: Size::from_bytes(0),
// Builder-style setter: ORs `attr` into the bitfield and returns `&mut self`
// so calls can be chained (as in ArgType::make_indirect).
119 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
120 self.regular = self.regular | attr;
124 pub fn contains(&self, attr: ArgAttribute) -> bool {
125 self.regular.contains(attr)
// Attach these attributes to slot `idx` of function declaration `llfn`.
// When NonNull is set, LLVM's `dereferenceable` is added (the
// `dereferenceable_or_null` call below presumably covers the non-NonNull
// branch — the `else` line itself is not visible in this excerpt), then
// NonNull is stripped before the remaining flags are applied one by one.
128 pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
129 let mut regular = self.regular;
131 let deref = self.pointee_size.bytes();
133 if regular.contains(ArgAttribute::NonNull) {
134 llvm::LLVMRustAddDereferenceableAttr(llfn,
138 llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
142 regular -= ArgAttribute::NonNull;
144 if let Some(align) = self.pointee_align {
145 llvm::LLVMRustAddAlignmentAttr(llfn,
149 regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
// Call-site twin of apply_llfn: same structure, but uses the *CallSiteAttr
// LLVM entry points against a call/invoke instruction instead of a declaration.
153 pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
154 let mut regular = self.regular;
156 let deref = self.pointee_size.bytes();
158 if regular.contains(ArgAttribute::NonNull) {
159 llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
163 llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
167 regular -= ArgAttribute::NonNull;
169 if let Some(align) = self.pointee_align {
170 llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
174 regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
// Reg / RegKind — one machine register class (Integer / Float / Vector) plus a
// size; Uniform — an argument passed entirely in registers of one kind.
// NOTE(review): sampled excerpt — the RegKind enum body and the `Reg { kind,
// size }` struct declaration fall on lines not visible here.
178 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
185 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
// reg_ctor! stamps out a shorthand constructor (e.g. Reg::i32()) for a given
// register kind and bit width.
191 macro_rules! reg_ctor {
192 ($name:ident, $kind:ident, $bits:expr) => {
193 pub fn $name() -> Reg {
195 kind: RegKind::$kind,
196 size: Size::from_bits($bits)
203 reg_ctor!(i8, Integer, 8);
204 reg_ctor!(i16, Integer, 16);
205 reg_ctor!(i32, Integer, 32);
206 reg_ctor!(i64, Integer, 64);
208 reg_ctor!(f32, Float, 32);
209 reg_ctor!(f64, Float, 64);
// ABI alignment of this register on the current target, read from its data
// layout. Integer sizes are bucketed up to the next power-of-two integer
// alignment; anything over 128 bits is a compiler bug (`bug!`).
213 pub fn align(&self, cx: &CodegenCx) -> Align {
214 let dl = cx.data_layout();
216 RegKind::Integer => {
217 match self.size.bits() {
219 2...8 => dl.i8_align,
220 9...16 => dl.i16_align,
221 17...32 => dl.i32_align,
222 33...64 => dl.i64_align,
223 65...128 => dl.i128_align,
224 _ => bug!("unsupported integer: {:?}", self)
228 match self.size.bits() {
231 _ => bug!("unsupported float: {:?}", self)
234 RegKind::Vector => dl.vector_align(self.size)
// LLVM type of this register: iN for integers, f32/f64 for floats (the Float
// match arms are on lines not visible here), and a <size x i8> vector for
// RegKind::Vector.
238 pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
240 RegKind::Integer => Type::ix(cx, self.size.bits()),
242 match self.size.bits() {
245 _ => bug!("unsupported float: {:?}", self)
249 Type::vector(&Type::i8(cx), self.size.bytes())
255 /// An argument passed entirely registers with the
256 /// same kind (e.g. HFA / HVA on PPC64 and AArch64).
257 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
261 /// The total size of the argument, which can be:
262 /// * equal to `unit.size` (one scalar/vector)
263 /// * a multiple of `unit.size` (an array of scalar/vectors)
264 /// * if `unit.kind` is `Integer`, the last element
265 /// can be shorter, i.e. `{ i64, i64, i32 }` for
266 /// 64-bit integers with a total size of 20 bytes
// A lone Reg is the degenerate Uniform (total == unit.size — body not visible).
270 impl From<Reg> for Uniform {
271 fn from(unit: Reg) -> Uniform {
// Alignment of a Uniform — presumably delegates to its unit register; the
// method body is not visible in this excerpt.
280 pub fn align(&self, cx: &CodegenCx) -> Align {
// LayoutExt — layout queries used by the per-target ABI classification code.
285 pub trait LayoutExt<'tcx> {
286 fn is_aggregate(&self) -> bool;
287 fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg>;
290 impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
// "Aggregate" here means anything that is not a single scalar or vector;
// note that ScalarPair counts as an aggregate.
291 fn is_aggregate(&self) -> bool {
293 layout::Abi::Uninhabited |
294 layout::Abi::Scalar(_) |
295 layout::Abi::Vector { .. } => false,
296 layout::Abi::ScalarPair(..) |
297 layout::Abi::Aggregate { .. } => true
// Returns Some(unit) when this layout is a homogeneous aggregate: every leaf
// field classifies to the same register kind with no inter-field padding
// (the HFA/HVA-style classification consumed by Uniform users).
301 fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg> {
303 layout::Abi::Uninhabited => None,
305 // The primitive for this algorithm.
306 layout::Abi::Scalar(ref scalar) => {
307 let kind = match scalar.value {
// Pointers classify as Integer; F64 as Float. The Int/F32 arms are on
// lines not visible in this sampled excerpt.
309 layout::Pointer => RegKind::Integer,
311 layout::F64 => RegKind::Float
// A vector is itself a single homogeneous unit.
319 layout::Abi::Vector { .. } => {
321 kind: RegKind::Vector,
326 layout::Abi::ScalarPair(..) |
327 layout::Abi::Aggregate { .. } => {
328 let mut total = Size::from_bytes(0);
329 let mut result = None;
// Arrays defer to their element type; unions overlap all fields at
// offset 0, so the per-field offset check is skipped for them.
331 let is_union = match self.fields {
332 layout::FieldPlacement::Array { count, .. } => {
334 return self.field(cx, 0).homogeneous_aggregate(cx);
339 layout::FieldPlacement::Union(_) => true,
340 layout::FieldPlacement::Arbitrary { .. } => false
343 for i in 0..self.fields.count() {
// A field not starting exactly at the running total means padding
// (the bail-out line itself is not visible in this excerpt).
344 if !is_union && total != self.fields.offset(i) {
348 let field = self.field(cx, i);
349 match (result, field.homogeneous_aggregate(cx)) {
350 // The field itself must be a homogeneous aggregate.
351 (_, None) => return None,
352 // If this is the first field, record the unit.
353 (None, Some(unit)) => {
356 // For all following fields, the unit must be the same.
357 (Some(prev_unit), Some(unit)) => {
358 if prev_unit != unit {
364 // Keep track of the offset (without padding).
365 let size = field.size;
// max(): union branch — the largest member determines the size (the
// non-union accumulation branch is on lines not visible here).
367 total = cmp::max(total, size);
373 // There needs to be no padding.
374 if total != self.size {
// CastTarget — the type an argument is cast to under PassMode::Cast: up to 8
// equally-sized prefix register chunks followed by a Uniform "rest".
384 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
385 pub struct CastTarget {
386 pub prefix: [Option<RegKind>; 8],
387 pub prefix_chunk: Size,
// Conversions: a bare Reg goes through Uniform; a Uniform becomes a CastTarget
// with an empty prefix (prefix initializer lines not visible in this excerpt).
391 impl From<Reg> for CastTarget {
392 fn from(unit: Reg) -> CastTarget {
393 CastTarget::from(Uniform::from(unit))
397 impl From<Uniform> for CastTarget {
398 fn from(uniform: Uniform) -> CastTarget {
401 prefix_chunk: Size::from_bytes(0),
// Two-register cast target: `a` as the single occupied prefix chunk, `b` as
// the rest.
408 pub fn pair(a: Reg, b: Reg) -> CastTarget {
410 prefix: [Some(a.kind), None, None, None, None, None, None, None],
411 prefix_chunk: a.size,
412 rest: Uniform::from(b)
// Total size: occupied prefix chunks, aligned up to the rest's alignment,
// plus the rest's total size.
416 pub fn size(&self, cx: &CodegenCx) -> Size {
417 (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
418 .abi_align(self.rest.align(cx)) + self.rest.total
// Alignment: fold of each prefix register's natural alignment over
// max(aggregate_align, rest alignment).
421 pub fn align(&self, cx: &CodegenCx) -> Align {
423 .filter_map(|x| x.map(|kind| Reg { kind: kind, size: self.prefix_chunk }.align(cx)))
424 .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
425 |acc, align| acc.max(align))
// Build the LLVM type for this cast target. With no prefix it simplifies to a
// single unit (total <= unit size) or to an array when the units tile evenly;
// otherwise an LLVM struct of prefix chunks + rest units, with a trailing iN
// for any remainder bytes — only Integer units may be split, hence the assert.
428 pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
429 let rest_ll_unit = self.rest.unit.llvm_type(cx);
430 let rest_count = self.rest.total.bytes() / self.rest.unit.size.bytes();
431 let rem_bytes = self.rest.total.bytes() % self.rest.unit.size.bytes();
433 if self.prefix.iter().all(|x| x.is_none()) {
434 // Simplify to a single unit when there is no prefix and size <= unit size
435 if self.rest.total <= self.rest.unit.size {
439 // Simplify to array when all chunks are the same size and type
441 return Type::array(&rest_ll_unit, rest_count);
445 // Create list of fields in the main structure
446 let mut args: Vec<_> =
447 self.prefix.iter().flat_map(|option_kind| option_kind.map(
448 |kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx)))
449 .chain((0..rest_count).map(|_| rest_ll_unit))
452 // Append final integer
454 // Only integers can be really split further.
455 assert_eq!(self.rest.unit.kind, RegKind::Integer);
456 args.push(Type::ix(cx, rem_bytes * 8));
459 Type::struct_(cx, &args, false)
463 /// Information about how to pass an argument to,
464 /// or return a value from, a function, under some ABI.
466 pub struct ArgType<'tcx> {
467 pub layout: TyLayout<'tcx>,
469 /// Dummy argument, which is emitted before the real argument.
470 pub pad: Option<Reg>,
475 impl<'a, 'tcx> ArgType<'tcx> {
// Fresh ArgType: no padding, and the default Direct mode with empty attributes
// (which the asserts in make_indirect/cast_to below rely on).
476 fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
480 mode: PassMode::Direct(ArgAttributes::new()),
// Switch this argument to indirect passing via a hidden pointer. Only legal
// from the pristine Direct mode (asserted), so each ArgType is adjusted once.
484 pub fn make_indirect(&mut self) {
485 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
487 // Start with fresh attributes for the pointer.
488 let mut attrs = ArgAttributes::new();
490 // For non-immediate arguments the callee gets its own copy of
491 // the value on the stack, so there are no aliases. It's also
492 // program-invisible so can't possibly capture
493 attrs.set(ArgAttribute::NoAlias)
494 .set(ArgAttribute::NoCapture)
495 .set(ArgAttribute::NonNull);
496 attrs.pointee_size = self.layout.size;
497 // FIXME(eddyb) We should be doing this, but at least on
498 // i686-pc-windows-msvc, it results in wrong stack offsets.
499 // attrs.pointee_align = Some(self.layout.align);
501 self.mode = PassMode::Indirect(attrs);
// Indirect passing with LLVM's `byval`: caller-made stack copy passed by
// pointer (used by some C ABIs, e.g. x86).
504 pub fn make_indirect_byval(&mut self) {
505 self.make_indirect();
507 PassMode::Indirect(ref mut attrs) => {
508 attrs.set(ArgAttribute::ByVal);
// Ask LLVM to sign-/zero-extend a small scalar integer up to `bits` wide,
// according to the integer's signedness. No-op for non-integers and for
// already-wide-enough values.
514 pub fn extend_integer_width_to(&mut self, bits: u64) {
515 // Only integers have signedness
516 if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
517 if let layout::Int(i, signed) = scalar.value {
518 if i.size().bits() < bits {
519 if let PassMode::Direct(ref mut attrs) = self.mode {
520 attrs.set(if signed {
// Switch to Cast mode; like make_indirect, only legal from pristine Direct.
531 pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
532 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
533 self.mode = PassMode::Cast(target.into());
536 pub fn pad_with(&mut self, reg: Reg) {
537 self.pad = Some(reg);
540 pub fn is_indirect(&self) -> bool {
542 PassMode::Indirect(_) => true,
547 pub fn is_ignore(&self) -> bool {
548 self.mode == PassMode::Ignore
551 /// Get the LLVM type for a place of the original Rust type of
552 /// this argument/return, i.e. the result of `type_of::type_of`.
553 pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
554 self.layout.llvm_type(cx)
557 /// Store a direct/indirect value described by this ArgType into a
558 /// place for the original Rust type of this argument/return.
559 /// Can be used for both storing formal arguments into Rust variables
560 /// or results of call/invoke instructions into their destinations.
561 pub fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
562 if self.is_ignore() {
// Indirect: `val` is already a pointer to the data — copy it by reference.
566 if self.is_indirect() {
567 OperandValue::Ref(val, self.layout.align).store(bx, dst)
568 } else if let PassMode::Cast(cast) = self.mode {
569 // FIXME(eddyb): Figure out when the simpler Store is safe, clang
570 // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
571 let can_store_through_cast_ptr = false;
572 if can_store_through_cast_ptr {
573 let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
574 bx.store(val, cast_dst, self.layout.align);
576 // The actual return type is a struct, but the ABI
577 // adaptation code has cast it into some scalar type. The
578 // code that follows is the only reliable way I have
579 // found to do a transform like i64 -> {i32,i32}.
580 // Basically we dump the data onto the stack then memcpy it.
582 // Other approaches I tried:
583 // - Casting rust ret pointer to the foreign type and using Store
584 // is (a) unsafe if size of foreign type > size of rust type and
585 // (b) runs afoul of strict aliasing rules, yielding invalid
586 // assembly under -O (specifically, the store gets removed).
587 // - Truncating foreign type to correct integral type and then
588 // bitcasting to the struct type yields invalid cast errors.
590 // We instead thus allocate some scratch space...
591 let scratch_size = cast.size(cx);
592 let scratch_align = cast.align(cx);
593 let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
594 bx.lifetime_start(llscratch, scratch_size);
596 // ...where we first store the value...
597 bx.store(val, llscratch, scratch_align);
599 // ...and then memcpy it to the intended destination.
600 base::call_memcpy(bx,
601 bx.pointercast(dst.llval, Type::i8p(cx)),
602 bx.pointercast(llscratch, Type::i8p(cx)),
603 C_usize(cx, self.layout.size.bytes()),
604 self.layout.align.min(scratch_align));
606 bx.lifetime_end(llscratch, scratch_size);
// Plain Direct: store the immediate. (Pair handling, if any, is on lines
// not visible in this excerpt — see store_fn_arg below.)
609 OperandValue::Immediate(val).store(bx, dst);
// Consume LLVM formal parameter(s) starting at `*idx` and store them into
// `dst`, advancing `*idx`. Pair mode consumes two parameters via `next()`
// (the `next` closure and its idx-increment lines are not visible here;
// presumably next() wraps llvm::get_param + *idx += 1 — confirm in full file).
613 pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
615 let val = llvm::get_param(bx.llfn(), *idx as c_uint);
620 PassMode::Ignore => {},
621 PassMode::Pair(..) => {
622 OperandValue::Pair(next(), next()).store(bx, dst);
624 PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
625 self.store(bx, next(), dst);
631 /// Metadata describing how the arguments to a native function
632 /// should be passed in order to respect the native ABI.
634 /// I will do my best to describe this structure, but these
635 /// comments are reverse-engineered and may be inaccurate. -NDM
637 pub struct FnType<'tcx> {
638 /// The LLVM types of each argument.
639 pub args: Vec<ArgType<'tcx>>,
641 /// LLVM return type.
642 pub ret: ArgType<'tcx>,
// The `variadic` field (used by llvm_type below) is declared on a line not
// visible in this excerpt.
646 pub cconv: llvm::CallConv
649 impl<'a, 'tcx> FnType<'tcx> {
// FnType for a monomorphized instance: recover the signature from the
// instance's type, erase late-bound regions, no extra (variadic) args.
650 pub fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
652 let fn_ty = instance.ty(cx.tcx);
653 let sig = ty_fn_sig(cx, fn_ty);
654 let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
655 FnType::new(cx, sig, &[])
// Standard construction: ABI-independent classification, then per-target
// adjustment.
658 pub fn new(cx: &CodegenCx<'a, 'tcx>,
659 sig: ty::FnSig<'tcx>,
660 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
661 let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
662 fn_ty.adjust_for_abi(cx, sig.abi);
// Variant for virtual (vtable) calls: `self` arrives as a fat pointer
// (data, vtable) Pair, but the callee only receives the data half — so the
// mode is collapsed to Direct(data_ptr) and the layout is replaced by the
// thin-pointer layout (field 0 of a *mut pointee).
666 pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
667 sig: ty::FnSig<'tcx>,
668 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
669 let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
670 // Don't pass the vtable, it's not an argument of the virtual fn.
672 let self_arg = &mut fn_ty.args[0];
673 match self_arg.mode {
674 PassMode::Pair(data_ptr, _) => {
675 self_arg.mode = PassMode::Direct(data_ptr);
677 _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg)
680 let pointee = self_arg.layout.ty.builtin_deref(true)
682 bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
684 let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee);
685 self_arg.layout = cx.layout_of(fat_ptr_ty).field(cx, 0);
687 fn_ty.adjust_for_abi(cx, sig.abi);
// ABI-independent FnType construction: pick the LLVM calling convention from
// the (target-adjusted) declared ABI, flatten the "rust-call" tupled argument,
// then classify return + each argument into an initial PassMode (Ignore for
// ZSTs on most targets, Pair for Rust scalar pairs, Direct otherwise) with
// Rust-specific pointer/bool attributes applied.
691 pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
692 sig: ty::FnSig<'tcx>,
693 extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
694 debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
// Map each source-level ABI to an LLVM calling convention constant.
697 let cconv = match cx.sess().target.target.adjust_abi(sig.abi) {
698 RustIntrinsic | PlatformIntrinsic |
699 Rust | RustCall => llvm::CCallConv,
701 // It's the ABI's job to select this, not us.
702 System => bug!("system abi should be selected elsewhere"),
704 Stdcall => llvm::X86StdcallCallConv,
705 Fastcall => llvm::X86FastcallCallConv,
706 Vectorcall => llvm::X86_VectorCall,
707 Thiscall => llvm::X86_ThisCall,
708 C => llvm::CCallConv,
709 Unadjusted => llvm::CCallConv,
710 Win64 => llvm::X86_64_Win64,
711 SysV64 => llvm::X86_64_SysV,
712 Aapcs => llvm::ArmAapcsCallConv,
713 PtxKernel => llvm::PtxKernel,
714 Msp430Interrupt => llvm::Msp430Intr,
715 X86Interrupt => llvm::X86_Intr,
717 // These API constants ought to be more specific...
718 Cdecl => llvm::CCallConv,
// "rust-call" functions carry their arguments as one trailing tuple; split
// that tuple back out into individual extra_args.
721 let mut inputs = sig.inputs();
722 let extra_args = if sig.abi == RustCall {
723 assert!(!sig.variadic && extra_args.is_empty());
725 match sig.inputs().last().unwrap().sty {
726 ty::TyTuple(ref tupled_arguments, _) => {
727 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
731 bug!("argument to function with \"rust-call\" ABI \
736 assert!(sig.variadic || extra_args.is_empty());
// Targets whose C ABI does NOT ignore zero-sized arguments (see arg_of below).
740 let target = &cx.sess().target.target;
741 let win_x64_gnu = target.target_os == "windows"
742 && target.arch == "x86_64"
743 && target.target_env == "gnu";
744 let linux_s390x = target.target_os == "linux"
745 && target.arch == "s390x"
746 && target.target_env == "gnu";
747 let rust_abi = match sig.abi {
748 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
752 // Handle safe Rust thin and fat pointers.
753 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
754 scalar: &layout::Scalar,
755 layout: TyLayout<'tcx>,
// (the `offset` and `is_return` closure parameters fall on lines not
// visible in this excerpt)
758 // Booleans are always an i1 that needs to be zero-extended.
759 if scalar.is_bool() {
760 attrs.set(ArgAttribute::ZExt);
764 // Only pointer types handled below.
765 if scalar.value != layout::Pointer {
// A valid range excluding 0 lets us assert NonNull to LLVM.
769 if scalar.valid_range.start < scalar.valid_range.end {
770 if scalar.valid_range.start > 0 {
771 attrs.set(ArgAttribute::NonNull);
// For provably-safe pointers, record pointee size/align and derive
// noalias/readonly from the pointer kind.
775 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
776 if let Some(kind) = pointee.safe {
777 attrs.pointee_size = pointee.size;
778 attrs.pointee_align = Some(pointee.align);
780 // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
781 // with align attributes, and those calls later block optimizations.
783 attrs.pointee_align = None;
786 // `Box` pointer parameters never alias because ownership is transferred
787 // `&mut` pointer parameters never alias other parameters,
788 // or mutable global data
790 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
791 // and can be marked as both `readonly` and `noalias`, as
792 // LLVM's definition of `noalias` is based solely on memory
793 // dependencies rather than pointer equality
794 let no_alias = match kind {
795 PointerKind::Shared => false,
796 PointerKind::UniqueOwned => true,
797 PointerKind::Frozen |
798 PointerKind::UniqueBorrowed => !is_return
801 attrs.set(ArgAttribute::NoAlias);
804 if kind == PointerKind::Frozen && !is_return {
805 attrs.set(ArgAttribute::ReadOnly);
// Classify one input (or, with is_return=true, the return value).
811 let arg_of = |ty: Ty<'tcx>, is_return: bool| {
812 let mut arg = ArgType::new(cx.layout_of(ty));
813 if arg.layout.is_zst() {
814 // For some forsaken reason, x86_64-pc-windows-gnu
815 // doesn't ignore zero-sized struct arguments.
816 // The same is true for s390x-unknown-linux-gnu.
817 if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
818 arg.mode = PassMode::Ignore;
822 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
823 if !is_return && rust_abi {
824 if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
825 let mut a_attrs = ArgAttributes::new();
826 let mut b_attrs = ArgAttributes::new();
827 adjust_for_rust_scalar(&mut a_attrs,
// (remaining call arguments are on lines not visible here; the second
// call's offset is the b-element offset computed below)
832 adjust_for_rust_scalar(&mut b_attrs,
835 a.value.size(cx).abi_align(b.value.align(cx)),
837 arg.mode = PassMode::Pair(a_attrs, b_attrs);
// Single scalars in Direct mode also get the Rust attribute treatment.
842 if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
843 if let PassMode::Direct(ref mut attrs) = arg.mode {
844 adjust_for_rust_scalar(attrs,
// Assemble: classified return, then declared inputs chained with extra_args.
856 ret: arg_of(sig.output(), true),
857 args: inputs.iter().chain(extra_args.iter()).map(|ty| {
860 variadic: sig.variadic,
// Per-target/ABI fixup pass over the unadjusted FnType. Rust-family ABIs get
// our own policy (below); every other ABI dispatches to the matching per-arch
// cabi_* module; in both paths an indirect return is finally tagged StructRet.
865 fn adjust_for_abi(&mut self,
866 cx: &CodegenCx<'a, 'tcx>,
// Abi::Unadjusted means "leave the classification exactly as computed".
868 if abi == Abi::Unadjusted { return }
870 if abi == Abi::Rust || abi == Abi::RustCall ||
871 abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
872 let fixup = |arg: &mut ArgType<'tcx>| {
873 if arg.is_ignore() { return; }
875 match arg.layout.abi {
876 layout::Abi::Aggregate { .. } => {}
878 // This is a fun case! The gist of what this is doing is
879 // that we want callers and callees to always agree on the
880 // ABI of how they pass SIMD arguments. If we were to *not*
881 // make these arguments indirect then they'd be immediates
882 // in LLVM, which means that they'd used whatever the
883 // appropriate ABI is for the callee and the caller. That
884 // means, for example, if the caller doesn't have AVX
885 // enabled but the callee does, then passing an AVX argument
886 // across this boundary would cause corrupt data to show up.
888 // This problem is fixed by unconditionally passing SIMD
889 // arguments through memory between callers and callees
890 // which should get them all to agree on ABI regardless of
891 // target feature sets. Some more information about this
892 // issue can be found in #44367.
894 // Note that the platform intrinsic ABI is exempt here as
895 // that's how we connect up to LLVM and it's unstable
896 // anyway, we control all calls to it in libstd.
897 layout::Abi::Vector { .. } if abi != Abi::PlatformIntrinsic => {
// Aggregates larger than a pointer go indirect; smaller ones are cast
// to a suitably sized integer (the make_indirect/cast_to call lines
// fall on lines not visible in this excerpt).
905 let size = arg.layout.size;
906 if size > layout::Pointer.size(cx) {
909 // We want to pass small aggregates as immediates, but using
910 // a LLVM aggregate type for this leads to bad optimizations,
911 // so we pick an appropriately sized integer type instead.
913 kind: RegKind::Integer,
918 fixup(&mut self.ret);
919 for arg in &mut self.args {
// Indirect Rust-ABI returns use LLVM's sret convention.
922 if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
923 attrs.set(ArgAttribute::StructRet);
// Non-Rust ABIs: delegate classification to the architecture-specific module.
928 match &cx.sess().target.target.arch[..] {
930 let flavor = if abi == Abi::Fastcall {
931 cabi_x86::Flavor::Fastcall
933 cabi_x86::Flavor::General
935 cabi_x86::compute_abi_info(cx, self, flavor);
// x86_64 picks SysV vs Win64 by explicit ABI first, then by target OS.
937 "x86_64" => if abi == Abi::SysV64 {
938 cabi_x86_64::compute_abi_info(cx, self);
939 } else if abi == Abi::Win64 || cx.sess().target.target.options.is_like_windows {
940 cabi_x86_win64::compute_abi_info(self);
942 cabi_x86_64::compute_abi_info(cx, self);
944 "aarch64" => cabi_aarch64::compute_abi_info(cx, self),
945 "arm" => cabi_arm::compute_abi_info(cx, self),
946 "mips" => cabi_mips::compute_abi_info(cx, self),
947 "mips64" => cabi_mips64::compute_abi_info(cx, self),
948 "powerpc" => cabi_powerpc::compute_abi_info(cx, self),
949 "powerpc64" => cabi_powerpc64::compute_abi_info(cx, self),
950 "s390x" => cabi_s390x::compute_abi_info(cx, self),
951 "asmjs" => cabi_asmjs::compute_abi_info(cx, self),
// wasm32 on emscripten reuses the asmjs classification.
953 if cx.sess().opts.target_triple.contains("emscripten") {
954 cabi_asmjs::compute_abi_info(cx, self)
956 cabi_wasm32::compute_abi_info(cx, self)
959 "msp430" => cabi_msp430::compute_abi_info(self),
960 "sparc" => cabi_sparc::compute_abi_info(cx, self),
961 "sparc64" => cabi_sparc64::compute_abi_info(cx, self),
962 "nvptx" => cabi_nvptx::compute_abi_info(self),
963 "nvptx64" => cabi_nvptx64::compute_abi_info(self),
964 "hexagon" => cabi_hexagon::compute_abi_info(self),
965 a => cx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
// Same sret tagging for the non-Rust-ABI path.
968 if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
969 attrs.set(ArgAttribute::StructRet);
// Build the LLVM function type matching this FnType: an Indirect return
// becomes a leading out-pointer parameter, Pair arguments expand to two
// parameters, and pad registers are inserted before their argument.
973 pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
974 let mut llargument_tys = Vec::new();
976 let llreturn_ty = match self.ret.mode {
977 PassMode::Ignore => Type::void(cx),
978 PassMode::Direct(_) | PassMode::Pair(..) => {
979 self.ret.layout.immediate_llvm_type(cx)
981 PassMode::Cast(cast) => cast.llvm_type(cx),
// Indirect return: push the sret pointer as the first parameter; the
// function itself then returns void (the `Type::void(cx)` line is not
// visible in this excerpt — confirm in the full file).
982 PassMode::Indirect(_) => {
983 llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
988 for arg in &self.args {
// Dummy pad register precedes the real argument (see ArgType::pad).
990 if let Some(ty) = arg.pad {
991 llargument_tys.push(ty.llvm_type(cx));
994 let llarg_ty = match arg.mode {
995 PassMode::Ignore => continue,
996 PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
// Pair pushes both scalar halves itself and skips the single push below.
997 PassMode::Pair(..) => {
998 llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0));
999 llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1));
1002 PassMode::Cast(cast) => cast.llvm_type(cx),
1003 PassMode::Indirect(_) => arg.memory_ty(cx).ptr_to(),
1005 llargument_tys.push(llarg_ty);
1009 Type::variadic_func(&llargument_tys, &llreturn_ty)
1011 Type::func(&llargument_tys, &llreturn_ty)
// Attach every argument's attributes to a function *declaration*. The slot
// counter `i` used by `apply` is declared on a line not visible in this
// excerpt (presumably initialized before the closure and incremented per
// slot — confirm in the full file).
1015 pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
1017 let mut apply = |attrs: &ArgAttributes| {
1018 attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
// Direct returns attach to the return-value place; Indirect returns are a
// real (pointer) parameter, so they go through `apply` like any argument.
1021 match self.ret.mode {
1022 PassMode::Direct(ref attrs) => {
1023 attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
1025 PassMode::Indirect(ref attrs) => apply(attrs),
1028 for arg in &self.args {
// Pad slots consume an argument position but carry no attributes.
1029 if arg.pad.is_some() {
1030 apply(&ArgAttributes::new());
1033 PassMode::Ignore => {}
1034 PassMode::Direct(ref attrs) |
1035 PassMode::Indirect(ref attrs) => apply(attrs),
// Pair applies each half's attributes to its own slot (the two apply
// calls fall on lines not visible in this excerpt).
1036 PassMode::Pair(ref a, ref b) => {
1040 PassMode::Cast(_) => apply(&ArgAttributes::new()),
// Call-site twin of apply_attrs_llfn: same traversal against a call/invoke
// instruction, plus overriding the instruction's calling convention when it
// differs from the default C convention.
1045 pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
1047 let mut apply = |attrs: &ArgAttributes| {
1048 attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
1051 match self.ret.mode {
1052 PassMode::Direct(ref attrs) => {
1053 attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
1055 PassMode::Indirect(ref attrs) => apply(attrs),
1058 for arg in &self.args {
1059 if arg.pad.is_some() {
1060 apply(&ArgAttributes::new());
1063 PassMode::Ignore => {}
1064 PassMode::Direct(ref attrs) |
1065 PassMode::Indirect(ref attrs) => apply(attrs),
1066 PassMode::Pair(ref a, ref b) => {
1070 PassMode::Cast(_) => apply(&ArgAttributes::new()),
1074 if self.cconv != llvm::CCallConv {
1075 llvm::SetInstructionCallConv(callsite, self.cconv);