// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef, AttributePlace};
use base;
use builder::Builder;
use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_uint};
use context::CrateContext;

use cabi_x86;
use cabi_x86_64;
use cabi_x86_win64;
use cabi_arm;
use cabi_aarch64;
use cabi_powerpc;
use cabi_powerpc64;
use cabi_s390x;
use cabi_mips;
use cabi_mips64;
use cabi_asmjs;
use cabi_msp430;
use cabi_sparc;
use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
use machine::llalign_of_min;
use type_::Type;
use type_of;

use rustc::hir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size};

use libc::c_uint;
use std::{cmp, iter};

pub use syntax::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};

#[derive(Clone, Copy, PartialEq, Debug)]
enum ArgKind {
    /// Pass the argument directly using the normal converted
    /// LLVM type or by coercing to another specified type.
    Direct,
    /// Pass the argument indirectly via a hidden pointer.
    Indirect,
    /// Ignore the argument (useful for empty structs).
    Ignore,
}

// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module.
pub use self::attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
mod attr_impl {
    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    bitflags! {
        #[derive(Default, Debug)]
        flags ArgAttribute : u16 {
            const ByVal     = 1 << 0,
            const NoAlias   = 1 << 1,
            const NoCapture = 1 << 2,
            const NonNull   = 1 << 3,
            const ReadOnly  = 1 << 4,
            const SExt      = 1 << 5,
            const StructRet = 1 << 6,
            const ZExt      = 1 << 7,
            const InReg     = 1 << 8,
        }
    }
}

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

impl ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, Debug, Default)]
pub struct ArgAttributes {
    regular: ArgAttribute,
    dereferenceable_bytes: u64,
}

impl ArgAttributes {
    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular = self.regular | attr;
        self
    }

    pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
        self.dereferenceable_bytes = bytes;
        self
    }

    pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
        unsafe {
            self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
            if self.dereferenceable_bytes != 0 {
                llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                     idx.as_uint(),
                                                     self.dereferenceable_bytes);
            }
        }
    }

    pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
        unsafe {
            self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
            if self.dereferenceable_bytes != 0 {
                llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                             idx.as_uint(),
                                                             self.dereferenceable_bytes);
            }
        }
    }
}

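// Illustrative sketch (not part of the upstream module): attributes are
// accumulated with the builder-style setters above and applied to a function
// or call site afterwards. `llfn` here is a hypothetical LLVM function value.
//
//     let mut attrs = ArgAttributes::default();
//     attrs.set(ArgAttribute::NoAlias)
//          .set(ArgAttribute::NoCapture)
//          .set_dereferenceable(8);
//     attrs.apply_llfn(AttributePlace::Argument(1), llfn);
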
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum RegKind {
    Integer,
    Float,
    Vector
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Reg {
    pub kind: RegKind,
    pub size: Size,
}

macro_rules! reg_ctor {
    ($name:ident, $kind:ident, $bits:expr) => {
        pub fn $name() -> Reg {
            Reg {
                kind: RegKind::$kind,
                size: Size::from_bits($bits)
            }
        }
    }
}

impl Reg {
    reg_ctor!(i8, Integer, 8);
    reg_ctor!(i16, Integer, 16);
    reg_ctor!(i32, Integer, 32);
    reg_ctor!(i64, Integer, 64);

    reg_ctor!(f32, Float, 32);
    reg_ctor!(f64, Float, 64);
}

impl Reg {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        match self.kind {
            RegKind::Integer => Type::ix(ccx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(ccx),
                    64 => Type::f64(ccx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector(&Type::i8(ccx), self.size.bytes())
            }
        }
    }
}

/// An argument passed entirely in registers of the
/// same kind (e.g. HFA / HVA on PPC64 and AArch64).
#[derive(Copy, Clone)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalars/vectors),
    /// * if `unit.kind` is `Integer`, the last element
    ///   can be shorter, i.e. `{ i64, i64, i32 }` for
    ///   64-bit integers with a total size of 20 bytes.
    pub total: Size,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform {
            unit: unit,
            total: unit.size
        }
    }
}

impl Uniform {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        let llunit = self.unit.llvm_type(ccx);

        if self.total <= self.unit.size {
            return llunit;
        }

        let count = self.total.bytes() / self.unit.size.bytes();
        let rem_bytes = self.total.bytes() % self.unit.size.bytes();

        if rem_bytes == 0 {
            return Type::array(&llunit, count);
        }

        // Only integers can be really split further.
        assert_eq!(self.unit.kind, RegKind::Integer);

        let args: Vec<_> = (0..count).map(|_| llunit)
            .chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
            .collect();

        Type::struct_(ccx, &args, false)
    }
}

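// Worked example (illustration only): a `Uniform` of 64-bit integer units
// with a 20-byte total splits into count = 20 / 8 = 2 full units plus
// rem_bytes = 20 % 8 = 4, so `llvm_type` yields `{ i64, i64, i32 }`,
// matching the doc comment on `Uniform::total` above:
//
//     let u = Uniform { unit: Reg::i64(), total: Size::from_bytes(20) };
//     // u.llvm_type(ccx) is the LLVM struct type { i64, i64, i32 }
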
pub trait LayoutExt<'tcx> {
    fn is_aggregate(&self) -> bool;
    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
}

impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
    fn is_aggregate(&self) -> bool {
        match *self.layout {
            Layout::Scalar { .. } |
            Layout::RawNullablePointer { .. } |
            Layout::CEnum { .. } |
            Layout::Vector { .. } => false,

            Layout::Array { .. } |
            Layout::FatPointer { .. } |
            Layout::Univariant { .. } |
            Layout::UntaggedUnion { .. } |
            Layout::General { .. } |
            Layout::StructWrappedNullablePointer { .. } => true
        }
    }

    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
        match *self.layout {
            // The primitives for this algorithm.
            Layout::Scalar { value, .. } |
            Layout::RawNullablePointer { value, .. } => {
                let kind = match value {
                    layout::Int(_) |
                    layout::Pointer => RegKind::Integer,
                    layout::F32 |
                    layout::F64 => RegKind::Float
                };
                Some(Reg {
                    kind: kind,
                    size: self.size(ccx)
                })
            }

            Layout::CEnum { .. } => {
                Some(Reg {
                    kind: RegKind::Integer,
                    size: self.size(ccx)
                })
            }

            Layout::Vector { .. } => {
                Some(Reg {
                    kind: RegKind::Vector,
                    size: self.size(ccx)
                })
            }

            Layout::Array { count, .. } => {
                if count > 0 {
                    self.field(ccx, 0).homogenous_aggregate(ccx)
                } else {
                    None
                }
            }

            Layout::Univariant { ref variant, .. } => {
                let mut unaligned_offset = Size::from_bytes(0);
                let mut result = None;

                for i in 0..self.field_count() {
                    if unaligned_offset != variant.offsets[i] {
                        return None;
                    }

                    let field = self.field(ccx, i);
                    match (result, field.homogenous_aggregate(ccx)) {
                        // The field itself must be a homogenous aggregate.
                        (_, None) => return None,
                        // If this is the first field, record the unit.
                        (None, Some(unit)) => {
                            result = Some(unit);
                        }
                        // For all following fields, the unit must be the same.
                        (Some(prev_unit), Some(unit)) => {
                            if prev_unit != unit {
                                return None;
                            }
                        }
                    }

                    // Keep track of the offset (without padding).
                    let size = field.size(ccx);
                    match unaligned_offset.checked_add(size, ccx) {
                        Some(offset) => unaligned_offset = offset,
                        None => return None
                    }
                }

                // There needs to be no padding.
                if unaligned_offset != self.size(ccx) {
                    None
                } else {
                    result
                }
            }

            Layout::UntaggedUnion { .. } => {
                let mut max = Size::from_bytes(0);
                let mut result = None;

                for i in 0..self.field_count() {
                    let field = self.field(ccx, i);
                    match (result, field.homogenous_aggregate(ccx)) {
                        // The field itself must be a homogenous aggregate.
                        (_, None) => return None,
                        // If this is the first field, record the unit.
                        (None, Some(unit)) => {
                            result = Some(unit);
                        }
                        // For all following fields, the unit must be the same.
                        (Some(prev_unit), Some(unit)) => {
                            if prev_unit != unit {
                                return None;
                            }
                        }
                    }

                    // Keep track of the largest field size (union fields all
                    // start at offset 0).
                    let size = field.size(ccx);
                    if size > max {
                        max = size;
                    }
                }

                // There needs to be no padding.
                if max != self.size(ccx) {
                    None
                } else {
                    result
                }
            }

            // Rust-specific types, which we can ignore for C ABIs.
            Layout::FatPointer { .. } |
            Layout::General { .. } |
            Layout::StructWrappedNullablePointer { .. } => None
        }
    }
}

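// Example (illustration only): for a struct such as
//
//     struct S { a: f64, b: f64, c: f64 }
//
// `homogenous_aggregate` returns `Some(Reg::f64())`: every field reduces to
// the same `Float` unit and the fields cover the full 24-byte size with no
// padding. Targets like AArch64 use this to pass `S` in floating-point
// registers (an HFA). A mismatched unit kind or any interior padding makes
// the function return `None` instead.
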
pub enum CastTarget {
    Uniform(Uniform),
    Pair(Reg, Reg)
}

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::Uniform(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        CastTarget::Uniform(uniform)
    }
}

impl CastTarget {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        match *self {
            CastTarget::Uniform(u) => u.llvm_type(ccx),
            CastTarget::Pair(a, b) => {
                Type::struct_(ccx, &[
                    a.llvm_type(ccx),
                    b.llvm_type(ccx)
                ], false)
            }
        }
    }
}

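// Example (illustration only): `CastTarget::from(Reg::i64())` is a uniform
// covering a single `i64`, while `CastTarget::Pair(Reg::f32(), Reg::f32())`
// lowers to the two-element struct `{ float, float }` used by
// `adjust_for_abi` below for pairs of floats.
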
/// Information about how a specific C type
/// should be passed to or returned from a function.
///
/// This is borrowed from clang's ABIInfo.h.
#[derive(Clone, Copy, Debug)]
pub struct ArgType<'tcx> {
    kind: ArgKind,
    pub layout: TyLayout<'tcx>,
    /// Coerced LLVM Type
    pub cast: Option<Type>,
    /// Dummy argument, which is emitted before the real argument
    pub pad: Option<Type>,
    /// LLVM attributes of argument
    pub attrs: ArgAttributes
}

impl<'a, 'tcx> ArgType<'tcx> {
    fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
        ArgType {
            kind: ArgKind::Direct,
            layout: layout,
            cast: None,
            pad: None,
            attrs: ArgAttributes::default()
        }
    }

    pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
        assert_eq!(self.kind, ArgKind::Direct);

        // Wipe old attributes, likely not valid through indirection.
        self.attrs = ArgAttributes::default();

        let llarg_sz = self.layout.size(ccx).bytes();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible, so it can't possibly capture.
        self.attrs.set(ArgAttribute::NoAlias)
                  .set(ArgAttribute::NoCapture)
                  .set_dereferenceable(llarg_sz);

        self.kind = ArgKind::Indirect;
    }

    pub fn ignore(&mut self) {
        assert_eq!(self.kind, ArgKind::Direct);
        self.kind = ArgKind::Ignore;
    }

    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness.
        let (i, signed) = match *self.layout {
            Layout::Scalar { value, .. } => {
                match value {
                    layout::Int(i) => {
                        if self.layout.ty.is_integral() {
                            (i, self.layout.ty.is_signed())
                        } else {
                            return;
                        }
                    }
                    _ => return
                }
            }

            // Rust enum types that map onto C enums also need to follow
            // the target ABI zero-/sign-extension rules.
            Layout::CEnum { discr, signed, .. } => (discr, signed),

            _ => return
        };

        if i.size().bits() < bits {
            self.attrs.set(if signed {
                ArgAttribute::SExt
            } else {
                ArgAttribute::ZExt
            });
        }
    }

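    // Illustrative use (not from the upstream file): C ABIs that promote
    // small integer arguments to a full register call this with the register
    // width, e.g. `arg.extend_integer_width_to(32)` marks a `u8` argument as
    // `zeroext` and an `i8` argument as `signext`, and leaves a `u32`
    // (already 32 bits wide) unattributed.
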
    pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) {
        self.cast = Some(target.into().llvm_type(ccx));
    }

    pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) {
        self.pad = Some(reg.llvm_type(ccx));
    }

    pub fn is_indirect(&self) -> bool {
        self.kind == ArgKind::Indirect
    }

    pub fn is_ignore(&self) -> bool {
        self.kind == ArgKind::Ignore
    }

    /// Get the LLVM type for an lvalue of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        type_of::type_of(ccx, self.layout.ty)
    }

    /// Store a direct/indirect value described by this ArgType into an
    /// lvalue for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
        if self.is_ignore() {
            return;
        }
        let ccx = bcx.ccx;
        if self.is_indirect() {
            let llsz = C_uint(ccx, self.layout.size(ccx).bytes());
            let llalign = self.layout.align(ccx).abi();
            base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
        } else if let Some(ty) = self.cast {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bcx.pointercast(dst, ty.ptr_to());
                let llalign = self.layout.align(ccx).abi();
                bcx.store(val, cast_dst, Some(llalign as u32));
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We therefore allocate some scratch space...
                let llscratch = bcx.alloca(ty, "abi_cast", None);
                base::Lifetime::Start.call(bcx, llscratch);

                // ...where we first store the value...
                bcx.store(val, llscratch, None);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bcx,
                                  bcx.pointercast(dst, Type::i8p(ccx)),
                                  bcx.pointercast(llscratch, Type::i8p(ccx)),
                                  C_uint(ccx, self.layout.size(ccx).bytes()),
                                  cmp::min(self.layout.align(ccx).abi() as u32,
                                           llalign_of_min(ccx, ty)));

                base::Lifetime::End.call(bcx, llscratch);
            }
        } else {
            if self.layout.ty == ccx.tcx().types.bool {
                val = bcx.zext(val, Type::i8(ccx));
            }
            bcx.store(val, dst, None);
        }
    }

    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
        if self.pad.is_some() {
            *idx += 1;
        }
        if self.is_ignore() {
            return;
        }
        let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
        *idx += 1;
        self.store(bcx, val, dst);
    }
}

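// Illustrative trace (not from the upstream file): for an argument preceded
// by a padding parameter, `store_fn_arg` first advances `idx` past the
// padding, then fetches the real LLVM parameter with `llvm::get_param` and
// forwards it to `store` above, so callers can walk every argument by
// repeatedly passing the same `idx` counter.
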
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, Debug)]
pub struct FnType<'tcx> {
    /// The LLVM types of each argument.
    pub args: Vec<ArgType<'tcx>>,

    /// LLVM return type.
    pub ret: ArgType<'tcx>,

    pub variadic: bool,

    pub cconv: llvm::CallConv
}

impl<'a, 'tcx> FnType<'tcx> {
    pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>)
                       -> Self {
        let fn_ty = instance_ty(ccx.shared(), &instance);
        let sig = ty_fn_sig(ccx, fn_ty);
        let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
        Self::new(ccx, sig, &[])
    }

    pub fn new(ccx: &CrateContext<'a, 'tcx>,
               sig: ty::FnSig<'tcx>,
               extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
        fn_ty.adjust_for_abi(ccx, sig);
        fn_ty
    }

    pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>,
                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
        // Don't pass the vtable, it's not an argument of the virtual fn.
        fn_ty.args[1].ignore();
        fn_ty.adjust_for_abi(ccx, sig);
        fn_ty
    }

    pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>,
                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => llvm::CCallConv,

            // It's the ABI's job to select this, not us.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => llvm::X86StdcallCallConv,
            Fastcall => llvm::X86FastcallCallConv,
            Vectorcall => llvm::X86_VectorCall,
            Thiscall => llvm::X86_ThisCall,
            C => llvm::CCallConv,
            Unadjusted => llvm::CCallConv,
            Win64 => llvm::X86_64_Win64,
            SysV64 => llvm::X86_64_SysV,
            Aapcs => llvm::ArmAapcsCallConv,
            PtxKernel => llvm::PtxKernel,
            Msp430Interrupt => llvm::Msp430Intr,
            X86Interrupt => llvm::X86_Intr,

            // These API constants ought to be more specific...
            Cdecl => llvm::CCallConv,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::TyTuple(ref tupled_arguments, _) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };

        let target = &ccx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
            let mut arg = ArgType::new(ccx.layout_of(ty));
            if ty.is_bool() {
                arg.attrs.set(ArgAttribute::ZExt);
            } else {
                if arg.layout.size(ccx).bytes() == 0 {
                    // For some forsaken reason, x86_64-pc-windows-gnu
                    // doesn't ignore zero-sized struct arguments.
                    // The same is true for s390x-unknown-linux-gnu.
                    if is_return || rust_abi ||
                       (!win_x64_gnu && !linux_s390x) {
                        arg.ignore();
                    }
                }
            }
            arg
        };

        let ret_ty = sig.output();
        let mut ret = arg_of(ret_ty, true);

        if !type_is_fat_ptr(ccx, ret_ty) {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            if ret_ty.is_box() {
                // `Box` pointer return values never alias because ownership
                // is transferred.
                ret.attrs.set(ArgAttribute::NoAlias);
            }

            // We can also mark the return value as `dereferenceable` in certain cases.
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len)
                ty::TyRef(_, ty::TypeAndMut { ty, .. }) => {
                    ret.attrs.set_dereferenceable(ccx.size_of(ty));
                }
                ty::TyAdt(def, _) if def.is_box() => {
                    ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty()));
                }
                _ => {}
            }
        }

        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());

        // Handle safe Rust thin and fat pointers.
        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
            // `Box` pointer parameters never alias because ownership is transferred
            ty::TyAdt(def, _) if def.is_box() => {
                arg.attrs.set(ArgAttribute::NoAlias);
                Some(ty.boxed_ty())
            }

            ty::TyRef(b, mt) => {
                use rustc::ty::{BrAnon, ReLateBound};

                // `&mut` pointer parameters never alias other parameters, or mutable global data
                //
                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
                // on memory dependencies rather than pointer equality
                let is_freeze = ccx.shared().type_is_freeze(mt.ty);

                if mt.mutbl != hir::MutMutable && is_freeze {
                    arg.attrs.set(ArgAttribute::NoAlias);
                }

                if mt.mutbl == hir::MutImmutable && is_freeze {
                    arg.attrs.set(ArgAttribute::ReadOnly);
                }

                // When a reference in an argument has no named lifetime, it's
                // impossible for that reference to escape this function
                // (returned or stored beyond the call by a closure).
                if let ReLateBound(_, BrAnon(_)) = *b {
                    arg.attrs.set(ArgAttribute::NoCapture);
                }

                Some(mt.ty)
            }
            _ => None
        };

        for ty in inputs.iter().chain(extra_args.iter()) {
            let mut arg = arg_of(ty, false);

            if let ty::layout::FatPointer { .. } = *arg.layout {
                let mut data = ArgType::new(arg.layout.field(ccx, 0));
                let mut info = ArgType::new(arg.layout.field(ccx, 1));

                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
                    data.attrs.set(ArgAttribute::NonNull);
                    if ccx.tcx().struct_tail(inner).is_trait() {
                        // vtables can be safely marked non-null, readonly
                        // and noalias.
                        info.attrs.set(ArgAttribute::NonNull);
                        info.attrs.set(ArgAttribute::ReadOnly);
                        info.attrs.set(ArgAttribute::NoAlias);
                    }
                }
                args.push(data);
                args.push(info);
            } else {
                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
                    arg.attrs.set_dereferenceable(ccx.size_of(inner));
                }
                args.push(arg);
            }
        }

        FnType {
            args: args,
            ret: ret,
            variadic: sig.variadic,
            cconv: cconv
        }
    }

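    // Example (illustration only): for `fn f(x: &u32) -> bool`, `unadjusted`
    // produces one argument marked `NoAlias`, `ReadOnly` and
    // `dereferenceable(4)` (a shared reference to a `Freeze` type) and a
    // `ZExt`-annotated boolean return; `adjust_for_abi` below then applies
    // any target-specific coercions.
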
    fn adjust_for_abi(&mut self,
                      ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>) {
        let abi = sig.abi;
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx>| {
                if !arg.layout.is_aggregate() {
                    return;
                }

                let size = arg.layout.size(ccx);

                if let Some(unit) = arg.layout.homogenous_aggregate(ccx) {
                    // Replace newtypes with their inner-most type.
                    if unit.size == size {
                        // Needs a cast as we've unpacked a newtype.
                        arg.cast_to(ccx, unit);
                        return;
                    }

                    if unit.kind == RegKind::Float {
                        if unit.size.checked_mul(2, ccx) == Some(size) {
                            // FIXME(eddyb) This should be using Uniform instead of a pair,
                            // but the resulting [2 x float/double] breaks emscripten.
                            // See https://github.com/kripken/emscripten-fastcomp/issues/178.
                            arg.cast_to(ccx, CastTarget::Pair(unit, unit));
                            return;
                        }
                    }
                }

                if size > layout::Pointer.size(ccx) {
                    arg.make_indirect(ccx);
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(ccx, Reg {
                        kind: RegKind::Integer,
                        size: size
                    });
                }
            };

            // Fat pointers are returned by-value.
            if !self.ret.is_ignore() {
                if !type_is_fat_ptr(ccx, sig.output()) {
                    fixup(&mut self.ret);
                }
            }
            for arg in &mut self.args {
                if arg.is_ignore() { continue; }
                fixup(arg);
            }
            if self.ret.is_indirect() {
                self.ret.attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

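        // Worked example (illustration only): on a 64-bit target, a Rust
        // struct `S { a: u8, b: u16 }` (4 bytes, not a homogenous aggregate
        // because of its interior padding) is cast by `fixup` above to a
        // single `i32`, while a 24-byte struct exceeds pointer size and is
        // passed indirectly instead.
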
        match &ccx.sess().target.target.arch[..] {
            "x86" => {
                let flavor = if abi == Abi::Fastcall {
                    cabi_x86::Flavor::Fastcall
                } else {
                    cabi_x86::Flavor::General
                };
                cabi_x86::compute_abi_info(ccx, self, flavor);
            }
            "x86_64" => if abi == Abi::SysV64 {
                cabi_x86_64::compute_abi_info(ccx, self);
            } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
                cabi_x86_win64::compute_abi_info(ccx, self);
            } else {
                cabi_x86_64::compute_abi_info(ccx, self);
            },
            "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
            "arm" => cabi_arm::compute_abi_info(ccx, self),
            "mips" => cabi_mips::compute_abi_info(ccx, self),
            "mips64" => cabi_mips64::compute_abi_info(ccx, self),
            "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
            "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
            "s390x" => cabi_s390x::compute_abi_info(ccx, self),
            "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
            "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
            "msp430" => cabi_msp430::compute_abi_info(ccx, self),
            "sparc" => cabi_sparc::compute_abi_info(ccx, self),
            "sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
            "nvptx" => cabi_nvptx::compute_abi_info(ccx, self),
            "nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self),
            "hexagon" => cabi_hexagon::compute_abi_info(ccx, self),
            a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
        }

        if self.ret.is_indirect() {
            self.ret.attrs.set(ArgAttribute::StructRet);
        }
    }

    pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        let mut llargument_tys = Vec::new();

        let llreturn_ty = if self.ret.is_ignore() {
            Type::void(ccx)
        } else if self.ret.is_indirect() {
            llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
            Type::void(ccx)
        } else {
            self.ret.cast.unwrap_or_else(|| {
                type_of::immediate_type_of(ccx, self.ret.layout.ty)
            })
        };

        for arg in &self.args {
            if arg.is_ignore() {
                continue;
            }
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty);
            }

            let llarg_ty = if arg.is_indirect() {
                arg.memory_ty(ccx).ptr_to()
            } else {
                arg.cast.unwrap_or_else(|| {
                    type_of::immediate_type_of(ccx, arg.layout.ty)
                })
            };

            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, &llreturn_ty)
        } else {
            Type::func(&llargument_tys, &llreturn_ty)
        }
    }

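    // Example (illustration only): a function returning a large struct `S`
    // indirectly lowers to `void (%S*, ...)`: the return slot becomes a
    // leading pointer parameter (tagged with `StructRet` above) and the LLVM
    // return type collapses to void.
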
    pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
                i += 1;
            }
        }
    }

    pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
                i += 1;
            }
        }

        if self.cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, self.cconv);
        }
    }
}

pub fn align_up_to(off: u64, a: u64) -> u64 {
    (off + a - 1) / a * a
}

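// A minimal sanity check (added for illustration; not in the upstream file).
// `align_up_to(off, a)` rounds `off` up to the next multiple of `a` via
// integer division: (off + a - 1) / a * a.
#[cfg(test)]
mod align_up_to_tests {
    use super::align_up_to;

    #[test]
    fn rounds_up_to_next_multiple() {
        assert_eq!(align_up_to(0, 8), 0);   // already aligned
        assert_eq!(align_up_to(1, 8), 8);   // rounds up
        assert_eq!(align_up_to(20, 8), 24); // 20 -> next multiple of 8
        assert_eq!(align_up_to(24, 8), 24); // multiples are fixed points
    }
}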