use crate::llvm::{self, AttributePlace};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::value::Value;
use crate::type_of::LayoutLlvmExt;
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_target::abi::call::ArgType;

use rustc_codegen_ssa::traits::*;

use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout::{self, PointerKind};

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

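// Expands to one `contains` check per listed flag, invoking `$f` with the
// matching `llvm::Attribute` for every `ArgAttribute` bit that is set.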
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

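/// Applies an argument's `ArgAttributes` (dereferenceable/nonnull, pointee
/// alignment, and the regular flag set) either to a function declaration or
/// to an individual call site.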
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
                        callsite, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(),
                                                       align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

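/// Lowering from an ABI register/cast description (`Reg`, `CastTarget`) to
/// the concrete LLVM type used at the function boundary.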
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self)
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

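// A `CastTarget` lowers to an optional prefix of registers followed by a
// homogeneous "rest" area, with any remainder packed into a final integer.
// For example, with no prefix, 8-byte integer units, and 20 bytes total,
// rest_count is 2 and rem_bytes is 4, giving the struct `{ i64, i64, i32 }`.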
impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

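/// LLVM-specific helpers for storing an `ArgType`'s value, as passed by the
/// ABI, back into a memory place of the original Rust type.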
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty()
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore(_) => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

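// Thin forwarding impl so that backend-agnostic code in `rustc_codegen_ssa`
// can reach the `ArgTypeExt` helpers above through the shared
// `ArgTypeMethods` interface.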
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
    ) {
        ty.store_fn_arg(self, idx, dst)
    }
    fn store_arg_ty(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>
    ) {
        ty.store(self, val, dst)
    }
    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
        ty.memory_ty(self)
    }
}

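/// LLVM-specific extensions to `FnType`: ABI adjustment for the Rust calling
/// conventions, lowering to an LLVM function type, and application of
/// calling convention and attributes to declarations and call sites.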
pub trait FnTypeExt<'tcx> {
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'ll, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM; it's unstable anyway,
                    // and we control all calls to it in libstd.
                    layout::Abi::Vector { .. }
                        if abi != Abi::PlatformIntrinsic &&
                            cx.sess().target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore(IgnoreMode::Zst) => cx.type_void(),
            PassMode::Ignore(IgnoreMode::CVarArgs) =>
                bug!("`va_list` should never be a return type"),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore(_) => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    // An unsized indirect argument is passed as a fat pointer,
                    // split into its data and extra (vtable/length) halves.
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

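    /// Applies argument attributes to the declaration `llfn`. The counter in
    /// `apply` tracks the *LLVM* argument index, which advances by one per
    /// LLVM argument; a single Rust-level argument may occupy two of them
    /// (scalar pairs, or indirect arguments with extra data).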
    fn apply_attrs_llfn(&self, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore(_) => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

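    /// Call-site counterpart of `apply_attrs_llfn`; additionally attaches
    /// range metadata for scalar returns with a known valid range, and sets
    /// a non-C calling convention on the call instruction when needed.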
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore(_) => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}

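// Entry points through which the backend-agnostic `rustc_codegen_ssa` crate
// constructs `FnType`s for plain signatures, vtable calls, and instances.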
impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new(&self, sig, extra_args)
    }
    fn new_vtable(
        &self,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>]
    ) -> FnType<'tcx, Ty<'tcx>> {
        FnType::new_vtable(&self, sig, extra_args)
    }
    fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
        FnType::of_instance(&self, instance)
    }
}

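// Builder-level hooks: call-site attribute application and raw LLVM
// parameter access, forwarded to the helpers above.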
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(
        &mut self,
        ty: &FnType<'tcx, Ty<'tcx>>,
        callsite: Self::Value
    ) {
        ty.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}