use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use libc::c_uint;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
use rustc_middle::ty::layout::LayoutOf;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}
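
// For reference, `for_each_kind!(flags, f, NoAlias, NonNull)` expands to
// roughly:
//
//     if flags.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
//     if flags.contains(ArgAttribute::NonNull) { f(llvm::Attribute::NonNull) }
//
// i.e., each `ArgAttribute` flag that is set is mapped onto the LLVM
// attribute of the same name.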

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F)
    where
        F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F)
    where
        F: FnMut(llvm::Attribute),
    {
        for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, InReg)
    }
}

pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
    // LLVM prior to version 12 had known miscompiles in the presence of
    // noalias attributes (see #54878), but we don't support earlier
    // versions at all anymore. We now enable mutable noalias by default.
    cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
}
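
// Illustrative example: for `fn f(x: &mut i32, y: &mut i32)`, enabling
// mutable noalias marks both pointer parameters `noalias`, which lets LLVM
// assume that writes through `x` cannot be observed through `y` and vice
// versa.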

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            // Only emit the dereferenceable attributes when the pointee size is known.
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
                llvm::Attribute::NoAlias.apply_llfn(idx, llfn);
            }
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_llfn(idx, llfn);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_llfn(idx, llfn);
                }
            }
        }
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
                        callsite,
                        idx.as_uint(),
                        deref,
                    );
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(
                    callsite,
                    idx.as_uint(),
                    align.bytes() as u32,
                );
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
                llvm::Attribute::NoAlias.apply_callsite(idx, callsite);
            }
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_callsite(idx, callsite);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_callsite(idx, callsite);
                }
            }
        }
    }
}

pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}
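
// Illustrative examples of the `Reg` lowering above: an 8-byte integer
// register becomes `i64`, a 4-byte float register becomes `float`, and a
// 16-byte vector register becomes `<16 x i8>`.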

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }
            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}
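
// Worked example for the `CastTarget` lowering above (illustrative): with no
// prefix and `rest = { unit: i32, total: 12 bytes }`, rest_count = 3 and
// rem_bytes = 0, so the result simplifies to the array `[3 x i32]`. With
// `unit: i64, total: 12 bytes` instead, rest_count = 1 and rem_bytes = 4,
// producing the struct `{ i64, i32 }`.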

pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used to store formal arguments into Rust variables, as well
    /// as results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
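
    // Illustrative example of the cast path above: on x86_64 SysV, an
    // `extern "C"` function returning `#[repr(C)] struct S { a: i32, b: i32 }`
    // is given `PassMode::Cast` to `i64`; the `i64` is stored to the scratch
    // slot and 8 bytes are memcpy'd into the destination place.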

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        // Pull the next LLVM-level parameter off the function, advancing `idx`.
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
            | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}
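
// Note (illustrative): for a `PassMode::Pair` argument such as `&str`, the
// `next` closure above consumes two consecutive LLVM parameters (data pointer
// and length) and recombines them into a single Rust-level operand.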

impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

pub trait FnAbiLlvmExt<'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };

        let args_capacity: usize = args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                    cx.type_ptr_to(arg.memory_ty(cx))
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
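
    // Illustrative example: on a 64-bit target, `fn(&str, u32) -> bool`
    // lowers roughly to the LLVM signature `i1 (i8*, i64, i32)`: the `&str`
    // pair contributes two parameters, and the `bool` return is the
    // immediate `i1`.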

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space.0 as c_uint,
            )
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }
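
    // Note that `Conv::Rust` lowers to the plain C calling convention at the
    // LLVM level; the Rust ABI is expressed through how arguments are
    // classified into `PassMode`s rather than through a distinct `CallConv`.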

    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        // FIXME(eddyb) can this also be applied to callsites?
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

        // FIXME(eddyb, wesleywiser): apply this to callsites as well?
        if !self.can_unwind {
            llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetAttr(
                        llfn,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(cx),
                    );
                }
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    unsafe {
                        llvm::LLVMRustAddByValAttr(
                            llfn,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(cx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(cast) => {
                    apply(&cast.attrs);
                }
            }
        }
    }
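
    // Illustrative example: a function returning a large aggregate such as
    // `[u64; 8]` gets `PassMode::Indirect` for its return value, so its first
    // LLVM parameter is a pointer to the return slot, tagged `sret` above.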

    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite);
        }
        if !self.can_unwind {
            llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite);
        }

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetCallSiteAttr(
                        callsite,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(bx),
                    );
                }
            }
            PassMode::Cast(cast) => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.value {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range);
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(bx.cx, &ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    unsafe {
                        llvm::LLVMRustAddByValCallSiteAttr(
                            callsite,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(bx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect {
                    ref attrs,
                    extra_attrs: Some(ref extra_attrs),
                    on_stack: _,
                } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast(cast) => {
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            unsafe {
                llvm::AddCallSiteAttrString(
                    callsite,
                    llvm::AttributePlace::Function,
                    cstr::cstr!("cmse_nonsecure_call"),
                );
            }
        }
    }
}
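
// Attributes are applied both to function declarations (`apply_attrs_llfn`)
// and to individual callsites (`apply_attrs_callsite`) so the information is
// still available where LLVM cannot see a declaration, e.g. indirect calls.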

impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}