use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc::ty::layout::{self};
use rustc::ty::Ty;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_target::abi::call::ArgAbi;
use rustc_target::abi::{HasDataLayout, LayoutOf};

use libc::c_uint;

pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;
pub use rustc_target::spec::abi::Abi;
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}
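// A hypothetical expansion, for illustration only:
// `for_each_kind!(flags, f, NoAlias, ZExt)` expands to roughly
//     if flags.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
//     if flags.contains(ArgAttribute::ZExt) { f(llvm::Attribute::ZExt) }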
trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F)
    where
        F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F)
    where
        F: FnMut(llvm::Attribute),
    {
        for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>);
}
impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>) {
        let mut regular = self.regular;
        unsafe {
            // Attributes derived from the pointee size and alignment are
            // handled separately from the regular attribute flags.
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
            }
            if regular.contains(ArgAttribute::ByVal) {
                llvm::LLVMRustAddByValAttr(llfn, idx.as_uint(), ty.unwrap());
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(), align.bytes() as u32);
            }
            if regular.contains(ArgAttribute::ByVal) {
                llvm::LLVMRustAddByValCallSiteAttr(callsite, idx.as_uint(), ty.unwrap());
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}
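// For intuition (an illustrative sketch, not exact rustc output): a `&u32`
// argument would typically pick up `nonnull`, `dereferenceable(4)`, `align 4`,
// and `noalias`/`readonly` through the machinery above, both on the function
// declaration and at each call site.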
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}
impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}
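// For illustration (a sketch): an 8-byte integer register lowers to `i64`,
// a 4-byte float register to `float`, and a 16-byte vector register to
// `<16 x i8>`.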
impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size.
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to an array when all chunks are the same size and type.
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create the list of fields in the main structure.
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_kind| {
                option_kind.map(|kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx))
            })
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append the final integer, if any.
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}
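// A worked example (illustrative only): with no prefix, an 8-byte integer
// unit, and a 20-byte total, `rest_count` is 2 and `rem_bytes` is 4, so the
// cast target lowers to the LLVM struct `{ i64, i64, i32 }`.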
pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}
impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }
    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe; clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting the Rust return pointer to the foreign type and
                //   using Store is (a) unsafe if the foreign type is larger
                //   than the Rust type and (b) runs afoul of strict aliasing
                //   rules, yielding invalid assembly under -O (specifically,
                //   the store gets removed).
                // - Truncating the foreign type to the correct integral type
                //   and then bitcasting to the struct type yields invalid
                //   cast errors.

                // We therefore allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
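                // A rough sketch of the IR this path emits, assuming an
                // i64 -> { i32, i32 } cast (names are illustrative only):
                //     %scratch = alloca i64
                //     store i64 %val, i64* %scratch
                //     call void @llvm.memcpy(<dst>, <scratch>, i64 8, ...)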
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        // Fetch the next raw LLVM parameter and advance the index.
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}
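// Note (from the match above): `idx` advances by however many LLVM parameters
// the mode consumes, that is, one for most modes, and two for `PassMode::Pair`
// and for indirect arguments that carry extra data (e.g. the extra half of a
// fat pointer).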
impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }

    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}
pub trait FnAbiLlvmExt<'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}
impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                // The return value is instead passed through a pointer,
                // prepended as the first argument.
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // Add any padding type in front of the argument.
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    // An unsized argument is passed as a (data pointer, extra)
                    // pair, using the layout of the corresponding `*mut T`.
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }
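    // For intuition (a sketch, not checked against real compiler output): a
    // signature like `fn(&[u8]) -> bool`, with the slice passed as a scalar
    // pair, would lower to roughly `i1 (i8*, i64)`: one data pointer plus one
    // length argument.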
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space as c_uint,
            )
        }
    }
    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        // FIXME(eddyb) can this also be applied to callsites?
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(cx))),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new(), None);
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
                    apply(attrs, Some(arg.layout.llvm_type(cx)))
                }
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs, None);
                    apply(extra_attrs, None);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a, None);
                    apply(b, None);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
            }
        }
    }
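    // Hypothetical output, for illustration: a declaration like
    // `fn f(x: &u32) -> !` would typically come out as
    //     declare void @f(i32* noalias nonnull readonly align 4 dereferenceable(4))
    // with `noreturn` among its function attributes.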
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(bx))),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
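        // For intuition (a sketch): a call returning `char`, whose valid range
        // is 0..=0x10FFFF, would get range metadata along the lines of
        //     %r = call i32 @f(), !range !{i32 0, i32 1114112}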
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new(), None);
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
                    apply(attrs, Some(arg.layout.llvm_type(bx)))
                }
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs, None);
                    apply(extra_attrs, None);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a, None);
                    apply(b, None);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}