// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef, AttributePlace};
use base;
use builder::Builder;
use common::{ty_fn_sig, C_usize};
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};

use rustc_target::abi::{LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

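// Illustrative sketch (not part of the original source): for an attribute set
// with `NoAlias | NonNull` set, the `for_each_kind!` expansion above behaves
// roughly like
//
//     if self.contains(ArgAttribute::ByVal) { f(llvm::Attribute::ByVal) }
//     if self.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
//     // ...one check per listed kind, so here only NoAlias and NonNull
//     // actually invoke `f`.
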
pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef);
    fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef);
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(),
                                                       align.abi() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx) -> Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx) -> Type {
        match self.kind {
            RegKind::Integer => Type::ix(cx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(cx),
                    64 => Type::f64(cx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector(&Type::i8(cx), self.size.bytes())
            }
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx) -> Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let rest_count = self.rest.total.bytes() / self.rest.unit.size.bytes();
        let rem_bytes = self.rest.total.bytes() % self.rest.unit.size.bytes();

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return Type::array(&rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(Type::ix(cx, rem_bytes * 8));
        }

        Type::struct_(cx, &args, false)
    }
}

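// Illustrative sketch (not part of the original source): with an empty prefix
// and `rest = { unit: i64, total: 16 bytes }`, the method above simplifies to
// `[2 x i64]`; shrink the total to 12 bytes and the remainder path produces
// `{ i64, i32 }` instead, with the trailing 4 bytes packed into an `i32`.
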
pub trait ArgTypeExt<'a, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
    fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>);
    fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>);
}

impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Get the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
        self.layout.llvm_type(cx)
    }

    /// Store a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
        if self.is_ignore() {
            return;
        }
        let cx = bx.cx;
        if self.is_indirect() {
            OperandValue::Ref(val, self.layout.align).store(bx, dst)
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
                bx.store(val, cast_dst, self.layout.align);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.
                //
                // We instead allocate some scratch space...
                let scratch_size = cast.size(cx);
                let scratch_align = cast.align(cx);
                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bx,
                                  bx.pointercast(dst.llval, Type::i8p(cx)),
                                  bx.pointercast(llscratch, Type::i8p(cx)),
                                  C_usize(cx, self.layout.size.bytes()),
                                  self.layout.align.min(scratch_align));

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

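    // Illustrative sketch (not part of the original source): for a return
    // value cast from `{ i32, i32 }` to `i64`, the fallback path above emits
    // IR shaped like
    //
    //     %scratch = alloca i64
    //     store i64 %val, i64* %scratch
    //     call void @llvm.memcpy(i8* %dst, i8* %scratch, i64 8, ...)
    //
    // i.e. spill the value at its *cast* type, then copy the raw bytes into
    // the destination laid out at the *Rust* type.
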
    fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
            }
        }
    }
}

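// Illustrative sketch (not part of the original source): in `store_fn_arg`
// above, a fat pointer such as `&[u8]` arrives as `PassMode::Pair`, so two
// consecutive LLVM parameters (data pointer, then length) are consumed and
// `idx` advances by 2; a thin `&u8` is `PassMode::Direct` and consumes one.
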
pub trait FnTypeExt<'a, 'tcx> {
    fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
                   -> Self;
    fn new(cx: &CodegenCx<'a, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self;
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'a, 'tcx>,
                      abi: Abi);
    fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, llfn: ValueRef);
    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'tcx>, callsite: ValueRef);
}

impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
                   -> Self {
        let fn_ty = instance.ty(cx.tcx);
        let sig = ty_fn_sig(cx, fn_ty);
        let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        FnType::new(cx, sig, &[])
    }

    fn new(cx: &CodegenCx<'a, 'tcx>,
           sig: ty::FnSig<'tcx>,
           extra_args: &[Ty<'tcx>]) -> Self {
        let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
        // Don't pass the vtable, it's not an argument of the virtual fn.
        {
            let self_arg = &mut fn_ty.args[0];
            match self_arg.mode {
                PassMode::Pair(data_ptr, _) => {
                    self_arg.mode = PassMode::Direct(data_ptr);
                }
                _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg)
            }

            let pointee = self_arg.layout.ty.builtin_deref(true)
                .unwrap_or_else(|| {
                    bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
                }).ty;
            let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee);
            self_arg.layout = cx.layout_of(fat_ptr_ty).field(cx, 0);
        }
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

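    // Illustrative sketch (not part of the original source): for a call
    // through `&dyn SomeTrait`, `self` starts out as the (data pointer,
    // vtable pointer) pair; `new_vtable` above rewrites it so the callee
    // signature takes only the thin data pointer, the vtable half having
    // already been consumed by the caller to look up the method.
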
    fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
                  sig: ty::FnSig<'tcx>,
                  extra_args: &[Ty<'tcx>]) -> Self {
        debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);

        use self::Abi::*;
        let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not us.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::TyTuple(ref tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };

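        // Illustrative sketch (not part of the original source): a closure
        // call carries a signature like `extern "rust-call" fn(&F, (u32, u64))`;
        // the untupling above turns that into the flat argument list
        // `[&F, u32, u64]` before the per-argument lowering below.
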
        let target = &cx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                       && target.arch == "x86_64"
                       && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                       && target.arch == "s390x"
                       && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &layout::Scalar,
                                      layout: TyLayout<'tcx, Ty<'tcx>>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != layout::Pointer {
                return;
            }

            if scalar.valid_range.start < scalar.valid_range.end {
                if scalar.valid_range.start > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
                    // with align attributes, and those calls later block optimizations.
                    if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes {
                        attrs.pointee_align = None;
                    }

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen |
                        PointerKind::UniqueBorrowed => !is_return
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

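        // Illustrative sketch (not part of the original source): under the
        // rules above, a `&T` parameter (no `UnsafeCell`) ends up marked
        // `noalias` + `readonly` + nonnull/dereferenceable, `&mut T` gets
        // `noalias` without `readonly`, and `Box<T>` is `noalias` even in
        // return position, since ownership moves to the caller.
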
        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
            let mut arg = ArgType::new(cx.layout_of(ty));
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout,
                                           Size::from_bytes(0), false);
                    adjust_for_rust_scalar(&mut b_attrs, b, arg.layout,
                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout,
                                           Size::from_bytes(0), is_return);
                }
            }

            arg
        };

        FnType {
            ret: arg_of(sig.output(), true),
            args: inputs.iter().chain(extra_args.iter()).map(|ty| {
                arg_of(ty, false)
            }).collect(),
            variadic: sig.variadic,
            conv,
        }
    }

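    // Illustrative sketch (not part of the original source): given
    // `fn(s: &str, b: bool)`, `unadjusted` produces `PassMode::Pair` for `s`
    // (a nonnull, dereferenceable data pointer plus an unadorned length) and
    // `PassMode::Direct` with `ZExt` for `b`, since booleans are `i1` values
    // zero-extended at the ABI boundary.
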
    fn adjust_for_abi(&mut self,
                      cx: &CodegenCx<'a, 'tcx>,
                      abi: Abi) {
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() { return; }

                match arg.layout.abi {
                    layout::Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    layout::Abi::Vector { .. } if abi != Abi::PlatformIntrinsic => {
                        arg.make_indirect();
                        return
                    }

                    _ => return
                }

                let size = arg.layout.size;
                if size > layout::Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.sess().fatal(&msg);
        }
    }

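    // Illustrative sketch (not part of the original source): for the Rust
    // ABI, the fixup above casts a small aggregate like `[u8; 2]` to an `i16`
    // immediate, forces SIMD vectors indirect regardless of target features,
    // and returns aggregates larger than a pointer (say `[u8; 16]`) through
    // an `sret` out-pointer.
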
    fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
        let mut llargument_tys = Vec::new();

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => Type::void(cx),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(_) => {
                llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
                Type::void(cx)
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_) => arg.memory_ty(cx).ptr_to(),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, &llreturn_ty)
        } else {
            Type::func(&llargument_tys, &llreturn_ty)
        }
    }

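    // Illustrative sketch (not part of the original source): for
    // `fn(&[u8]) -> bool` on a 64-bit target, `llvm_type` yields a signature
    // shaped like `i1 (i8*, i64)`: the Pair arm pushes the fat pointer's two
    // halves as separate parameters, and the bool return lowers to the
    // immediate type `i1`.
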
    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, llfn: ValueRef) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect(ref attrs) => apply(attrs),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs) => apply(attrs),
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &Builder<'a, 'tcx>, callsite: ValueRef) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect(ref attrs) => apply(attrs),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            match scalar.value {
                layout::Int(..) if !scalar.is_bool() => {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        // FIXME(nox): This causes very weird type errors about
                        // SHL operators in constants in stage 2 with LLVM 3.9.
                        if unsafe { llvm::LLVMRustVersionMajor() >= 4 } {
                            bx.range_metadata(callsite, range);
                        }
                    }
                }
                _ => {}
            }
        }

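        // Illustrative sketch (not part of the original source): a call
        // returning `char` has the valid range 0..=0x10FFFF, so (on LLVM 4+)
        // the call instruction gets `!range` metadata equivalent to
        // `!{i32 0, i32 1114112}`, letting LLVM assume the result stays a
        // valid scalar value.
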
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs) => apply(attrs),
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new()),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}