use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

use log::debug;
use rustc_ast as ast;
use rustc_codegen_ssa::base::{compare_simd_types, to_immediate, wants_msvc_seh};
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::coverageinfo;
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_hir as hir;
use rustc_middle::mir::coverage;
use rustc_middle::mir::Operand;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{sym, symbol::kw, Span, Symbol};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
use rustc_target::spec::PanicStrategy;

use std::cmp::Ordering;
use std::iter;
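
/// Maps a Rust float/math intrinsic symbol (e.g. `sqrtf32`) to the name of the
/// LLVM intrinsic that implements it directly, if there is one.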
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
    let llvm_name = match name {
        sym::sqrtf32 => "llvm.sqrt.f32",
        sym::sqrtf64 => "llvm.sqrt.f64",
        sym::powif32 => "llvm.powi.f32",
        sym::powif64 => "llvm.powi.f64",
        sym::sinf32 => "llvm.sin.f32",
        sym::sinf64 => "llvm.sin.f64",
        sym::cosf32 => "llvm.cos.f32",
        sym::cosf64 => "llvm.cos.f64",
        sym::powf32 => "llvm.pow.f32",
        sym::powf64 => "llvm.pow.f64",
        sym::expf32 => "llvm.exp.f32",
        sym::expf64 => "llvm.exp.f64",
        sym::exp2f32 => "llvm.exp2.f32",
        sym::exp2f64 => "llvm.exp2.f64",
        sym::logf32 => "llvm.log.f32",
        sym::logf64 => "llvm.log.f64",
        sym::log10f32 => "llvm.log10.f32",
        sym::log10f64 => "llvm.log10.f64",
        sym::log2f32 => "llvm.log2.f32",
        sym::log2f64 => "llvm.log2.f64",
        sym::fmaf32 => "llvm.fma.f32",
        sym::fmaf64 => "llvm.fma.f64",
        sym::fabsf32 => "llvm.fabs.f32",
        sym::fabsf64 => "llvm.fabs.f64",
        sym::minnumf32 => "llvm.minnum.f32",
        sym::minnumf64 => "llvm.minnum.f64",
        sym::maxnumf32 => "llvm.maxnum.f32",
        sym::maxnumf64 => "llvm.maxnum.f64",
        sym::copysignf32 => "llvm.copysign.f32",
        sym::copysignf64 => "llvm.copysign.f64",
        sym::floorf32 => "llvm.floor.f32",
        sym::floorf64 => "llvm.floor.f64",
        sym::ceilf32 => "llvm.ceil.f32",
        sym::ceilf64 => "llvm.ceil.f64",
        sym::truncf32 => "llvm.trunc.f32",
        sym::truncf64 => "llvm.trunc.f64",
        sym::rintf32 => "llvm.rint.f32",
        sym::rintf64 => "llvm.rint.f64",
        sym::nearbyintf32 => "llvm.nearbyint.f32",
        sym::nearbyintf64 => "llvm.nearbyint.f64",
        sym::roundf32 => "llvm.round.f32",
        sym::roundf64 => "llvm.round.f64",
        sym::assume => "llvm.assume",
        sym::abort => "llvm.trap",
        _ => return None,
    };
    Some(cx.get_intrinsic(&llvm_name))
}

impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn is_codegen_intrinsic(
        &mut self,
        intrinsic: Symbol,
        args: &Vec<Operand<'tcx>>,
        caller_instance: ty::Instance<'tcx>,
    ) -> bool {
        let mut is_codegen_intrinsic = true;
        // Set `is_codegen_intrinsic` to `false` to bypass `codegen_intrinsic_call()`.

        // FIXME(richkadel): Make sure to add coverage analysis tests on a crate with
        // external crate dependencies, where:
        //   1. Both binary and dependent crates are compiled with `-Zinstrument-coverage`
        //   2. Only the binary is compiled with `-Zinstrument-coverage`
        //   3. Only dependent crates are compiled with `-Zinstrument-coverage`
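
        // `count_code_region` is the only coverage intrinsic that is also
        // codegenned (to `llvm.instrprof.increment`); the counter-expression and
        // unreachable variants only record coverage metadata here, so they set
        // `is_codegen_intrinsic` to `false` to skip `codegen_intrinsic_call()`.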
        match intrinsic {
            sym::count_code_region => {
                use coverage::count_code_region_args::*;
                self.add_counter_region(
                    caller_instance,
                    op_to_u64(&args[FUNCTION_SOURCE_HASH]),
                    op_to_u32(&args[COUNTER_ID]),
                    coverageinfo::Region::new(
                        op_to_str_slice(&args[FILE_NAME]),
                        op_to_u32(&args[START_LINE]),
                        op_to_u32(&args[START_COL]),
                        op_to_u32(&args[END_LINE]),
                        op_to_u32(&args[END_COL]),
                    ),
                );
            }
            sym::coverage_counter_add | sym::coverage_counter_subtract => {
                is_codegen_intrinsic = false;
                use coverage::coverage_counter_expression_args::*;
                self.add_counter_expression_region(
                    caller_instance,
                    op_to_u32(&args[EXPRESSION_ID]),
                    op_to_u32(&args[LEFT_ID]),
                    if intrinsic == sym::coverage_counter_add {
                        coverageinfo::ExprKind::Add
                    } else {
                        coverageinfo::ExprKind::Subtract
                    },
                    op_to_u32(&args[RIGHT_ID]),
                    coverageinfo::Region::new(
                        op_to_str_slice(&args[FILE_NAME]),
                        op_to_u32(&args[START_LINE]),
                        op_to_u32(&args[START_COL]),
                        op_to_u32(&args[END_LINE]),
                        op_to_u32(&args[END_COL]),
                    ),
                );
            }
            sym::coverage_unreachable => {
                is_codegen_intrinsic = false;
                use coverage::coverage_unreachable_args::*;
                self.add_unreachable_region(
                    caller_instance,
                    coverageinfo::Region::new(
                        op_to_str_slice(&args[FILE_NAME]),
                        op_to_u32(&args[START_LINE]),
                        op_to_u32(&args[START_COL]),
                        op_to_u32(&args[END_LINE]),
                        op_to_u32(&args[END_COL]),
                    ),
                );
            }
            _ => {}
        }
        is_codegen_intrinsic
    }

    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
        caller_instance: ty::Instance<'tcx>,
    ) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

        let (def_id, substs) = match callee_ty.kind {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => self.call(
                simple.unwrap(),
                &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                None,
            ),
            sym::unreachable => {
                return;
            }
            sym::likely => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
            }
            sym::unlikely => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
            }
            kw::Try => {
                try_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    llresult,
                );
                return;
            }
            sym::breakpoint => {
                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            sym::count_code_region => {
                use coverage::count_code_region_args::*;
                let coverageinfo = tcx.coverageinfo(caller_instance.def_id());

                let fn_name = self.create_pgo_func_name_var(caller_instance);
                let hash = args[FUNCTION_SOURCE_HASH].immediate();
                let num_counters = self.const_u32(coverageinfo.num_counters);
                let index = args[COUNTER_ID].immediate();
                debug!(
                    "translating Rust intrinsic `count_code_region()` to LLVM intrinsic: \
                     instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
                    fn_name, hash, num_counters, index,
                );
                self.instrprof_increment(fn_name, hash, num_counters, index)
            }
            sym::va_start => self.va_start(args[0].immediate()),
            sym::va_end => self.va_end(args[0].immediate()),
            sym::va_copy => {
                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
            }
            sym::va_arg => {
                match fn_abi.ret.layout.abi {
                    abi::Abi::Scalar(ref scalar) => {
                        match scalar.value {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::F64 | Primitive::Pointer => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llsize
                } else {
                    self.const_usize(self.size_of(tp_ty).bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llalign
                } else {
                    self.const_usize(self.align_of(tp_ty).bytes())
                }
            }
            sym::size_of
            | sym::min_align_of
            | sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = self
                    .tcx
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(self, value, ret_ty).immediate_or_packed_pair(self)
            }
            sym::offset => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.inbounds_gep(ptr, &[offset])
            }
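            // `arith_offset` is the wrapping counterpart of `offset`: it emits a
            // plain (non-inbounds) GEP, so out-of-bounds results are defined at
            // the LLVM level rather than UB.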
            sym::arith_offset => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.gep(ptr, &[offset])
            }
            sym::copy_nonoverlapping => {
                copy_intrinsic(
                    self,
                    false,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::copy => {
                copy_intrinsic(
                    self,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    self,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    self,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    self,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    self,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = substs.type_at(0);
                let mut ptr = args[0].immediate();
                if let PassMode::Cast(ty) = fn_abi.ret.mode {
                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
                }
                let load = self.volatile_load(ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    self.align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                to_immediate(self, load, self.layout_of(tp_ty))
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return;
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let expect = self.get_intrinsic(&("llvm.prefetch"));
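                // llvm.prefetch(addr, rw, locality, cache_type): `rw` is 0 for a
                // read and 1 for a write prefetch; `cache_type` is 1 for the data
                // cache and 0 for the instruction cache. The locality hint (0-3)
                // is passed through from the second Rust argument below.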
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                self.call(
                    expect,
                    &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ],
                    None,
                )
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::wrapping_add
            | sym::wrapping_sub
            | sym::wrapping_mul
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, self) {
                    Some((width, signed)) => match name {
                        sym::ctlz | sym::cttz => {
                            let y = self.const_bool(false);
                            let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        sym::ctlz_nonzero | sym::cttz_nonzero => {
                            let y = self.const_bool(true);
                            let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        sym::ctpop => self.call(
                            self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        sym::bswap => {
                            if width == 8 {
                                args[0].immediate() // byte swap a u8/i8 is just a no-op
                            } else {
                                self.call(
                                    self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                    &[args[0].immediate()],
                                    None,
                                )
                            }
                        }
                        sym::bitreverse => self.call(
                            self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let intrinsic = format!(
                                "llvm.{}{}.with.overflow.i{}",
                                if signed { 's' } else { 'u' },
                                &name_str[..3],
                                width
                            );
                            let llfn = self.get_intrinsic(&intrinsic);

                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let pair =
                                self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
                            let val = self.extract_value(pair, 0);
                            let overflow = self.extract_value(pair, 1);
                            let overflow = self.zext(overflow, self.type_bool());

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::wrapping_add => self.add(args[0].immediate(), args[1].immediate()),
                        sym::wrapping_sub => self.sub(args[0].immediate(), args[1].immediate()),
                        sym::wrapping_mul => self.mul(args[0].immediate(), args[1].immediate()),
                        sym::exact_div => {
                            if signed {
                                self.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                self.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                self.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                self.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                self.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                self.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => self.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                self.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                self.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                self.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                self.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                self.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::rotate_left | sym::rotate_right => {
                            let is_left = name == sym::rotate_left;
                            let val = args[0].immediate();
                            let raw_shift = args[1].immediate();
                            // rotate = funnel shift with first two args the same
                            let llvm_name =
                                &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
                            let llfn = self.get_intrinsic(llvm_name);
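                            // e.g. `rotate_left::<u32>(x, n)` lowers to
                            // `llvm.fshl.i32(x, x, n)`.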
                            self.call(llfn, &[val, val, raw_shift], None)
                        }
                        sym::saturating_add | sym::saturating_sub => {
                            let is_add = name == sym::saturating_add;
                            let lhs = args[0].immediate();
                            let rhs = args[1].immediate();
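                            // e.g. `saturating_add::<i16>` selects `llvm.sadd.sat.i16`,
                            // while `saturating_sub::<u8>` selects `llvm.usub.sat.i8`.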
                            let llvm_name = &format!(
                                "llvm.{}{}.sat.i{}",
                                if signed { 's' } else { 'u' },
                                if is_add { "add" } else { "sub" },
                                width
                            );
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[lhs, rhs], None)
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => self.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => self.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => self.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => self.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        tcx.sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
                    Some(pair) => pair,
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `float_to_int_unchecked` \
                                 intrinsic: expected basic integer type, \
                                 found `{}`",
                                ret_ty
                            ),
                        );
                        return;
                    }
                };
                if signed {
                    self.fptosi(args[0].immediate(), self.cx.type_ix(width))
                } else {
                    self.fptoui(args[0].immediate(), self.cx.type_ix(width))
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
                } else {
                    span_bug!(span, "invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            _ if name_str.starts_with("simd_") => {
                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                    Ok(llval) => llval,
                    Err(()) => return,
                }
            }
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
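            // e.g. `atomic_cxchg_acq_failrelaxed` splits into
            // ["atomic", "cxchg", "acq", "failrelaxed"]: a compare-exchange with
            // Acquire ordering on success and Monotonic (relaxed) on failure.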
            name if name_str.starts_with("atomic_") => {
                use rustc_codegen_ssa::common::AtomicOrdering::*;
                use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};

                let split: Vec<&str> = name_str.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        tcx.sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let weak = split[1] == "cxchgweak";
                            let pair = self.atomic_cmpxchg(
                                args[0].immediate(),
                                args[1].immediate(),
                                args[2].immediate(),
                                order,
                                failorder,
                                weak,
                            );
                            let val = self.extract_value(pair, 0);
                            let success = self.extract_value(pair, 1);
                            let success = self.zext(success, self.type_bool());

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_store(
                                args[1].immediate(),
                                args[0].immediate(),
                                order,
                                size,
                            );
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        self.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        self.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => self.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            self.atomic_rmw(
                                atom_op,
                                args[0].immediate(),
                                args[1].immediate(),
                                order,
                            )
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.nontemporal_store(self, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    self.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    self.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from => {
                let ty = substs.type_at(0);
                let pointee_size = self.size_of(ty);

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
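                // e.g. two `*const u16` pointers 6 bytes apart give d = 6, and
                // `exactsdiv(6, 2)` yields an element offset of 3.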
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = self.ptrtoint(a, self.type_isize());
                let b = self.ptrtoint(b, self.type_isize());
                let d = self.sub(a, b);
                let pointee_size = self.const_usize(pointee_size.bytes());
                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                self.exactsdiv(d, pointee_size)
            }

            _ => bug!("unknown intrinsic '{}'", name),
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let fnname = self.get_intrinsic(&("llvm.trap"));
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: Self::Value) {
        let assume_intrinsic = self.get_intrinsic("llvm.assume");
        self.call(assume_intrinsic, &[val], None);
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        let expect = self.get_intrinsic(&"llvm.expect.i1");
        self.call(expect, &[cond, self.const_bool(expected)], None)
    }

    fn sideeffect(&mut self) {
        if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)
    }
}

fn copy_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    src: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

fn memset_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    val: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

fn try_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        bx.call(try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx().data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of the time of this
// writing LLVM does not recommend using them, as the old ones are still
// more optimized.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());
        bx.sideeffect();

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca u8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          uint64_t x[2];
        //      };
        //
        //      void foo(
        //          void (*try_func)(void*),
        //          void* data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(bx.type_i8p(), ptr_align);
        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // crate.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in src/libpanic_unwind/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
        unsafe {
            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
            llvm::SetUniqueComdat(bx.llmod, tydesc);
            llvm::LLVMSetInitializer(tydesc, type_info);
        }

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        let flags = bx.const_i32(8);
        let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = catchpad.load(slot, ptr_align);
        catchpad.call(catch_func, &[data, ptr], Some(&funclet));

        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret i32 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret i32 1
        bx.sideeffect();

        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(tydesc) => {
                let tydesc = bx.get_static(tydesc);
                bx.bitcast(tydesc, bx.type_i8p())
            }
            None => bx.const_null(bx.type_i8p()),
        };
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        catch.call(catch_func, &[data, ptr], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    inputs: Vec<Ty<'tcx>>,
    output: Ty<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    ));
    let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
    let llfn = cx.declare_fn(name, &fn_abi);
    cx.set_frame_pointer_elimination(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        [i8p, i8p].iter().cloned(),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn generic_simd_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    name: Symbol,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        };
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        };
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        };
    }

    let tcx = bx.tcx();
    let sig = tcx
        .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx));
    let arg_tys = sig.inputs();
    let name_str = &*name.as_str();

    if name == sym::simd_select_bitmask {
        let in_ty = arg_tys[0];
        let m_len = match in_ty.kind {
            // Note that this `.unwrap()` crashes for isize/usize, that's sort
            // of intentional as there's not currently a use case for that.
            ty::Int(i) => i.bit_width().unwrap(),
            ty::Uint(i) => i.bit_width().unwrap(),
            _ => return_error!("`{}` is not an integral type", in_ty),
        };
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(
            m_len == v_len,
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len);
        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }

    // every intrinsic below takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        sym::simd_eq => Some(hir::BinOpKind::Eq),
        sym::simd_ne => Some(hir::BinOpKind::Ne),
        sym::simd_lt => Some(hir::BinOpKind::Lt),
        sym::simd_le => Some(hir::BinOpKind::Le),
        sym::simd_gt => Some(hir::BinOpKind::Gt),
        sym::simd_ge => Some(hir::BinOpKind::Ge),
        _ => None,
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        require!(
            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
            "expected return type with integer elements, found `{}` with non-integer `{}`",
            ret_ty,
            ret_ty.simd_type(tcx)
        );

        return Ok(compare_simd_types(
            bx,
            args[0].immediate(),
            args[1].immediate(),
            in_elem,
            llret_ty,
            cmp_op,
        ));
    }

    if name_str.starts_with("simd_shuffle") {
        let n: u64 = name_str["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
        });

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(
            out_len == n,
            "expected return type of length {}, found `{}` with length {}",
            n,
            ret_ty,
            out_len
        );
        require!(
            in_elem == ret_ty.simd_type(tcx),
            "expected return element type `{}` (element of input `{}`), \
             found `{}` with element type `{}`",
            in_elem,
            in_ty,
            ret_ty,
            ret_ty.simd_type(tcx)
        );

        let total_len = u128::from(in_len) * 2;

        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = bx.const_get_elt(vector, i as u64);
                match bx.const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!(
                            "shuffle index #{} is out of bounds (limit {})",
                            arg_idx,
                            total_len
                        );
                        None
                    }
                    Some(idx) => Some(bx.const_i32(idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(bx.const_null(llret_ty)),
        };

        return Ok(bx.shuffle_vector(
            args[0].immediate(),
            args[1].immediate(),
            bx.const_vector(&indices),
        ));
    }

    if name == sym::simd_insert {
        require!(
            in_elem == arg_tys[2],
            "expected inserted type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            arg_tys[2]
        );
        return Ok(bx.insert_element(
            args[0].immediate(),
            args[2].immediate(),
            args[1].immediate(),
        ));
    }
    if name == sym::simd_extract {
        require!(
            ret_ty == in_elem,
            "expected return type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            ret_ty
        );
        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
    }

    if name == sym::simd_select {
        let m_elem_ty = in_elem;
        let m_len = in_len;
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(
            m_len == v_len,
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        match m_elem_ty.kind {
            ty::Int(_) => {}
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
        }
        // truncate the mask to a vector of i1s
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
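
    // e.g. `simd_bitmask` on a `<4 x i32>` mask packs the four lane MSBs into
    // an `i4`, which is then zero-extended to the `u8` return type below.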
    if name == sym::simd_bitmask {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
        // vector mask and returns an unsigned integer containing the most
        // significant bit (MSB) of each lane.
        //
        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
        // trailing bits.
        let expected_int_bits = in_len.max(8);
        match ret_ty.kind {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
            _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
        }

        // Integer vector <i{in_bitwidth} x in_len>:
        let (i_xn, in_elem_bitwidth) = match in_elem.kind {
            ty::Int(i) => {
                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
            }
            ty::Uint(i) => {
                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
            }
            _ => return_error!(
                "vector argument `{}`'s element type `{}`, expected integer element type",
                in_ty,
                in_elem
            ),
        };

        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_indices =
            vec![
                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
                in_len as _
            ];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
        // Bitcast <i1 x N> to iN:
        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
        // Zero-extend iN to the bitmask type:
        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
    }

    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc_middle::ty::TyS<'_>,
        in_ty: &::rustc_middle::ty::TyS<'_>,
        in_len: u64,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            };
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            };
        }

        let ety = match in_elem.kind {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty,
                        in_len
                    );
                }
                "f32"
            }
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 8]",
                        in_ty,
                        in_len
                    );
                }
                "f64"
            }
            ty::Float(f) => {
                return_error!(
                    "unsupported element type `{}` of floating-point vector `{}`",
                    f.name_str(),
                    in_ty
                );
            }
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.get_intrinsic(&llvm_name);
        let c =
            bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }

    match name {
        sym::simd_fsqrt => {
            return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fsin => {
            return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fcos => {
            return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fabs => {
            return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_floor => {
            return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fceil => {
            return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fexp => {
            return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fexp2 => {
            return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_flog10 => {
            return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_flog2 => {
            return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_flog => {
            return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fpowi => {
            return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fpow => {
            return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
        }
        sym::simd_fma => {
            return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
        }
        _ => { /* fallthrough */ }
    }

    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match elem_ty.kind {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }

    fn llvm_vector_ty(
        cx: &CodegenCx<'ll, '_>,
        elem_ty: Ty<'_>,
        vec_len: u64,
        mut no_pointers: usize,
    ) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.kind {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len)
    }
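
    // e.g. for a `<4 x *mut f32>` pointer vector, `llvm_vector_str` produces
    // "v4p0f32" and `llvm_vector_ty` builds the matching LLVM vector type.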
    if name == sym::simd_gather {
        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
        //             mask: <N x i{M}>) -> <N x T>
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        require!(
            in_len == arg_tys[1].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            arg_tys[1].simd_size(tcx)
        );
        require!(
            in_len == arg_tys[2].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            arg_tys[2].simd_size(tcx)
        );

        // The return type must match the first argument type
        require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem => {
                (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
            }
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*_ {}`",
                    arg_tys[1].simd_type(tcx),
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    arg_tys[1].simd_type(tcx),
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    arg_tys[2].simd_type(tcx),
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
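        // e.g. gathering four `f32`s through four pointers uses
        // `llvm.masked.gather.v4f32.v4p0f32`.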
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            bx.type_func(
                &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
                llvm_elem_vec_ty,
            ),
        );
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
        return Ok(v);
    }

    if name == sym::simd_scatter {
        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
        //              mask: <N x i{M}>) -> ()
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        require!(
            in_len == arg_tys[1].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            arg_tys[1].simd_size(tcx)
        );
        require!(
            in_len == arg_tys[2].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            arg_tys[2].simd_size(tcx)
        );

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
                (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
            }
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*mut {}`",
                    arg_tys[1].simd_type(tcx),
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    arg_tys[1].simd_type(tcx),
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    arg_tys[2].simd_type(tcx),
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
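        // e.g. scattering four `f32`s through four `*mut f32`s uses
        // `llvm.masked.scatter.v4f32.v4p0f32`.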
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
        );
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
        return Ok(v);
    }

    macro_rules! arith_red {
        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
         $identity:expr) => {
            if name == sym::$name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            Ok(bx.$op(args[1].immediate(), r))
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    }
                    ty::Float(f) => {
                        let acc = if $ordered {
                            // ordered arithmetic reductions take an accumulator
                            args[1].immediate()
                        } else {
                            // unordered arithmetic reductions use the identity accumulator
                            match f.bit_width() {
                                32 => bx.const_real(bx.type_f32(), $identity),
                                64 => bx.const_real(bx.type_f64(), $identity),
                                v => return_error!(
                                    r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                    sym::$name,
                                    in_ty,
                                    in_elem,
                                    v,
                                    ret_ty
                                ),
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
    arith_red!(
        simd_reduce_add_unordered: vector_reduce_add,
        vector_reduce_fadd_fast,
        false,
        add,
        0.0
    );
    arith_red!(
        simd_reduce_mul_unordered: vector_reduce_mul,
        vector_reduce_fmul_fast,
        false,
        mul,
        1.0
    );

    macro_rules! minmax_red {
        ($name:ident: $int_red:ident, $float_red:ident) => {
            if name == sym::$name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind {
                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);

    minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);

    macro_rules! bitwise_red {
        ($name:ident : $red:ident, $boolean:expr) => {
            if name == sym::$name {
                let input = if !$boolean {
                    require!(
                        ret_ty == in_elem,
                        "expected return type `{}` (element of input `{}`), found `{}`",
                        in_elem,
                        in_ty,
                        ret_ty
                    );
                    args[0].immediate()
                } else {
                    match in_elem.kind {
                        ty::Int(_) | ty::Uint(_) => {}
                        _ => return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            sym::$name,
                            in_ty,
                            in_elem,
                            ret_ty
                        ),
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
    bitwise_red!(simd_reduce_any: vector_reduce_or, true);

    if name == sym::simd_cast {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem {
            return Ok(args[0].immediate());
        }

        enum Style {
            Float,
            Int(/* is signed? */ bool),
            Unsupported,
        }

        let (in_style, in_width) = match in_elem.kind {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };
        let (out_style, out_width) = match out_elem.kind {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => {
                        if in_is_signed {
                            bx.sext(args[0].immediate(), llret_ty)
                        } else {
                            bx.zext(args[0].immediate(), llret_ty)
                        }
                    }
                });
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
                });
            }
            _ => { /* Unsupported. Fallthrough. */ }
        }
        require!(
            false,
            "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
            in_ty,
            in_elem,
            ret_ty,
            out_elem
        );
    }

    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(
                    false,
                    "unsupported operation on `{}` with element `{}`",
                    in_ty,
                    in_elem
                )
            })*
        }
    }
    arith! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }

    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == sym::simd_saturating_add;
        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
        let (signed, elem_width, elem_ty) = match in_elem.kind {
            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
            ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
            _ => {
                return_error!(
                    "expected element type `{}` of vector type `{}` \
                     to be a signed or unsigned integer type",
                    arg_tys[0].simd_type(tcx),
                    arg_tys[0]
                );
            }
        };
        let llvm_intrinsic = &format!(
            "llvm.{}{}.sat.v{}i{}",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
            in_len,
            elem_width
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
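
        // e.g. `simd_saturating_add` on a `<4 x i32>` vector selects
        // `llvm.sadd.sat.v4i32`.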
        let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[lhs, rhs], None);
        return Ok(v);
    }

    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int Ty, and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing one
// of the existing helpers.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
    match ty.kind {
        ty::Int(t) => Some((
            match t {
                ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
                ast::IntTy::I8 => 8,
                ast::IntTy::I16 => 16,
                ast::IntTy::I32 => 32,
                ast::IntTy::I64 => 64,
                ast::IntTy::I128 => 128,
            },
            true,
        )),
        ty::Uint(t) => Some((
            match t {
                ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
                ast::UintTy::U8 => 8,
                ast::UintTy::U16 => 16,
                ast::UintTy::U32 => 32,
                ast::UintTy::U64 => 64,
                ast::UintTy::U128 => 128,
            },
            false,
        )),
        _ => None,
    }
}

// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}

fn op_to_str_slice<'tcx>(op: &Operand<'tcx>) -> &'tcx str {
    Operand::value_from_const(op).try_to_str_slice().expect("Value is &str")
}

fn op_to_u32<'tcx>(op: &Operand<'tcx>) -> u32 {
    Operand::scalar_from_const(op).to_u32().expect("Scalar is u32")
}

fn op_to_u64<'tcx>(op: &Operand<'tcx>) -> u64 {
    Operand::scalar_from_const(op).to_u64().expect("Scalar is u64")
}