#![allow(non_upper_case_globals)]

use crate::attributes;
use crate::llvm;
use crate::llvm_util;
use crate::abi::{Abi, FnType, LlvmType, PassMode};
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::builder::Builder;
use crate::value::Value;
use crate::va_arg::emit_va_arg;
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc::hir;
use syntax::ast::{self, FloatTy};
use rustc_codegen_ssa::traits::*;

use rustc::session::Session;
use syntax_pos::Span;

use std::cmp::Ordering;
use std::{iter, i128, u128};
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "minnumf32" => "llvm.minnum.f32",
        "minnumf64" => "llvm.minnum.f64",
        "maxnumf32" => "llvm.maxnum.f32",
        "maxnumf64" => "llvm.maxnum.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None
    };
    Some(cx.get_intrinsic(&llvm_name))
}
impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        callee_ty: Ty<'tcx>,
        fn_ty: &FnType<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) {
        let tcx = self.tcx;

        let (def_id, substs) = match callee_ty.sty {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty)
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = &*tcx.item_name(def_id).as_str();

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => {
                self.call(simple.unwrap(),
                          &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                          None)
            }
            "unreachable" => {
                return;
            },
            "likely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
            }
            "unlikely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
            }
            "try" => {
                try_intrinsic(self,
                              args[0].immediate(),
                              args[1].immediate(),
                              args[2].immediate(),
                              llresult);
                return;
            }
            "breakpoint" => {
                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            "size_of" => {
                let tp_ty = substs.type_at(0);
                self.const_usize(self.size_of(tp_ty).bytes())
            }
            "va_start" => {
                self.va_start(args[0].immediate())
            }
            "va_end" => {
                self.va_end(args[0].immediate())
            }
            "va_copy" => {
                let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) {
                    (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(),
                    (Some(_), _) => self.load(args[0].immediate(),
                                              tcx.data_layout.pointer_align.abi),
                    (None, _) => bug!("`va_list` language item must be defined")
                };
                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                self.call(intrinsic, &[llresult, va_list], None);
                return;
            }
            "va_arg" => {
                match fn_ty.ret.layout.abi {
                    layout::Abi::Scalar(ref scalar) => {
                        match scalar.value {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0],
                                                                      tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::Float(FloatTy::F64) |
                            Primitive::Pointer => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type `f32`.
                            Primitive::Float(FloatTy::F32) => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                        }
                    }
                    _ => {
                        bug!("the va_arg intrinsic does not work with non-scalar types")
                    }
                }
            }
            "size_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llsize
                } else {
                    self.const_usize(self.size_of(tp_ty).bytes())
                }
            }
            "min_align_of" => {
                let tp_ty = substs.type_at(0);
                self.const_usize(self.align_of(tp_ty).bytes())
            }
            "min_align_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llalign
                } else {
                    self.const_usize(self.align_of(tp_ty).bytes())
                }
            }
            "pref_align_of" => {
                let tp_ty = substs.type_at(0);
                self.const_usize(self.layout_of(tp_ty).align.pref.bytes())
            }
            "type_name" => {
                let tp_ty = substs.type_at(0);
                let ty_name = self.tcx.type_name(tp_ty);
                OperandRef::from_const(self, ty_name).immediate_or_packed_pair(self)
            }
            "type_id" => {
                self.const_u64(self.tcx.type_id_hash(substs.type_at(0)))
            }
            "init" => {
                let ty = substs.type_at(0);
                if !self.layout_of(ty).is_zst() {
                    // Just zero out the stack slot.
                    // If we store a zero constant, LLVM will drown in vreg allocation for large
                    // data structures, and the generated code will be awful. (A telltale sign of
                    // this is large quantities of `mov [byte ptr foo],0` in the generated code.)
                    memset_intrinsic(
                        self,
                        false,
                        ty,
                        llresult,
                        self.const_u8(0),
                        self.const_usize(1)
                    );
                }
                return;
            }
            // Effectively no-ops
            "uninit" | "forget" => {
                return;
            }
            "needs_drop" => {
                let tp_ty = substs.type_at(0);

                self.const_bool(self.type_needs_drop(tp_ty))
            }
            "offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.inbounds_gep(ptr, &[offset])
            }
            "arith_offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.gep(ptr, &[offset])
            }
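            // The distinction above: `offset` lowers to `getelementptr inbounds`
            // (undefined behavior if the result strays outside the allocation),
            // while `arith_offset` lowers to a plain `getelementptr` with
            // wrapping semantics.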
263 "copy_nonoverlapping" => {
264 copy_intrinsic(self, false, false, substs.type_at(0),
265 args[1].immediate(), args[0].immediate(), args[2].immediate());
269 copy_intrinsic(self, true, false, substs.type_at(0),
270 args[1].immediate(), args[0].immediate(), args[2].immediate());
274 memset_intrinsic(self, false, substs.type_at(0),
275 args[0].immediate(), args[1].immediate(), args[2].immediate());
279 "volatile_copy_nonoverlapping_memory" => {
280 copy_intrinsic(self, false, true, substs.type_at(0),
281 args[0].immediate(), args[1].immediate(), args[2].immediate());
284 "volatile_copy_memory" => {
285 copy_intrinsic(self, true, true, substs.type_at(0),
286 args[0].immediate(), args[1].immediate(), args[2].immediate());
289 "volatile_set_memory" => {
290 memset_intrinsic(self, true, substs.type_at(0),
291 args[0].immediate(), args[1].immediate(), args[2].immediate());
294 "volatile_load" | "unaligned_volatile_load" => {
295 let tp_ty = substs.type_at(0);
296 let mut ptr = args[0].immediate();
297 if let PassMode::Cast(ty) = fn_ty.ret.mode {
298 ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
300 let load = self.volatile_load(ptr);
301 let align = if name == "unaligned_volatile_load" {
304 self.align_of(tp_ty).bytes() as u32
307 llvm::LLVMSetAlignment(load, align);
309 to_immediate(self, load, self.layout_of(tp_ty))
311 "volatile_store" => {
312 let dst = args[0].deref(self.cx());
313 args[1].val.volatile_store(self, dst);
316 "unaligned_volatile_store" => {
317 let dst = args[0].deref(self.cx());
318 args[1].val.unaligned_volatile_store(self, dst);
321 "prefetch_read_data" | "prefetch_write_data" |
322 "prefetch_read_instruction" | "prefetch_write_instruction" => {
323 let expect = self.get_intrinsic(&("llvm.prefetch"));
324 let (rw, cache_type) = match name {
325 "prefetch_read_data" => (0, 1),
326 "prefetch_write_data" => (1, 1),
327 "prefetch_read_instruction" => (0, 0),
328 "prefetch_write_instruction" => (1, 0),
335 self.const_i32(cache_type)
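            // In the call above, `llvm.prefetch` takes (address, rw, locality,
            // cache-type): `rw` is 0 for read / 1 for write, `cache_type` is
            // 1 for data / 0 for instruction, and the locality hint passes
            // straight through from the caller as `args[1]`.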
338 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
339 "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
340 "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
341 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" |
342 "unchecked_add" | "unchecked_sub" | "unchecked_mul" | "exact_div" |
343 "rotate_left" | "rotate_right" | "saturating_add" | "saturating_sub" => {
345 match int_type_width_signed(ty, self) {
346 Some((width, signed)) =>
349 let y = self.const_bool(false);
350 let llfn = self.get_intrinsic(
351 &format!("llvm.{}.i{}", name, width),
353 self.call(llfn, &[args[0].immediate(), y], None)
355 "ctlz_nonzero" | "cttz_nonzero" => {
356 let y = self.const_bool(true);
357 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
358 let llfn = self.get_intrinsic(llvm_name);
359 self.call(llfn, &[args[0].immediate(), y], None)
361 "ctpop" => self.call(
362 self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
363 &[args[0].immediate()],
368 args[0].immediate() // byte swap a u8/i8 is just a no-op
372 &format!("llvm.bswap.i{}", width),
374 &[args[0].immediate()],
382 &format!("llvm.bitreverse.i{}", width),
384 &[args[0].immediate()],
388 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
389 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
390 if signed { 's' } else { 'u' },
392 let llfn = self.get_intrinsic(&intrinsic);
394 // Convert `i1` to a `bool`, and write it to the out parameter
395 let pair = self.call(llfn, &[
399 let val = self.extract_value(pair, 0);
400 let overflow = self.extract_value(pair, 1);
401 let overflow = self.zext(overflow, self.type_bool());
403 let dest = result.project_field(self, 0);
404 self.store(val, dest.llval, dest.align);
405 let dest = result.project_field(self, 1);
406 self.store(overflow, dest.llval, dest.align);
410 "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()),
411 "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()),
412 "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()),
415 self.exactsdiv(args[0].immediate(), args[1].immediate())
417 self.exactudiv(args[0].immediate(), args[1].immediate())
421 self.sdiv(args[0].immediate(), args[1].immediate())
423 self.udiv(args[0].immediate(), args[1].immediate())
427 self.srem(args[0].immediate(), args[1].immediate())
429 self.urem(args[0].immediate(), args[1].immediate())
431 "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()),
434 self.ashr(args[0].immediate(), args[1].immediate())
436 self.lshr(args[0].immediate(), args[1].immediate())
440 self.unchecked_sadd(args[0].immediate(), args[1].immediate())
442 self.unchecked_uadd(args[0].immediate(), args[1].immediate())
447 self.unchecked_ssub(args[0].immediate(), args[1].immediate())
449 self.unchecked_usub(args[0].immediate(), args[1].immediate())
454 self.unchecked_smul(args[0].immediate(), args[1].immediate())
456 self.unchecked_umul(args[0].immediate(), args[1].immediate())
459 "rotate_left" | "rotate_right" => {
460 let is_left = name == "rotate_left";
461 let val = args[0].immediate();
462 let raw_shift = args[1].immediate();
463 if llvm_util::get_major_version() >= 7 {
464 // rotate = funnel shift with first two args the same
465 let llvm_name = &format!("llvm.fsh{}.i{}",
466 if is_left { 'l' } else { 'r' }, width);
467 let llfn = self.get_intrinsic(llvm_name);
468 self.call(llfn, &[val, val, raw_shift], None)
470 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
471 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
472 let width = self.const_uint(
476 let shift = self.urem(raw_shift, width);
477 let width_minus_raw_shift = self.sub(width, raw_shift);
478 let inv_shift = self.urem(width_minus_raw_shift, width);
479 let shift1 = self.shl(
481 if is_left { shift } else { inv_shift },
483 let shift2 = self.lshr(
485 if !is_left { shift } else { inv_shift },
487 self.or(shift1, shift2)
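                            // A minimal sketch of the fallback formula above for
                            // `u32` (hypothetical helper, not part of this module):
                            //
                            //     fn rotl32(x: u32, s: u32) -> u32 {
                            //         let bw = 32;
                            //         (x << (s % bw)) | (x >> ((bw - s) % bw))
                            //     }
                            //
                            // The `% bw` on both shift amounts keeps `s == 0` from
                            // producing an out-of-range shift of `bw` bits.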
490 "saturating_add" | "saturating_sub" => {
491 let is_add = name == "saturating_add";
492 let lhs = args[0].immediate();
493 let rhs = args[1].immediate();
494 if llvm_util::get_major_version() >= 8 {
495 let llvm_name = &format!("llvm.{}{}.sat.i{}",
496 if signed { 's' } else { 'u' },
497 if is_add { "add" } else { "sub" },
499 let llfn = self.get_intrinsic(llvm_name);
500 self.call(llfn, &[lhs, rhs], None)
502 let llvm_name = &format!("llvm.{}{}.with.overflow.i{}",
503 if signed { 's' } else { 'u' },
504 if is_add { "add" } else { "sub" },
506 let llfn = self.get_intrinsic(llvm_name);
507 let pair = self.call(llfn, &[lhs, rhs], None);
508 let val = self.extract_value(pair, 0);
509 let overflow = self.extract_value(pair, 1);
510 let llty = self.type_ix(width);
512 let limit = if signed {
513 let limit_lo = self.const_uint_big(
514 llty, (i128::MIN >> (128 - width)) as u128);
515 let limit_hi = self.const_uint_big(
516 llty, (i128::MAX >> (128 - width)) as u128);
518 IntPredicate::IntSLT, val, self.const_uint(llty, 0));
519 self.select(neg, limit_hi, limit_lo)
521 self.const_uint_big(llty, u128::MAX >> (128 - width))
523 self.const_uint(llty, 0)
525 self.select(overflow, limit, val)
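                            // For example (a sketch of the pre-LLVM-8 path above):
                            // `saturating_add(250u8, 10)` overflows, so the final
                            // `select` yields the limit 255 instead of the wrapped
                            // value 4; for `i8`, the sign of the wrapped result
                            // chooses between `limit_lo` (-128) and `limit_hi` (127).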
                            _ => bug!(),
                        },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, ty));
                        return;
                    }
                }
            }
540 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
541 match float_type_width(arg_tys[0]) {
544 "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()),
545 "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()),
546 "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()),
547 "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
548 "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()),
552 span_invalid_monomorphization_error(
554 &format!("invalid monomorphization of `{}` intrinsic: \
555 expected basic float type, found `{}`", name, arg_tys[0]));
562 "discriminant_value" => {
563 args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
566 name if name.starts_with("simd_") => {
567 match generic_simd_intrinsic(self, name,
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name.starts_with("atomic_") => {
                use rustc_codegen_ssa::common::AtomicOrdering::*;
                use rustc_codegen_ssa::common::
                    {SynchronizationScope, AtomicRmwBinOp};

                let split: Vec<&str> = name.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg =>
                            (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg =>
                            (SequentiallyConsistent, Acquire),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg =>
                            (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg =>
                            (AcquireRelease, Monotonic),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                    },
                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                };
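                // For example, "atomic_cxchg_acq_failrelaxed" splits into
                // ["atomic", "cxchg", "acq", "failrelaxed"] and maps to
                // (Acquire, Monotonic), while a bare "atomic_load" defaults to
                // sequentially consistent ordering.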
                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, ty));
                };

                match split[1] {
617 "cxchg" | "cxchgweak" => {
618 let ty = substs.type_at(0);
619 if int_type_width_signed(ty, self).is_some() {
620 let weak = split[1] == "cxchgweak";
621 let pair = self.atomic_cmpxchg(
628 let val = self.extract_value(pair, 0);
629 let success = self.extract_value(pair, 1);
630 let success = self.zext(success, self.type_bool());
632 let dest = result.project_field(self, 0);
633 self.store(val, dest.llval, dest.align);
634 let dest = result.project_field(self, 1);
635 self.store(success, dest.llval, dest.align);
638 return invalid_monomorphization(ty);
                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_store(args[1].immediate(), args[0].immediate(),
                                              order, size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                    "fence" => {
                        self.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        self.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => self.sess().fatal("unknown atomic operation")
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            self.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }
710 "nontemporal_store" => {
711 let dst = args[0].deref(self.cx());
712 args[1].val.nontemporal_store(self, dst);
716 _ => bug!("unknown intrinsic '{}'", name),
719 if !fn_ty.ret.is_ignore() {
720 if let PassMode::Cast(ty) = fn_ty.ret.mode {
721 let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
722 let ptr = self.pointercast(result.llval, ptr_llty);
723 self.store(llval, ptr, result.align);
725 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
726 .val.store(self, result);
    fn abort(&mut self) {
        let fnname = self.get_intrinsic(&("llvm.trap"));
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: Self::Value) {
        let assume_intrinsic = self.get_intrinsic("llvm.assume");
        self.call(assume_intrinsic, &[val], None);
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        let expect = self.get_intrinsic(&"llvm.expect.i1");
        self.call(expect, &[cond, self.const_bool(expected)], None)
    }
    fn va_start(&mut self, list: &'ll Value) -> &'ll Value {
        let target = &self.cx.tcx.sess.target.target;
        let arch = &target.arch;
        // A pointer to the architecture-specific structure is passed to this
        // function. For pointer variants (i686, RISC-V, Windows, etc.), we
        // should do nothing, as the address of the pointer is needed. For
        // architectures with an architecture-specific structure (`Aarch64`,
        // `X86_64`, etc.), this function should load the structure from the
        // `va_list`.
        let va_list = match &**arch {
            _ if target.options.is_like_windows => list,
            "aarch64" if target.target_os == "ios" => list,
            "aarch64" | "x86_64" | "powerpc" =>
                self.load(list, self.tcx().data_layout.pointer_align.abi),
            _ => list,
        };
        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)
    }
    fn va_end(&mut self, list: &'ll Value) -> &'ll Value {
        let target = &self.cx.tcx.sess.target.target;
        let arch = &target.arch;
        // See the comment in `va_start` for the purpose of the following.
        let va_list = match &**arch {
            _ if target.options.is_like_windows => list,
            "aarch64" if target.target_os == "ios" => list,
            "aarch64" | "x86_64" | "powerpc" =>
                self.load(list, self.tcx().data_layout.pointer_align.abi),
            _ => list,
        };
        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)
    }
}
fn copy_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    src: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}
fn memset_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    val: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    bx.memset(dst, val, size, align, flags);
}
fn try_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().no_landing_pads() {
        bx.call(func, &[data], None);
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, func, data, local_ptr, dest);
    } else {
        codegen_gnu_try(bx, func, data, local_ptr, dest);
    }
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of this writing LLVM does not recommend using them,
// as the old-style instructions are still better optimized.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);
        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = bx.type_ptr_to(bx.type_i64());
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(i64p, "slot", ptr_align);
        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        let tydesc = match bx.tcx().lang_items().msvc_try_filter() {
            Some(did) => bx.get_static(did),
            None => bug!("msvc_try_filter not defined"),
        };
        let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]);
        let addr = catchpad.load(slot, ptr_align);

        let i64_align = bx.tcx().data_layout.i64_align.abi;
        let arg1 = catchpad.load(addr, i64_align);
        let val1 = bx.const_i32(1);
        let gep1 = catchpad.inbounds_gep(addr, &[val1]);
        let arg2 = catchpad.load(gep1, i64_align);
        let local_ptr = catchpad.bitcast(local_ptr, i64p);
        let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]);
        catchpad.store(arg1, local_ptr, i64_align);
        catchpad.store(arg2, gep2, i64_align);
        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.
        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);
        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));
        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        catch.add_clause(vals, bx.const_null(bx.type_i8p()));
        let ptr = catch.extract_value(vals, 0);
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
        catch.store(ptr, bitcast, ptr_align);
        catch.ret(bx.const_i32(1));
    });
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    inputs: Vec<Ty<'tcx>>,
    output: Ty<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    ));
    let llfn = cx.define_internal_fn(name, rust_fn_sig);
    attributes::from_fn_attrs(cx, llfn, None, rust_fn_sig);
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        }
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        }
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        };
    }
    let tcx = bx.tcx();
    let sig = tcx.normalize_erasing_late_bound_regions(
        ty::ParamEnv::reveal_all(),
        &callee_ty.fn_sig(tcx),
    );
    let arg_tys = sig.inputs();
    if name == "simd_select_bitmask" {
        let in_ty = arg_tys[0];
        let m_len = match in_ty.sty {
            // Note that this `.unwrap()` crashes for isize/usize, that's sort
            // of intentional as there's not currently a use case for that.
            ty::Int(i) => i.bit_width().unwrap(),
            ty::Uint(i) => i.bit_width().unwrap(),
            _ => return_error!("`{}` is not an integral type", in_ty),
        };
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(m_len == v_len,
                 "mismatched lengths: mask length `{}` != other vector length `{}`",
                 m_len, v_len
        );
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
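    // For example (a sketch): with `m_len == 4`, a mask of 0b0101 bitcast to
    // `<4 x i1>` picks each lane from `args[1]` where the corresponding mask
    // bit is 1 and from `args[2]` where it is 0 (the lane-to-bit order follows
    // LLVM's integer-to-vector bitcast rules).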
    // every intrinsic below takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);
    let comparison = match name {
        "simd_eq" => Some(hir::BinOpKind::Eq),
        "simd_ne" => Some(hir::BinOpKind::Ne),
        "simd_lt" => Some(hir::BinOpKind::Lt),
        "simd_le" => Some(hir::BinOpKind::Le),
        "simd_gt" => Some(hir::BinOpKind::Gt),
        "simd_ge" => Some(hir::BinOpKind::Ge),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return Ok(compare_simd_types(bx,
                                     args[0].immediate(),
                                     args[1].immediate(),
                                     in_elem,
                                     llret_ty,
                                     cmp_op))
    }
    if name.starts_with("simd_shuffle") {
        let n: usize = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?"));

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u128 * 2;

        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = bx.const_get_elt(vector, i as u64);
                match bx.const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(bx.const_i32(idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(bx.const_null(llret_ty))
        };

        return Ok(bx.shuffle_vector(args[0].immediate(),
                                    args[1].immediate(),
                                    bx.const_vector(&indices)))
    }
    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return Ok(bx.insert_element(args[0].immediate(),
                                    args[2].immediate(),
                                    args[1].immediate()))
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
    }
    if name == "simd_select" {
        let m_elem_ty = in_elem;
        let m_len = in_len;
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(m_len == v_len,
                 "mismatched lengths: mask length `{}` != other vector length `{}`",
                 m_len, v_len
        );
        match m_elem_ty.sty {
            ty::Int(_) => {},
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
        }
        // truncate the mask to a vector of i1s
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
    if name == "simd_bitmask" {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
        // vector mask and returns an unsigned integer containing the most
        // significant bit (MSB) of each lane.
        use rustc_target::abi::HasDataLayout;

        // If the vector has less than 8 lanes, a u8 is returned with zeroed
        // trailing bits.
        let expected_int_bits = in_len.max(8);
        match ret_ty.sty {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
            _ => return_error!(
                "bitmask `{}`, expected `u{}`",
                ret_ty, expected_int_bits
            ),
        }

        // Integer vector <i{in_bitwidth} x in_len>:
        let (i_xn, in_elem_bitwidth) = match in_elem.sty {
            ty::Int(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
            ),
            ty::Uint(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
            ),
            _ => return_error!(
                "vector argument `{}`'s element type `{}`, expected integer element type",
                in_ty, in_elem
            ),
        };

        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_indices = vec![
            bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
            in_len
        ];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _));
        // Bitcast <i1 x N> to iN:
        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _));
        // Zero-extend iN to the bitmask type:
        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _)));
    }
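    // For example (a sketch): a `<4 x i32>` mask of [!0, 0, 0, !0] has MSBs
    // [1, 0, 0, 1]; the lshr/trunc steps above turn that into a `<4 x i1>`
    // vector, which is bitcast to an i4 and zero-extended to the `u8` bitmask
    // (the lane-to-bit order again follows LLVM's bitcast rules).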
    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc::ty::TyS<'_>,
        in_ty: &::rustc::ty::TyS<'_>,
        in_len: usize,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            }
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }

        let ety = match in_elem.sty {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty, in_len);
                }
                "f32"
            }
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!("unsupported floating-point vector `{}` with length `{}` \
                                   out-of-range [2, 8]",
                                  in_ty, in_len);
                }
                "f64"
            }
            ty::Float(f) => {
                return_error!("unsupported element type `{}` of floating-point vector `{}`",
                              f, in_ty);
            }
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.get_intrinsic(&llvm_name);
        let c = bx.call(intrinsic,
                        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                        None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }
    match name {
        "simd_fsqrt" => {
            return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fsin" => {
            return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fcos" => {
            return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fabs" => {
            return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_floor" => {
            return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_ceil" => {
            return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp" => {
            return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp2" => {
            return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog10" => {
            return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog2" => {
            return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog" => {
            return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpowi" => {
            return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpow" => {
            return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fma" => {
            return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
        }
        _ => { /* fallthrough */ }
    }
    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
    fn llvm_vector_str(elem_ty: ty::Ty<'_>, vec_len: usize, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match elem_ty.sty {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }
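    // For example, calling `llvm_vector_str` with an `f32` element type,
    // `vec_len == 4`, and `no_pointers == 1` yields "v4p0f32", the suffix LLVM
    // uses for a vector-of-pointers operand in masked gather/scatter intrinsic
    // names such as `llvm.masked.gather.v4f32.v4p0f32`.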
    fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty<'_>, vec_len: usize,
                      mut no_pointers: usize) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.sty {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len as u64)
    }
    if name == "simd_gather" {
        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
        //             mask: <N x i{M}>) -> <N x T>
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // The return type must match the first argument type
        require!(ret_ty == in_ty,
                 "expected return type `{}`, found `{}`",
                 in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: ty::Ty<'_>) -> usize {
            match t.sty {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: ty::Ty<'_>) -> ty::Ty<'_> {
            match t.sty {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
                                                 non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*_ {}`",
                         arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx), in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).sty {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx), arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(&llvm_intrinsic,
                               bx.type_func(&[
                                   llvm_pointer_vec_ty,
                                   alignment_ty,
                                   mask_ty,
                                   llvm_elem_vec_ty], llvm_elem_vec_ty));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
                        None);
        return Ok(v);
    }
    if name == "simd_scatter" {
        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
        //              mask: <N x i{M}>) -> ()
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // This counts how many pointers
        fn ptr_count(t: ty::Ty<'_>) -> usize {
            match t.sty {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: ty::Ty<'_>) -> ty::Ty<'_> {
            match t.sty {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
                => (ptr_count(arg_tys[1].simd_type(tcx)),
                    non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*mut {}`",
                         arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx), in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).sty {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx), arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(&llvm_intrinsic,
                               bx.type_func(&[llvm_elem_vec_ty,
                                              llvm_pointer_vec_ty,
                                              alignment_ty,
                                              mask_ty], ret_t));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
                        None);
        return Ok(v);
    }
    macro_rules! arith_red {
        ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            if name.contains("mul") {
                                Ok(bx.mul(args[1].immediate(), r))
                            } else {
                                Ok(bx.add(args[1].immediate(), r))
                            }
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    },
                    ty::Float(f) => {
                        // ordered arithmetic reductions take an accumulator
                        let acc = if $ordered {
                            let acc = args[1].immediate();
                            // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
                            // * if the accumulator of the fadd isn't 0, incorrect
                            // code is generated
                            // * if the accumulator of the fmul isn't 1, incorrect
                            // code is generated
                            match bx.const_get_real(acc) {
                                None => return_error!("accumulator of {} is not a constant", $name),
                                Some((v, loses_info)) => {
                                    if $name.contains("mul") && v != 1.0_f64 {
                                        return_error!("accumulator of {} is not 1.0", $name);
                                    } else if $name.contains("add") && v != 0.0_f64 {
                                        return_error!("accumulator of {} is not 0.0", $name);
                                    } else if loses_info {
                                        return_error!("accumulator of {} loses information", $name);
                                    }
                                }
                            }
                            acc
                        } else {
                            // unordered arithmetic reductions do not:
                            match f.bit_width() {
                                32 => bx.const_undef(bx.type_f32()),
                                64 => bx.const_undef(bx.type_f64()),
                                v => {
                                    return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                        $name, in_ty, in_elem, v, ret_ty
                                    )
                                }
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => {
                        return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            $name, in_ty, in_elem, ret_ty
                        )
                    },
                }
            }
        }
    }
    arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
    arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
    arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
    arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
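    // The ordered reductions above seed the reduction with the caller's
    // accumulator (`args[1]`), while the unordered forms seed an `undef`
    // accumulator and are free to reassociate.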
    macro_rules! minmax_red {
        ($name:tt: $int_red:ident, $float_red:ident) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::Int(_i) => {
                        Ok(bx.$int_red(args[0].immediate(), true))
                    },
                    ty::Uint(_u) => {
                        Ok(bx.$int_red(args[0].immediate(), false))
                    },
                    ty::Float(_f) => {
                        Ok(bx.$float_red(args[0].immediate()))
                    }
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }
        }
    }

    minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
    minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);

    minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
    macro_rules! bitwise_red {
        ($name:tt : $red:ident, $boolean:expr) => {
            if name == $name {
                let input = if !$boolean {
                    require!(ret_ty == in_elem,
                             "expected return type `{}` (element of input `{}`), found `{}`",
                             in_elem, in_ty, ret_ty);
                    args[0].immediate()
                } else {
                    match in_elem.sty {
                        ty::Int(_) | ty::Uint(_) => {},
                        _ => {
                            return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                          $name, in_ty, in_elem, ret_ty)
                        }
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.sty {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(if !$boolean {
                            r
                        } else {
                            bx.zext(r, bx.type_bool())
                        })
                    },
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }
        }
    }

    bitwise_red!("simd_reduce_and": vector_reduce_and, false);
    bitwise_red!("simd_reduce_or": vector_reduce_or, false);
    bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
    bitwise_red!("simd_reduce_all": vector_reduce_and, true);
    bitwise_red!("simd_reduce_any": vector_reduce_or, true);
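    // `simd_reduce_all` / `simd_reduce_any` first truncate the mask vector to
    // `<N x i1>`, reduce it with `and` / `or`, and zero-extend the resulting
    // i1 back to `bool`.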
    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return Ok(args[0].immediate()); }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => if in_is_signed {
                        bx.sext(args[0].immediate(), llret_ty)
                    } else {
                        bx.zext(args[0].immediate(), llret_ty)
                    }
                })
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                })
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.sty {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                         "unsupported operation on `{}` with element `{}`",
                         in_ty,
                         in_elem)
            })*
        }
    }
    arith! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }
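    // Each line of the `arith!` invocation above expands to an
    // `if name == "simd_add" { ... }`-style block that dispatches on the
    // element type, e.g. `simd_add` lowers to LLVM `add` on integer vectors
    // and `fadd` on float vectors.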
    if name == "simd_saturating_add" || name == "simd_saturating_sub" {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == "simd_saturating_add";
        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
        let (signed, elem_width, elem_ty) = match in_elem.sty {
            ty::Int(i) =>
                (
                    true,
                    i.bit_width().unwrap_or(ptr_bits),
                    bx.cx.type_int_from_ty(i)
                ),
            ty::Uint(i) =>
                (
                    false,
                    i.bit_width().unwrap_or(ptr_bits),
                    bx.cx.type_uint_from_ty(i)
                ),
            _ => {
                return_error!(
                    "expected element type `{}` of vector type `{}` \
                     to be a signed or unsigned integer type",
                    arg_tys[0].simd_type(tcx), arg_tys[0]
                );
            }
        };
        let llvm_intrinsic = &format!(
            "llvm.{}{}.sat.v{}i{}",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
            in_len, elem_width
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

        let f = bx.declare_cfn(
            &llvm_intrinsic,
            bx.type_func(&[vec_ty, vec_ty], vec_ty)
        );
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[lhs, rhs], None);
        return Ok(v);
    }

    span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int Ty, and whether it's signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate using one of
// the already existing ones instead.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
    match ty.sty {
        ty::Int(t) => Some((match t {
            ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64,
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
            ast::IntTy::I128 => 128,
        }, true)),
        ty::Uint(t) => Some((match t {
            ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64,
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
            ast::UintTy::U128 => 128,
        }, false)),
        _ => None,
    }
}
// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.sty {
        ty::Float(t) => Some(t.bit_width() as u64),
        _ => None,
    }
}