use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::builder::Builder;
use crate::value::Value;
use crate::llvm;
use crate::llvm_util;
use crate::va_arg::emit_va_arg;
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, FnAbiExt, LayoutOf, HasTyCtxt, Primitive};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc::hir;
use rustc_target::abi::HasDataLayout;
use syntax::ast;
use rustc::{bug, span_bug};
use syntax_pos::Span;
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::traits::*;

use std::cmp::Ordering;
use std::{iter, i128, u128};
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "minnumf32" => "llvm.minnum.f32",
        "minnumf64" => "llvm.minnum.f64",
        "maxnumf32" => "llvm.maxnum.f32",
        "maxnumf64" => "llvm.maxnum.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None,
    };
    Some(cx.get_intrinsic(&llvm_name))
}
impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx);

        let (def_id, substs) = match callee_ty.kind {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = &*tcx.item_name(def_id).as_str();

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => {
                self.call(simple.unwrap(),
                          &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                          None)
            }
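            // Illustrative note (not from the original source): for a "simple"
            // intrinsic the lowering is a direct call to the LLVM equivalent
            // with the arguments passed through unchanged, e.g.
            // `core::intrinsics::sqrtf64(x)` becomes roughly
            //
            //     %r = call double @llvm.sqrt.f64(double %x)
            //
            // with no extra argument or return-value massaging.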
            "unreachable" => {
                return;
            }
            "likely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
            }
            "unlikely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
            }
            "try" => {
                try_intrinsic(self, args[0].immediate(), args[1].immediate(),
                              args[2].immediate(), llresult);
                return;
            }
            "breakpoint" => {
                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            "va_start" => {
                self.va_start(args[0].immediate())
            }
            "va_end" => {
                self.va_end(args[0].immediate())
            }
            "va_copy" => {
                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
            }
            "va_arg" => {
                match fn_abi.ret.layout.abi {
                    layout::Abi::Scalar(ref scalar) => {
                        match scalar.value {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0],
                                                                      tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::F64 |
                            Primitive::Pointer => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::F32 => {
                                bug!("the va_arg intrinsic does not work with `f32`")
                            }
                        }
                    }
                    _ => {
                        bug!("the va_arg intrinsic does not work with non-scalar types")
                    }
                }
            }
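            // Illustrative note (not from the original source): the promotion
            // above mirrors C's default argument promotions. Reading a `u16`
            // out of a va_list therefore emits something like
            //
            //     %promoted = va_arg i8** %ap, i32
            //     %r = trunc i32 %promoted to i16
            //
            // rather than a direct `va_arg ..., i16`.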
            "size_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llsize
                } else {
                    self.const_usize(self.size_of(tp_ty).bytes())
                }
            }
            "min_align_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llalign
                } else {
                    self.const_usize(self.align_of(tp_ty).bytes())
                }
            }
            "size_of" |
            "pref_align_of" |
            "min_align_of" |
            "needs_drop" |
            "type_id" |
            "type_name" => {
                let ty_name = self.tcx
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(self, ty_name).immediate_or_packed_pair(self)
            }
            "init" => {
                let ty = substs.type_at(0);
                if !self.layout_of(ty).is_zst() {
                    // Just zero out the stack slot.
                    // If we store a zero constant, LLVM will drown in vreg allocation for large
                    // data structures, and the generated code will be awful. (A telltale sign of
                    // this is large quantities of `mov [byte ptr foo], 0` in the generated code.)
                    memset_intrinsic(
                        self,
                        false,
                        ty,
                        llresult,
                        self.const_u8(0),
                        self.const_usize(1)
                    );
                }
                return;
            }
            // Effectively no-ops
            "uninit" | "forget" => {
                return;
            }
            "offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.inbounds_gep(ptr, &[offset])
            }
            "arith_offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.gep(ptr, &[offset])
            }
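            // Illustrative note (not from the original source): the only
            // difference between the two offset intrinsics is the `inbounds`
            // marker, e.g. for a `*const u32`:
            //
            //     offset:       %r = getelementptr inbounds i32, i32* %p, i64 %n
            //     arith_offset: %r = getelementptr i32, i32* %p, i64 %n
            //
            // `inbounds` lets LLVM assume the result stays within (or one past
            // the end of) the same allocation, enabling more optimization.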
242 "copy_nonoverlapping" => {
243 copy_intrinsic(self, false, false, substs.type_at(0),
244 args[1].immediate(), args[0].immediate(), args[2].immediate());
248 copy_intrinsic(self, true, false, substs.type_at(0),
249 args[1].immediate(), args[0].immediate(), args[2].immediate());
253 memset_intrinsic(self, false, substs.type_at(0),
254 args[0].immediate(), args[1].immediate(), args[2].immediate());
258 "volatile_copy_nonoverlapping_memory" => {
259 copy_intrinsic(self, false, true, substs.type_at(0),
260 args[0].immediate(), args[1].immediate(), args[2].immediate());
263 "volatile_copy_memory" => {
264 copy_intrinsic(self, true, true, substs.type_at(0),
265 args[0].immediate(), args[1].immediate(), args[2].immediate());
268 "volatile_set_memory" => {
269 memset_intrinsic(self, true, substs.type_at(0),
270 args[0].immediate(), args[1].immediate(), args[2].immediate());
273 "volatile_load" | "unaligned_volatile_load" => {
274 let tp_ty = substs.type_at(0);
275 let mut ptr = args[0].immediate();
276 if let PassMode::Cast(ty) = fn_abi.ret.mode {
277 ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
279 let load = self.volatile_load(ptr);
280 let align = if name == "unaligned_volatile_load" {
283 self.align_of(tp_ty).bytes() as u32
286 llvm::LLVMSetAlignment(load, align);
288 to_immediate(self, load, self.layout_of(tp_ty))
290 "volatile_store" => {
291 let dst = args[0].deref(self.cx());
292 args[1].val.volatile_store(self, dst);
295 "unaligned_volatile_store" => {
296 let dst = args[0].deref(self.cx());
297 args[1].val.unaligned_volatile_store(self, dst);
300 "prefetch_read_data" | "prefetch_write_data" |
301 "prefetch_read_instruction" | "prefetch_write_instruction" => {
302 let expect = self.get_intrinsic(&("llvm.prefetch"));
303 let (rw, cache_type) = match name {
304 "prefetch_read_data" => (0, 1),
305 "prefetch_write_data" => (1, 1),
306 "prefetch_read_instruction" => (0, 0),
307 "prefetch_write_instruction" => (1, 0),
314 self.const_i32(cache_type)
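            // Illustrative note (not from the original source): the argument
            // order above follows LLVM's declaration
            //
            //     declare void @llvm.prefetch(i8* %addr, i32 %rw, i32 %locality, i32 %cache_type)
            //
            // where `rw` is 0 = read / 1 = write, the locality hint (passed
            // here as `args[1]`) ranges 0..=3, and `cache_type` is
            // 0 = instruction / 1 = data.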
317 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
318 "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
319 "mul_with_overflow" | "wrapping_add" | "wrapping_sub" | "wrapping_mul" |
320 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" |
321 "unchecked_add" | "unchecked_sub" | "unchecked_mul" | "exact_div" |
322 "rotate_left" | "rotate_right" | "saturating_add" | "saturating_sub" => {
324 match int_type_width_signed(ty, self) {
325 Some((width, signed)) =>
328 let y = self.const_bool(false);
329 let llfn = self.get_intrinsic(
330 &format!("llvm.{}.i{}", name, width),
332 self.call(llfn, &[args[0].immediate(), y], None)
334 "ctlz_nonzero" | "cttz_nonzero" => {
335 let y = self.const_bool(true);
336 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
337 let llfn = self.get_intrinsic(llvm_name);
338 self.call(llfn, &[args[0].immediate(), y], None)
340 "ctpop" => self.call(
341 self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
342 &[args[0].immediate()],
347 args[0].immediate() // byte swap a u8/i8 is just a no-op
351 &format!("llvm.bswap.i{}", width),
353 &[args[0].immediate()],
361 &format!("llvm.bitreverse.i{}", width),
363 &[args[0].immediate()],
367 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
368 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
369 if signed { 's' } else { 'u' },
371 let llfn = self.get_intrinsic(&intrinsic);
373 // Convert `i1` to a `bool`, and write it to the out parameter
374 let pair = self.call(llfn, &[
378 let val = self.extract_value(pair, 0);
379 let overflow = self.extract_value(pair, 1);
380 let overflow = self.zext(overflow, self.type_bool());
382 let dest = result.project_field(self, 0);
383 self.store(val, dest.llval, dest.align);
384 let dest = result.project_field(self, 1);
385 self.store(overflow, dest.llval, dest.align);
389 "wrapping_add" => self.add(args[0].immediate(), args[1].immediate()),
390 "wrapping_sub" => self.sub(args[0].immediate(), args[1].immediate()),
391 "wrapping_mul" => self.mul(args[0].immediate(), args[1].immediate()),
394 self.exactsdiv(args[0].immediate(), args[1].immediate())
396 self.exactudiv(args[0].immediate(), args[1].immediate())
400 self.sdiv(args[0].immediate(), args[1].immediate())
402 self.udiv(args[0].immediate(), args[1].immediate())
406 self.srem(args[0].immediate(), args[1].immediate())
408 self.urem(args[0].immediate(), args[1].immediate())
410 "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()),
413 self.ashr(args[0].immediate(), args[1].immediate())
415 self.lshr(args[0].immediate(), args[1].immediate())
419 self.unchecked_sadd(args[0].immediate(), args[1].immediate())
421 self.unchecked_uadd(args[0].immediate(), args[1].immediate())
426 self.unchecked_ssub(args[0].immediate(), args[1].immediate())
428 self.unchecked_usub(args[0].immediate(), args[1].immediate())
433 self.unchecked_smul(args[0].immediate(), args[1].immediate())
435 self.unchecked_umul(args[0].immediate(), args[1].immediate())
438 "rotate_left" | "rotate_right" => {
439 let is_left = name == "rotate_left";
440 let val = args[0].immediate();
441 let raw_shift = args[1].immediate();
442 // rotate = funnel shift with first two args the same
443 let llvm_name = &format!("llvm.fsh{}.i{}",
444 if is_left { 'l' } else { 'r' }, width);
445 let llfn = self.get_intrinsic(llvm_name);
446 self.call(llfn, &[val, val, raw_shift], None)
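                            // Illustrative note (not from the original source):
                            // a funnel shift concatenates its first two operands
                            // and shifts the double-width value, so with both
                            // operands equal it degenerates into a rotate:
                            //
                            //     fshl(x, x, n) == (x << n) | (x >> (width - n)) == rotate_left(x, n)
                            //
                            // e.g. for a u8, rotate_left(0b1001_0110, 3) yields
                            // 0b1011_0100.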
448 "saturating_add" | "saturating_sub" => {
449 let is_add = name == "saturating_add";
450 let lhs = args[0].immediate();
451 let rhs = args[1].immediate();
452 if llvm_util::get_major_version() >= 8 {
453 let llvm_name = &format!("llvm.{}{}.sat.i{}",
454 if signed { 's' } else { 'u' },
455 if is_add { "add" } else { "sub" },
457 let llfn = self.get_intrinsic(llvm_name);
458 self.call(llfn, &[lhs, rhs], None)
460 let llvm_name = &format!("llvm.{}{}.with.overflow.i{}",
461 if signed { 's' } else { 'u' },
462 if is_add { "add" } else { "sub" },
464 let llfn = self.get_intrinsic(llvm_name);
465 let pair = self.call(llfn, &[lhs, rhs], None);
466 let val = self.extract_value(pair, 0);
467 let overflow = self.extract_value(pair, 1);
468 let llty = self.type_ix(width);
470 let limit = if signed {
471 let limit_lo = self.const_uint_big(
472 llty, (i128::MIN >> (128 - width)) as u128);
473 let limit_hi = self.const_uint_big(
474 llty, (i128::MAX >> (128 - width)) as u128);
476 IntPredicate::IntSLT, val, self.const_uint(llty, 0));
477 self.select(neg, limit_hi, limit_lo)
479 self.const_uint_big(llty, u128::MAX >> (128 - width))
481 self.const_uint(llty, 0)
483 self.select(overflow, limit, val)
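                            // Illustrative note (not from the original source):
                            // the pre-LLVM-8 fallback computes the clamped value
                            // by hand. For `u8::saturating_add(200, 100)`,
                            // `uadd.with.overflow` returns (44, overflow = true),
                            // the unsigned add limit is u8::MAX = 255, and the
                            // final select picks 255 because overflow is set.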
                            _ => bug!(),
                        },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, ty));
                        return;
                    }
                }
            }
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) =>
                        match name {
                            "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()),
                            "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()),
                            "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()),
                            "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
                            "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()),
                            _ => bug!(),
                        },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic float type, found `{}`", name, arg_tys[0]));
                        return;
                    }
                }
            }

            "float_to_int_approx_unchecked" => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `float_to_int_approx_unchecked` \
                                  intrinsic: expected basic float type, \
                                  found `{}`", arg_tys[0]));
                    return;
                }
                match int_type_width_signed(ret_ty, self.cx) {
                    Some((width, signed)) => {
                        if signed {
                            self.fptosi(args[0].immediate(), self.cx.type_ix(width))
                        } else {
                            self.fptoui(args[0].immediate(), self.cx.type_ix(width))
                        }
                    }
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `float_to_int_approx_unchecked` \
                                      intrinsic: expected basic integer type, \
                                      found `{}`", ret_ty));
                        return;
                    }
                }
            }
547 "discriminant_value" => {
548 args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
551 name if name.starts_with("simd_") => {
552 match generic_simd_intrinsic(self, name,
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name.starts_with("atomic_") => {
                use rustc_codegen_ssa::common::AtomicOrdering::*;
                use rustc_codegen_ssa::common::{SynchronizationScope, AtomicRmwBinOp};

                let split: Vec<&str> = name.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg =>
                            (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg =>
                            (SequentiallyConsistent, Acquire),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg =>
                            (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg =>
                            (AcquireRelease, Monotonic),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                    },
                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, ty));
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let weak = split[1] == "cxchgweak";
                            let pair = self.atomic_cmpxchg(
                                args[0].immediate(),
                                args[1].immediate(),
                                args[2].immediate(),
                                order,
                                failorder,
                                weak);
                            let val = self.extract_value(pair, 0);
                            let success = self.extract_value(pair, 1);
                            let success = self.zext(success, self.type_bool());

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_store(
                                args[1].immediate(),
                                args[0].immediate(),
                                order,
                                size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        self.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        self.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => self.sess().fatal("unknown atomic operation")
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            self.atomic_rmw(
                                atom_op,
                                args[0].immediate(),
                                args[1].immediate(),
                                order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }
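            // Illustrative note (not from the original source): the name
            // "atomic_cxchg_acq_failrelaxed" splits into
            // ["atomic", "cxchg", "acq", "failrelaxed"], so `split[1]` selects
            // the compare-exchange operation and the 4-part ordering match
            // resolves to (Acquire, Monotonic) for the success and failure
            // cases respectively.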
695 "nontemporal_store" => {
696 let dst = args[0].deref(self.cx());
697 args[1].val.nontemporal_store(self, dst);
701 "ptr_offset_from" => {
702 let ty = substs.type_at(0);
703 let pointee_size = self.size_of(ty);
705 // This is the same sequence that Clang emits for pointer subtraction.
706 // It can be neither `nsw` nor `nuw` because the input is treated as
707 // unsigned but then the output is treated as signed, so neither works.
708 let a = args[0].immediate();
709 let b = args[1].immediate();
710 let a = self.ptrtoint(a, self.type_isize());
711 let b = self.ptrtoint(b, self.type_isize());
712 let d = self.sub(a, b);
713 let pointee_size = self.const_usize(pointee_size.bytes());
714 // this is where the signed magic happens (notice the `s` in `exactsdiv`)
715 self.exactsdiv(d, pointee_size)
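            // Illustrative note (not from the original source): for two
            // `*const u32` values at addresses 0x1010 and 0x1004, the sequence
            // computes (0x1010 - 0x1004) / 4 = 3 elements. `exactsdiv` tells
            // LLVM the division has no remainder, which a plain `sdiv` would
            // not convey.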
            _ => bug!("unknown intrinsic '{}'", name),
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val.store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let fnname = self.get_intrinsic(&("llvm.trap"));
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: Self::Value) {
        let assume_intrinsic = self.get_intrinsic("llvm.assume");
        self.call(assume_intrinsic, &[val], None);
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        let expect = self.get_intrinsic(&"llvm.expect.i1");
        self.call(expect, &[cond, self.const_bool(expected)], None)
    }

    fn sideeffect(&mut self) {
        if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)
    }
}
fn copy_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    src: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}
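// Illustrative note (not from the original source): the element count is
// scaled to a byte count before the memcpy/memmove is emitted, so e.g.
// `ptr::copy_nonoverlapping::<u16>(src, dst, 4)` lowers to a memcpy of
// 4 * size_of::<u16>() = 8 bytes at the alignment of `u16`.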
fn memset_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    val: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    bx.memset(dst, val, size, align, flags);
}

fn try_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().no_landing_pads() {
        bx.call(func, &[data], None);
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, func, data, local_ptr, dest);
    } else {
        codegen_gnu_try(bx, func, data, local_ptr, dest);
    }
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of this writing LLVM does not recommend using them,
// as the old instructions are still better optimized.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca [2 x i64]
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      struct rust_panic {
        //          uint64_t x[2];
        //      };
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(rust_panic a) {
        //              ret[0] = a.x[0];
        //              ret[1] = a.x[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64_2 = bx.type_array(bx.type_i64(), 2);
        let i64_align = bx.tcx().data_layout.i64_align.abi;
        let slot = bx.alloca(i64_2, i64_align);
        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(did) => bx.get_static(did),
            None => bug!("eh_catch_typeinfo not defined, but needed for SEH unwinding"),
        };
        let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]);

        let payload = catchpad.load(slot, i64_align);
        let local_ptr = catchpad.bitcast(local_ptr, bx.type_ptr_to(i64_2));
        catchpad.store(payload, local_ptr, i64_align);
        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //      bx:
        //          invoke %func(%args...) normal %normal unwind %catch
        //
        //      normal:
        //          ret 0
        //
        //      catch:
        //          (ptr, _) = landingpad
        //          store ptr, %local_ptr
        //          ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.

        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);
        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(tydesc) => {
                let tydesc = bx.get_static(tydesc);
                bx.bitcast(tydesc, bx.type_i8p())
            }
            None => bx.const_null(bx.type_i8p()),
        };
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
        catch.store(ptr, bitcast, ptr_align);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    inputs: Vec<Ty<'tcx>>,
    output: Ty<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    ));
    let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
    let llfn = cx.declare_fn(name, &fn_abi);
    // FIXME(eddyb) find a nicer way to do this.
    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}
fn generic_simd_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        }
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        }
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        };
    }

    let tcx = bx.tcx();
    let sig = tcx.normalize_erasing_late_bound_regions(
        ty::ParamEnv::reveal_all(),
        &callee_ty.fn_sig(tcx),
    );
    let arg_tys = sig.inputs();
    if name == "simd_select_bitmask" {
        let in_ty = arg_tys[0];
        let m_len = match in_ty.kind {
            // Note that this `.unwrap()` crashes for isize/usize, that's sort
            // of intentional as there's not currently a use case for that.
            ty::Int(i) => i.bit_width().unwrap() as u64,
            ty::Uint(i) => i.bit_width().unwrap() as u64,
            _ => return_error!("`{}` is not an integral type", in_ty),
        };
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(m_len == v_len,
                 "mismatched lengths: mask length `{}` != other vector length `{}`",
                 m_len, v_len
        );
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len);
        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
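    // Illustrative note (not from the original source): with an integer mask
    // value 0b0101 over 4 lanes, the bitcast yields <4 x i1> = [1, 0, 1, 0]
    // (LSB = lane 0 on little-endian targets), so lanes 0 and 2 are taken from
    // the first data vector and lanes 1 and 3 from the second.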
    // every intrinsic below takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BinOpKind::Eq),
        "simd_ne" => Some(hir::BinOpKind::Ne),
        "simd_lt" => Some(hir::BinOpKind::Lt),
        "simd_le" => Some(hir::BinOpKind::Le),
        "simd_gt" => Some(hir::BinOpKind::Gt),
        "simd_ge" => Some(hir::BinOpKind::Ge),
        _ => None,
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return Ok(compare_simd_types(bx,
                                     args[0].immediate(),
                                     args[1].immediate(),
                                     in_elem,
                                     llret_ty,
                                     cmp_op))
    }
    if name.starts_with("simd_shuffle") {
        let n: u64 = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?"));

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = u128::from(in_len) * 2;

        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = bx.const_get_elt(vector, i as u64);
                match bx.const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(bx.const_i32(idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(bx.const_null(llret_ty)),
        };

        return Ok(bx.shuffle_vector(args[0].immediate(),
                                    args[1].immediate(),
                                    bx.const_vector(&indices)))
    }
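    // Illustrative note (not from the original source): `simd_shuffle4` over
    // two 4-lane vectors a = [a0, a1, a2, a3] and b = [b0, b1, b2, b3] with
    // indices [0, 4, 1, 5] produces [a0, b0, a1, b1]; indices 0..4 pick from
    // `a` and 4..8 pick from `b`, which is why the bound is `in_len * 2`.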
    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return Ok(bx.insert_element(args[0].immediate(),
                                    args[2].immediate(),
                                    args[1].immediate()))
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
    }

    if name == "simd_select" {
        let m_elem_ty = in_elem;
        let m_len = in_len;
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(m_len == v_len,
                 "mismatched lengths: mask length `{}` != other vector length `{}`",
                 m_len, v_len
        );
        match m_elem_ty.kind {
            ty::Int(_) => {},
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
        }
        // truncate the mask to a vector of i1s
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
    if name == "simd_bitmask" {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
        // vector mask and returns an unsigned integer containing the most
        // significant bit (MSB) of each lane.
        //
        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
        // trailing bits.
        let expected_int_bits = in_len.max(8);
        match ret_ty.kind {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (),
            _ => return_error!(
                "bitmask `{}`, expected `u{}`",
                ret_ty, expected_int_bits
            ),
        }

        // Integer vector <i{in_bitwidth} x in_len>:
        let (i_xn, in_elem_bitwidth) = match in_elem.kind {
            ty::Int(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
            ),
            ty::Uint(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
            ),
            _ => return_error!(
                "vector argument `{}`'s element type `{}`, expected integer element type",
                in_ty, in_elem
            ),
        };

        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_indices = vec![
            bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
            in_len as _
        ];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _));
        // Bitcast <i1 x N> to iN:
        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _));
        // Zero-extend iN to the bitmask type:
        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _)));
    }
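    // Illustrative note (not from the original source): for a 4-lane i32 mask
    // [-1, 0, -1, 0], each lane's MSB is shifted down by 31, giving
    // [1, 0, 1, 0]; the truncation to <4 x i1> and the bitcast pack that into
    // the integer 0b0101 (lane 0 in the LSB on little-endian targets), which
    // is then zero-extended to the u8 return type.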
    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc::ty::TyS<'_>,
        in_ty: &::rustc::ty::TyS<'_>,
        in_len: u64,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            }
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }
        let ety = match in_elem.kind {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty, in_len);
                }
                "f32"
            }
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!("unsupported floating-point vector `{}` with length `{}` \
                                   out-of-range [2, 8]",
                                  in_ty, in_len);
                }
                "f64"
            }
            ty::Float(f) => {
                return_error!("unsupported element type `{}` of floating-point vector `{}`",
                              f.name_str(), in_ty);
            }
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.get_intrinsic(&llvm_name);
        let c = bx.call(intrinsic,
                        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                        None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }

    match name {
        "simd_fsqrt" => {
            return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fsin" => {
            return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fcos" => {
            return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fabs" => {
            return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_floor" => {
            return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_ceil" => {
            return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp" => {
            return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp2" => {
            return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog10" => {
            return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog2" => {
            return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog" => {
            return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpowi" => {
            return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpow" => {
            return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fma" => {
            return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
        }
        _ => { /* fallthrough */ }
    }
    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match elem_ty.kind {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }

    fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64,
                      mut no_pointers: usize) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.kind {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len)
    }
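    // Illustrative note (not from the original source): these helpers build
    // the type suffixes LLVM uses to mangle masked gather/scatter names, e.g.
    // (u32, 8 lanes, 0 pointer levels) maps to "v8i32" and (f32, 4 lanes,
    // 1 pointer level) to "v4p0f32", yielding intrinsic names such as
    // `llvm.masked.gather.v4f32.v4p0f32`.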
    if name == "simd_gather" {
        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
        //             mask: <N x i{M}>) -> <N x T>
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // The return type must match the first argument type
        require!(ret_ty == in_ty,
                 "expected return type `{}`, found `{}`",
                 in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
                                                 non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*_ {}`",
                         arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx), in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx), arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(&llvm_intrinsic,
                               bx.type_func(&[
                                   llvm_pointer_vec_ty,
                                   alignment_ty,
                                   mask_ty,
                                   llvm_elem_vec_ty], llvm_elem_vec_ty));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
                        None);
        return Ok(v);
    }
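    // Illustrative note (not from the original source): for a gather of
    // <4 x f32> through <4 x *const f32>, the call above resolves to
    //
    //     declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(
    //         <4 x float*>, i32, <4 x i1>, <4 x float>)
    //
    // where the final vector argument supplies the pass-through values for
    // masked-off lanes.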
    if name == "simd_scatter" {
        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
        //              mask: <N x i{M}>) -> ()
        // * N: number of elements in the input vectors
        // * T: type of the element to store
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut
                => (ptr_count(arg_tys[1].simd_type(tcx)),
                    non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*mut {}`",
                         arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx), in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx), arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(&llvm_intrinsic,
                               bx.type_func(&[llvm_elem_vec_ty,
                                              llvm_pointer_vec_ty,
                                              alignment_ty,
                                              mask_ty], ret_t));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
                        None);
        return Ok(v);
    }
    macro_rules! arith_red {
        ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            if name.contains("mul") {
                                Ok(bx.mul(args[1].immediate(), r))
                            } else {
                                Ok(bx.add(args[1].immediate(), r))
                            }
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    }
                    ty::Float(f) => {
                        let acc = if $ordered {
                            // ordered arithmetic reductions take an accumulator
                            args[1].immediate()
                        } else {
                            // unordered arithmetic reductions use the identity accumulator
                            let identity_acc = if $name.contains("mul") { 1.0 } else { 0.0 };
                            match f.bit_width() {
                                32 => bx.const_real(bx.type_f32(), identity_acc),
                                64 => bx.const_real(bx.type_f64(), identity_acc),
                                v => {
                                    return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                        $name, in_ty, in_elem, v, ret_ty
                                    )
                                }
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => {
                        return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            $name, in_ty, in_elem, ret_ty
                        )
                    }
                };
            }
        }
    }

    arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd, true);
    arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul, true);
    arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
    arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
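    // Illustrative note (not from the original source): an ordered reduction
    // folds the accumulator into the result, e.g. simd_reduce_add_ordered over
    // [1, 2, 3, 4] with accumulator 10 yields 10 + (1 + 2 + 3 + 4) = 20, while
    // the unordered float form starts from the identity (0.0 for add, 1.0 for
    // mul) and is allowed to reassociate.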
    macro_rules! minmax_red {
        ($name:tt: $int_red:ident, $float_red:ident) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.kind {
                    ty::Int(_i) => {
                        Ok(bx.$int_red(args[0].immediate(), true))
                    }
                    ty::Uint(_u) => {
                        Ok(bx.$int_red(args[0].immediate(), false))
                    }
                    ty::Float(_f) => {
                        Ok(bx.$float_red(args[0].immediate()))
                    }
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    }
                };
            }
        }
    }

    minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
    minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);

    minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);

    macro_rules! bitwise_red {
        ($name:tt : $red:ident, $boolean:expr) => {
            if name == $name {
                let input = if !$boolean {
                    require!(ret_ty == in_elem,
                             "expected return type `{}` (element of input `{}`), found `{}`",
                             in_elem, in_ty, ret_ty);
                    args[0].immediate()
                } else {
                    match in_elem.kind {
                        ty::Int(_) | ty::Uint(_) => {},
                        _ => {
                            return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                          $name, in_ty, in_elem, ret_ty)
                        }
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(
                            if !$boolean {
                                r
                            } else {
                                bx.zext(r, bx.type_bool())
                            }
                        )
                    }
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    }
                };
            }
        }
    }

    bitwise_red!("simd_reduce_and": vector_reduce_and, false);
    bitwise_red!("simd_reduce_or": vector_reduce_or, false);
    bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
    bitwise_red!("simd_reduce_all": vector_reduce_and, true);
    bitwise_red!("simd_reduce_any": vector_reduce_or, true);
    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return Ok(args[0].immediate()); }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.kind {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };
        let (out_style, out_width) = match out_elem.kind {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => if in_is_signed {
                        bx.sext(args[0].immediate(), llret_ty)
                    } else {
                        bx.zext(args[0].immediate(), llret_ty)
                    }
                })
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                })
            }
            _ => { /* Unsupported. Fallthrough. */ }
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.kind {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                         "unsupported operation on `{}` with element `{}`",
                         in_ty,
                         in_elem)
            })*
        }
    }

    arith! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }

    if name == "simd_saturating_add" || name == "simd_saturating_sub" {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == "simd_saturating_add";
        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
        let (signed, elem_width, elem_ty) = match in_elem.kind {
            ty::Int(i) =>
                (
                    true,
                    i.bit_width().unwrap_or(ptr_bits),
                    bx.cx.type_int_from_ty(i)
                ),
            ty::Uint(i) =>
                (
                    false,
                    i.bit_width().unwrap_or(ptr_bits),
                    bx.cx.type_uint_from_ty(i)
                ),
            _ => {
                return_error!(
                    "expected element type `{}` of vector type `{}` \
                     to be a signed or unsigned integer type",
                    arg_tys[0].simd_type(tcx), arg_tys[0]
                );
            }
        };
        let llvm_intrinsic = &format!(
            "llvm.{}{}.sat.v{}i{}",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
            in_len,
            elem_width
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

        let f = bx.declare_cfn(
            llvm_intrinsic,
            bx.type_func(&[vec_ty, vec_ty], vec_ty)
        );
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[lhs, rhs], None);
        return Ok(v);
    }

    span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate using one
// of the already existing ones.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
    match ty.kind {
        ty::Int(t) => Some((match t {
            ast::IntTy::Isize => cx.tcx.sess.target.ptr_width as u64,
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
            ast::IntTy::I128 => 128,
        }, true)),
        ty::Uint(t) => Some((match t {
            ast::UintTy::Usize => cx.tcx.sess.target.ptr_width as u64,
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
            ast::UintTy::U128 => 128,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind {
        ty::Float(t) => Some(t.bit_width() as u64),
        _ => None,
    }
}