// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

// NOTE: several `use` items were missing from this copy of the file and are
// reconstructed from the identifiers used below; exact paths may differ
// slightly between rustc revisions.
use attributes;
use intrinsics::{self, Intrinsic};
use llvm;
use llvm_util;
use abi::{Abi, FnType, LlvmType, PassMode};
use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
use base::*;
use context::CodegenCx;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc_codegen_ssa::common::TypeKind;
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::{Builder, MemFlags};
use value::Value;
use rustc_codegen_ssa::traits::*;

use rustc::session::Session;
use syntax_pos::Span;

use std::cmp::Ordering;
use std::iter;
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None
    };
    Some(cx.get_intrinsic(&llvm_name))
}
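
// Lowering example (illustrative, not from the original source): a call to
// `core::intrinsics::sqrtf32(x)` maps via the table above to `llvm.sqrt.f32`,
// so the `simple.is_some()` arm of `codegen_intrinsic_call` below emits a
// single `call float @llvm.sqrt.f32(float %x)` in the generated IR.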

impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &self,
        callee_ty: Ty<'tcx>,
        fn_ty: &FnType<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) {
        let cx = self.cx();
        let tcx = cx.tcx;

        let (def_id, substs) = match callee_ty.sty {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty)
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = &*tcx.item_name(def_id).as_str();

        let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
        let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);

        let simple = get_simple_intrinsic(cx, name);
        let llval = match name {
            _ if simple.is_some() => {
                self.call(simple.unwrap(),
                          &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                          None)
            }
            "unreachable" => {
                return;
            },
            "likely" => {
                let expect = cx.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), cx.const_bool(true)], None)
            }
            "unlikely" => {
                let expect = cx.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), cx.const_bool(false)], None)
            }
            "try" => {
                try_intrinsic(self, cx,
                              args[0].immediate(),
                              args[1].immediate(),
                              args[2].immediate(),
                              llresult);
                return;
            }
            "breakpoint" => {
                let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            "size_of" => {
                let tp_ty = substs.type_at(0);
                cx.const_usize(cx.size_of(tp_ty).bytes())
            }
            "size_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) =
                        glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llsize
                } else {
                    cx.const_usize(cx.size_of(tp_ty).bytes())
                }
            }
            "min_align_of" => {
                let tp_ty = substs.type_at(0);
                cx.const_usize(cx.align_of(tp_ty).abi())
            }
163 "min_align_of_val" => {
164 let tp_ty = substs.type_at(0);
165 if let OperandValue::Pair(_, meta) = args[0].val {
167 glue::size_and_align_of_dst(self, tp_ty, Some(meta));
170 cx.const_usize(cx.align_of(tp_ty).abi())
174 let tp_ty = substs.type_at(0);
175 cx.const_usize(cx.align_of(tp_ty).pref())
178 let tp_ty = substs.type_at(0);
179 let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
180 cx.const_str_slice(ty_name)
183 cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0)))
            "init" => {
                let ty = substs.type_at(0);
                if !cx.layout_of(ty).is_zst() {
                    // Just zero out the stack slot.
                    // If we store a zero constant, LLVM will drown in vreg allocation for large
                    // data structures, and the generated code will be awful. (A telltale sign of
                    // this is large quantities of `mov [byte ptr foo],0` in the generated code.)
                    memset_intrinsic(
                        &self,
                        false,
                        ty,
                        llresult,
                        cx.const_u8(0),
                        cx.const_usize(1)
                    );
                }
                return;
            }
            // Effectively no-ops
            "uninit" | "forget" => {
                return;
            }
            "needs_drop" => {
                let tp_ty = substs.type_at(0);

                cx.const_bool(cx.type_needs_drop(tp_ty))
            }
            "offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.inbounds_gep(ptr, &[offset])
            }
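            // Illustrative note (not from the original source): `offset` emits
            // an inbounds GEP, so the result must stay within the original
            // allocation, while `arith_offset` below emits a plain GEP with
            // wrapping semantics.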
            "arith_offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.gep(ptr, &[offset])
            }
223 "copy_nonoverlapping" => {
224 copy_intrinsic(&self, false, false, substs.type_at(0),
225 args[1].immediate(), args[0].immediate(), args[2].immediate());
229 copy_intrinsic(&self, true, false, substs.type_at(0),
230 args[1].immediate(), args[0].immediate(), args[2].immediate());
234 memset_intrinsic(&self, false, substs.type_at(0),
235 args[0].immediate(), args[1].immediate(), args[2].immediate());
239 "volatile_copy_nonoverlapping_memory" => {
240 copy_intrinsic(&self, false, true, substs.type_at(0),
241 args[0].immediate(), args[1].immediate(), args[2].immediate());
244 "volatile_copy_memory" => {
245 copy_intrinsic(&self, true, true, substs.type_at(0),
246 args[0].immediate(), args[1].immediate(), args[2].immediate());
249 "volatile_set_memory" => {
250 memset_intrinsic(&self, true, substs.type_at(0),
251 args[0].immediate(), args[1].immediate(), args[2].immediate());
254 "volatile_load" | "unaligned_volatile_load" => {
255 let tp_ty = substs.type_at(0);
256 let mut ptr = args[0].immediate();
257 if let PassMode::Cast(ty) = fn_ty.ret.mode {
258 ptr = self.pointercast(ptr, cx.type_ptr_to(ty.llvm_type(cx)));
260 let load = self.volatile_load(ptr);
261 let align = if name == "unaligned_volatile_load" {
264 cx.align_of(tp_ty).abi() as u32
267 llvm::LLVMSetAlignment(load, align);
269 to_immediate(self, load, cx.layout_of(tp_ty))
271 "volatile_store" => {
272 let dst = args[0].deref(cx);
273 args[1].val.volatile_store(self, dst);
276 "unaligned_volatile_store" => {
277 let dst = args[0].deref(cx);
278 args[1].val.unaligned_volatile_store(self, dst);
281 "prefetch_read_data" | "prefetch_write_data" |
282 "prefetch_read_instruction" | "prefetch_write_instruction" => {
283 let expect = cx.get_intrinsic(&("llvm.prefetch"));
284 let (rw, cache_type) = match name {
285 "prefetch_read_data" => (0, 1),
286 "prefetch_write_data" => (1, 1),
287 "prefetch_read_instruction" => (0, 0),
288 "prefetch_write_instruction" => (1, 0),
295 cx.const_i32(cache_type)
298 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
299 "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
300 "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
301 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
302 "rotate_left" | "rotate_right" => {
304 match int_type_width_signed(ty, cx) {
305 Some((width, signed)) =>
308 let y = cx.const_bool(false);
309 let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
310 self.call(llfn, &[args[0].immediate(), y], None)
312 "ctlz_nonzero" | "cttz_nonzero" => {
313 let y = cx.const_bool(true);
314 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
315 let llfn = cx.get_intrinsic(llvm_name);
316 self.call(llfn, &[args[0].immediate(), y], None)
318 "ctpop" => self.call(
319 cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
320 &[args[0].immediate()],
325 args[0].immediate() // byte swap a u8/i8 is just a no-op
327 self.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
328 &[args[0].immediate()], None)
332 self.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
333 &[args[0].immediate()], None)
335 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
336 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
337 if signed { 's' } else { 'u' },
339 let llfn = cx.get_intrinsic(&intrinsic);
341 // Convert `i1` to a `bool`, and write it to the out parameter
342 let pair = self.call(llfn, &[
346 let val = self.extract_value(pair, 0);
347 let overflow = self.zext(
348 self.extract_value(pair, 1),
352 let dest = result.project_field(self, 0);
353 self.store(val, dest.llval, dest.align);
354 let dest = result.project_field(self, 1);
355 self.store(overflow, dest.llval, dest.align);
359 "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()),
360 "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()),
361 "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()),
364 self.exactsdiv(args[0].immediate(), args[1].immediate())
366 self.exactudiv(args[0].immediate(), args[1].immediate())
370 self.sdiv(args[0].immediate(), args[1].immediate())
372 self.udiv(args[0].immediate(), args[1].immediate())
376 self.srem(args[0].immediate(), args[1].immediate())
378 self.urem(args[0].immediate(), args[1].immediate())
380 "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()),
383 self.ashr(args[0].immediate(), args[1].immediate())
385 self.lshr(args[0].immediate(), args[1].immediate())
387 "rotate_left" | "rotate_right" => {
388 let is_left = name == "rotate_left";
389 let val = args[0].immediate();
390 let raw_shift = args[1].immediate();
391 if llvm_util::get_major_version() >= 7 {
392 // rotate = funnel shift with first two args the same
393 let llvm_name = &format!("llvm.fsh{}.i{}",
394 if is_left { 'l' } else { 'r' }, width);
395 let llfn = cx.get_intrinsic(llvm_name);
396 self.call(llfn, &[val, val, raw_shift], None)
398 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
399 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
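                                    // Worked example (illustrative, not from the
                                    // original source): rotate_left with
                                    // X = 0b1000_0001u8, S = 1, BW = 8 gives
                                    // (X << 1) | (X >> 7)
                                    //   == 0b0000_0010 | 0b0000_0001
                                    //   == 0b0000_0011.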
                                    let width = cx.const_uint(cx.type_ix(width), width);
                                    let shift = self.urem(raw_shift, width);
                                    let inv_shift = self.urem(self.sub(width, raw_shift), width);
                                    let shift1 = self.shl(
                                        val,
                                        if is_left { shift } else { inv_shift },
                                    );
                                    let shift2 = self.lshr(
                                        val,
                                        if !is_left { shift } else { inv_shift },
                                    );
                                    self.or(shift1, shift2)
                                }
                            },
                            _ => bug!(),
                        },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, ty));
                        return;
                    }
                }
            },
426 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
427 let sty = &arg_tys[0].sty;
428 match float_type_width(sty) {
431 "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()),
432 "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()),
433 "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()),
434 "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
435 "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()),
439 span_invalid_monomorphization_error(
441 &format!("invalid monomorphization of `{}` intrinsic: \
442 expected basic float type, found `{}`", name, sty));
449 "discriminant_value" => {
450 args[0].deref(cx).codegen_get_discr(self, ret_ty)
453 name if name.starts_with("simd_") => {
454 match generic_simd_intrinsic(&self, name,
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
465 name if name.starts_with("atomic_") => {
466 use rustc_codegen_ssa::common::AtomicOrdering::*;
467 use rustc_codegen_ssa::common::
468 {SynchronizationScope, AtomicRmwBinOp};
470 let split: Vec<&str> = name.split('_').collect();
472 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
473 let (order, failorder) = match split.len() {
474 2 => (SequentiallyConsistent, SequentiallyConsistent),
475 3 => match split[2] {
476 "unordered" => (Unordered, Unordered),
477 "relaxed" => (Monotonic, Monotonic),
478 "acq" => (Acquire, Acquire),
479 "rel" => (Release, Monotonic),
480 "acqrel" => (AcquireRelease, Acquire),
481 "failrelaxed" if is_cxchg =>
482 (SequentiallyConsistent, Monotonic),
483 "failacq" if is_cxchg =>
484 (SequentiallyConsistent, Acquire),
485 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
487 4 => match (split[2], split[3]) {
488 ("acq", "failrelaxed") if is_cxchg =>
489 (Acquire, Monotonic),
490 ("acqrel", "failrelaxed") if is_cxchg =>
491 (AcquireRelease, Monotonic),
492 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
494 _ => cx.sess().fatal("Atomic intrinsic not in correct format"),
                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, ty));
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, cx).is_some() {
                            let weak = split[1] == "cxchgweak";
                            let pair = self.atomic_cmpxchg(
                                args[0].immediate(),
                                args[1].immediate(),
                                args[2].immediate(),
                                order,
                                failorder,
                                weak);
                            let val = self.extract_value(pair, 0);
                            let success = self.zext(
                                self.extract_value(pair, 1),
                                cx.type_bool()
                            );

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, cx).is_some() {
                            let size = cx.size_of(ty);
                            self.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, cx).is_some() {
                            let size = cx.size_of(ty);
                            self.atomic_store(
                                args[1].immediate(),
                                args[0].immediate(),
                                order,
                                size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        self.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        self.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }
                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => cx.sess().fatal("unknown atomic operation")
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, cx).is_some() {
                            self.atomic_rmw(
                                atom_op,
                                args[0].immediate(),
                                args[1].immediate(),
                                order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }
599 "nontemporal_store" => {
600 let dst = args[0].deref(cx);
601 args[1].val.nontemporal_store(self, dst);
            _ => {
                let intr = match Intrinsic::find(&name) {
                    Some(intr) => intr,
                    None => bug!("unknown intrinsic '{}'", name),
                };
                fn one<T>(x: Vec<T>) -> T {
                    assert_eq!(x.len(), 1);
                    x.into_iter().next().unwrap()
                }
                fn ty_to_type<'ll>(
                    cx: &CodegenCx<'ll, '_>,
                    t: &intrinsics::Type,
                ) -> Vec<&'ll Type> {
                    use intrinsics::Type::*;
                    match *t {
                        Void => vec![cx.type_void()],
                        Integer(_signed, _width, llvm_width) => {
                            vec![cx.type_ix(llvm_width as u64)]
                        }
                        Float(x) => {
                            match x {
                                32 => vec![cx.type_f32()],
                                64 => vec![cx.type_f64()],
                                _ => bug!()
                            }
                        }
                        Pointer(ref t, ref llvm_elem, _const) => {
                            let t = llvm_elem.as_ref().unwrap_or(t);
                            let elem = one(ty_to_type(cx, t));
                            vec![cx.type_ptr_to(elem)]
                        }
                        Vector(ref t, ref llvm_elem, length) => {
                            let t = llvm_elem.as_ref().unwrap_or(t);
                            let elem = one(ty_to_type(cx, t));
                            vec![cx.type_vector(elem, length as u64)]
                        }
                        Aggregate(false, ref contents) => {
                            let elems = contents.iter()
                                                .map(|t| one(ty_to_type(cx, t)))
                                                .collect::<Vec<_>>();
                            vec![cx.type_struct(&elems, false)]
                        }
                        Aggregate(true, ref contents) => {
                            contents.iter()
                                    .flat_map(|t| ty_to_type(cx, t))
                                    .collect()
                        }
                    }
                }
                // This allows an argument list like `foo, (bar, baz),
                // qux` to be converted into `foo, bar, baz, qux`, integer
                // arguments to be truncated as needed and pointers to be
                // cast.
                fn modify_as_needed<'ll, 'tcx>(
                    bx: &Builder<'_, 'll, 'tcx>,
                    t: &intrinsics::Type,
                    arg: &OperandRef<'tcx, &'ll Value>,
                ) -> Vec<&'ll Value> {
                    match *t {
                        intrinsics::Type::Aggregate(true, ref contents) => {
                            // We found a tuple that needs squishing! So
                            // run over the tuple and load each field.
                            //
                            // This assumes the type is "simple", i.e. no
                            // destructors, and the contents are SIMD
                            // etc.
                            assert!(!bx.cx().type_needs_drop(arg.layout.ty));
                            let (ptr, align) = match arg.val {
                                OperandValue::Ref(ptr, None, align) => (ptr, align),
                                _ => bug!()
                            };
                            let arg = PlaceRef::new_sized(ptr, arg.layout, align);
                            (0..contents.len()).map(|i| {
                                bx.load_operand(arg.project_field(bx, i)).immediate()
                            }).collect()
                        }
                        intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                            let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                            vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
                        }
                        intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                            let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                            vec![
                                bx.bitcast(arg.immediate(),
                                           bx.cx().type_vector(llvm_elem, length as u64))
                            ]
                        }
                        intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                            // the LLVM intrinsic uses a smaller integer
                            // size than the C intrinsic's signature, so
                            // we have to trim it down here.
                            vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
                        }
                        _ => vec![arg.immediate()],
                    }
                }
                let inputs = intr.inputs.iter()
                                        .flat_map(|t| ty_to_type(cx, t))
                                        .collect::<Vec<_>>();

                let outputs = one(ty_to_type(cx, &intr.output));

                let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
                    modify_as_needed(&self, t, arg)
                }).collect();
                assert_eq!(inputs.len(), llargs.len());

                let val = match intr.definition {
                    intrinsics::IntrinsicDef::Named(name) => {
                        let f = cx.declare_cfn(
                            name,
                            cx.type_func(&inputs, outputs),
                        );
                        self.call(f, &llargs, None)
                    }
                };
                match *intr.output {
                    intrinsics::Type::Aggregate(flatten, ref elems) => {
                        // the output is a tuple so we need to munge it properly
                        assert!(!flatten);

                        for i in 0..elems.len() {
                            let dest = result.project_field(self, i);
                            let val = self.extract_value(val, i as u64);
                            self.store(val, dest.llval, dest.align);
                        }
                        return;
                    }
                    _ => val,
                }
            }
        };

        if !fn_ty.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_ty.ret.mode {
                let ptr = self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
                self.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val.store(self, result);
            }
        }
    }
}

fn copy_intrinsic(
    bx: &Builder<'a, 'll, 'tcx>,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    src: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.cx().size_and_align_of(ty);
    let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

fn memset_intrinsic(
    bx: &Builder<'a, 'll, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    val: &'ll Value,
    count: &'ll Value
) {
    let (size, align) = bx.cx().size_and_align_of(ty);
    let size = bx.cx().const_usize(size.bytes());
    let flags = if volatile {
        MemFlags::VOLATILE
    } else {
        MemFlags::empty()
    };
    bx.memset(dst, val, bx.mul(size, count), align, flags);
}

fn try_intrinsic(
    bx: &Builder<'a, 'll, 'tcx>,
    cx: &CodegenCx<'ll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    if bx.cx().sess().no_landing_pads() {
        bx.call(func, &[data], None);
        let ptr_align = bx.tcx().data_layout.pointer_align;
        bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
    } else if wants_msvc_seh(bx.cx().sess()) {
        codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
    } else {
        codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the older instructions are still better
// optimized.
fn codegen_msvc_try(
    bx: &Builder<'a, 'll, 'tcx>,
    cx: &CodegenCx<'ll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(cx, &mut |bx| {
        bx.set_personality_fn(bx.cx().eh_personality());

        let normal = bx.build_sibling_block("normal");
        let catchswitch = bx.build_sibling_block("catchswitch");
        let catchpad = bx.build_sibling_block("catchpad");
        let caught = bx.build_sibling_block("caught");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = cx.type_ptr_to(cx.type_i64());
        let ptr_align = bx.tcx().data_layout.pointer_align;
        let slot = bx.alloca(i64p, "slot", ptr_align);
        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(cx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        let tcx = cx.tcx;
        let tydesc = match tcx.lang_items().msvc_try_filter() {
            Some(did) => cx.get_static(did),
            None => bug!("msvc_try_filter not defined"),
        };
        let funclet = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]);
        let addr = catchpad.load(slot, ptr_align);

        let i64_align = bx.tcx().data_layout.i64_align;
        let arg1 = catchpad.load(addr, i64_align);
        let val1 = cx.const_i32(1);
        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
        let local_ptr = catchpad.bitcast(local_ptr, i64p);
        catchpad.store(arg1, local_ptr, i64_align);
        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(cx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align;
    bx.store(ret, dest, i32_align);
}
919 // Definition of the standard "try" function for Rust using the GNU-like model
920 // of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
923 // This codegen is a little surprising because we always call a shim
924 // function instead of inlining the call to `invoke` manually here. This is done
925 // because in LLVM we're only allowed to have one personality per function
926 // definition. The call to the `try` intrinsic is being inlined into the
927 // function calling it, and that function may already have other personality
928 // functions in play. By calling a shim we're guaranteed that our shim will have
929 // the right personality function.
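//
// Illustrative note (not from the original source): the generated shim has
// the Rust-level signature
//   unsafe fn __rust_try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32
// (see `get_rust_try_fn` below); it returns 0 on a normal return from `f`
// and 1 when an exception is caught, writing the exception pointer through
// `local_ptr`.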
fn codegen_gnu_try(
    bx: &Builder<'a, 'll, 'tcx>,
    cx: &CodegenCx<'ll, 'tcx>,
    func: &'ll Value,
    data: &'ll Value,
    local_ptr: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(cx, &mut |bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.
        let then = bx.build_sibling_block("then");
        let catch = bx.build_sibling_block("catch");

        let func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let local_ptr = llvm::get_param(bx.llfn(), 2);
        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(cx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
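        // Illustrative IR (not from the original source): the landing pad
        // below produces a `{ i8*, i32 }` pair and catches everything:
        //   %vals = landingpad { i8*, i32 } catch i8* null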
        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
        catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
        let ptr = catch.extract_value(vals, 0);
        let ptr_align = bx.tcx().data_layout.pointer_align;
        catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align);
        catch.ret(cx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
    let i32_align = bx.tcx().data_layout.i32_align;
    bx.store(ret, dest, i32_align);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    inputs: Vec<Ty<'tcx>>,
    output: Ty<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    ));
    let llfn = cx.define_internal_fn(name, rust_fn_sig);
    attributes::from_fn_attrs(cx, llfn, None);
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic(
    bx: &Builder<'a, 'll, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.cx().sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        }
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        }
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bx.tcx();
    let sig = tcx.normalize_erasing_late_bound_regions(
        ty::ParamEnv::reveal_all(),
        &callee_ty.fn_sig(tcx),
    );
    let arg_tys = sig.inputs();

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BinOpKind::Eq),
        "simd_ne" => Some(hir::BinOpKind::Ne),
        "simd_lt" => Some(hir::BinOpKind::Lt),
        "simd_le" => Some(hir::BinOpKind::Le),
        "simd_gt" => Some(hir::BinOpKind::Gt),
        "simd_ge" => Some(hir::BinOpKind::Ge),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return Ok(compare_simd_types(bx,
                                     args[0].immediate(),
                                     args[1].immediate(),
                                     in_elem,
                                     llret_ty,
                                     cmp_op))
    }
1134 if name.starts_with("simd_shuffle") {
1135 let n: usize = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
1136 span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?"));
1138 require_simd!(ret_ty, "return");
1140 let out_len = ret_ty.simd_size(tcx);
1141 require!(out_len == n,
1142 "expected return type of length {}, found `{}` with length {}",
1143 n, ret_ty, out_len);
1144 require!(in_elem == ret_ty.simd_type(tcx),
1145 "expected return element type `{}` (element of input `{}`), \
1146 found `{}` with element type `{}`",
1148 ret_ty, ret_ty.simd_type(tcx));
1150 let total_len = in_len as u128 * 2;
1152 let vector = args[2].immediate();
1154 let indices: Option<Vec<_>> = (0..n)
1157 let val = bx.cx().const_get_elt(vector, i as u64);
1158 match bx.cx().const_to_opt_u128(val, true) {
1160 emit_error!("shuffle index #{} is not a constant", arg_idx);
1163 Some(idx) if idx >= total_len => {
1164 emit_error!("shuffle index #{} is out of bounds (limit {})",
1165 arg_idx, total_len);
1168 Some(idx) => Some(bx.cx().const_i32(idx as i32)),
1172 let indices = match indices {
1174 None => return Ok(bx.cx().const_null(llret_ty))
1177 return Ok(bx.shuffle_vector(args[0].immediate(),
1178 args[1].immediate(),
1179 bx.cx().const_vector(&indices)))
1182 if name == "simd_insert" {
1183 require!(in_elem == arg_tys[2],
1184 "expected inserted type `{}` (element of input `{}`), found `{}`",
1185 in_elem, in_ty, arg_tys[2]);
1186 return Ok(bx.insert_element(args[0].immediate(),
1187 args[2].immediate(),
1188 args[1].immediate()))
1190 if name == "simd_extract" {
1191 require!(ret_ty == in_elem,
1192 "expected return type `{}` (element of input `{}`), found `{}`",
1193 in_elem, in_ty, ret_ty);
1194 return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
1197 if name == "simd_select" {
1198 let m_elem_ty = in_elem;
1200 let v_len = arg_tys[1].simd_size(tcx);
1201 require!(m_len == v_len,
1202 "mismatched lengths: mask length `{}` != other vector length `{}`",
1205 match m_elem_ty.sty {
1207 _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
1209 // truncate the mask to a vector of i1s
1210 let i1 = bx.cx().type_i1();
1211 let i1xn = bx.cx().type_vector(i1, m_len as u64);
1212 let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1213 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));

    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc::ty::TyS,
        in_ty: &::rustc::ty::TyS,
        in_len: usize,
        bx: &Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.cx().sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            }
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }
        let ety = match in_elem.sty {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty, in_len);
                }
                "f32"
            },
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!("unsupported floating-point vector `{}` with length `{}` \
                                   out-of-range [2, 8]",
                                  in_ty, in_len);
                }
                "f64"
            },
            ty::Float(f) => {
                return_error!("unsupported element type `{}` of floating-point vector `{}`",
                              f, in_ty);
            },
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.cx().get_intrinsic(&llvm_name);
        let c = bx.call(intrinsic,
                        &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                        None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }
1282 return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1285 return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1288 return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1291 return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1294 return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1297 return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1300 return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1303 return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1306 return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1309 return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1312 return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1315 return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1318 return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1321 return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1323 _ => { /* fallthrough */ }

    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
    fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match elem_ty.sty {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }
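
    // Illustrative example (not from the original source):
    // `llvm_vector_str(i32, 4, 1)` yields "v4p0i32", the mangled-name segment
    // for a `<4 x i32*>` vector in an overloaded LLVM intrinsic name such as
    // `llvm.masked.gather.v4i32.v4p0i32`.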

    fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize,
                      mut no_pointers: usize) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.sty {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len as u64)
    }
1356 if name == "simd_gather" {
1357 // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1358 // mask: <N x i{M}>) -> <N x T>
1359 // * N: number of elements in the input vectors
1360 // * T: type of the element to load
1361 // * M: any integer width is supported, will be truncated to i1
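        // For instance (illustrative): gathering four `f32`s through a
        // `<4 x *_ f32>` pointer vector under a `<4 x i32>` mask lowers to a
        // call to `llvm.masked.gather.v4f32.v4p0f32` below.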

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // The return type must match the first argument type
        require!(ret_ty == in_ty,
                 "expected return type `{}`, found `{}`",
                 in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: ty::Ty) -> usize {
            match t.sty {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: ty::Ty) -> ty::Ty {
            match t.sty {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
                                                 non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*_ {}`",
                         arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx).sty, in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).sty {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().type_i32();
        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.cx().type_i1();
            let i1xn = bx.cx().type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.cx().declare_cfn(&llvm_intrinsic,
                                    bx.cx().type_func(&[
                                        llvm_pointer_vec_ty,
                                        alignment_ty,
                                        mask_ty,
                                        llvm_elem_vec_ty], llvm_elem_vec_ty));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
                        None);
        return Ok(v);
    }
1461 if name == "simd_scatter" {
1462 // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1463 // mask: <N x i{M}>) -> ()
1464 // * N: number of elements in the input vectors
1465 // * T: type of the element to load
1466 // * M: any integer width is supported, will be truncated to i1
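        // For instance (illustrative): scattering four `f32`s through a
        // `<4 x *mut f32>` pointer vector under a `<4 x i32>` mask lowers to a
        // call to `llvm.masked.scatter.v4f32.v4p0f32` below.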

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        require!(in_len == arg_tys[1].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
                 arg_tys[1].simd_size(tcx));
        require!(in_len == arg_tys[2].simd_size(tcx),
                 "expected {} argument with length {} (same as input type `{}`), \
                  found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
                 arg_tys[2].simd_size(tcx));

        // This counts how many pointers
        fn ptr_count(t: ty::Ty) -> usize {
            match t.sty {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: ty::Ty) -> ty::Ty {
            match t.sty {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
                => (ptr_count(arg_tys[1].simd_type(tcx)),
                    non_ptr(arg_tys[1].simd_type(tcx))),
            _ => {
                require!(false, "expected element type `{}` of second argument `{}` \
                                 to be a pointer to the element type `{}` of the first \
                                 argument `{}`, found `{}` != `*mut {}`",
                         arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
                         arg_tys[1].simd_type(tcx).sty, in_elem);
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).sty {
            ty::Int(_) => (),
            _ => {
                require!(false, "expected element type `{}` of third argument `{}` \
                                 to be a signed integer type",
                         arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().type_i32();
        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.cx().type_i1();
            let i1xn = bx.cx().type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.cx().type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.cx().declare_cfn(&llvm_intrinsic,
                                    bx.cx().type_func(&[llvm_elem_vec_ty,
                                                        llvm_pointer_vec_ty,
                                                        alignment_ty,
                                                        mask_ty], ret_t));
        llvm::SetUnnamedAddr(f, false);
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
                        None);
        return Ok(v);
    }

    macro_rules! arith_red {
        ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            if name.contains("mul") {
                                Ok(bx.mul(args[1].immediate(), r))
                            } else {
                                Ok(bx.add(args[1].immediate(), r))
                            }
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    },
                    ty::Float(f) => {
                        // ordered arithmetic reductions take an accumulator
                        let acc = if $ordered {
                            let acc = args[1].immediate();
                            // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
                            // * if the accumulator of the fadd isn't 0, incorrect
                            //   code is generated
                            // * if the accumulator of the fmul isn't 1, incorrect
                            //   code is generated
                            match bx.cx().const_get_real(acc) {
                                None => return_error!("accumulator of {} is not a constant",
                                                      $name),
                                Some((v, loses_info)) => {
                                    if $name.contains("mul") && v != 1.0_f64 {
                                        return_error!("accumulator of {} is not 1.0", $name);
                                    } else if $name.contains("add") && v != 0.0_f64 {
                                        return_error!("accumulator of {} is not 0.0", $name);
                                    } else if loses_info {
                                        return_error!("accumulator of {} loses information",
                                                      $name);
                                    }
                                }
                            }
                            acc
                        } else {
                            // unordered arithmetic reductions do not:
                            match f.bit_width() {
                                32 => bx.cx().const_undef(bx.cx().type_f32()),
                                64 => bx.cx().const_undef(bx.cx().type_f64()),
                                v => {
                                    return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                        $name, in_ty, in_elem, v, ret_ty
                                    )
                                }
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => {
                        return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            $name, in_ty, in_elem, ret_ty
                        )
                    },
                }
            }
        }
    }

    arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
    arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
    arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
    arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);

    macro_rules! minmax_red {
        ($name:tt: $int_red:ident, $float_red:ident) => {
            if name == $name {
                require!(ret_ty == in_elem,
                         "expected return type `{}` (element of input `{}`), found `{}`",
                         in_elem, in_ty, ret_ty);
                return match in_elem.sty {
                    ty::Int(_i) => {
                        Ok(bx.$int_red(args[0].immediate(), true))
                    },
                    ty::Uint(_u) => {
                        Ok(bx.$int_red(args[0].immediate(), false))
                    },
                    ty::Float(_f) => {
                        Ok(bx.$float_red(args[0].immediate()))
                    }
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }
        }
    }

    minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
    minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);

    minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);

    macro_rules! bitwise_red {
        ($name:tt : $red:ident, $boolean:expr) => {
            if name == $name {
                let input = if !$boolean {
                    require!(ret_ty == in_elem,
                             "expected return type `{}` (element of input `{}`), found `{}`",
                             in_elem, in_ty, ret_ty);
                    args[0].immediate()
                } else {
                    match in_elem.sty {
                        ty::Int(_) | ty::Uint(_) => {},
                        _ => {
                            return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                          $name, in_ty, in_elem, ret_ty)
                        }
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.cx().type_i1();
                    let i1xn = bx.cx().type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.sty {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(
                            if !$boolean {
                                r
                            } else {
                                bx.zext(r, bx.cx().type_bool())
                            }
                        )
                    },
                    _ => {
                        return_error!("unsupported {} from `{}` with element `{}` to `{}`",
                                      $name, in_ty, in_elem, ret_ty)
                    },
                }
            }
        }
    }

    bitwise_red!("simd_reduce_and": vector_reduce_and, false);
    bitwise_red!("simd_reduce_or": vector_reduce_or, false);
    bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
    bitwise_red!("simd_reduce_all": vector_reduce_and, true);
    bitwise_red!("simd_reduce_any": vector_reduce_or, true);
1716 if name == "simd_cast" {
1717 require_simd!(ret_ty, "return");
1718 let out_len = ret_ty.simd_size(tcx);
1719 require!(in_len == out_len,
1720 "expected return type with length {} (same as input type `{}`), \
1721 found `{}` with length {}",
1724 // casting cares about nominal type, not just structural type
1725 let out_elem = ret_ty.simd_type(tcx);
1727 if in_elem == out_elem { return Ok(args[0].immediate()); }
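        // e.g. (illustrative) `i32x4 -> f32x4` lowers to `sitofp` below, while
        // `u32x4 -> f32x4` lowers to `uitofp`, even though both inputs share
        // the same 128-bit layout.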

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => if in_is_signed {
                        bx.sext(args[0].immediate(), llret_ty)
                    } else {
                        bx.zext(args[0].immediate(), llret_ty)
                    }
                })
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                })
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                })
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }

    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.sty {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                         "unsupported operation on `{}` with element `{}`",
                         in_ty,
                         in_elem)
            })*
        }
    }

    arith! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }

    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate using some
// of the already existing ones.
fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
    match ty.sty {
        ty::Int(t) => Some((match t {
            ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64,
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
            ast::IntTy::I128 => 128,
        }, true)),
        ty::Uint(t) => Some((match t {
            ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64,
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
            ast::UintTy::U128 => 128,
        }, false)),
        _ => None,
    }
}

// Returns the width of a float TyKind.
// Returns None if the type is not a float.
fn float_type_width<'tcx>(sty: &ty::TyKind<'tcx>) -> Option<u64> {
    match *sty {
        ty::Float(t) => Some(t.bit_width() as u64),
        _ => None,
    }
}