1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![allow(non_upper_case_globals)]
14 use intrinsics::{self, Intrinsic};
15 use llvm::{self, TypeKind};
17 use abi::{Abi, FnType, LlvmType, PassMode};
18 use mir::place::PlaceRef;
19 use mir::operand::{OperandRef, OperandValue};
25 use type_of::LayoutLlvmExt;
26 use rustc::ty::{self, Ty};
27 use rustc::ty::layout::LayoutOf;
30 use syntax::symbol::Symbol;
34 use rustc::session::Session;
37 use std::cmp::Ordering;
40 fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
41 let llvm_name = match name {
42 "sqrtf32" => "llvm.sqrt.f32",
43 "sqrtf64" => "llvm.sqrt.f64",
44 "powif32" => "llvm.powi.f32",
45 "powif64" => "llvm.powi.f64",
46 "sinf32" => "llvm.sin.f32",
47 "sinf64" => "llvm.sin.f64",
48 "cosf32" => "llvm.cos.f32",
49 "cosf64" => "llvm.cos.f64",
50 "powf32" => "llvm.pow.f32",
51 "powf64" => "llvm.pow.f64",
52 "expf32" => "llvm.exp.f32",
53 "expf64" => "llvm.exp.f64",
54 "exp2f32" => "llvm.exp2.f32",
55 "exp2f64" => "llvm.exp2.f64",
56 "logf32" => "llvm.log.f32",
57 "logf64" => "llvm.log.f64",
58 "log10f32" => "llvm.log10.f32",
59 "log10f64" => "llvm.log10.f64",
60 "log2f32" => "llvm.log2.f32",
61 "log2f64" => "llvm.log2.f64",
62 "fmaf32" => "llvm.fma.f32",
63 "fmaf64" => "llvm.fma.f64",
64 "fabsf32" => "llvm.fabs.f32",
65 "fabsf64" => "llvm.fabs.f64",
66 "copysignf32" => "llvm.copysign.f32",
67 "copysignf64" => "llvm.copysign.f64",
68 "floorf32" => "llvm.floor.f32",
69 "floorf64" => "llvm.floor.f64",
70 "ceilf32" => "llvm.ceil.f32",
71 "ceilf64" => "llvm.ceil.f64",
72 "truncf32" => "llvm.trunc.f32",
73 "truncf64" => "llvm.trunc.f64",
74 "rintf32" => "llvm.rint.f32",
75 "rintf64" => "llvm.rint.f64",
76 "nearbyintf32" => "llvm.nearbyint.f32",
77 "nearbyintf64" => "llvm.nearbyint.f64",
78 "roundf32" => "llvm.round.f32",
79 "roundf64" => "llvm.round.f64",
80 "assume" => "llvm.assume",
81 "abort" => "llvm.trap",
84 Some(cx.get_intrinsic(&llvm_name))
87 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
88 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
89 /// add them to librustc_codegen_llvm/context.rs
90 pub fn codegen_intrinsic_call(
91 bx: &Builder<'a, 'll, 'tcx>,
93 fn_ty: &FnType<'tcx, Ty<'tcx>>,
94 args: &[OperandRef<'tcx, &'ll Value>],
101 let (def_id, substs) = match callee_ty.sty {
102 ty::FnDef(def_id, substs) => (def_id, substs),
103 _ => bug!("expected fn item type, found {}", callee_ty)
106 let sig = callee_ty.fn_sig(tcx);
107 let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
108 let arg_tys = sig.inputs();
109 let ret_ty = sig.output();
110 let name = &*tcx.item_name(def_id).as_str();
112 let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
113 let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
115 let simple = get_simple_intrinsic(cx, name);
116 let llval = match name {
117 _ if simple.is_some() => {
118 bx.call(simple.unwrap(),
119 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
126 let expect = cx.get_intrinsic(&("llvm.expect.i1"));
127 bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
130 let expect = cx.get_intrinsic(&("llvm.expect.i1"));
131 bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
134 try_intrinsic(bx, cx,
142 let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
143 bx.call(llfn, &[], None)
146 let tp_ty = substs.type_at(0);
147 C_usize(cx, cx.size_of(tp_ty).bytes())
150 let tp_ty = substs.type_at(0);
151 if let OperandValue::Pair(_, meta) = args[0].val {
153 glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
156 C_usize(cx, cx.size_of(tp_ty).bytes())
160 let tp_ty = substs.type_at(0);
161 C_usize(cx, cx.align_of(tp_ty).abi())
163 "min_align_of_val" => {
164 let tp_ty = substs.type_at(0);
165 if let OperandValue::Pair(_, meta) = args[0].val {
167 glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
170 C_usize(cx, cx.align_of(tp_ty).abi())
174 let tp_ty = substs.type_at(0);
175 C_usize(cx, cx.align_of(tp_ty).pref())
178 let tp_ty = substs.type_at(0);
179 let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
180 C_str_slice(cx, ty_name)
183 C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
186 let ty = substs.type_at(0);
187 if !cx.layout_of(ty).is_zst() {
188 // Just zero out the stack slot.
189 // If we store a zero constant, LLVM will drown in vreg allocation for large data
190 // structures, and the generated code will be awful. (A telltale sign of this is
191 // large quantities of `mov [byte ptr foo],0` in the generated code.)
192 memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
196 // Effectively no-ops
197 "uninit" | "forget" => {
201 let tp_ty = substs.type_at(0);
203 C_bool(cx, bx.cx.type_needs_drop(tp_ty))
206 let ptr = args[0].immediate();
207 let offset = args[1].immediate();
208 bx.inbounds_gep(ptr, &[offset])
211 let ptr = args[0].immediate();
212 let offset = args[1].immediate();
213 bx.gep(ptr, &[offset])
216 "copy_nonoverlapping" => {
217 copy_intrinsic(bx, false, false, substs.type_at(0),
218 args[1].immediate(), args[0].immediate(), args[2].immediate())
221 copy_intrinsic(bx, true, false, substs.type_at(0),
222 args[1].immediate(), args[0].immediate(), args[2].immediate())
225 memset_intrinsic(bx, false, substs.type_at(0),
226 args[0].immediate(), args[1].immediate(), args[2].immediate())
229 "volatile_copy_nonoverlapping_memory" => {
230 copy_intrinsic(bx, false, true, substs.type_at(0),
231 args[0].immediate(), args[1].immediate(), args[2].immediate())
233 "volatile_copy_memory" => {
234 copy_intrinsic(bx, true, true, substs.type_at(0),
235 args[0].immediate(), args[1].immediate(), args[2].immediate())
237 "volatile_set_memory" => {
238 memset_intrinsic(bx, true, substs.type_at(0),
239 args[0].immediate(), args[1].immediate(), args[2].immediate())
241 "volatile_load" | "unaligned_volatile_load" => {
242 let tp_ty = substs.type_at(0);
243 let mut ptr = args[0].immediate();
244 if let PassMode::Cast(ty) = fn_ty.ret.mode {
245 ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
247 let load = bx.volatile_load(ptr);
248 let align = if name == "unaligned_volatile_load" {
251 cx.align_of(tp_ty).abi() as u32
254 llvm::LLVMSetAlignment(load, align);
256 to_immediate(bx, load, cx.layout_of(tp_ty))
258 "volatile_store" => {
259 let dst = args[0].deref(bx.cx);
260 args[1].val.volatile_store(bx, dst);
263 "unaligned_volatile_store" => {
264 let dst = args[0].deref(bx.cx);
265 args[1].val.unaligned_volatile_store(bx, dst);
268 "prefetch_read_data" | "prefetch_write_data" |
269 "prefetch_read_instruction" | "prefetch_write_instruction" => {
270 let expect = cx.get_intrinsic(&("llvm.prefetch"));
271 let (rw, cache_type) = match name {
272 "prefetch_read_data" => (0, 1),
273 "prefetch_write_data" => (1, 1),
274 "prefetch_read_instruction" => (0, 0),
275 "prefetch_write_instruction" => (1, 0),
282 C_i32(cx, cache_type)
285 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
286 "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
287 "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
288 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
289 "rotate_left" | "rotate_right" => {
291 match int_type_width_signed(ty, cx) {
292 Some((width, signed)) =>
295 let y = C_bool(bx.cx, false);
296 let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
297 bx.call(llfn, &[args[0].immediate(), y], None)
299 "ctlz_nonzero" | "cttz_nonzero" => {
300 let y = C_bool(bx.cx, true);
301 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
302 let llfn = cx.get_intrinsic(llvm_name);
303 bx.call(llfn, &[args[0].immediate(), y], None)
305 "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
306 &[args[0].immediate()], None),
309 args[0].immediate() // byte swap a u8/i8 is just a no-op
311 bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
312 &[args[0].immediate()], None)
316 bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
317 &[args[0].immediate()], None)
319 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
320 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
321 if signed { 's' } else { 'u' },
323 let llfn = bx.cx.get_intrinsic(&intrinsic);
325 // Convert `i1` to a `bool`, and write it to the out parameter
326 let pair = bx.call(llfn, &[
330 let val = bx.extract_value(pair, 0);
331 let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
333 let dest = result.project_field(bx, 0);
334 bx.store(val, dest.llval, dest.align);
335 let dest = result.project_field(bx, 1);
336 bx.store(overflow, dest.llval, dest.align);
340 "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
341 "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
342 "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
345 bx.exactsdiv(args[0].immediate(), args[1].immediate())
347 bx.exactudiv(args[0].immediate(), args[1].immediate())
351 bx.sdiv(args[0].immediate(), args[1].immediate())
353 bx.udiv(args[0].immediate(), args[1].immediate())
357 bx.srem(args[0].immediate(), args[1].immediate())
359 bx.urem(args[0].immediate(), args[1].immediate())
361 "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
364 bx.ashr(args[0].immediate(), args[1].immediate())
366 bx.lshr(args[0].immediate(), args[1].immediate())
368 "rotate_left" | "rotate_right" => {
369 let is_left = name == "rotate_left";
370 let val = args[0].immediate();
371 let raw_shift = args[1].immediate();
372 if llvm_util::get_major_version() >= 7 {
373 // rotate = funnel shift with first two args the same
374 let llvm_name = &format!("llvm.fsh{}.i{}",
375 if is_left { 'l' } else { 'r' }, width);
376 let llfn = cx.get_intrinsic(llvm_name);
377 bx.call(llfn, &[val, val, raw_shift], None)
379 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
380 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
381 let width = C_uint(Type::ix(cx, width), width);
382 let shift = bx.urem(raw_shift, width);
383 let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
384 let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
385 let shift2 = bx.lshr(val, if !is_left { shift } else { inv_shift });
386 bx.or(shift1, shift2)
392 span_invalid_monomorphization_error(
394 &format!("invalid monomorphization of `{}` intrinsic: \
395 expected basic integer type, found `{}`", name, ty));
400 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
401 let sty = &arg_tys[0].sty;
402 match float_type_width(sty) {
405 "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
406 "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
407 "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
408 "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
409 "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
413 span_invalid_monomorphization_error(
415 &format!("invalid monomorphization of `{}` intrinsic: \
416 expected basic float type, found `{}`", name, sty));
423 "discriminant_value" => {
424 args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
427 name if name.starts_with("simd_") => {
428 match generic_simd_intrinsic(bx, name,
437 // This requires that atomic intrinsics follow a specific naming pattern:
438 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
439 name if name.starts_with("atomic_") => {
440 use llvm::AtomicOrdering::*;
442 let split: Vec<&str> = name.split('_').collect();
444 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
445 let (order, failorder) = match split.len() {
446 2 => (SequentiallyConsistent, SequentiallyConsistent),
447 3 => match split[2] {
448 "unordered" => (Unordered, Unordered),
449 "relaxed" => (Monotonic, Monotonic),
450 "acq" => (Acquire, Acquire),
451 "rel" => (Release, Monotonic),
452 "acqrel" => (AcquireRelease, Acquire),
453 "failrelaxed" if is_cxchg =>
454 (SequentiallyConsistent, Monotonic),
455 "failacq" if is_cxchg =>
456 (SequentiallyConsistent, Acquire),
457 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
459 4 => match (split[2], split[3]) {
460 ("acq", "failrelaxed") if is_cxchg =>
461 (Acquire, Monotonic),
462 ("acqrel", "failrelaxed") if is_cxchg =>
463 (AcquireRelease, Monotonic),
464 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
466 _ => cx.sess().fatal("Atomic intrinsic not in correct format"),
469 let invalid_monomorphization = |ty| {
470 span_invalid_monomorphization_error(tcx.sess, span,
471 &format!("invalid monomorphization of `{}` intrinsic: \
472 expected basic integer type, found `{}`", name, ty));
476 "cxchg" | "cxchgweak" => {
477 let ty = substs.type_at(0);
478 if int_type_width_signed(ty, cx).is_some() {
479 let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
480 let pair = bx.atomic_cmpxchg(
487 let val = bx.extract_value(pair, 0);
488 let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
490 let dest = result.project_field(bx, 0);
491 bx.store(val, dest.llval, dest.align);
492 let dest = result.project_field(bx, 1);
493 bx.store(success, dest.llval, dest.align);
496 return invalid_monomorphization(ty);
501 let ty = substs.type_at(0);
502 if int_type_width_signed(ty, cx).is_some() {
503 let size = cx.size_of(ty);
504 bx.atomic_load(args[0].immediate(), order, size)
506 return invalid_monomorphization(ty);
511 let ty = substs.type_at(0);
512 if int_type_width_signed(ty, cx).is_some() {
513 let size = cx.size_of(ty);
514 bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
517 return invalid_monomorphization(ty);
522 bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
526 "singlethreadfence" => {
527 bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
531 // These are all AtomicRMW ops
533 let atom_op = match op {
534 "xchg" => llvm::AtomicXchg,
535 "xadd" => llvm::AtomicAdd,
536 "xsub" => llvm::AtomicSub,
537 "and" => llvm::AtomicAnd,
538 "nand" => llvm::AtomicNand,
539 "or" => llvm::AtomicOr,
540 "xor" => llvm::AtomicXor,
541 "max" => llvm::AtomicMax,
542 "min" => llvm::AtomicMin,
543 "umax" => llvm::AtomicUMax,
544 "umin" => llvm::AtomicUMin,
545 _ => cx.sess().fatal("unknown atomic operation")
548 let ty = substs.type_at(0);
549 if int_type_width_signed(ty, cx).is_some() {
550 bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
552 return invalid_monomorphization(ty);
558 "nontemporal_store" => {
559 let dst = args[0].deref(bx.cx);
560 args[1].val.nontemporal_store(bx, dst);
565 let intr = Intrinsic::find(&name).unwrap_or_else(||
566 bug!("unknown intrinsic '{}'", name));
568 fn one<T>(x: Vec<T>) -> T {
569 assert_eq!(x.len(), 1);
570 x.into_iter().next().unwrap()
572 fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
573 use intrinsics::Type::*;
575 Void => vec![Type::void(cx)],
576 Integer(_signed, _width, llvm_width) => {
577 vec![Type::ix(cx, llvm_width as u64)]
581 32 => vec![Type::f32(cx)],
582 64 => vec![Type::f64(cx)],
586 Pointer(ref t, ref llvm_elem, _const) => {
587 let t = llvm_elem.as_ref().unwrap_or(t);
588 let elem = one(ty_to_type(cx, t));
591 Vector(ref t, ref llvm_elem, length) => {
592 let t = llvm_elem.as_ref().unwrap_or(t);
593 let elem = one(ty_to_type(cx, t));
594 vec![Type::vector(elem, length as u64)]
596 Aggregate(false, ref contents) => {
597 let elems = contents.iter()
598 .map(|t| one(ty_to_type(cx, t)))
599 .collect::<Vec<_>>();
600 vec![Type::struct_(cx, &elems, false)]
602 Aggregate(true, ref contents) => {
604 .flat_map(|t| ty_to_type(cx, t))
610 // This allows an argument list like `foo, (bar, baz),
611 // qux` to be converted into `foo, bar, baz, qux`, integer
612 // arguments to be truncated as needed and pointers to be
615 bx: &Builder<'a, 'll, 'tcx>,
616 t: &intrinsics::Type,
617 arg: &OperandRef<'tcx, &'ll Value>,
618 ) -> Vec<&'ll Value> {
620 intrinsics::Type::Aggregate(true, ref contents) => {
621 // We found a tuple that needs squishing! So
622 // run over the tuple and load each field.
624 // This assumes the type is "simple", i.e. no
625 // destructors, and the contents are SIMD
627 assert!(!bx.cx.type_needs_drop(arg.layout.ty));
628 let (ptr, align) = match arg.val {
629 OperandValue::Ref(ptr, None, align) => (ptr, align),
632 let arg = PlaceRef::new_sized(ptr, arg.layout, align);
633 (0..contents.len()).map(|i| {
634 arg.project_field(bx, i).load(bx).immediate()
637 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
638 let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
639 vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
641 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
642 let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
643 vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))]
645 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
646 // the LLVM intrinsic uses a smaller integer
647 // size than the C intrinsic's signature, so
648 // we have to trim it down here.
649 vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
651 _ => vec![arg.immediate()],
656 let inputs = intr.inputs.iter()
657 .flat_map(|t| ty_to_type(cx, t))
658 .collect::<Vec<_>>();
660 let outputs = one(ty_to_type(cx, &intr.output));
662 let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
663 modify_as_needed(bx, t, arg)
665 assert_eq!(inputs.len(), llargs.len());
667 let val = match intr.definition {
668 intrinsics::IntrinsicDef::Named(name) => {
669 let f = declare::declare_cfn(cx,
671 Type::func(&inputs, outputs));
672 bx.call(f, &llargs, None)
677 intrinsics::Type::Aggregate(flatten, ref elems) => {
678 // the output is a tuple so we need to munge it properly
681 for i in 0..elems.len() {
682 let dest = result.project_field(bx, i);
683 let val = bx.extract_value(val, i as u64);
684 bx.store(val, dest.llval, dest.align);
693 if !fn_ty.ret.is_ignore() {
694 if let PassMode::Cast(ty) = fn_ty.ret.mode {
695 let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
696 bx.store(llval, ptr, result.align);
698 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
699 .val.store(bx, result);
705 bx: &Builder<'a, 'll, 'tcx>,
714 let (size, align) = cx.size_and_align_of(ty);
715 let size = C_usize(cx, size.bytes());
716 let align = align.abi();
717 let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
718 let src_ptr = bx.pointercast(src, Type::i8p(cx));
720 bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
722 bx.memcpy(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
727 bx: &Builder<'a, 'll, 'tcx>,
735 let (size, align) = cx.size_and_align_of(ty);
736 let size = C_usize(cx, size.bytes());
737 let align = C_i32(cx, align.abi() as i32);
738 let dst = bx.pointercast(dst, Type::i8p(cx));
739 call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
743 bx: &Builder<'a, 'll, 'tcx>,
744 cx: &CodegenCx<'ll, 'tcx>,
747 local_ptr: &'ll Value,
750 if bx.sess().no_landing_pads() {
751 bx.call(func, &[data], None);
752 let ptr_align = bx.tcx().data_layout.pointer_align;
753 bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
754 } else if wants_msvc_seh(bx.sess()) {
755 codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
757 codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
761 // MSVC's definition of the `rust_try` function.
763 // This implementation uses the new exception handling instructions in LLVM
764 // which have support in LLVM for SEH on MSVC targets. Although these
765 // instructions are meant to work for all targets, as of the time of this
766 // writing, however, LLVM does not recommend the usage of these new instructions
767 // as the old ones are still more optimized.
769 bx: &Builder<'a, 'll, 'tcx>,
770 cx: &CodegenCx<'ll, 'tcx>,
773 local_ptr: &'ll Value,
776 let llfn = get_rust_try_fn(cx, &mut |bx| {
779 bx.set_personality_fn(bx.cx.eh_personality());
781 let normal = bx.build_sibling_block("normal");
782 let catchswitch = bx.build_sibling_block("catchswitch");
783 let catchpad = bx.build_sibling_block("catchpad");
784 let caught = bx.build_sibling_block("caught");
786 let func = llvm::get_param(bx.llfn(), 0);
787 let data = llvm::get_param(bx.llfn(), 1);
788 let local_ptr = llvm::get_param(bx.llfn(), 2);
790 // We're generating an IR snippet that looks like:
792 // declare i32 @rust_try(%func, %data, %ptr) {
793 // %slot = alloca i64*
794 // invoke %func(%data) to label %normal unwind label %catchswitch
800 // %cs = catchswitch within none [%catchpad] unwind to caller
803 // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
804 // %ptr[0] = %slot[0]
805 // %ptr[1] = %slot[1]
806 // catchret from %tok to label %caught
812 // This structure follows the basic usage of throw/try/catch in LLVM.
813 // For example, compile this C++ snippet to see what LLVM generates:
815 // #include <stdint.h>
817 // int bar(void (*foo)(void), uint64_t *ret) {
821 // } catch(uint64_t a[2]) {
828 // More information can be found in libstd's seh.rs implementation.
829 let i64p = Type::i64(cx).ptr_to();
830 let ptr_align = bx.tcx().data_layout.pointer_align;
831 let slot = bx.alloca(i64p, "slot", ptr_align);
832 bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
834 normal.ret(C_i32(cx, 0));
836 let cs = catchswitch.catch_switch(None, None, 1);
837 catchswitch.add_handler(cs, catchpad.llbb());
840 let tydesc = match tcx.lang_items().msvc_try_filter() {
841 Some(did) => ::consts::get_static(cx, did),
842 None => bug!("msvc_try_filter not defined"),
844 let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
845 let addr = catchpad.load(slot, ptr_align);
847 let i64_align = bx.tcx().data_layout.i64_align;
848 let arg1 = catchpad.load(addr, i64_align);
849 let val1 = C_i32(cx, 1);
850 let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
851 let local_ptr = catchpad.bitcast(local_ptr, i64p);
852 catchpad.store(arg1, local_ptr, i64_align);
853 catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
854 catchpad.catch_ret(tok, caught.llbb());
856 caught.ret(C_i32(cx, 1));
859 // Note that no invoke is used here because by definition this function
860 // can't panic (that's what it's catching).
861 let ret = bx.call(llfn, &[func, data, local_ptr], None);
862 let i32_align = bx.tcx().data_layout.i32_align;
863 bx.store(ret, dest, i32_align);
866 // Definition of the standard "try" function for Rust using the GNU-like model
867 // of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
870 // This codegen is a little surprising because we always call a shim
871 // function instead of inlining the call to `invoke` manually here. This is done
872 // because in LLVM we're only allowed to have one personality per function
873 // definition. The call to the `try` intrinsic is being inlined into the
874 // function calling it, and that function may already have other personality
875 // functions in play. By calling a shim we're guaranteed that our shim will have
876 // the right personality function.
878 bx: &Builder<'a, 'll, 'tcx>,
879 cx: &CodegenCx<'ll, 'tcx>,
882 local_ptr: &'ll Value,
885 let llfn = get_rust_try_fn(cx, &mut |bx| {
888 // Codegens the shims described above:
891 // invoke %func(%args...) normal %normal unwind %catch
897 // (ptr, _) = landingpad
898 // store ptr, %local_ptr
901 // Note that the `local_ptr` data passed into the `try` intrinsic is
902 // expected to be `*mut *mut u8` for this to actually work, but that's
903 // managed by the standard library.
905 let then = bx.build_sibling_block("then");
906 let catch = bx.build_sibling_block("catch");
908 let func = llvm::get_param(bx.llfn(), 0);
909 let data = llvm::get_param(bx.llfn(), 1);
910 let local_ptr = llvm::get_param(bx.llfn(), 2);
911 bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
912 then.ret(C_i32(cx, 0));
914 // Type indicator for the exception being thrown.
916 // The first value in this tuple is a pointer to the exception object
917 // being thrown. The second value is a "selector" indicating which of
918 // the landing pad clauses the exception's type had been matched to.
919 // rust_try ignores the selector.
920 let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
921 let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
922 catch.add_clause(vals, C_null(Type::i8p(cx)));
923 let ptr = catch.extract_value(vals, 0);
924 let ptr_align = bx.tcx().data_layout.pointer_align;
925 catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
926 catch.ret(C_i32(cx, 1));
929 // Note that no invoke is used here because by definition this function
930 // can't panic (that's what it's catching).
931 let ret = bx.call(llfn, &[func, data, local_ptr], None);
932 let i32_align = bx.tcx().data_layout.i32_align;
933 bx.store(ret, dest, i32_align);
936 // Helper function to give a Block to a closure to codegen a shim function.
937 // This is currently primarily used for the `try` intrinsic functions above.
938 fn gen_fn<'ll, 'tcx>(
939 cx: &CodegenCx<'ll, 'tcx>,
941 inputs: Vec<Ty<'tcx>>,
943 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
945 let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
949 hir::Unsafety::Unsafe,
952 let llfn = declare::define_internal_fn(cx, name, rust_fn_sig);
953 attributes::from_fn_attrs(cx, llfn, None);
954 let bx = Builder::new_block(cx, llfn, "entry-block");
959 // Helper function used to get a handle to the `__rust_try` function used to
962 // This function is only generated once and is then cached.
963 fn get_rust_try_fn<'ll, 'tcx>(
964 cx: &CodegenCx<'ll, 'tcx>,
965 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
967 if let Some(llfn) = cx.rust_try_fn.get() {
971 // Define the type up front for the signature of the rust_try function.
973 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
974 let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
978 hir::Unsafety::Unsafe,
981 let output = tcx.types.i32;
982 let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
983 cx.rust_try_fn.set(Some(rust_try));
987 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
988 span_err!(a, b, E0511, "{}", c);
991 fn generic_simd_intrinsic(
992 bx: &Builder<'a, 'll, 'tcx>,
995 args: &[OperandRef<'tcx, &'ll Value>],
999 ) -> Result<&'ll Value, ()> {
1000 // macros for error handling:
1001 macro_rules! emit_error {
1005 ($msg: tt, $($fmt: tt)*) => {
1006 span_invalid_monomorphization_error(
1008 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
1013 macro_rules! return_error {
1016 emit_error!($($fmt)*);
1022 macro_rules! require {
1023 ($cond: expr, $($fmt: tt)*) => {
1025 return_error!($($fmt)*);
1030 macro_rules! require_simd {
1031 ($ty: expr, $position: expr) => {
1032 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1037 let sig = tcx.normalize_erasing_late_bound_regions(
1038 ty::ParamEnv::reveal_all(),
1039 &callee_ty.fn_sig(tcx),
1041 let arg_tys = sig.inputs();
1043 // every intrinsic takes a SIMD vector as its first argument
1044 require_simd!(arg_tys[0], "input");
1045 let in_ty = arg_tys[0];
1046 let in_elem = arg_tys[0].simd_type(tcx);
1047 let in_len = arg_tys[0].simd_size(tcx);
1049 let comparison = match name {
1050 "simd_eq" => Some(hir::BinOpKind::Eq),
1051 "simd_ne" => Some(hir::BinOpKind::Ne),
1052 "simd_lt" => Some(hir::BinOpKind::Lt),
1053 "simd_le" => Some(hir::BinOpKind::Le),
1054 "simd_gt" => Some(hir::BinOpKind::Gt),
1055 "simd_ge" => Some(hir::BinOpKind::Ge),
1059 if let Some(cmp_op) = comparison {
1060 require_simd!(ret_ty, "return");
1062 let out_len = ret_ty.simd_size(tcx);
1063 require!(in_len == out_len,
1064 "expected return type with length {} (same as input type `{}`), \
1065 found `{}` with length {}",
1068 require!(llret_ty.element_type().kind() == TypeKind::Integer,
1069 "expected return type with integer elements, found `{}` with non-integer `{}`",
1071 ret_ty.simd_type(tcx));
1073 return Ok(compare_simd_types(bx,
1074 args[0].immediate(),
1075 args[1].immediate(),
1081 if name.starts_with("simd_shuffle") {
1082 let n: usize = name["simd_shuffle".len()..].parse().unwrap_or_else(|_|
1083 span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?"));
1085 require_simd!(ret_ty, "return");
1087 let out_len = ret_ty.simd_size(tcx);
1088 require!(out_len == n,
1089 "expected return type of length {}, found `{}` with length {}",
1090 n, ret_ty, out_len);
1091 require!(in_elem == ret_ty.simd_type(tcx),
1092 "expected return element type `{}` (element of input `{}`), \
1093 found `{}` with element type `{}`",
1095 ret_ty, ret_ty.simd_type(tcx));
1097 let total_len = in_len as u128 * 2;
1099 let vector = args[2].immediate();
1101 let indices: Option<Vec<_>> = (0..n)
1104 let val = const_get_elt(vector, i as u64);
1105 match const_to_opt_u128(val, true) {
1107 emit_error!("shuffle index #{} is not a constant", arg_idx);
1110 Some(idx) if idx >= total_len => {
1111 emit_error!("shuffle index #{} is out of bounds (limit {})",
1112 arg_idx, total_len);
1115 Some(idx) => Some(C_i32(bx.cx, idx as i32)),
1119 let indices = match indices {
1121 None => return Ok(C_null(llret_ty))
1124 return Ok(bx.shuffle_vector(args[0].immediate(),
1125 args[1].immediate(),
1126 C_vector(&indices)))
1129 if name == "simd_insert" {
1130 require!(in_elem == arg_tys[2],
1131 "expected inserted type `{}` (element of input `{}`), found `{}`",
1132 in_elem, in_ty, arg_tys[2]);
1133 return Ok(bx.insert_element(args[0].immediate(),
1134 args[2].immediate(),
1135 args[1].immediate()))
1137 if name == "simd_extract" {
1138 require!(ret_ty == in_elem,
1139 "expected return type `{}` (element of input `{}`), found `{}`",
1140 in_elem, in_ty, ret_ty);
1141 return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
1144 if name == "simd_select" {
1145 let m_elem_ty = in_elem;
1147 let v_len = arg_tys[1].simd_size(tcx);
1148 require!(m_len == v_len,
1149 "mismatched lengths: mask length `{}` != other vector length `{}`",
1152 match m_elem_ty.sty {
1154 _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
1156 // truncate the mask to a vector of i1s
1157 let i1 = Type::i1(bx.cx);
1158 let i1xn = Type::vector(i1, m_len as u64);
1159 let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1160 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1163 fn simd_simple_float_intrinsic(
1165 in_elem: &::rustc::ty::TyS,
1166 in_ty: &::rustc::ty::TyS,
1168 bx: &Builder<'a, 'll, 'tcx>,
1170 args: &[OperandRef<'tcx, &'ll Value>],
1171 ) -> Result<&'ll Value, ()> {
1172 macro_rules! emit_error {
1176 ($msg: tt, $($fmt: tt)*) => {
1177 span_invalid_monomorphization_error(
1179 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
1183 macro_rules! return_error {
1186 emit_error!($($fmt)*);
1191 let ety = match in_elem.sty {
1192 ty::Float(f) if f.bit_width() == 32 => {
1193 if in_len < 2 || in_len > 16 {
1195 "unsupported floating-point vector `{}` with length `{}` \
1196 out-of-range [2, 16]",
1201 ty::Float(f) if f.bit_width() == 64 => {
1202 if in_len < 2 || in_len > 8 {
1203 return_error!("unsupported floating-point vector `{}` with length `{}` \
1204 out-of-range [2, 8]",
1210 return_error!("unsupported element type `{}` of floating-point vector `{}`",
1214 return_error!("`{}` is not a floating-point type", in_ty);
1218 let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
1219 let intrinsic = bx.cx.get_intrinsic(&llvm_name);
1220 let c = bx.call(intrinsic,
1221 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1223 unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
1229 return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1232 return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1235 return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1238 return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1241 return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1244 return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1247 return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1250 return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1253 return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1256 return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1259 return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1262 return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1265 return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1268 return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1270 _ => { /* fallthrough */ }
1274 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1275 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
1276 fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
1277 let p0s: String = "p0".repeat(no_pointers);
1279 ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1280 ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1281 ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1282 _ => unreachable!(),
1286 fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize,
1287 mut no_pointers: usize) -> &'ll Type {
1288 // FIXME: use cx.layout_of(ty).llvm_type() ?
1289 let mut elem_ty = match elem_ty.sty {
1290 ty::Int(v) => Type::int_from_ty(cx, v),
1291 ty::Uint(v) => Type::uint_from_ty(cx, v),
1292 ty::Float(v) => Type::float_from_ty(cx, v),
1293 _ => unreachable!(),
1295 while no_pointers > 0 {
1296 elem_ty = elem_ty.ptr_to();
1299 Type::vector(elem_ty, vec_len as u64)
1303 if name == "simd_gather" {
1304 // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1305 // mask: <N x i{M}>) -> <N x T>
1306 // * N: number of elements in the input vectors
1307 // * T: type of the element to load
1308 // * M: any integer width is supported, will be truncated to i1
1310 // All types must be simd vector types
1311 require_simd!(in_ty, "first");
1312 require_simd!(arg_tys[1], "second");
1313 require_simd!(arg_tys[2], "third");
1314 require_simd!(ret_ty, "return");
1316 // Of the same length:
1317 require!(in_len == arg_tys[1].simd_size(tcx),
1318 "expected {} argument with length {} (same as input type `{}`), \
1319 found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1320 arg_tys[1].simd_size(tcx));
1321 require!(in_len == arg_tys[2].simd_size(tcx),
1322 "expected {} argument with length {} (same as input type `{}`), \
1323 found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1324 arg_tys[2].simd_size(tcx));
1326 // The return type must match the first argument type
1327 require!(ret_ty == in_ty,
1328 "expected return type `{}`, found `{}`",
1331 // This counts how many pointers
1332 fn ptr_count(t: ty::Ty) -> usize {
1334 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1340 fn non_ptr(t: ty::Ty) -> ty::Ty {
1342 ty::RawPtr(p) => non_ptr(p.ty),
1347 // The second argument must be a simd vector with an element type that's a pointer
1348 // to the element type of the first argument
1349 let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1350 ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
1351 non_ptr(arg_tys[1].simd_type(tcx))),
1353 require!(false, "expected element type `{}` of second argument `{}` \
1354 to be a pointer to the element type `{}` of the first \
1355 argument `{}`, found `{}` != `*_ {}`",
1356 arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1357 arg_tys[1].simd_type(tcx).sty, in_elem);
1361 assert!(pointer_count > 0);
1362 assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
1363 assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1365 // The element type of the third argument must be a signed integer type of any width:
1366 match arg_tys[2].simd_type(tcx).sty {
1369 require!(false, "expected element type `{}` of third argument `{}` \
1370 to be a signed integer type",
1371 arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1375 // Alignment of T, must be a constant integer value:
1376 let alignment_ty = Type::i32(bx.cx);
1377 let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1379 // Truncate the mask vector to a vector of i1s:
1380 let (mask, mask_ty) = {
1381 let i1 = Type::i1(bx.cx);
1382 let i1xn = Type::vector(i1, in_len as u64);
1383 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1386 // Type of the vector of pointers:
1387 let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1388 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1390 // Type of the vector of elements:
1391 let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1392 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1394 let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
1395 llvm_elem_vec_str, llvm_pointer_vec_str);
1396 let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1397 Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty,
1398 llvm_elem_vec_ty], llvm_elem_vec_ty));
1399 llvm::SetUnnamedAddr(f, false);
1400 let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
1405 if name == "simd_scatter" {
1406 // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1407 // mask: <N x i{M}>) -> ()
1408 // * N: number of elements in the input vectors
1409 // * T: type of the element to load
1410 // * M: any integer width is supported, will be truncated to i1
1412 // All types must be simd vector types
1413 require_simd!(in_ty, "first");
1414 require_simd!(arg_tys[1], "second");
1415 require_simd!(arg_tys[2], "third");
1417 // Of the same length:
1418 require!(in_len == arg_tys[1].simd_size(tcx),
1419 "expected {} argument with length {} (same as input type `{}`), \
1420 found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1421 arg_tys[1].simd_size(tcx));
1422 require!(in_len == arg_tys[2].simd_size(tcx),
1423 "expected {} argument with length {} (same as input type `{}`), \
1424 found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1425 arg_tys[2].simd_size(tcx));
1427 // This counts how many pointers
1428 fn ptr_count(t: ty::Ty) -> usize {
1430 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1436 fn non_ptr(t: ty::Ty) -> ty::Ty {
1438 ty::RawPtr(p) => non_ptr(p.ty),
1443 // The second argument must be a simd vector with an element type that's a pointer
1444 // to the element type of the first argument
1445 let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1446 ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
1447 => (ptr_count(arg_tys[1].simd_type(tcx)),
1448 non_ptr(arg_tys[1].simd_type(tcx))),
1450 require!(false, "expected element type `{}` of second argument `{}` \
1451 to be a pointer to the element type `{}` of the first \
1452 argument `{}`, found `{}` != `*mut {}`",
1453 arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1454 arg_tys[1].simd_type(tcx).sty, in_elem);
1458 assert!(pointer_count > 0);
1459 assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
1460 assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1462 // The element type of the third argument must be a signed integer type of any width:
1463 match arg_tys[2].simd_type(tcx).sty {
1466 require!(false, "expected element type `{}` of third argument `{}` \
1467 to be a signed integer type",
1468 arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1472 // Alignment of T, must be a constant integer value:
1473 let alignment_ty = Type::i32(bx.cx);
1474 let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1476 // Truncate the mask vector to a vector of i1s:
1477 let (mask, mask_ty) = {
1478 let i1 = Type::i1(bx.cx);
1479 let i1xn = Type::vector(i1, in_len as u64);
1480 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1483 let ret_t = Type::void(bx.cx);
1485 // Type of the vector of pointers:
1486 let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1487 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1489 // Type of the vector of elements:
1490 let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1491 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1493 let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
1494 llvm_elem_vec_str, llvm_pointer_vec_str);
1495 let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1496 Type::func(&[llvm_elem_vec_ty,
1497 llvm_pointer_vec_ty,
1500 llvm::SetUnnamedAddr(f, false);
1501 let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
1506 macro_rules! arith_red {
1507 ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
1509 require!(ret_ty == in_elem,
1510 "expected return type `{}` (element of input `{}`), found `{}`",
1511 in_elem, in_ty, ret_ty);
1512 return match in_elem.sty {
1513 ty::Int(_) | ty::Uint(_) => {
1514 let r = bx.$integer_reduce(args[0].immediate());
1516 // if overflow occurs, the result is the
1517 // mathematical result modulo 2^n:
1518 if name.contains("mul") {
1519 Ok(bx.mul(args[1].immediate(), r))
1521 Ok(bx.add(args[1].immediate(), r))
1524 Ok(bx.$integer_reduce(args[0].immediate()))
1528 // ordered arithmetic reductions take an accumulator
1529 let acc = if $ordered {
1530 let acc = args[1].immediate();
1531 // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
1532 // * if the accumulator of the fadd isn't 0, incorrect
1533 // code is generated
1534 // * if the accumulator of the fmul isn't 1, incorrect
1535 // code is generated
1536 match const_get_real(acc) {
1537 None => return_error!("accumulator of {} is not a constant", $name),
1538 Some((v, loses_info)) => {
1539 if $name.contains("mul") && v != 1.0_f64 {
1540 return_error!("accumulator of {} is not 1.0", $name);
1541 } else if $name.contains("add") && v != 0.0_f64 {
1542 return_error!("accumulator of {} is not 0.0", $name);
1543 } else if loses_info {
1544 return_error!("accumulator of {} loses information", $name);
1550 // unordered arithmetic reductions do not:
1551 match f.bit_width() {
1552 32 => C_undef(Type::f32(bx.cx)),
1553 64 => C_undef(Type::f64(bx.cx)),
1556 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
1557 $name, in_ty, in_elem, v, ret_ty
1562 Ok(bx.$float_reduce(acc, args[0].immediate()))
1566 "unsupported {} from `{}` with element `{}` to `{}`",
1567 $name, in_ty, in_elem, ret_ty
1575 arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
1576 arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
1577 arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
1578 arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
1580 macro_rules! minmax_red {
1581 ($name:tt: $int_red:ident, $float_red:ident) => {
1583 require!(ret_ty == in_elem,
1584 "expected return type `{}` (element of input `{}`), found `{}`",
1585 in_elem, in_ty, ret_ty);
1586 return match in_elem.sty {
1588 Ok(bx.$int_red(args[0].immediate(), true))
1591 Ok(bx.$int_red(args[0].immediate(), false))
1594 Ok(bx.$float_red(args[0].immediate()))
1597 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1598 $name, in_ty, in_elem, ret_ty)
1606 minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
1607 minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);
1609 minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
1610 minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
1612 macro_rules! bitwise_red {
1613 ($name:tt : $red:ident, $boolean:expr) => {
1615 let input = if !$boolean {
1616 require!(ret_ty == in_elem,
1617 "expected return type `{}` (element of input `{}`), found `{}`",
1618 in_elem, in_ty, ret_ty);
1622 ty::Int(_) | ty::Uint(_) => {},
1624 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1625 $name, in_ty, in_elem, ret_ty)
1629 // boolean reductions operate on vectors of i1s:
1630 let i1 = Type::i1(bx.cx);
1631 let i1xn = Type::vector(i1, in_len as u64);
1632 bx.trunc(args[0].immediate(), i1xn)
1634 return match in_elem.sty {
1635 ty::Int(_) | ty::Uint(_) => {
1636 let r = bx.$red(input);
1641 bx.zext(r, Type::bool(bx.cx))
1646 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1647 $name, in_ty, in_elem, ret_ty)
1654 bitwise_red!("simd_reduce_and": vector_reduce_and, false);
1655 bitwise_red!("simd_reduce_or": vector_reduce_or, false);
1656 bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
1657 bitwise_red!("simd_reduce_all": vector_reduce_and, true);
1658 bitwise_red!("simd_reduce_any": vector_reduce_or, true);
1660 if name == "simd_cast" {
1661 require_simd!(ret_ty, "return");
1662 let out_len = ret_ty.simd_size(tcx);
1663 require!(in_len == out_len,
1664 "expected return type with length {} (same as input type `{}`), \
1665 found `{}` with length {}",
1668 // casting cares about nominal type, not just structural type
1669 let out_elem = ret_ty.simd_type(tcx);
1671 if in_elem == out_elem { return Ok(args[0].immediate()); }
1673 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1675 let (in_style, in_width) = match in_elem.sty {
1676 // vectors of pointer-sized integers should've been
1677 // disallowed before here, so this unwrap is safe.
1678 ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
1679 ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
1680 ty::Float(f) => (Style::Float, f.bit_width()),
1681 _ => (Style::Unsupported, 0)
1683 let (out_style, out_width) = match out_elem.sty {
1684 ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
1685 ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
1686 ty::Float(f) => (Style::Float, f.bit_width()),
1687 _ => (Style::Unsupported, 0)
1690 match (in_style, out_style) {
1691 (Style::Int(in_is_signed), Style::Int(_)) => {
1692 return Ok(match in_width.cmp(&out_width) {
1693 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
1694 Ordering::Equal => args[0].immediate(),
1695 Ordering::Less => if in_is_signed {
1696 bx.sext(args[0].immediate(), llret_ty)
1698 bx.zext(args[0].immediate(), llret_ty)
1702 (Style::Int(in_is_signed), Style::Float) => {
1703 return Ok(if in_is_signed {
1704 bx.sitofp(args[0].immediate(), llret_ty)
1706 bx.uitofp(args[0].immediate(), llret_ty)
1709 (Style::Float, Style::Int(out_is_signed)) => {
1710 return Ok(if out_is_signed {
1711 bx.fptosi(args[0].immediate(), llret_ty)
1713 bx.fptoui(args[0].immediate(), llret_ty)
1716 (Style::Float, Style::Float) => {
1717 return Ok(match in_width.cmp(&out_width) {
1718 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
1719 Ordering::Equal => args[0].immediate(),
1720 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
1723 _ => {/* Unsupported. Fallthrough. */}
1726 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1730 macro_rules! arith {
1731 ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
1732 $(if name == stringify!($name) {
1734 $($(ty::$p(_))|* => {
1735 return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
1740 "unsupported operation on `{}` with element `{}`",
1747 simd_add: Uint, Int => add, Float => fadd;
1748 simd_sub: Uint, Int => sub, Float => fsub;
1749 simd_mul: Uint, Int => mul, Float => fmul;
1750 simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
1751 simd_rem: Uint => urem, Int => srem, Float => frem;
1752 simd_shl: Uint, Int => shl;
1753 simd_shr: Uint => lshr, Int => ashr;
1754 simd_and: Uint, Int => and;
1755 simd_or: Uint, Int => or;
1756 simd_xor: Uint, Int => xor;
1757 simd_fmax: Float => maxnum;
1758 simd_fmin: Float => minnum;
1760 span_bug!(span, "unknown SIMD intrinsic");
1763 // Returns the width of an int Ty, and if it's signed or not
1764 // Returns None if the type is not an integer
1765 // FIXME: there’s multiple of this functions, investigate using some of the already existing
1767 fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
1769 ty::Int(t) => Some((match t {
1770 ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64,
1771 ast::IntTy::I8 => 8,
1772 ast::IntTy::I16 => 16,
1773 ast::IntTy::I32 => 32,
1774 ast::IntTy::I64 => 64,
1775 ast::IntTy::I128 => 128,
1777 ty::Uint(t) => Some((match t {
1778 ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64,
1779 ast::UintTy::U8 => 8,
1780 ast::UintTy::U16 => 16,
1781 ast::UintTy::U32 => 32,
1782 ast::UintTy::U64 => 64,
1783 ast::UintTy::U128 => 128,
1789 // Returns the width of a float TypeVariant
1790 // Returns None if the type is not a float
1791 fn float_type_width<'tcx>(sty: &ty::TyKind<'tcx>) -> Option<u64> {
1793 ty::Float(t) => Some(t.bit_width() as u64),