1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![allow(non_upper_case_globals)]
13 use intrinsics::{self, Intrinsic};
16 use abi::{Abi, FnType, LlvmType, PassMode};
17 use mir::place::PlaceRef;
18 use mir::operand::{OperandRef, OperandValue};
24 use type_of::LayoutLlvmExt;
25 use rustc::ty::{self, Ty};
26 use rustc::ty::layout::{HasDataLayout, LayoutOf};
29 use syntax::symbol::Symbol;
32 use rustc::session::Session;
35 use std::cmp::Ordering;
38 fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
39 let llvm_name = match name {
40 "sqrtf32" => "llvm.sqrt.f32",
41 "sqrtf64" => "llvm.sqrt.f64",
42 "powif32" => "llvm.powi.f32",
43 "powif64" => "llvm.powi.f64",
44 "sinf32" => "llvm.sin.f32",
45 "sinf64" => "llvm.sin.f64",
46 "cosf32" => "llvm.cos.f32",
47 "cosf64" => "llvm.cos.f64",
48 "powf32" => "llvm.pow.f32",
49 "powf64" => "llvm.pow.f64",
50 "expf32" => "llvm.exp.f32",
51 "expf64" => "llvm.exp.f64",
52 "exp2f32" => "llvm.exp2.f32",
53 "exp2f64" => "llvm.exp2.f64",
54 "logf32" => "llvm.log.f32",
55 "logf64" => "llvm.log.f64",
56 "log10f32" => "llvm.log10.f32",
57 "log10f64" => "llvm.log10.f64",
58 "log2f32" => "llvm.log2.f32",
59 "log2f64" => "llvm.log2.f64",
60 "fmaf32" => "llvm.fma.f32",
61 "fmaf64" => "llvm.fma.f64",
62 "fabsf32" => "llvm.fabs.f32",
63 "fabsf64" => "llvm.fabs.f64",
64 "copysignf32" => "llvm.copysign.f32",
65 "copysignf64" => "llvm.copysign.f64",
66 "floorf32" => "llvm.floor.f32",
67 "floorf64" => "llvm.floor.f64",
68 "ceilf32" => "llvm.ceil.f32",
69 "ceilf64" => "llvm.ceil.f64",
70 "truncf32" => "llvm.trunc.f32",
71 "truncf64" => "llvm.trunc.f64",
72 "rintf32" => "llvm.rint.f32",
73 "rintf64" => "llvm.rint.f64",
74 "nearbyintf32" => "llvm.nearbyint.f32",
75 "nearbyintf64" => "llvm.nearbyint.f64",
76 "roundf32" => "llvm.round.f32",
77 "roundf64" => "llvm.round.f64",
78 "assume" => "llvm.assume",
79 "abort" => "llvm.trap",
82 Some(cx.get_intrinsic(&llvm_name))
85 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
86 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
87 /// add them to librustc_codegen_llvm/context.rs
88 pub fn codegen_intrinsic_call(
89 bx: &Builder<'a, 'll, 'tcx>,
91 fn_ty: &FnType<'tcx, Ty<'tcx>>,
92 args: &[OperandRef<'tcx>],
99 let (def_id, substs) = match callee_ty.sty {
100 ty::TyFnDef(def_id, substs) => (def_id, substs),
101 _ => bug!("expected fn item type, found {}", callee_ty)
104 let sig = callee_ty.fn_sig(tcx);
105 let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
106 let arg_tys = sig.inputs();
107 let ret_ty = sig.output();
108 let name = &*tcx.item_name(def_id).as_str();
110 let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
111 let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
113 let simple = get_simple_intrinsic(cx, name);
114 let llval = match name {
115 _ if simple.is_some() => {
116 bx.call(simple.unwrap(),
117 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
124 let expect = cx.get_intrinsic(&("llvm.expect.i1"));
125 bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
128 let expect = cx.get_intrinsic(&("llvm.expect.i1"));
129 bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
132 try_intrinsic(bx, cx,
140 let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
141 bx.call(llfn, &[], None)
144 let tp_ty = substs.type_at(0);
145 C_usize(cx, cx.size_of(tp_ty).bytes())
148 let tp_ty = substs.type_at(0);
149 if let OperandValue::Pair(_, meta) = args[0].val {
151 glue::size_and_align_of_dst(bx, tp_ty, meta);
154 C_usize(cx, cx.size_of(tp_ty).bytes())
158 let tp_ty = substs.type_at(0);
159 C_usize(cx, cx.align_of(tp_ty).abi())
161 "min_align_of_val" => {
162 let tp_ty = substs.type_at(0);
163 if let OperandValue::Pair(_, meta) = args[0].val {
165 glue::size_and_align_of_dst(bx, tp_ty, meta);
168 C_usize(cx, cx.align_of(tp_ty).abi())
172 let tp_ty = substs.type_at(0);
173 C_usize(cx, cx.align_of(tp_ty).pref())
176 let tp_ty = substs.type_at(0);
177 let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
178 C_str_slice(cx, ty_name)
181 C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
184 let ty = substs.type_at(0);
185 if !cx.layout_of(ty).is_zst() {
186 // Just zero out the stack slot.
187 // If we store a zero constant, LLVM will drown in vreg allocation for large data
188 // structures, and the generated code will be awful. (A telltale sign of this is
189 // large quantities of `mov [byte ptr foo],0` in the generated code.)
190 memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
194 // Effectively no-ops
199 let tp_ty = substs.type_at(0);
201 C_bool(cx, bx.cx.type_needs_drop(tp_ty))
204 let ptr = args[0].immediate();
205 let offset = args[1].immediate();
206 bx.inbounds_gep(ptr, &[offset])
209 let ptr = args[0].immediate();
210 let offset = args[1].immediate();
211 bx.gep(ptr, &[offset])
214 "copy_nonoverlapping" => {
215 copy_intrinsic(bx, false, false, substs.type_at(0),
216 args[1].immediate(), args[0].immediate(), args[2].immediate())
219 copy_intrinsic(bx, true, false, substs.type_at(0),
220 args[1].immediate(), args[0].immediate(), args[2].immediate())
223 memset_intrinsic(bx, false, substs.type_at(0),
224 args[0].immediate(), args[1].immediate(), args[2].immediate())
227 "volatile_copy_nonoverlapping_memory" => {
228 copy_intrinsic(bx, false, true, substs.type_at(0),
229 args[0].immediate(), args[1].immediate(), args[2].immediate())
231 "volatile_copy_memory" => {
232 copy_intrinsic(bx, true, true, substs.type_at(0),
233 args[0].immediate(), args[1].immediate(), args[2].immediate())
235 "volatile_set_memory" => {
236 memset_intrinsic(bx, true, substs.type_at(0),
237 args[0].immediate(), args[1].immediate(), args[2].immediate())
239 "volatile_load" | "unaligned_volatile_load" => {
240 let tp_ty = substs.type_at(0);
241 let mut ptr = args[0].immediate();
242 if let PassMode::Cast(ty) = fn_ty.ret.mode {
243 ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
245 let load = bx.volatile_load(ptr);
246 let align = if name == "unaligned_volatile_load" {
249 cx.align_of(tp_ty).abi() as u32
252 llvm::LLVMSetAlignment(load, align);
254 to_immediate(bx, load, cx.layout_of(tp_ty))
256 "volatile_store" => {
257 let dst = args[0].deref(bx.cx);
258 args[1].val.volatile_store(bx, dst);
261 "unaligned_volatile_store" => {
262 let dst = args[0].deref(bx.cx);
263 args[1].val.unaligned_volatile_store(bx, dst);
266 "prefetch_read_data" | "prefetch_write_data" |
267 "prefetch_read_instruction" | "prefetch_write_instruction" => {
268 let expect = cx.get_intrinsic(&("llvm.prefetch"));
269 let (rw, cache_type) = match name {
270 "prefetch_read_data" => (0, 1),
271 "prefetch_write_data" => (1, 1),
272 "prefetch_read_instruction" => (0, 0),
273 "prefetch_write_instruction" => (1, 0),
280 C_i32(cx, cache_type)
283 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
284 "bitreverse" | "add_with_overflow" | "sub_with_overflow" |
285 "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
286 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => {
288 match int_type_width_signed(ty, cx) {
289 Some((width, signed)) =>
292 let y = C_bool(bx.cx, false);
293 let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
294 bx.call(llfn, &[args[0].immediate(), y], None)
296 "ctlz_nonzero" | "cttz_nonzero" => {
297 let y = C_bool(bx.cx, true);
298 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
299 let llfn = cx.get_intrinsic(llvm_name);
300 bx.call(llfn, &[args[0].immediate(), y], None)
302 "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
303 &[args[0].immediate()], None),
306                 args[0].immediate() // byte-swapping a u8/i8 is a no-op
308 bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
309 &[args[0].immediate()], None)
313 bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
314 &[args[0].immediate()], None)
316 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
317 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
318 if signed { 's' } else { 'u' },
320 let llfn = bx.cx.get_intrinsic(&intrinsic);
322 // Convert `i1` to a `bool`, and write it to the out parameter
323 let pair = bx.call(llfn, &[
327 let val = bx.extract_value(pair, 0);
328 let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
330 let dest = result.project_field(bx, 0);
331 bx.store(val, dest.llval, dest.align);
332 let dest = result.project_field(bx, 1);
333 bx.store(overflow, dest.llval, dest.align);
337 "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
338 "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
339 "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
342 bx.exactsdiv(args[0].immediate(), args[1].immediate())
344 bx.exactudiv(args[0].immediate(), args[1].immediate())
348 bx.sdiv(args[0].immediate(), args[1].immediate())
350 bx.udiv(args[0].immediate(), args[1].immediate())
354 bx.srem(args[0].immediate(), args[1].immediate())
356 bx.urem(args[0].immediate(), args[1].immediate())
358 "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
361 bx.ashr(args[0].immediate(), args[1].immediate())
363 bx.lshr(args[0].immediate(), args[1].immediate())
368 span_invalid_monomorphization_error(
370 &format!("invalid monomorphization of `{}` intrinsic: \
371 expected basic integer type, found `{}`", name, ty));
377 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
378 let sty = &arg_tys[0].sty;
379 match float_type_width(sty) {
382 "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
383 "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
384 "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
385 "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
386 "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
390 span_invalid_monomorphization_error(
392 &format!("invalid monomorphization of `{}` intrinsic: \
393 expected basic float type, found `{}`", name, sty));
400 "discriminant_value" => {
401 args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
404 name if name.starts_with("simd_") => {
405 match generic_simd_intrinsic(bx, name,
414 // This requires that atomic intrinsics follow a specific naming pattern:
415 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
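        // For illustration (this is exactly the split-and-match performed below),
        // a few intrinsic names and the (success, failure) orderings they map to:
        //
        //     "atomic_xadd"                     -> (SequentiallyConsistent, SequentiallyConsistent)
        //     "atomic_load_acq"                 -> (Acquire, Acquire)
        //     "atomic_cxchg_failacq"            -> (SequentiallyConsistent, Acquire)
        //     "atomic_cxchg_acqrel_failrelaxed" -> (AcquireRelease, Monotonic)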
416 name if name.starts_with("atomic_") => {
417 use llvm::AtomicOrdering::*;
419 let split: Vec<&str> = name.split('_').collect();
421 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
422 let (order, failorder) = match split.len() {
423 2 => (SequentiallyConsistent, SequentiallyConsistent),
424 3 => match split[2] {
425 "unordered" => (Unordered, Unordered),
426 "relaxed" => (Monotonic, Monotonic),
427 "acq" => (Acquire, Acquire),
428 "rel" => (Release, Monotonic),
429 "acqrel" => (AcquireRelease, Acquire),
430 "failrelaxed" if is_cxchg =>
431 (SequentiallyConsistent, Monotonic),
432 "failacq" if is_cxchg =>
433 (SequentiallyConsistent, Acquire),
434 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
436 4 => match (split[2], split[3]) {
437 ("acq", "failrelaxed") if is_cxchg =>
438 (Acquire, Monotonic),
439 ("acqrel", "failrelaxed") if is_cxchg =>
440 (AcquireRelease, Monotonic),
441 _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
443             _ => cx.sess().fatal("atomic intrinsic not in correct format"),
446 let invalid_monomorphization = |ty| {
447 span_invalid_monomorphization_error(tcx.sess, span,
448 &format!("invalid monomorphization of `{}` intrinsic: \
449 expected basic integer type, found `{}`", name, ty));
453 "cxchg" | "cxchgweak" => {
454 let ty = substs.type_at(0);
455 if int_type_width_signed(ty, cx).is_some() {
456 let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
457 let pair = bx.atomic_cmpxchg(
464 let val = bx.extract_value(pair, 0);
465 let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
467 let dest = result.project_field(bx, 0);
468 bx.store(val, dest.llval, dest.align);
469 let dest = result.project_field(bx, 1);
470 bx.store(success, dest.llval, dest.align);
473 return invalid_monomorphization(ty);
478 let ty = substs.type_at(0);
479 if int_type_width_signed(ty, cx).is_some() {
480 let align = cx.align_of(ty);
481 bx.atomic_load(args[0].immediate(), order, align)
483 return invalid_monomorphization(ty);
488 let ty = substs.type_at(0);
489 if int_type_width_signed(ty, cx).is_some() {
490 let align = cx.align_of(ty);
491 bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
494 return invalid_monomorphization(ty);
499 bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
503 "singlethreadfence" => {
504 bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
508 // These are all AtomicRMW ops
510 let atom_op = match op {
511 "xchg" => llvm::AtomicXchg,
512 "xadd" => llvm::AtomicAdd,
513 "xsub" => llvm::AtomicSub,
514 "and" => llvm::AtomicAnd,
515 "nand" => llvm::AtomicNand,
516 "or" => llvm::AtomicOr,
517 "xor" => llvm::AtomicXor,
518 "max" => llvm::AtomicMax,
519 "min" => llvm::AtomicMin,
520 "umax" => llvm::AtomicUMax,
521 "umin" => llvm::AtomicUMin,
522 _ => cx.sess().fatal("unknown atomic operation")
525 let ty = substs.type_at(0);
526 if int_type_width_signed(ty, cx).is_some() {
527 bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
529 return invalid_monomorphization(ty);
535 "nontemporal_store" => {
536 let dst = args[0].deref(bx.cx);
537 args[1].val.nontemporal_store(bx, dst);
542 let intr = match Intrinsic::find(&name) {
544 None => bug!("unknown intrinsic '{}'", name),
546 fn one<T>(x: Vec<T>) -> T {
547 assert_eq!(x.len(), 1);
548 x.into_iter().next().unwrap()
550 fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
551 use intrinsics::Type::*;
553 Void => vec![Type::void(cx)],
554 Integer(_signed, _width, llvm_width) => {
555 vec![Type::ix(cx, llvm_width as u64)]
559 32 => vec![Type::f32(cx)],
560 64 => vec![Type::f64(cx)],
564 Pointer(ref t, ref llvm_elem, _const) => {
565 let t = llvm_elem.as_ref().unwrap_or(t);
566 let elem = one(ty_to_type(cx, t));
569 Vector(ref t, ref llvm_elem, length) => {
570 let t = llvm_elem.as_ref().unwrap_or(t);
571 let elem = one(ty_to_type(cx, t));
572 vec![Type::vector(elem, length as u64)]
574 Aggregate(false, ref contents) => {
575 let elems = contents.iter()
576 .map(|t| one(ty_to_type(cx, t)))
577 .collect::<Vec<_>>();
578 vec![Type::struct_(cx, &elems, false)]
580 Aggregate(true, ref contents) => {
582 .flat_map(|t| ty_to_type(cx, t))
588 // This allows an argument list like `foo, (bar, baz),
589 // qux` to be converted into `foo, bar, baz, qux`, integer
590             // arguments to be truncated as needed and pointers to be
591             // cast.
592             fn modify_as_needed(
593 bx: &Builder<'a, 'll, 'tcx>,
594 t: &intrinsics::Type,
595 arg: &OperandRef<'tcx>,
599 intrinsics::Type::Aggregate(true, ref contents) => {
600 // We found a tuple that needs squishing! So
601 // run over the tuple and load each field.
603 // This assumes the type is "simple", i.e. no
604                 // destructors, and the contents are SIMD-compatible.
606 assert!(!bx.cx.type_needs_drop(arg.layout.ty));
607 let (ptr, align) = match arg.val {
608 OperandValue::Ref(ptr, align) => (ptr, align),
611 let arg = PlaceRef::new_sized(ptr, arg.layout, align);
612 (0..contents.len()).map(|i| {
613 arg.project_field(bx, i).load(bx).immediate()
616 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
617 let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
618 vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
620 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
621 let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
622 vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))]
624 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
625 // the LLVM intrinsic uses a smaller integer
626 // size than the C intrinsic's signature, so
627 // we have to trim it down here.
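                // (For illustration: an argument that is 32 bits wide on the
                // Rust side but declared as `i8` by the LLVM intrinsic is
                // truncated to an `i8` immediate here.)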
628 vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
630 _ => vec![arg.immediate()],
635 let inputs = intr.inputs.iter()
636 .flat_map(|t| ty_to_type(cx, t))
637 .collect::<Vec<_>>();
639 let outputs = one(ty_to_type(cx, &intr.output));
641 let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
642 modify_as_needed(bx, t, arg)
644 assert_eq!(inputs.len(), llargs.len());
646 let val = match intr.definition {
647 intrinsics::IntrinsicDef::Named(name) => {
648 let f = declare::declare_cfn(cx,
650 Type::func(&inputs, outputs));
651 bx.call(f, &llargs, None)
656 intrinsics::Type::Aggregate(flatten, ref elems) => {
657 // the output is a tuple so we need to munge it properly
660 for i in 0..elems.len() {
661 let dest = result.project_field(bx, i);
662 let val = bx.extract_value(val, i as u64);
663 bx.store(val, dest.llval, dest.align);
672 if !fn_ty.ret.is_ignore() {
673 if let PassMode::Cast(ty) = fn_ty.ret.mode {
674 let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
675 bx.store(llval, ptr, result.align);
677 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
678 .val.store(bx, result);
684 bx: &Builder<'a, 'll, 'tcx>,
693 let (size, align) = cx.size_and_align_of(ty);
694 let size = C_usize(cx, size.bytes());
695 let align = C_i32(cx, align.abi() as i32);
697 let operation = if allow_overlap {
703 let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
704 cx.data_layout().pointer_size.bits());
706 let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
707 let src_ptr = bx.pointercast(src, Type::i8p(cx));
708 let llfn = cx.get_intrinsic(&name);
715 C_bool(cx, volatile)],
720 bx: &Builder<'a, 'll, 'tcx>,
728 let (size, align) = cx.size_and_align_of(ty);
729 let size = C_usize(cx, size.bytes());
730 let align = C_i32(cx, align.abi() as i32);
731 let dst = bx.pointercast(dst, Type::i8p(cx));
732 call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
736 bx: &Builder<'a, 'll, 'tcx>,
743 if bx.sess().no_landing_pads() {
744 bx.call(func, &[data], None);
745 let ptr_align = bx.tcx().data_layout.pointer_align;
746 bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
747 } else if wants_msvc_seh(bx.sess()) {
748 codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
750 codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
754 // MSVC's definition of the `rust_try` function.
755 //
756 // This implementation uses the new exception handling instructions in LLVM
757 // that add support for SEH on MSVC targets. Although these instructions are
758 // meant to work for all targets, as of this writing LLVM does not recommend
759 // using them, as the old instructions are still better optimized.
762 bx: &Builder<'a, 'll, 'tcx>,
769 let llfn = get_rust_try_fn(cx, &mut |bx| {
772 bx.set_personality_fn(bx.cx.eh_personality());
774 let normal = bx.build_sibling_block("normal");
775 let catchswitch = bx.build_sibling_block("catchswitch");
776 let catchpad = bx.build_sibling_block("catchpad");
777 let caught = bx.build_sibling_block("caught");
779 let func = llvm::get_param(bx.llfn(), 0);
780 let data = llvm::get_param(bx.llfn(), 1);
781 let local_ptr = llvm::get_param(bx.llfn(), 2);
783 // We're generating an IR snippet that looks like:
784 //
785 //   declare i32 @rust_try(%func, %data, %ptr) {
786 //      %slot = alloca i64*
787 //      invoke %func(%data) to label %normal unwind label %catchswitch
788 //
789 //   normal:
790 //      ret i32 0
791 //
792 //   catchswitch:
793 //      %cs = catchswitch within none [%catchpad] unwind to caller
794 //
795 //   catchpad:
796 //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
797 //      %ptr[0] = %slot[0]
798 //      %ptr[1] = %slot[1]
799 //      catchret from %tok to label %caught
800 //
801 //   caught:
802 //      ret i32 1
803 //   }
804 //
805 // This structure follows the basic usage of throw/try/catch in LLVM.
806 // For example, compile this C++ snippet to see what LLVM generates:
807 //
808 //      #include <stdint.h>
809 //
810 //      int bar(void (*foo)(void), uint64_t *ret) {
811 //          try {
812 //              foo();
813 //              return 0;
814 //          } catch(uint64_t a[2]) {
815 //              ret[0] = a[0];
816 //              ret[1] = a[1];
817 //              return 1;
818 //          }
819 //      }
820 //
821 // More information can be found in libstd's seh.rs implementation.
822 let i64p = Type::i64(cx).ptr_to();
823 let ptr_align = bx.tcx().data_layout.pointer_align;
824 let slot = bx.alloca(i64p, "slot", ptr_align);
825 bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
828 normal.ret(C_i32(cx, 0));
830 let cs = catchswitch.catch_switch(None, None, 1);
831 catchswitch.add_handler(cs, catchpad.llbb());
834 let tydesc = match tcx.lang_items().msvc_try_filter() {
835 Some(did) => ::consts::get_static(cx, did),
836 None => bug!("msvc_try_filter not defined"),
838 let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
839 let addr = catchpad.load(slot, ptr_align);
841 let i64_align = bx.tcx().data_layout.i64_align;
842 let arg1 = catchpad.load(addr, i64_align);
843 let val1 = C_i32(cx, 1);
844 let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
845 let local_ptr = catchpad.bitcast(local_ptr, i64p);
846 catchpad.store(arg1, local_ptr, i64_align);
847 catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
848 catchpad.catch_ret(tok, caught.llbb());
850 caught.ret(C_i32(cx, 1));
853 // Note that no invoke is used here because by definition this function
854 // can't panic (that's what it's catching).
855 let ret = bx.call(llfn, &[func, data, local_ptr], None);
856 let i32_align = bx.tcx().data_layout.i32_align;
857 bx.store(ret, dest, i32_align);
860 // Definition of the standard "try" function for Rust using the GNU-like model
861 // of exceptions (i.e. the normal semantics of LLVM's landingpad and invoke
862 // instructions).
863 //
864 // This codegen is a little surprising because we always call a shim
865 // function instead of inlining the call to `invoke` manually here. This is done
866 // because in LLVM we're only allowed to have one personality per function
867 // definition. The call to the `try` intrinsic is being inlined into the
868 // function calling it, and that function may already have other personality
869 // functions in play. By calling a shim we're guaranteed that our shim will have
870 // the right personality function.
872 bx: &Builder<'a, 'll, 'tcx>,
879 let llfn = get_rust_try_fn(cx, &mut |bx| {
882 // Codegens the shims described above:
883 //
884 //   bx:
885 //      invoke %func(%args...) normal %normal unwind %catch
886 //
887 //   normal:
888 //      ret 0
889 //
890 //   catch:
891 //      (ptr, _) = landingpad
892 //      store ptr, %local_ptr
893 //      ret 1
894 //
895 // Note that the `local_ptr` data passed into the `try` intrinsic is
896 // expected to be `*mut *mut u8` for this to actually work, but that's
897 // managed by the standard library.
899 let then = bx.build_sibling_block("then");
900 let catch = bx.build_sibling_block("catch");
902 let func = llvm::get_param(bx.llfn(), 0);
903 let data = llvm::get_param(bx.llfn(), 1);
904 let local_ptr = llvm::get_param(bx.llfn(), 2);
905 bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
906 then.ret(C_i32(cx, 0));
908 // Type indicator for the exception being thrown.
910 // The first value in this tuple is a pointer to the exception object
911 // being thrown. The second value is a "selector" indicating which of
912 // the landing pad clauses the exception's type had been matched to.
913 // rust_try ignores the selector.
914 let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)],
916 let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
917 catch.add_clause(vals, C_null(Type::i8p(cx)));
918 let ptr = catch.extract_value(vals, 0);
919 let ptr_align = bx.tcx().data_layout.pointer_align;
920 catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
921 catch.ret(C_i32(cx, 1));
924 // Note that no invoke is used here because by definition this function
925 // can't panic (that's what it's catching).
926 let ret = bx.call(llfn, &[func, data, local_ptr], None);
927 let i32_align = bx.tcx().data_layout.i32_align;
928 bx.store(ret, dest, i32_align);
931 // Helper function to give a Block to a closure to codegen a shim function.
932 // This is currently primarily used for the `try` intrinsic functions above.
933 fn gen_fn<'ll, 'tcx>(
934 cx: &CodegenCx<'ll, 'tcx>,
936 inputs: Vec<Ty<'tcx>>,
938 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
940 let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
944 hir::Unsafety::Unsafe,
947 let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
948 let bx = Builder::new_block(cx, llfn, "entry-block");
953 // Helper function used to get a handle to the `__rust_try` function used to
954 // catch exceptions.
955 //
956 // This function is only generated once and is then cached.
957 fn get_rust_try_fn<'ll, 'tcx>(
958 cx: &CodegenCx<'ll, 'tcx>,
959 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
961 if let Some(llfn) = cx.rust_try_fn.get() {
965 // Define the type up front for the signature of the rust_try function.
967 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
968 let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
972 hir::Unsafety::Unsafe,
975 let output = tcx.types.i32;
976 let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
977 cx.rust_try_fn.set(Some(rust_try));
981 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
982 span_err!(a, b, E0511, "{}", c);
985 fn generic_simd_intrinsic(
986 bx: &Builder<'a, 'll, 'tcx>,
989 args: &[OperandRef<'tcx>],
993 ) -> Result<ValueRef, ()> {
994 // macros for error handling:
995 macro_rules! emit_error {
999 ($msg: tt, $($fmt: tt)*) => {
1000 span_invalid_monomorphization_error(
1002 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1007 macro_rules! return_error {
1010 emit_error!($($fmt)*);
1016 macro_rules! require {
1017 ($cond: expr, $($fmt: tt)*) => {
1019 return_error!($($fmt)*);
1023 macro_rules! require_simd {
1024 ($ty: expr, $position: expr) => {
1025 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1032 let sig = tcx.normalize_erasing_late_bound_regions(
1033 ty::ParamEnv::reveal_all(),
1034 &callee_ty.fn_sig(tcx),
1036 let arg_tys = sig.inputs();
1038 // every intrinsic takes a SIMD vector as its first argument
1039 require_simd!(arg_tys[0], "input");
1040 let in_ty = arg_tys[0];
1041 let in_elem = arg_tys[0].simd_type(tcx);
1042 let in_len = arg_tys[0].simd_size(tcx);
1044 let comparison = match name {
1045 "simd_eq" => Some(hir::BinOpKind::Eq),
1046 "simd_ne" => Some(hir::BinOpKind::Ne),
1047 "simd_lt" => Some(hir::BinOpKind::Lt),
1048 "simd_le" => Some(hir::BinOpKind::Le),
1049 "simd_gt" => Some(hir::BinOpKind::Gt),
1050 "simd_ge" => Some(hir::BinOpKind::Ge),
1054 if let Some(cmp_op) = comparison {
1055 require_simd!(ret_ty, "return");
1057 let out_len = ret_ty.simd_size(tcx);
1058 require!(in_len == out_len,
1059 "expected return type with length {} (same as input type `{}`), \
1060 found `{}` with length {}",
1063 require!(llret_ty.element_type().kind() == llvm::Integer,
1064 "expected return type with integer elements, found `{}` with non-integer `{}`",
1066 ret_ty.simd_type(tcx));
1068 return Ok(compare_simd_types(bx,
1069 args[0].immediate(),
1070 args[1].immediate(),
1076 if name.starts_with("simd_shuffle") {
1077 let n: usize = match name["simd_shuffle".len()..].parse() {
1079 Err(_) => span_bug!(span,
1080 "bad `simd_shuffle` instruction only caught in codegen?")
1083 require_simd!(ret_ty, "return");
1085 let out_len = ret_ty.simd_size(tcx);
1086 require!(out_len == n,
1087 "expected return type of length {}, found `{}` with length {}",
1088 n, ret_ty, out_len);
1089 require!(in_elem == ret_ty.simd_type(tcx),
1090 "expected return element type `{}` (element of input `{}`), \
1091 found `{}` with element type `{}`",
1093 ret_ty, ret_ty.simd_type(tcx));
1095 let total_len = in_len as u128 * 2;
1097 let vector = args[2].immediate();
1099 let indices: Option<Vec<_>> = (0..n)
1102 let val = const_get_elt(vector, i as u64);
1103 match const_to_opt_u128(val, true) {
1105 emit_error!("shuffle index #{} is not a constant", arg_idx);
1108 Some(idx) if idx >= total_len => {
1109 emit_error!("shuffle index #{} is out of bounds (limit {})",
1110 arg_idx, total_len);
1113 Some(idx) => Some(C_i32(bx.cx, idx as i32)),
1117 let indices = match indices {
1119 None => return Ok(C_null(llret_ty))
1122 return Ok(bx.shuffle_vector(args[0].immediate(),
1123 args[1].immediate(),
1124 C_vector(&indices)))
1127 if name == "simd_insert" {
1128 require!(in_elem == arg_tys[2],
1129 "expected inserted type `{}` (element of input `{}`), found `{}`",
1130 in_elem, in_ty, arg_tys[2]);
1131 return Ok(bx.insert_element(args[0].immediate(),
1132 args[2].immediate(),
1133 args[1].immediate()))
1135 if name == "simd_extract" {
1136 require!(ret_ty == in_elem,
1137 "expected return type `{}` (element of input `{}`), found `{}`",
1138 in_elem, in_ty, ret_ty);
1139 return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
1142 if name == "simd_select" {
1143 let m_elem_ty = in_elem;
1145 let v_len = arg_tys[1].simd_size(tcx);
1146 require!(m_len == v_len,
1147 "mismatched lengths: mask length `{}` != other vector length `{}`",
1150 match m_elem_ty.sty {
1153 return_error!("mask element type is `{}`, expected `i_`", m_elem_ty);
1156 // truncate the mask to a vector of i1s
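    // (For illustration: after the truncation only the low bit of each mask lane
    // matters; a lane of `1` selects the lane from the second argument and a
    // lane of `0` selects it from the third.)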
1157 let i1 = Type::i1(bx.cx);
1158 let i1xn = Type::vector(i1, m_len as u64);
1159 let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1160 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1163 fn simd_simple_float_intrinsic(
1165 in_elem: &::rustc::ty::TyS,
1166 in_ty: &::rustc::ty::TyS,
1168 bx: &Builder<'a, 'll, 'tcx>,
1170 args: &[OperandRef<'tcx>],
1171 ) -> Result<ValueRef, ()> {
1172 macro_rules! emit_error {
1176 ($msg: tt, $($fmt: tt)*) => {
1177 span_invalid_monomorphization_error(
1179 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1184 macro_rules! return_error {
1187 emit_error!($($fmt)*);
1192 let ety = match in_elem.sty {
1193 ty::TyFloat(f) if f.bit_width() == 32 => {
1194 if in_len < 2 || in_len > 16 {
1196 "unsupported floating-point vector `{}` with length `{}` \
1197 out-of-range [2, 16]",
1202 ty::TyFloat(f) if f.bit_width() == 64 => {
1203 if in_len < 2 || in_len > 8 {
1204 return_error!("unsupported floating-point vector `{}` with length `{}` \
1205 out-of-range [2, 8]",
1211 return_error!("unsupported element type `{}` of floating-point vector `{}`",
1215 return_error!("`{}` is not a floating-point type", in_ty);
1219 let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
1220 let intrinsic = bx.cx.get_intrinsic(&llvm_name);
1221 let c = bx.call(intrinsic,
1222 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1224 unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
1228 if name == "simd_fsqrt" {
1229 return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1232 if name == "simd_fsin" {
1233 return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1236 if name == "simd_fcos" {
1237 return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1240 if name == "simd_fabs" {
1241 return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1244 if name == "simd_floor" {
1245 return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1248 if name == "simd_ceil" {
1249 return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1252 if name == "simd_fexp" {
1253 return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1256 if name == "simd_fexp2" {
1257 return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1260 if name == "simd_flog10" {
1261 return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1264 if name == "simd_flog2" {
1265 return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1268 if name == "simd_flog" {
1269 return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1272 if name == "simd_fpowi" {
1273 return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1276 if name == "simd_fpow" {
1277 return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1280 if name == "simd_fma" {
1281 return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1285 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1286 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
1287 fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
1288 let p0s: String = "p0".repeat(no_pointers);
1290 ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1291 ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1292 ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1293 _ => unreachable!(),
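// For illustration, assuming a 4-lane f32 vector: with no pointer indirection this
// produces the string "v4f32", and with one level of pointer indirection it
// produces "v4p0f32".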
1297 fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize,
1298 mut no_pointers: usize) -> &'ll Type {
1299 // FIXME: use cx.layout_of(ty).llvm_type() ?
1300 let mut elem_ty = match elem_ty.sty {
1301 ty::TyInt(v) => Type::int_from_ty(cx, v),
1302 ty::TyUint(v) => Type::uint_from_ty(cx, v),
1303 ty::TyFloat(v) => Type::float_from_ty(cx, v),
1304 _ => unreachable!(),
1306 while no_pointers > 0 {
1307 elem_ty = elem_ty.ptr_to();
1310 Type::vector(elem_ty, vec_len as u64)
1314 if name == "simd_gather" {
1315 // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1316 // mask: <N x i{M}>) -> <N x T>
1317 // * N: number of elements in the input vectors
1318 // * T: type of the element to load
1319 // * M: any integer width is supported, will be truncated to i1
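        // For illustration, assuming a gather of 4 x f32 through 4 x *const f32,
        // the code below ends up calling the LLVM intrinsic
        // `llvm.masked.gather.v4f32.v4p0f32`.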
1321 // All types must be simd vector types
1322 require_simd!(in_ty, "first");
1323 require_simd!(arg_tys[1], "second");
1324 require_simd!(arg_tys[2], "third");
1325 require_simd!(ret_ty, "return");
1327 // Of the same length:
1328 require!(in_len == arg_tys[1].simd_size(tcx),
1329 "expected {} argument with length {} (same as input type `{}`), \
1330 found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1331 arg_tys[1].simd_size(tcx));
1332 require!(in_len == arg_tys[2].simd_size(tcx),
1333 "expected {} argument with length {} (same as input type `{}`), \
1334 found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1335 arg_tys[2].simd_size(tcx));
1337 // The return type must match the first argument type
1338 require!(ret_ty == in_ty,
1339 "expected return type `{}`, found `{}`",
1342 // This counts how many pointers
1343 fn ptr_count(t: ty::Ty) -> usize {
1345 ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
1351 fn non_ptr(t: ty::Ty) -> ty::Ty {
1353 ty::TyRawPtr(p) => non_ptr(p.ty),
1358 // The second argument must be a simd vector with an element type that's a pointer
1359 // to the element type of the first argument
1360 let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1361 ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
1362 non_ptr(arg_tys[1].simd_type(tcx))),
1364 require!(false, "expected element type `{}` of second argument `{}` \
1365 to be a pointer to the element type `{}` of the first \
1366 argument `{}`, found `{}` != `*_ {}`",
1367 arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1368 arg_tys[1].simd_type(tcx).sty, in_elem);
1372 assert!(pointer_count > 0);
1373 assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx)));
1374 assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1376 // The element type of the third argument must be a signed integer type of any width:
1377 match arg_tys[2].simd_type(tcx).sty {
1380 require!(false, "expected element type `{}` of third argument `{}` \
1381 to be a signed integer type",
1382 arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1386 // Alignment of T, must be a constant integer value:
1387 let alignment_ty = Type::i32(bx.cx);
1388 let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1390 // Truncate the mask vector to a vector of i1s:
1391 let (mask, mask_ty) = {
1392 let i1 = Type::i1(bx.cx);
1393 let i1xn = Type::vector(i1, in_len as u64);
1394 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1397 // Type of the vector of pointers:
1398 let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1399 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1401 // Type of the vector of elements:
1402 let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1403 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1405 let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
1406 llvm_elem_vec_str, llvm_pointer_vec_str);
1407 let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1408 Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty,
1409 llvm_elem_vec_ty], llvm_elem_vec_ty));
1410 llvm::SetUnnamedAddr(f, false);
1411 let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
1416 if name == "simd_scatter" {
1417 // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1418 // mask: <N x i{M}>) -> ()
1419 // * N: number of elements in the input vectors
1420 // * T: type of the element to load
1421 // * M: any integer width is supported, will be truncated to i1
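        // For illustration, assuming a scatter of 4 x f32 through 4 x *mut f32,
        // the code below ends up calling the LLVM intrinsic
        // `llvm.masked.scatter.v4f32.v4p0f32`.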
1423 // All types must be simd vector types
1424 require_simd!(in_ty, "first");
1425 require_simd!(arg_tys[1], "second");
1426 require_simd!(arg_tys[2], "third");
1428 // Of the same length:
1429 require!(in_len == arg_tys[1].simd_size(tcx),
1430 "expected {} argument with length {} (same as input type `{}`), \
1431 found `{}` with length {}", "second", in_len, in_ty, arg_tys[1],
1432 arg_tys[1].simd_size(tcx));
1433 require!(in_len == arg_tys[2].simd_size(tcx),
1434 "expected {} argument with length {} (same as input type `{}`), \
1435 found `{}` with length {}", "third", in_len, in_ty, arg_tys[2],
1436 arg_tys[2].simd_size(tcx));
1438 // This counts how many pointers
1439 fn ptr_count(t: ty::Ty) -> usize {
1441 ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
1447 fn non_ptr(t: ty::Ty) -> ty::Ty {
1449 ty::TyRawPtr(p) => non_ptr(p.ty),
1454 // The second argument must be a simd vector with an element type that's a pointer
1455 // to the element type of the first argument
1456 let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
1457 ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
1458 => (ptr_count(arg_tys[1].simd_type(tcx)),
1459 non_ptr(arg_tys[1].simd_type(tcx))),
1461 require!(false, "expected element type `{}` of second argument `{}` \
1462 to be a pointer to the element type `{}` of the first \
1463 argument `{}`, found `{}` != `*mut {}`",
1464 arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty,
1465 arg_tys[1].simd_type(tcx).sty, in_elem);
1469 assert!(pointer_count > 0);
1470 assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx)));
1471 assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1473 // The element type of the third argument must be a signed integer type of any width:
1474 match arg_tys[2].simd_type(tcx).sty {
1477 require!(false, "expected element type `{}` of third argument `{}` \
1478 to be a signed integer type",
1479 arg_tys[2].simd_type(tcx).sty, arg_tys[2]);
1483 // Alignment of T, must be a constant integer value:
1484 let alignment_ty = Type::i32(bx.cx);
1485 let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
1487 // Truncate the mask vector to a vector of i1s:
1488 let (mask, mask_ty) = {
1489 let i1 = Type::i1(bx.cx);
1490 let i1xn = Type::vector(i1, in_len as u64);
1491 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1494 let ret_t = Type::void(bx.cx);
1496 // Type of the vector of pointers:
1497 let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
1498 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1500 // Type of the vector of elements:
1501 let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
1502 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1504 let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
1505 llvm_elem_vec_str, llvm_pointer_vec_str);
1506 let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
1507 Type::func(&[llvm_elem_vec_ty,
1508 llvm_pointer_vec_ty,
1511 llvm::SetUnnamedAddr(f, false);
1512 let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask],
1517 macro_rules! arith_red {
1518 ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
1520 require!(ret_ty == in_elem,
1521 "expected return type `{}` (element of input `{}`), found `{}`",
1522 in_elem, in_ty, ret_ty);
1523 return match in_elem.sty {
1524 ty::TyInt(_) | ty::TyUint(_) => {
1525 let r = bx.$integer_reduce(args[0].immediate());
1527 // if overflow occurs, the result is the
1528 // mathematical result modulo 2^n:
1529 if name.contains("mul") {
1530 Ok(bx.mul(args[1].immediate(), r))
1532 Ok(bx.add(args[1].immediate(), r))
1535 Ok(bx.$integer_reduce(args[0].immediate()))
1539 // ordered arithmetic reductions take an accumulator
1540 let acc = if $ordered {
1541 let acc = args[1].immediate();
1542 // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
1543 // * if the accumulator of the fadd isn't 0, incorrect
1544 // code is generated
1545 // * if the accumulator of the fmul isn't 1, incorrect
1546 // code is generated
1547 match const_get_real(acc) {
1548 None => return_error!("accumulator of {} is not a constant", $name),
1549 Some((v, loses_info)) => {
1550 if $name.contains("mul") && v != 1.0_f64 {
1551 return_error!("accumulator of {} is not 1.0", $name);
1552 } else if $name.contains("add") && v != 0.0_f64 {
1553 return_error!("accumulator of {} is not 0.0", $name);
1554 } else if loses_info {
1555 return_error!("accumulator of {} loses information", $name);
1561 // unordered arithmetic reductions do not:
1562 match f.bit_width() {
1563 32 => C_undef(Type::f32(bx.cx)),
1564 64 => C_undef(Type::f64(bx.cx)),
1565                                 v => {
1566                                     return_error!(r#"
1567 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
1568 $name, in_ty, in_elem, v, ret_ty
1574 Ok(bx.$float_reduce(acc, args[0].immediate()))
1578 "unsupported {} from `{}` with element `{}` to `{}`",
1579 $name, in_ty, in_elem, ret_ty
1587 arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
1588 arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
1589 arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
1590 arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
1592 macro_rules! minmax_red {
1593 ($name:tt: $int_red:ident, $float_red:ident) => {
1595 require!(ret_ty == in_elem,
1596 "expected return type `{}` (element of input `{}`), found `{}`",
1597 in_elem, in_ty, ret_ty);
1598 return match in_elem.sty {
1600 Ok(bx.$int_red(args[0].immediate(), true))
1603 Ok(bx.$int_red(args[0].immediate(), false))
1605 ty::TyFloat(_f) => {
1606 Ok(bx.$float_red(args[0].immediate()))
1609 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1610 $name, in_ty, in_elem, ret_ty)
1618 minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
1619 minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);
1621 minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
1622 minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
1624 macro_rules! bitwise_red {
1625 ($name:tt : $red:ident, $boolean:expr) => {
1627 let input = if !$boolean {
1628 require!(ret_ty == in_elem,
1629 "expected return type `{}` (element of input `{}`), found `{}`",
1630 in_elem, in_ty, ret_ty);
1634 ty::TyInt(_) | ty::TyUint(_) => {},
1636 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1637 $name, in_ty, in_elem, ret_ty)
1641 // boolean reductions operate on vectors of i1s:
1642 let i1 = Type::i1(bx.cx);
1643 let i1xn = Type::vector(i1, in_len as u64);
1644 bx.trunc(args[0].immediate(), i1xn)
1646 return match in_elem.sty {
1647 ty::TyInt(_) | ty::TyUint(_) => {
1648 let r = bx.$red(input);
1653 bx.zext(r, Type::bool(bx.cx))
1658 return_error!("unsupported {} from `{}` with element `{}` to `{}`",
1659 $name, in_ty, in_elem, ret_ty)
1666 bitwise_red!("simd_reduce_and": vector_reduce_and, false);
1667 bitwise_red!("simd_reduce_or": vector_reduce_or, false);
1668 bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
1669 bitwise_red!("simd_reduce_all": vector_reduce_and, true);
1670 bitwise_red!("simd_reduce_any": vector_reduce_or, true);
1672 if name == "simd_cast" {
1673 require_simd!(ret_ty, "return");
1674 let out_len = ret_ty.simd_size(tcx);
1675 require!(in_len == out_len,
1676 "expected return type with length {} (same as input type `{}`), \
1677 found `{}` with length {}",
1680 // casting cares about nominal type, not just structural type
1681 let out_elem = ret_ty.simd_type(tcx);
1683 if in_elem == out_elem { return Ok(args[0].immediate()); }
1685 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1687 let (in_style, in_width) = match in_elem.sty {
1688 // vectors of pointer-sized integers should've been
1689 // disallowed before here, so this unwrap is safe.
1690 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1691 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1692 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1693 _ => (Style::Unsupported, 0)
1695 let (out_style, out_width) = match out_elem.sty {
1696 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1697 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1698 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1699 _ => (Style::Unsupported, 0)
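    // (For illustration of the dispatch below: casting `u16x4` to `i32x4`
    // zero-extends because the *input* element type is unsigned, `i16x4` to
    // `i32x4` sign-extends, equal widths are passed through unchanged, and
    // narrowing integer casts truncate.)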
1702 match (in_style, out_style) {
1703 (Style::Int(in_is_signed), Style::Int(_)) => {
1704 return Ok(match in_width.cmp(&out_width) {
1705 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
1706 Ordering::Equal => args[0].immediate(),
1707 Ordering::Less => if in_is_signed {
1708 bx.sext(args[0].immediate(), llret_ty)
1710 bx.zext(args[0].immediate(), llret_ty)
1714 (Style::Int(in_is_signed), Style::Float) => {
1715 return Ok(if in_is_signed {
1716 bx.sitofp(args[0].immediate(), llret_ty)
1718 bx.uitofp(args[0].immediate(), llret_ty)
1721 (Style::Float, Style::Int(out_is_signed)) => {
1722 return Ok(if out_is_signed {
1723 bx.fptosi(args[0].immediate(), llret_ty)
1725 bx.fptoui(args[0].immediate(), llret_ty)
1728 (Style::Float, Style::Float) => {
1729 return Ok(match in_width.cmp(&out_width) {
1730 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
1731 Ordering::Equal => args[0].immediate(),
1732 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
1735 _ => {/* Unsupported. Fallthrough. */}
1738 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1742 macro_rules! arith {
1743 ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
1744 $(if name == stringify!($name) {
1746 $($(ty::$p(_))|* => {
1747 return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
1752 "unsupported operation on `{}` with element `{}`",
1759 simd_add: TyUint, TyInt => add, TyFloat => fadd;
1760 simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
1761 simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
1762 simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
1763 simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
1764 simd_shl: TyUint, TyInt => shl;
1765 simd_shr: TyUint => lshr, TyInt => ashr;
1766 simd_and: TyUint, TyInt => and;
1767 simd_or: TyUint, TyInt => or;
1768 simd_xor: TyUint, TyInt => xor;
1769 simd_fmax: TyFloat => maxnum;
1770 simd_fmin: TyFloat => minnum;
1772 span_bug!(span, "unknown SIMD intrinsic");
1775 // Returns the width of an integer Ty, and whether it is signed.
1776 // Returns None if the type is not an integer.
1777 // FIXME: there are multiple versions of this function; investigate reusing one
1778 // of the already existing ones.
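// For illustration: on a 64-bit target `isize`/`usize` report a width of 64,
// `i16` reports `(16, true)` and `u32` reports `(32, false)`.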
1779 fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
1781 ty::TyInt(t) => Some((match t {
1782 ast::IntTy::Isize => {
1783 match &cx.tcx.sess.target.target.target_pointer_width[..] {
1787 tws => bug!("Unsupported target word size for isize: {}", tws),
1790 ast::IntTy::I8 => 8,
1791 ast::IntTy::I16 => 16,
1792 ast::IntTy::I32 => 32,
1793 ast::IntTy::I64 => 64,
1794 ast::IntTy::I128 => 128,
1796 ty::TyUint(t) => Some((match t {
1797 ast::UintTy::Usize => {
1798 match &cx.tcx.sess.target.target.target_pointer_width[..] {
1802 tws => bug!("Unsupported target word size for usize: {}", tws),
1805 ast::UintTy::U8 => 8,
1806 ast::UintTy::U16 => 16,
1807 ast::UintTy::U32 => 32,
1808 ast::UintTy::U64 => 64,
1809 ast::UintTy::U128 => 128,
1815 // Returns the width of a float TypeVariant.
1816 // Returns None if the type is not a float.
1817 fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
1819 use rustc::ty::TyFloat;
1821 TyFloat(t) => Some(match t {
1822 ast::FloatTy::F32 => 32,
1823 ast::FloatTy::F64 => 64,