1 use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
2 use crate::builder::Builder;
3 use crate::context::CodegenCx;
5 use crate::type_::Type;
6 use crate::type_of::LayoutLlvmExt;
7 use crate::va_arg::emit_va_arg;
8 use crate::value::Value;
10 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
11 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
12 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
13 use rustc_codegen_ssa::mir::operand::OperandRef;
14 use rustc_codegen_ssa::mir::place::PlaceRef;
15 use rustc_codegen_ssa::traits::*;
17 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
18 use rustc_middle::ty::{self, Ty};
19 use rustc_middle::{bug, span_bug};
20 use rustc_span::{sym, symbol::kw, Span, Symbol};
21 use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
22 use rustc_target::spec::PanicStrategy;
24 use std::cmp::Ordering;
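// Maps a "simple" float intrinsic, i.e. one whose semantics match an LLVM
// intrinsic exactly (e.g. `sqrtf32` -> `llvm.sqrt.f32`), to that LLVM intrinsic.
// Anything that needs custom lowering is handled further below and gets `None` here.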
27 fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
28 let llvm_name = match name {
29 sym::sqrtf32 => "llvm.sqrt.f32",
30 sym::sqrtf64 => "llvm.sqrt.f64",
31 sym::powif32 => "llvm.powi.f32",
32 sym::powif64 => "llvm.powi.f64",
33 sym::sinf32 => "llvm.sin.f32",
34 sym::sinf64 => "llvm.sin.f64",
35 sym::cosf32 => "llvm.cos.f32",
36 sym::cosf64 => "llvm.cos.f64",
37 sym::powf32 => "llvm.pow.f32",
38 sym::powf64 => "llvm.pow.f64",
39 sym::expf32 => "llvm.exp.f32",
40 sym::expf64 => "llvm.exp.f64",
41 sym::exp2f32 => "llvm.exp2.f32",
42 sym::exp2f64 => "llvm.exp2.f64",
43 sym::logf32 => "llvm.log.f32",
44 sym::logf64 => "llvm.log.f64",
45 sym::log10f32 => "llvm.log10.f32",
46 sym::log10f64 => "llvm.log10.f64",
47 sym::log2f32 => "llvm.log2.f32",
48 sym::log2f64 => "llvm.log2.f64",
49 sym::fmaf32 => "llvm.fma.f32",
50 sym::fmaf64 => "llvm.fma.f64",
51 sym::fabsf32 => "llvm.fabs.f32",
52 sym::fabsf64 => "llvm.fabs.f64",
53 sym::minnumf32 => "llvm.minnum.f32",
54 sym::minnumf64 => "llvm.minnum.f64",
55 sym::maxnumf32 => "llvm.maxnum.f32",
56 sym::maxnumf64 => "llvm.maxnum.f64",
57 sym::copysignf32 => "llvm.copysign.f32",
58 sym::copysignf64 => "llvm.copysign.f64",
59 sym::floorf32 => "llvm.floor.f32",
60 sym::floorf64 => "llvm.floor.f64",
61 sym::ceilf32 => "llvm.ceil.f32",
62 sym::ceilf64 => "llvm.ceil.f64",
63 sym::truncf32 => "llvm.trunc.f32",
64 sym::truncf64 => "llvm.trunc.f64",
65 sym::rintf32 => "llvm.rint.f32",
66 sym::rintf64 => "llvm.rint.f64",
67 sym::nearbyintf32 => "llvm.nearbyint.f32",
68 sym::nearbyintf64 => "llvm.nearbyint.f64",
69 sym::roundf32 => "llvm.round.f32",
70 sym::roundf64 => "llvm.round.f64",
73 Some(cx.get_intrinsic(&llvm_name))
76 impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
77 fn codegen_intrinsic_call(
79 instance: ty::Instance<'tcx>,
80 fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
81 args: &[OperandRef<'tcx, &'ll Value>],
86 let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
88 let (def_id, substs) = match *callee_ty.kind() {
89 ty::FnDef(def_id, substs) => (def_id, substs),
90 _ => bug!("expected fn item type, found {}", callee_ty),
93 let sig = callee_ty.fn_sig(tcx);
94 let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
95 let arg_tys = sig.inputs();
96 let ret_ty = sig.output();
97 let name = tcx.item_name(def_id);
98 let name_str = &*name.as_str();
100 let llret_ty = self.layout_of(ret_ty).llvm_type(self);
101 let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
103 let simple = get_simple_intrinsic(self, name);
104 let llval = match name {
105 _ if simple.is_some() => self.call(
107 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
111 let expect = self.get_intrinsic(&("llvm.expect.i1"));
112 self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
115 let expect = self.get_intrinsic(&("llvm.expect.i1"));
116 self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
129 let llfn = self.get_intrinsic(&("llvm.debugtrap"));
130 self.call(llfn, &[], None)
133 let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
134 self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
137 match fn_abi.ret.layout.abi {
138 abi::Abi::Scalar(ref scalar) => {
140 Primitive::Int(..) => {
141 if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on an integer type
// less than 4 bytes in length. If it is, promote
// the integer to an `i32` and truncate the result
// back to the smaller type.
146 let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
147 self.trunc(promoted_result, llret_ty)
149 emit_va_arg(self, args[0], ret_ty)
152 Primitive::F64 | Primitive::Pointer => {
153 emit_va_arg(self, args[0], ret_ty)
155 // `va_arg` should never be used with the return type f32.
156 Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
159 _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
163 sym::volatile_load | sym::unaligned_volatile_load => {
164 let tp_ty = substs.type_at(0);
165 let mut ptr = args[0].immediate();
166 if let PassMode::Cast(ty) = fn_abi.ret.mode {
167 ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
169 let load = self.volatile_load(ptr);
170 let align = if name == sym::unaligned_volatile_load {
173 self.align_of(tp_ty).bytes() as u32
176 llvm::LLVMSetAlignment(load, align);
178 self.to_immediate(load, self.layout_of(tp_ty))
180 sym::volatile_store => {
181 let dst = args[0].deref(self.cx());
182 args[1].val.volatile_store(self, dst);
185 sym::unaligned_volatile_store => {
186 let dst = args[0].deref(self.cx());
187 args[1].val.unaligned_volatile_store(self, dst);
190 sym::prefetch_read_data
191 | sym::prefetch_write_data
192 | sym::prefetch_read_instruction
193 | sym::prefetch_write_instruction => {
194 let expect = self.get_intrinsic(&("llvm.prefetch"));
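// Per the LLVM LangRef, `llvm.prefetch` takes (address, rw, locality, cache_type):
// rw is 0 for read / 1 for write, cache_type is 0 for instruction / 1 for data,
// and the locality hint is forwarded from the intrinsic's second argument.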
195 let (rw, cache_type) = match name {
196 sym::prefetch_read_data => (0, 1),
197 sym::prefetch_write_data => (1, 1),
198 sym::prefetch_read_instruction => (0, 0),
199 sym::prefetch_write_instruction => (1, 0),
208 self.const_i32(cache_type),
222 | sym::saturating_add
223 | sym::saturating_sub => {
225 match int_type_width_signed(ty, self) {
226 Some((width, signed)) => match name {
227 sym::ctlz | sym::cttz => {
228 let y = self.const_bool(false);
229 let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
230 self.call(llfn, &[args[0].immediate(), y], None)
232 sym::ctlz_nonzero | sym::cttz_nonzero => {
233 let y = self.const_bool(true);
234 let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
235 let llfn = self.get_intrinsic(llvm_name);
236 self.call(llfn, &[args[0].immediate(), y], None)
238 sym::ctpop => self.call(
239 self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
240 &[args[0].immediate()],
args[0].immediate() // byte-swapping a u8/i8 is a no-op
248 self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
249 &[args[0].immediate()],
254 sym::bitreverse => self.call(
255 self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
256 &[args[0].immediate()],
259 sym::rotate_left | sym::rotate_right => {
260 let is_left = name == sym::rotate_left;
261 let val = args[0].immediate();
262 let raw_shift = args[1].immediate();
263 // rotate = funnel shift with first two args the same
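// e.g. `rotate_left(x, s)` is emitted as `llvm.fshl.iN(x, x, s)`; with both
// data operands equal, the funnel shift is exactly a rotate.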
265 &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
266 let llfn = self.get_intrinsic(llvm_name);
267 self.call(llfn, &[val, val, raw_shift], None)
269 sym::saturating_add | sym::saturating_sub => {
270 let is_add = name == sym::saturating_add;
271 let lhs = args[0].immediate();
272 let rhs = args[1].immediate();
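// e.g. `saturating_add::<u32>` lowers to `llvm.uadd.sat.i32`, and
// `saturating_sub::<i16>` to `llvm.ssub.sat.i16`.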
273 let llvm_name = &format!(
275 if signed { 's' } else { 'u' },
276 if is_add { "add" } else { "sub" },
279 let llfn = self.get_intrinsic(llvm_name);
280 self.call(llfn, &[lhs, rhs], None)
285 span_invalid_monomorphization_error(
289 "invalid monomorphization of `{}` intrinsic: \
290 expected basic integer type, found `{}`",
299 _ if name_str.starts_with("simd_") => {
300 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
306 _ => bug!("unknown intrinsic '{}'", name),
309 if !fn_abi.ret.is_ignore() {
310 if let PassMode::Cast(ty) = fn_abi.ret.mode {
311 let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
312 let ptr = self.pointercast(result.llval, ptr_llty);
313 self.store(llval, ptr, result.align);
315 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
317 .store(self, result);
322 fn abort(&mut self) {
323 let fnname = self.get_intrinsic(&("llvm.trap"));
324 self.call(fnname, &[], None);
327 fn assume(&mut self, val: Self::Value) {
328 let assume_intrinsic = self.get_intrinsic("llvm.assume");
329 self.call(assume_intrinsic, &[val], None);
332 fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
333 let expect = self.get_intrinsic(&"llvm.expect.i1");
334 self.call(expect, &[cond, self.const_bool(expected)], None)
337 fn sideeffect(&mut self, unconditional: bool) {
338 if unconditional || self.tcx.sess.opts.debugging_opts.insert_sideeffect {
339 let fnname = self.get_intrinsic(&("llvm.sideeffect"));
340 self.call(fnname, &[], None);
344 fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
345 let intrinsic = self.cx().get_intrinsic("llvm.va_start");
346 self.call(intrinsic, &[va_list], None)
349 fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
350 let intrinsic = self.cx().get_intrinsic("llvm.va_end");
351 self.call(intrinsic, &[va_list], None)
356 bx: &mut Builder<'a, 'll, 'tcx>,
357 try_func: &'ll Value,
359 catch_func: &'ll Value,
362 if bx.sess().panic_strategy() == PanicStrategy::Abort {
363 bx.call(try_func, &[data], None);
364 // Return 0 unconditionally from the intrinsic call;
365 // we can never unwind.
366 let ret_align = bx.tcx().data_layout.i32_align.abi;
367 bx.store(bx.const_i32(0), dest, ret_align);
368 } else if wants_msvc_seh(bx.sess()) {
369 codegen_msvc_try(bx, try_func, data, catch_func, dest);
370 } else if bx.sess().target.is_like_emscripten {
371 codegen_emcc_try(bx, try_func, data, catch_func, dest);
373 codegen_gnu_try(bx, try_func, data, catch_func, dest);
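// Whichever strategy is used, the generated shim stores an i32 status into
// `dest`: 0 when `try_func` returned normally, 1 when a panic was caught and
// handed to `catch_func`.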
377 // MSVC's definition of the `rust_try` function.
// This implementation uses the newer exception-handling instructions in LLVM,
// which have LLVM support for SEH on MSVC targets. Although these instructions
// are meant to work for all targets, as of this writing LLVM still does not
// recommend using them, as the older instructions remain better optimized.
385 bx: &mut Builder<'a, 'll, 'tcx>,
386 try_func: &'ll Value,
388 catch_func: &'ll Value,
391 let llfn = get_rust_try_fn(bx, &mut |mut bx| {
392 bx.set_personality_fn(bx.eh_personality());
393 bx.sideeffect(false);
395 let mut normal = bx.build_sibling_block("normal");
396 let mut catchswitch = bx.build_sibling_block("catchswitch");
397 let mut catchpad_rust = bx.build_sibling_block("catchpad_rust");
398 let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign");
399 let mut caught = bx.build_sibling_block("caught");
401 let try_func = llvm::get_param(bx.llfn(), 0);
402 let data = llvm::get_param(bx.llfn(), 1);
403 let catch_func = llvm::get_param(bx.llfn(), 2);
405 // We're generating an IR snippet that looks like:
407 // declare i32 @rust_try(%try_func, %data, %catch_func) {
408 // %slot = alloca i8*
409 // invoke %try_func(%data) to label %normal unwind label %catchswitch
415 // %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
418 // %tok = catchpad within %cs [%type_descriptor, 8, %slot]
420 // call %catch_func(%data, %ptr)
421 // catchret from %tok to label %caught
424 // %tok = catchpad within %cs [null, 64, null]
425 // call %catch_func(%data, null)
426 // catchret from %tok to label %caught
432 // This structure follows the basic usage of throw/try/catch in LLVM.
433 // For example, compile this C++ snippet to see what LLVM generates:
435 // struct rust_panic {
436 // rust_panic(const rust_panic&);
443 // void (*try_func)(void*),
445 // void (*catch_func)(void*, void*) noexcept
450 // } catch(rust_panic& a) {
451 // catch_func(data, &a);
454 // catch_func(data, NULL);
459 // More information can be found in libstd's seh.rs implementation.
460 let ptr_align = bx.tcx().data_layout.pointer_align.abi;
461 let slot = bx.alloca(bx.type_i8p(), ptr_align);
462 bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
464 normal.ret(bx.const_i32(0));
466 let cs = catchswitch.catch_switch(None, None, 2);
467 catchswitch.add_handler(cs, catchpad_rust.llbb());
468 catchswitch.add_handler(cs, catchpad_foreign.llbb());
470 // We can't use the TypeDescriptor defined in libpanic_unwind because it
471 // might be in another DLL and the SEH encoding only supports specifying
472 // a TypeDescriptor from the current module.
// However this isn't an issue since the MSVC runtime uses string
// comparison on the type name to match TypeDescriptors rather than
// by pointer equality.
// So instead we generate a new TypeDescriptor in each module that uses
// `try` and rely on the linker to merge the duplicate definitions.
482 // When modifying, make sure that the type_name string exactly matches
483 // the one used in src/libpanic_unwind/seh.rs.
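// Note: the constant built below roughly mirrors MSVC's `type_info` layout,
// i.e. a vtable pointer, a null spare pointer, and the inline type name that
// the runtime compares by string ("rust_panic\0").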
484 let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
485 let type_name = bx.const_bytes(b"rust_panic\0");
487 bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
488 let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
490 llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
491 llvm::SetUniqueComdat(bx.llmod, tydesc);
492 llvm::LLVMSetInitializer(tydesc, type_info);
495 // The flag value of 8 indicates that we are catching the exception by
496 // reference instead of by value. We can't use catch by value because
497 // that requires copying the exception object, which we don't support
498 // since our exception object effectively contains a Box.
500 // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
501 let flags = bx.const_i32(8);
502 let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
503 let ptr = catchpad_rust.load(slot, ptr_align);
504 catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
505 catchpad_rust.catch_ret(&funclet, caught.llbb());
507 // The flag value of 64 indicates a "catch-all".
508 let flags = bx.const_i32(64);
509 let null = bx.const_null(bx.type_i8p());
510 let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
511 catchpad_foreign.call(catch_func, &[data, null], Some(&funclet));
512 catchpad_foreign.catch_ret(&funclet, caught.llbb());
514 caught.ret(bx.const_i32(1));
517 // Note that no invoke is used here because by definition this function
518 // can't panic (that's what it's catching).
519 let ret = bx.call(llfn, &[try_func, data, catch_func], None);
520 let i32_align = bx.tcx().data_layout.i32_align.abi;
521 bx.store(ret, dest, i32_align);
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
528 // This codegen is a little surprising because we always call a shim
529 // function instead of inlining the call to `invoke` manually here. This is done
530 // because in LLVM we're only allowed to have one personality per function
531 // definition. The call to the `try` intrinsic is being inlined into the
532 // function calling it, and that function may already have other personality
533 // functions in play. By calling a shim we're guaranteed that our shim will have
534 // the right personality function.
536 bx: &mut Builder<'a, 'll, 'tcx>,
537 try_func: &'ll Value,
539 catch_func: &'ll Value,
542 let llfn = get_rust_try_fn(bx, &mut |mut bx| {
543 // Codegens the shims described above:
546 // invoke %try_func(%data) normal %normal unwind %catch
552 // (%ptr, _) = landingpad
553 // call %catch_func(%data, %ptr)
556 bx.sideeffect(false);
558 let mut then = bx.build_sibling_block("then");
559 let mut catch = bx.build_sibling_block("catch");
561 let try_func = llvm::get_param(bx.llfn(), 0);
562 let data = llvm::get_param(bx.llfn(), 1);
563 let catch_func = llvm::get_param(bx.llfn(), 2);
564 bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
565 then.ret(bx.const_i32(0));
567 // Type indicator for the exception being thrown.
569 // The first value in this tuple is a pointer to the exception object
570 // being thrown. The second value is a "selector" indicating which of
571 // the landing pad clauses the exception's type had been matched to.
572 // rust_try ignores the selector.
573 let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
574 let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
575 let tydesc = bx.const_null(bx.type_i8p());
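// A null clause acts as a catch-all here, so every in-flight exception is
// routed to `catch_func` regardless of its type.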
576 catch.add_clause(vals, tydesc);
577 let ptr = catch.extract_value(vals, 0);
578 catch.call(catch_func, &[data, ptr], None);
579 catch.ret(bx.const_i32(1));
582 // Note that no invoke is used here because by definition this function
583 // can't panic (that's what it's catching).
584 let ret = bx.call(llfn, &[try_func, data, catch_func], None);
585 let i32_align = bx.tcx().data_layout.i32_align.abi;
586 bx.store(ret, dest, i32_align);
589 // Variant of codegen_gnu_try used for emscripten where Rust panics are
590 // implemented using C++ exceptions. Here we use exceptions of a specific type
591 // (`struct rust_panic`) to represent Rust panics.
593 bx: &mut Builder<'a, 'll, 'tcx>,
594 try_func: &'ll Value,
596 catch_func: &'ll Value,
599 let llfn = get_rust_try_fn(bx, &mut |mut bx| {
600 // Codegens the shims described above:
603 // invoke %try_func(%data) normal %normal unwind %catch
609 // (%ptr, %selector) = landingpad
610 // %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
611 // %is_rust_panic = %selector == %rust_typeid
612 // %catch_data = alloca { i8*, i8 }
613 // %catch_data[0] = %ptr
614 // %catch_data[1] = %is_rust_panic
615 // call %catch_func(%data, %catch_data)
618 bx.sideeffect(false);
620 let mut then = bx.build_sibling_block("then");
621 let mut catch = bx.build_sibling_block("catch");
623 let try_func = llvm::get_param(bx.llfn(), 0);
624 let data = llvm::get_param(bx.llfn(), 1);
625 let catch_func = llvm::get_param(bx.llfn(), 2);
626 bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
627 then.ret(bx.const_i32(0));
629 // Type indicator for the exception being thrown.
631 // The first value in this tuple is a pointer to the exception object
632 // being thrown. The second value is a "selector" indicating which of
633 // the landing pad clauses the exception's type had been matched to.
634 let tydesc = bx.eh_catch_typeinfo();
635 let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
636 let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2);
637 catch.add_clause(vals, tydesc);
638 catch.add_clause(vals, bx.const_null(bx.type_i8p()));
639 let ptr = catch.extract_value(vals, 0);
640 let selector = catch.extract_value(vals, 1);
642 // Check if the typeid we got is the one for a Rust panic.
643 let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
644 let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None);
645 let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
646 let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());
648 // We need to pass two values to catch_func (ptr and is_rust_panic), so
649 // create an alloca and pass a pointer to that.
650 let ptr_align = bx.tcx().data_layout.pointer_align.abi;
651 let i8_align = bx.tcx().data_layout.i8_align.abi;
653 catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
654 let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
655 catch.store(ptr, catch_data_0, ptr_align);
656 let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
657 catch.store(is_rust_panic, catch_data_1, i8_align);
658 let catch_data = catch.bitcast(catch_data, bx.type_i8p());
660 catch.call(catch_func, &[data, catch_data], None);
661 catch.ret(bx.const_i32(1));
664 // Note that no invoke is used here because by definition this function
665 // can't panic (that's what it's catching).
666 let ret = bx.call(llfn, &[try_func, data, catch_func], None);
667 let i32_align = bx.tcx().data_layout.i32_align.abi;
668 bx.store(ret, dest, i32_align);
671 // Helper function to give a Block to a closure to codegen a shim function.
672 // This is currently primarily used for the `try` intrinsic functions above.
673 fn gen_fn<'ll, 'tcx>(
674 cx: &CodegenCx<'ll, 'tcx>,
676 rust_fn_sig: ty::PolyFnSig<'tcx>,
677 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
679 let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
680 let llfn = cx.declare_fn(name, &fn_abi);
681 cx.set_frame_pointer_elimination(llfn);
682 cx.apply_target_cpu_attr(llfn);
683 // FIXME(eddyb) find a nicer way to do this.
684 unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
685 let bx = Builder::new_block(cx, llfn, "entry-block");
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
693 // This function is only generated once and is then cached.
694 fn get_rust_try_fn<'ll, 'tcx>(
695 cx: &CodegenCx<'ll, 'tcx>,
696 codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
698 if let Some(llfn) = cx.rust_try_fn.get() {
702 // Define the type up front for the signature of the rust_try function.
704 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
705 // `unsafe fn(*mut i8) -> ()`
706 let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
710 hir::Unsafety::Unsafe,
713 // `unsafe fn(*mut i8, *mut i8) -> ()`
714 let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
715 [i8p, i8p].iter().cloned(),
718 hir::Unsafety::Unsafe,
721 // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
722 let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
723 vec![try_fn_ty, i8p, catch_fn_ty].into_iter(),
726 hir::Unsafety::Unsafe,
729 let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
730 cx.rust_try_fn.set(Some(rust_try));
734 fn generic_simd_intrinsic(
735 bx: &mut Builder<'a, 'll, 'tcx>,
738 args: &[OperandRef<'tcx, &'ll Value>],
742 ) -> Result<&'ll Value, ()> {
743 // macros for error handling:
744 macro_rules! emit_error {
748 ($msg: tt, $($fmt: tt)*) => {
749 span_invalid_monomorphization_error(
751 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
756 macro_rules! return_error {
759 emit_error!($($fmt)*);
765 macro_rules! require {
766 ($cond: expr, $($fmt: tt)*) => {
768 return_error!($($fmt)*);
773 macro_rules! require_simd {
774 ($ty: expr, $position: expr) => {
775 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
781 tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
782 let arg_tys = sig.inputs();
783 let name_str = &*name.as_str();
785 if name == sym::simd_select_bitmask {
786 let in_ty = arg_tys[0];
787 let m_len = match in_ty.kind() {
// Note that this `.unwrap()` crashes for isize/usize; that's somewhat
// intentional, as there's currently no use case for those types here.
790 ty::Int(i) => i.bit_width().unwrap(),
791 ty::Uint(i) => i.bit_width().unwrap(),
792 _ => return_error!("`{}` is not an integral type", in_ty),
794 require_simd!(arg_tys[1], "argument");
795 let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
797 // Allow masks for vectors with fewer than 8 elements to be
798 // represented with a u8 or i8.
799 m_len == v_len || (m_len == 8 && v_len < 8),
800 "mismatched lengths: mask length `{}` != other vector length `{}`",
804 let i1 = bx.type_i1();
805 let im = bx.type_ix(v_len);
806 let i1xn = bx.type_vector(i1, v_len);
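// Reinterpret the integer mask: truncate it to an iN with one bit per lane,
// then bitcast that to `<N x i1>` so `select` can switch per lane.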
807 let m_im = bx.trunc(args[0].immediate(), im);
808 let m_i1s = bx.bitcast(m_im, i1xn);
809 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
812 // every intrinsic below takes a SIMD vector as its first argument
813 require_simd!(arg_tys[0], "input");
814 let in_ty = arg_tys[0];
816 let comparison = match name {
817 sym::simd_eq => Some(hir::BinOpKind::Eq),
818 sym::simd_ne => Some(hir::BinOpKind::Ne),
819 sym::simd_lt => Some(hir::BinOpKind::Lt),
820 sym::simd_le => Some(hir::BinOpKind::Le),
821 sym::simd_gt => Some(hir::BinOpKind::Gt),
822 sym::simd_ge => Some(hir::BinOpKind::Ge),
826 let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
827 if let Some(cmp_op) = comparison {
828 require_simd!(ret_ty, "return");
830 let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
833 "expected return type with length {} (same as input type `{}`), \
834 found `{}` with length {}",
841 bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
842 "expected return type with integer elements, found `{}` with non-integer `{}`",
847 return Ok(compare_simd_types(
857 if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
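// The number of output lanes is encoded in the intrinsic name itself,
// e.g. `simd_shuffle4` produces a 4-element result vector.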
858 let n: u64 = stripped.parse().unwrap_or_else(|_| {
859 span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
862 require_simd!(ret_ty, "return");
864 let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
867 "expected return type of length {}, found `{}` with length {}",
874 "expected return element type `{}` (element of input `{}`), \
875 found `{}` with element type `{}`",
882 let total_len = u128::from(in_len) * 2;
884 let vector = args[2].immediate();
886 let indices: Option<Vec<_>> = (0..n)
889 let val = bx.const_get_elt(vector, i as u64);
890 match bx.const_to_opt_u128(val, true) {
892 emit_error!("shuffle index #{} is not a constant", arg_idx);
895 Some(idx) if idx >= total_len => {
897 "shuffle index #{} is out of bounds (limit {})",
903 Some(idx) => Some(bx.const_i32(idx as i32)),
907 let indices = match indices {
909 None => return Ok(bx.const_null(llret_ty)),
912 return Ok(bx.shuffle_vector(
915 bx.const_vector(&indices),
919 if name == sym::simd_insert {
921 in_elem == arg_tys[2],
922 "expected inserted type `{}` (element of input `{}`), found `{}`",
927 return Ok(bx.insert_element(
933 if name == sym::simd_extract {
936 "expected return type `{}` (element of input `{}`), found `{}`",
941 return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
944 if name == sym::simd_select {
945 let m_elem_ty = in_elem;
947 require_simd!(arg_tys[1], "argument");
948 let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
951 "mismatched lengths: mask length `{}` != other vector length `{}`",
955 match m_elem_ty.kind() {
957 _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
959 // truncate the mask to a vector of i1s
960 let i1 = bx.type_i1();
961 let i1xn = bx.type_vector(i1, m_len as u64);
962 let m_i1s = bx.trunc(args[0].immediate(), i1xn);
963 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
966 if name == sym::simd_bitmask {
967 // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
968 // vector mask and returns an unsigned integer containing the most
969 // significant bit (MSB) of each lane.
// If the vector has fewer than 8 lanes, a u8 is returned with zeroed
// trailing bits.
973 let expected_int_bits = in_len.max(8);
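// e.g. a `<4 x i32>` mask yields a `u8` whose low 4 bits are the lanes' MSBs
// and whose upper bits are zero.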
974 match ret_ty.kind() {
975 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
976 _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
979 // Integer vector <i{in_bitwidth} x in_len>:
980 let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
983 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
987 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
990 "vector argument `{}`'s element type `{}`, expected integer element type",
996 // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
999 bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1002 let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1003 // Truncate vector to an <i1 x N>
1004 let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
1005 // Bitcast <i1 x N> to iN:
1006 let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1007 // Zero-extend iN to the bitmask type:
1008 return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1011 fn simd_simple_float_intrinsic(
1013 in_elem: &::rustc_middle::ty::TyS<'_>,
1014 in_ty: &::rustc_middle::ty::TyS<'_>,
1016 bx: &mut Builder<'a, 'll, 'tcx>,
1018 args: &[OperandRef<'tcx, &'ll Value>],
1019 ) -> Result<&'ll Value, ()> {
1020 macro_rules! emit_error {
1024 ($msg: tt, $($fmt: tt)*) => {
1025 span_invalid_monomorphization_error(
1027 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
1031 macro_rules! return_error {
1034 emit_error!($($fmt)*);
1039 let ety = match in_elem.kind() {
1040 ty::Float(f) if f.bit_width() == 32 => {
1041 if in_len < 2 || in_len > 16 {
1043 "unsupported floating-point vector `{}` with length `{}` \
1044 out-of-range [2, 16]",
1051 ty::Float(f) if f.bit_width() == 64 => {
1052 if in_len < 2 || in_len > 8 {
1054 "unsupported floating-point vector `{}` with length `{}` \
1055 out-of-range [2, 8]",
1064 "unsupported element type `{}` of floating-point vector `{}`",
1070 return_error!("`{}` is not a floating-point type", in_ty);
1074 let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
1075 let intrinsic = bx.get_intrinsic(&llvm_name);
1077 bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
1078 unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
1083 sym::simd_fsqrt => {
1084 return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1087 return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1090 return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1093 return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1095 sym::simd_floor => {
1096 return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1099 return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1102 return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1104 sym::simd_fexp2 => {
1105 return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1107 sym::simd_flog10 => {
1108 return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1110 sym::simd_flog2 => {
1111 return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1114 return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1116 sym::simd_fpowi => {
1117 return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1120 return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1123 return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1125 _ => { /* fallthrough */ }
1129 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1130 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
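// e.g. a 4-lane vector of i32 is mangled as "v4i32"; each level of pointer
// indirection inserts a "p0" after the lane count, giving "v4p0i32" for
// a vector of `*const i32`.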
1131 fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
1132 let p0s: String = "p0".repeat(no_pointers);
1133 match *elem_ty.kind() {
1134 ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1135 ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1136 ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1137 _ => unreachable!(),
1142 cx: &CodegenCx<'ll, '_>,
1145 mut no_pointers: usize,
1147 // FIXME: use cx.layout_of(ty).llvm_type() ?
1148 let mut elem_ty = match *elem_ty.kind() {
1149 ty::Int(v) => cx.type_int_from_ty(v),
1150 ty::Uint(v) => cx.type_uint_from_ty(v),
1151 ty::Float(v) => cx.type_float_from_ty(v),
1152 _ => unreachable!(),
1154 while no_pointers > 0 {
1155 elem_ty = cx.type_ptr_to(elem_ty);
1158 cx.type_vector(elem_ty, vec_len)
1161 if name == sym::simd_gather {
1162 // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1163 // mask: <N x i{M}>) -> <N x T>
1164 // * N: number of elements in the input vectors
1165 // * T: type of the element to load
1166 // * M: any integer width is supported, will be truncated to i1
1168 // All types must be simd vector types
1169 require_simd!(in_ty, "first");
1170 require_simd!(arg_tys[1], "second");
1171 require_simd!(arg_tys[2], "third");
1172 require_simd!(ret_ty, "return");
1174 // Of the same length:
1175 let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
1176 let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
1179 "expected {} argument with length {} (same as input type `{}`), \
1180 found `{}` with length {}",
1189 "expected {} argument with length {} (same as input type `{}`), \
1190 found `{}` with length {}",
1198 // The return type must match the first argument type
1199 require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
// This counts how many levels of pointer indirection `t` has
1202 fn ptr_count(t: Ty<'_>) -> usize {
1204 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1210 fn non_ptr(t: Ty<'_>) -> Ty<'_> {
1212 ty::RawPtr(p) => non_ptr(p.ty),
1217 // The second argument must be a simd vector with an element type that's a pointer
1218 // to the element type of the first argument
1219 let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
1220 let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
1221 let (pointer_count, underlying_ty) = match element_ty1.kind() {
1222 ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
1226 "expected element type `{}` of second argument `{}` \
1227 to be a pointer to the element type `{}` of the first \
1228 argument `{}`, found `{}` != `*_ {}`",
1239 assert!(pointer_count > 0);
1240 assert_eq!(pointer_count - 1, ptr_count(element_ty0));
1241 assert_eq!(underlying_ty, non_ptr(element_ty0));
1243 // The element type of the third argument must be a signed integer type of any width:
1244 let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
1245 match element_ty2.kind() {
1250 "expected element type `{}` of third argument `{}` \
1251 to be a signed integer type",
1258 // Alignment of T, must be a constant integer value:
1259 let alignment_ty = bx.type_i32();
1260 let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1262 // Truncate the mask vector to a vector of i1s:
1263 let (mask, mask_ty) = {
1264 let i1 = bx.type_i1();
1265 let i1xn = bx.type_vector(i1, in_len);
1266 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1269 // Type of the vector of pointers:
1270 let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1271 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1273 // Type of the vector of elements:
1274 let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1275 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1277 let llvm_intrinsic =
1278 format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1279 let f = bx.declare_cfn(
1282 &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
1286 llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
1287 let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
1291 if name == sym::simd_scatter {
1292 // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1293 // mask: <N x i{M}>) -> ()
1294 // * N: number of elements in the input vectors
1295 // * T: type of the element to load
1296 // * M: any integer width is supported, will be truncated to i1
1298 // All types must be simd vector types
1299 require_simd!(in_ty, "first");
1300 require_simd!(arg_tys[1], "second");
1301 require_simd!(arg_tys[2], "third");
1303 // Of the same length:
1304 let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
1305 let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
1307 in_len == element_len1,
1308 "expected {} argument with length {} (same as input type `{}`), \
1309 found `{}` with length {}",
1317 in_len == element_len2,
1318 "expected {} argument with length {} (same as input type `{}`), \
1319 found `{}` with length {}",
// This counts how many levels of pointer indirection `t` has
1328 fn ptr_count(t: Ty<'_>) -> usize {
1330 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1336 fn non_ptr(t: Ty<'_>) -> Ty<'_> {
1338 ty::RawPtr(p) => non_ptr(p.ty),
1343 // The second argument must be a simd vector with an element type that's a pointer
1344 // to the element type of the first argument
1345 let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
1346 let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
1347 let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
1348 let (pointer_count, underlying_ty) = match element_ty1.kind() {
1349 ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
1350 (ptr_count(element_ty1), non_ptr(element_ty1))
1355 "expected element type `{}` of second argument `{}` \
1356 to be a pointer to the element type `{}` of the first \
1357 argument `{}`, found `{}` != `*mut {}`",
1368 assert!(pointer_count > 0);
1369 assert_eq!(pointer_count - 1, ptr_count(element_ty0));
1370 assert_eq!(underlying_ty, non_ptr(element_ty0));
1372 // The element type of the third argument must be a signed integer type of any width:
1373 match element_ty2.kind() {
1378 "expected element type `{}` of third argument `{}` \
1379 be a signed integer type",
1386 // Alignment of T, must be a constant integer value:
1387 let alignment_ty = bx.type_i32();
1388 let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1390 // Truncate the mask vector to a vector of i1s:
1391 let (mask, mask_ty) = {
1392 let i1 = bx.type_i1();
1393 let i1xn = bx.type_vector(i1, in_len);
1394 (bx.trunc(args[2].immediate(), i1xn), i1xn)
1397 let ret_t = bx.type_void();
1399 // Type of the vector of pointers:
1400 let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1401 let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1403 // Type of the vector of elements:
1404 let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1405 let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1407 let llvm_intrinsic =
1408 format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1409 let f = bx.declare_cfn(
1411 bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
1413 llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
1414 let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
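// Arithmetic reductions: the `*_ordered` variants thread the caller-supplied
// accumulator (args[1]) through the reduction, while the unordered float
// variants seed the `fast` LLVM reduction with the identity element
// (0.0 for add, 1.0 for mul).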
1418 macro_rules! arith_red {
1419 ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
1420 $identity:expr) => {
1421 if name == sym::$name {
1424 "expected return type `{}` (element of input `{}`), found `{}`",
1429 return match in_elem.kind() {
1430 ty::Int(_) | ty::Uint(_) => {
1431 let r = bx.$integer_reduce(args[0].immediate());
1433 // if overflow occurs, the result is the
1434 // mathematical result modulo 2^n:
1435 Ok(bx.$op(args[1].immediate(), r))
1437 Ok(bx.$integer_reduce(args[0].immediate()))
1441 let acc = if $ordered {
1442 // ordered arithmetic reductions take an accumulator
1445 // unordered arithmetic reductions use the identity accumulator
1446 match f.bit_width() {
1447 32 => bx.const_real(bx.type_f32(), $identity),
1448 64 => bx.const_real(bx.type_f64(), $identity),
1451 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
1460 Ok(bx.$float_reduce(acc, args[0].immediate()))
1463 "unsupported {} from `{}` with element `{}` to `{}`",
1474 arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
1475 arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
1477 simd_reduce_add_unordered: vector_reduce_add,
1478 vector_reduce_fadd_fast,
1484 simd_reduce_mul_unordered: vector_reduce_mul,
1485 vector_reduce_fmul_fast,
1491 macro_rules! minmax_red {
1492 ($name:ident: $int_red:ident, $float_red:ident) => {
1493 if name == sym::$name {
1496 "expected return type `{}` (element of input `{}`), found `{}`",
1501 return match in_elem.kind() {
1502 ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
1503 ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
1504 ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
1506 "unsupported {} from `{}` with element `{}` to `{}`",
1517 minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
1518 minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
1520 minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
1521 minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
1523 macro_rules! bitwise_red {
1524 ($name:ident : $red:ident, $boolean:expr) => {
1525 if name == sym::$name {
1526 let input = if !$boolean {
1529 "expected return type `{}` (element of input `{}`), found `{}`",
1536 match in_elem.kind() {
1537 ty::Int(_) | ty::Uint(_) => {}
1539 "unsupported {} from `{}` with element `{}` to `{}`",
1547 // boolean reductions operate on vectors of i1s:
1548 let i1 = bx.type_i1();
1549 let i1xn = bx.type_vector(i1, in_len as u64);
1550 bx.trunc(args[0].immediate(), i1xn)
1552 return match in_elem.kind() {
1553 ty::Int(_) | ty::Uint(_) => {
1554 let r = bx.$red(input);
1555 Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
1558 "unsupported {} from `{}` with element `{}` to `{}`",
1569 bitwise_red!(simd_reduce_and: vector_reduce_and, false);
1570 bitwise_red!(simd_reduce_or: vector_reduce_or, false);
1571 bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
1572 bitwise_red!(simd_reduce_all: vector_reduce_and, true);
1573 bitwise_red!(simd_reduce_any: vector_reduce_or, true);
1575 if name == sym::simd_cast {
1576 require_simd!(ret_ty, "return");
1577 let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
1580 "expected return type with length {} (same as input type `{}`), \
1581 found `{}` with length {}",
1587 // casting cares about nominal type, not just structural type
1588 if in_elem == out_elem {
1589 return Ok(args[0].immediate());
1594 Int(/* is signed? */ bool),
1598 let (in_style, in_width) = match in_elem.kind() {
1599 // vectors of pointer-sized integers should've been
1600 // disallowed before here, so this unwrap is safe.
1601 ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
1602 ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
1603 ty::Float(f) => (Style::Float, f.bit_width()),
1604 _ => (Style::Unsupported, 0),
1606 let (out_style, out_width) = match out_elem.kind() {
1607 ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
1608 ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
1609 ty::Float(f) => (Style::Float, f.bit_width()),
1610 _ => (Style::Unsupported, 0),
1613 match (in_style, out_style) {
1614 (Style::Int(in_is_signed), Style::Int(_)) => {
1615 return Ok(match in_width.cmp(&out_width) {
1616 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
1617 Ordering::Equal => args[0].immediate(),
1620 bx.sext(args[0].immediate(), llret_ty)
1622 bx.zext(args[0].immediate(), llret_ty)
1627 (Style::Int(in_is_signed), Style::Float) => {
1628 return Ok(if in_is_signed {
1629 bx.sitofp(args[0].immediate(), llret_ty)
1631 bx.uitofp(args[0].immediate(), llret_ty)
1634 (Style::Float, Style::Int(out_is_signed)) => {
1635 return Ok(if out_is_signed {
1636 bx.fptosi(args[0].immediate(), llret_ty)
1638 bx.fptoui(args[0].immediate(), llret_ty)
1641 (Style::Float, Style::Float) => {
1642 return Ok(match in_width.cmp(&out_width) {
1643 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
1644 Ordering::Equal => args[0].immediate(),
1645 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
1648 _ => { /* Unsupported. Fallthrough. */ }
1652 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1659 macro_rules! arith {
1660 ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
1661 $(if name == sym::$name {
1662 match in_elem.kind() {
1663 $($(ty::$p(_))|* => {
1664 return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
1669 "unsupported operation on `{}` with element `{}`",
1676 simd_add: Uint, Int => add, Float => fadd;
1677 simd_sub: Uint, Int => sub, Float => fsub;
1678 simd_mul: Uint, Int => mul, Float => fmul;
1679 simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
1680 simd_rem: Uint => urem, Int => srem, Float => frem;
1681 simd_shl: Uint, Int => shl;
1682 simd_shr: Uint => lshr, Int => ashr;
1683 simd_and: Uint, Int => and;
1684 simd_or: Uint, Int => or;
1685 simd_xor: Uint, Int => xor;
1686 simd_fmax: Float => maxnum;
1687 simd_fmin: Float => minnum;
1691 if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
1692 let lhs = args[0].immediate();
1693 let rhs = args[1].immediate();
1694 let is_add = name == sym::simd_saturating_add;
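// e.g. `simd_saturating_add` on a `<4 x u32>` vector lowers to
// `llvm.uadd.sat.v4i32`.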
1695 let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
1696 let (signed, elem_width, elem_ty) = match *in_elem.kind() {
1697 ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
1698 ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
1701 "expected element type `{}` of vector type `{}` \
1702 to be a signed or unsigned integer type",
1703 arg_tys[0].simd_size_and_type(bx.tcx()).1,
1708 let llvm_intrinsic = &format!(
1709 "llvm.{}{}.sat.v{}i{}",
1710 if signed { 's' } else { 'u' },
1711 if is_add { "add" } else { "sub" },
1715 let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
1717 let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
1718 llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
1719 let v = bx.call(f, &[lhs, rhs], None);
1723 span_bug!(span, "unknown SIMD intrinsic");
// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate reusing one of
// the already existing ones.
1730 fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
1733 Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
1736 Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))