use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{sym, symbol::kw, Span, Symbol};
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
use rustc_target::spec::PanicStrategy;

use std::cmp::Ordering;
use std::iter;
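
// Maps a "simple" intrinsic (one that lowers directly to a single LLVM
// intrinsic call taking the same arguments) to the LLVM intrinsic's name.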
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
    let llvm_name = match name {
        sym::sqrtf32 => "llvm.sqrt.f32",
        sym::sqrtf64 => "llvm.sqrt.f64",
        sym::powif32 => "llvm.powi.f32",
        sym::powif64 => "llvm.powi.f64",
        sym::sinf32 => "llvm.sin.f32",
        sym::sinf64 => "llvm.sin.f64",
        sym::cosf32 => "llvm.cos.f32",
        sym::cosf64 => "llvm.cos.f64",
        sym::powf32 => "llvm.pow.f32",
        sym::powf64 => "llvm.pow.f64",
        sym::expf32 => "llvm.exp.f32",
        sym::expf64 => "llvm.exp.f64",
        sym::exp2f32 => "llvm.exp2.f32",
        sym::exp2f64 => "llvm.exp2.f64",
        sym::logf32 => "llvm.log.f32",
        sym::logf64 => "llvm.log.f64",
        sym::log10f32 => "llvm.log10.f32",
        sym::log10f64 => "llvm.log10.f64",
        sym::log2f32 => "llvm.log2.f32",
        sym::log2f64 => "llvm.log2.f64",
        sym::fmaf32 => "llvm.fma.f32",
        sym::fmaf64 => "llvm.fma.f64",
        sym::fabsf32 => "llvm.fabs.f32",
        sym::fabsf64 => "llvm.fabs.f64",
        sym::minnumf32 => "llvm.minnum.f32",
        sym::minnumf64 => "llvm.minnum.f64",
        sym::maxnumf32 => "llvm.maxnum.f32",
        sym::maxnumf64 => "llvm.maxnum.f64",
        sym::copysignf32 => "llvm.copysign.f32",
        sym::copysignf64 => "llvm.copysign.f64",
        sym::floorf32 => "llvm.floor.f32",
        sym::floorf64 => "llvm.floor.f64",
        sym::ceilf32 => "llvm.ceil.f32",
        sym::ceilf64 => "llvm.ceil.f64",
        sym::truncf32 => "llvm.trunc.f32",
        sym::truncf64 => "llvm.trunc.f64",
        sym::rintf32 => "llvm.rint.f32",
        sym::rintf64 => "llvm.rint.f64",
        sym::nearbyintf32 => "llvm.nearbyint.f32",
        sym::nearbyintf64 => "llvm.nearbyint.f64",
        sym::roundf32 => "llvm.round.f32",
        sym::roundf64 => "llvm.round.f64",
        _ => return None,
    };
    Some(cx.get_intrinsic(&llvm_name))
}

impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => self.call(
                simple.unwrap(),
                &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                None,
            ),
            sym::likely => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
            }
            sym::unlikely => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
            }
            kw::Try => {
                try_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    llresult,
                );
                return;
            }
            sym::breakpoint => {
                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            sym::va_copy => {
                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
            }
            sym::va_arg => {
                match fn_abi.ret.layout.abi {
                    abi::Abi::Scalar(ref scalar) => {
                        match scalar.value {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::F64 | Primitive::Pointer => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type f32.
                            Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }
            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = substs.type_at(0);
                let mut ptr = args[0].immediate();
                if let PassMode::Cast(ty) = fn_abi.ret.mode {
                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
                }
                let load = self.volatile_load(ptr);
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
                    self.align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                self.to_immediate(load, self.layout_of(tp_ty))
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return;
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                let expect = self.get_intrinsic(&("llvm.prefetch"));
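                // `llvm.prefetch` takes (address, rw, locality, cache type):
                // rw is 0 for a read and 1 for a write; cache type is 1 for
                // the data cache and 0 for the instruction cache.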
                let (rw, cache_type) = match name {
                    sym::prefetch_read_data => (0, 1),
                    sym::prefetch_write_data => (1, 1),
                    sym::prefetch_read_instruction => (0, 0),
                    sym::prefetch_write_instruction => (1, 0),
                    _ => bug!(),
                };
                self.call(
                    expect,
                    &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ],
                    None,
                )
            }
            sym::ctlz
            | sym::ctlz_nonzero
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctpop
            | sym::bswap
            | sym::bitreverse
            | sym::rotate_left
            | sym::rotate_right
            | sym::saturating_add
            | sym::saturating_sub => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, self) {
                    Some((width, signed)) => match name {
                        sym::ctlz | sym::cttz => {
                            let y = self.const_bool(false);
                            let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        sym::ctlz_nonzero | sym::cttz_nonzero => {
                            let y = self.const_bool(true);
                            let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        sym::ctpop => self.call(
                            self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        sym::bswap => {
                            if width == 8 {
                                args[0].immediate() // byte-swapping a u8/i8 is a no-op
                            } else {
                                self.call(
                                    self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                    &[args[0].immediate()],
                                    None,
                                )
                            }
                        }
                        sym::bitreverse => self.call(
                            self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        sym::rotate_left | sym::rotate_right => {
                            let is_left = name == sym::rotate_left;
                            let val = args[0].immediate();
                            let raw_shift = args[1].immediate();
                            // rotate = funnel shift with first two args the same
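                            // e.g. `rotate_left(x, s)` becomes `llvm.fshl.iN(x, x, s)`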
                            let llvm_name =
                                &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[val, val, raw_shift], None)
                        }
                        sym::saturating_add | sym::saturating_sub => {
                            let is_add = name == sym::saturating_add;
                            let lhs = args[0].immediate();
                            let rhs = args[1].immediate();
                            let llvm_name = &format!(
                                "llvm.{}{}.sat.i{}",
                                if signed { 's' } else { 'u' },
                                if is_add { "add" } else { "sub" },
                                width
                            );
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[lhs, rhs], None)
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            _ if name_str.starts_with("simd_") => {
                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                    Ok(llval) => llval,
                    Err(()) => return,
                }
            }

            _ => bug!("unknown intrinsic '{}'", name),
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let fnname = self.get_intrinsic(&("llvm.trap"));
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: Self::Value) {
        let assume_intrinsic = self.get_intrinsic("llvm.assume");
        self.call(assume_intrinsic, &[val], None);
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        let expect = self.get_intrinsic(&"llvm.expect.i1");
        self.call(expect, &[cond, self.const_bool(expected)], None)
    }

    fn sideeffect(&mut self) {
        // This kind of check would make a ton of sense in the caller, but currently the only
        // caller of this function is in `rustc_codegen_ssa`, which is agnostic to which codegen
        // backend is being used, and so is unable to check the LLVM version itself.
        if unsafe { llvm::LLVMRustVersionMajor() } < 12 {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)
    }
}
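
// Codegens the `try` intrinsic by dispatching on the target's unwinding
// model: a plain call when panics abort, SEH-based funclets on MSVC,
// Emscripten's C++-style exceptions, or the GNU/Itanium model otherwise.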
fn try_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        bx.call(try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx().data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.is_like_emscripten {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM,
// which have support for SEH on MSVC targets. Although these instructions are
// meant to work for all targets, as of this writing LLVM does not recommend
// them: the old instructions are still better optimized.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad_rust = bx.build_sibling_block("catchpad_rust");
        let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign");
        let mut caught = bx.build_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca i8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
        //
        //   catchpad_rust:
        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   catchpad_foreign:
        //      %tok = catchpad within %cs [null, 64, null]
        //      call %catch_func(%data, null)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          void* x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void* data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          } catch(...) {
        //              catch_func(data, NULL);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(bx.type_i8p(), ptr_align);
        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 2);
        catchswitch.add_handler(cs, catchpad_rust.llbb());
        catchswitch.add_handler(cs, catchpad_foreign.llbb());

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in src/libpanic_unwind/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
        unsafe {
            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
            llvm::SetUniqueComdat(bx.llmod, tydesc);
            llvm::LLVMSetInitializer(tydesc, type_info);
        }

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        let flags = bx.const_i32(8);
        let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = catchpad_rust.load(slot, ptr_align);
        catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
        catchpad_rust.catch_ret(&funclet, caught.llbb());

        // The flag value of 64 indicates a "catch-all".
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_i8p());
        let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
        catchpad_foreign.call(catch_func, &[data, null], Some(&funclet));
        catchpad_foreign.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1
        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_i8p());
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        catch.call(catch_func, &[data, ptr], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Variant of codegen_gnu_try used for emscripten, where Rust panics are
// implemented using C++ exceptions. Here we use exceptions of a specific type
// (`struct rust_panic`) to represent Rust panics.
fn codegen_emcc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, %selector) = landingpad
        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
        //      %is_rust_panic = %selector == %rust_typeid
        //      %catch_data = alloca { i8*, i8 }
        //      %catch_data[0] = %ptr
        //      %catch_data[1] = %is_rust_panic
        //      call %catch_func(%data, %catch_data)
        //      ret 1
        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        let tydesc = bx.eh_catch_typeinfo();
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2);
        catch.add_clause(vals, tydesc);
        catch.add_clause(vals, bx.const_null(bx.type_i8p()));
        let ptr = catch.extract_value(vals, 0);
        let selector = catch.extract_value(vals, 1);

        // Check if the typeid we got is the one for a Rust panic.
        let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
        let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None);
        let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
        let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());

        // We need to pass two values to catch_func (ptr and is_rust_panic), so
        // create an alloca and pass a pointer to that.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let i8_align = bx.tcx().data_layout.i8_align.abi;
        let catch_data =
            catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
        let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
        catch.store(ptr, catch_data_0, ptr_align);
        let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
        catch.store(is_rust_panic, catch_data_1, i8_align);
        let catch_data = catch.bitcast(catch_data, bx.type_i8p());

        catch.call(catch_func, &[data, catch_data], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Helper function to give a Block to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    rust_fn_sig: ty::PolyFnSig<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
    let llfn = cx.declare_fn(name, &fn_abi);
    cx.set_frame_pointer_elimination(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    // `unsafe fn(*mut i8) -> ()`
    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    // `unsafe fn(*mut i8, *mut i8) -> ()`
    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
        [i8p, i8p].iter().cloned(),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
        vec![try_fn_ty, i8p, catch_fn_ty].into_iter(),
        tcx.types.i32,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    ));
    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}
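
// Lowers the platform-generic `simd_*` intrinsics to LLVM vector operations,
// emitting a monomorphization error (and returning `Err(())`) when an
// intrinsic is instantiated at an unsupported type.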
fn generic_simd_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    name: Symbol,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        };
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        };
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        };
    }

    let tcx = bx.tcx();
    let sig =
        tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
    let arg_tys = sig.inputs();
    let name_str = &*name.as_str();

    if name == sym::simd_select_bitmask {
        let in_ty = arg_tys[0];
        let m_len = match in_ty.kind() {
            // Note that this `.unwrap()` crashes for isize/usize, that's sort
            // of intentional as there's not currently a use case for that.
            ty::Int(i) => i.bit_width().unwrap(),
            ty::Uint(i) => i.bit_width().unwrap(),
            _ => return_error!("`{}` is not an integral type", in_ty),
        };
        require_simd!(arg_tys[1], "argument");
        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
        require!(
            // Allow masks for vectors with fewer than 8 elements to be
            // represented with a u8 or i8.
            m_len == v_len || (m_len == 8 && v_len < 8),
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        let i1 = bx.type_i1();
        let im = bx.type_ix(v_len);
        let i1xn = bx.type_vector(i1, v_len);
        let m_im = bx.trunc(args[0].immediate(), im);
        let m_i1s = bx.bitcast(m_im, i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }

    // every intrinsic below takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];

    let comparison = match name {
        sym::simd_eq => Some(hir::BinOpKind::Eq),
        sym::simd_ne => Some(hir::BinOpKind::Ne),
        sym::simd_lt => Some(hir::BinOpKind::Lt),
        sym::simd_le => Some(hir::BinOpKind::Le),
        sym::simd_gt => Some(hir::BinOpKind::Gt),
        sym::simd_ge => Some(hir::BinOpKind::Ge),
        _ => None,
    };

    let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        require!(
            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
            "expected return type with integer elements, found `{}` with non-integer `{}`",
            ret_ty,
            out_ty
        );

        return Ok(compare_simd_types(
            bx,
            args[0].immediate(),
            args[1].immediate(),
            in_elem,
            llret_ty,
            cmp_op,
        ));
    }

    if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
        let n: u64 = stripped.parse().unwrap_or_else(|_| {
            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
        });

        require_simd!(ret_ty, "return");

        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
        require!(
            out_len == n,
            "expected return type of length {}, found `{}` with length {}",
            n,
            ret_ty,
            out_len
        );
        require!(
            in_elem == out_ty,
            "expected return element type `{}` (element of input `{}`), \
             found `{}` with element type `{}`",
            in_elem,
            in_ty,
            ret_ty,
            out_ty
        );

        let total_len = u128::from(in_len) * 2;
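
        // Shuffle indices may select lanes from either input vector, so the
        // valid index range is the concatenated length `[0, 2 * in_len)`.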
        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = bx.const_get_elt(vector, i as u64);
                match bx.const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!(
                            "shuffle index #{} is out of bounds (limit {})",
                            arg_idx,
                            total_len
                        );
                        None
                    }
                    Some(idx) => Some(bx.const_i32(idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(bx.const_null(llret_ty)),
        };

        return Ok(bx.shuffle_vector(
            args[0].immediate(),
            args[1].immediate(),
            bx.const_vector(&indices),
        ));
    }

    if name == sym::simd_insert {
        require!(
            in_elem == arg_tys[2],
            "expected inserted type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            arg_tys[2]
        );
        return Ok(bx.insert_element(
            args[0].immediate(),
            args[2].immediate(),
            args[1].immediate(),
        ));
    }
    if name == sym::simd_extract {
        require!(
            ret_ty == in_elem,
            "expected return type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            ret_ty
        );
        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
    }

    if name == sym::simd_select {
        let m_elem_ty = in_elem;
        let m_len = in_len;
        require_simd!(arg_tys[1], "argument");
        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
        require!(
            m_len == v_len,
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        match m_elem_ty.kind() {
            ty::Int(_) => {}
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
        }
        // truncate the mask to a vector of i1s
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }

    if name == sym::simd_bitmask {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
        // vector mask and returns an unsigned integer containing the most
        // significant bit (MSB) of each lane.

        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
        // trailing bits.
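        //
        // For example, a 4-lane mask of i32s with lanes <0, !0, 0, !0> yields
        // the u8 value 0b0000_1010: bit i holds the MSB of lane i.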
        let expected_int_bits = in_len.max(8);
        match ret_ty.kind() {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
            _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
        }

        // Integer vector <i{in_bitwidth} x in_len>:
        let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
            ty::Int(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
            ),
            ty::Uint(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
            ),
            _ => return_error!(
                "vector argument `{}`'s element type `{}`, expected integer element type",
                in_ty,
                in_elem
            ),
        };

        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_indices =
            vec![
                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
                in_len as _
            ];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
        // Bitcast <i1 x N> to iN:
        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
        // Zero-extend iN to the bitmask type:
        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
    }

    fn simd_simple_float_intrinsic(
        name: Symbol,
        in_elem: &::rustc_middle::ty::TyS<'_>,
        in_ty: &::rustc_middle::ty::TyS<'_>,
        in_len: u64,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            };
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            };
        }

        let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
            let elem_ty = bx.cx.type_float_from_ty(*f);
            match f.bit_width() {
                32 => ("f32", elem_ty),
                64 => ("f64", elem_ty),
                _ => {
                    return_error!(
                        "unsupported element type `{}` of floating-point vector `{}`",
                        f.name_str(),
                        in_ty
                    );
                }
            }
        } else {
            return_error!("`{}` is not a floating-point type", in_ty);
        };

        let vec_ty = bx.type_vector(elem_ty, in_len);

        let (intr_name, fn_ty) = match name {
            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
            sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
            sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
            _ => return_error!("unrecognized intrinsic `{}`", name),
        };
        let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
        let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty);
        let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }

    if std::matches!(
        name,
        sym::simd_ceil
            | sym::simd_fabs
            | sym::simd_fcos
            | sym::simd_fexp2
            | sym::simd_fexp
            | sym::simd_flog10
            | sym::simd_flog2
            | sym::simd_flog
            | sym::simd_floor
            | sym::simd_fma
            | sym::simd_fpow
            | sym::simd_fpowi
            | sym::simd_fsin
            | sym::simd_fsqrt
    ) {
        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
    }

    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
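    //
    // For example, a 4-lane vector of `*const f32` is rendered as "v4p0f32".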
    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match *elem_ty.kind() {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }

    fn llvm_vector_ty(
        cx: &CodegenCx<'ll, '_>,
        elem_ty: Ty<'_>,
        vec_len: u64,
        mut no_pointers: usize,
    ) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match *elem_ty.kind() {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len)
    }

    if name == sym::simd_gather {
        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
        //             mask: <N x i{M}>) -> <N x T>
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
        let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
        require!(
            in_len == out_len,
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            out_len
        );
        require!(
            in_len == out_len2,
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            out_len2
        );

        // The return type must match the first argument type
        require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind() {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind() {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
        let (pointer_count, underlying_ty) = match element_ty1.kind() {
            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*_ {}`",
                    element_ty1,
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    element_ty1,
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
        assert_eq!(underlying_ty, non_ptr(element_ty0));

        // The element type of the third argument must be a signed integer type of any width:
        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
        match element_ty2.kind() {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    element_ty2,
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            llvm::UnnamedAddr::No,
            bx.type_func(
                &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
                llvm_elem_vec_ty,
            ),
        );
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
        return Ok(v);
    }

    if name == sym::simd_scatter {
        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
        //             mask: <N x i{M}>) -> ()
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
        let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
        require!(
            in_len == element_len1,
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            element_len1
        );
        require!(
            in_len == element_len2,
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            element_len2
        );

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind() {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind() {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
        let (pointer_count, underlying_ty) = match element_ty1.kind() {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
                (ptr_count(element_ty1), non_ptr(element_ty1))
            }
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*mut {}`",
                    element_ty1,
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    element_ty1,
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
        assert_eq!(underlying_ty, non_ptr(element_ty0));

        // The element type of the third argument must be a signed integer type of any width:
        match element_ty2.kind() {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    element_ty2,
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            llvm::UnnamedAddr::No,
            bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
        );
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
        return Ok(v);
    }

    macro_rules! arith_red {
        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
         $identity:expr) => {
            if name == sym::$name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind() {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            Ok(bx.$op(args[1].immediate(), r))
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    }
                    ty::Float(f) => {
                        let acc = if $ordered {
                            // ordered arithmetic reductions take an accumulator
                            args[1].immediate()
                        } else {
                            // unordered arithmetic reductions use the identity accumulator
                            match f.bit_width() {
                                32 => bx.const_real(bx.type_f32(), $identity),
                                64 => bx.const_real(bx.type_f64(), $identity),
                                v => return_error!(
                                    r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                    sym::$name,
                                    in_ty,
                                    in_elem,
                                    v,
                                    ret_ty
                                ),
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }
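
    // Ordered reductions (`*_ordered`) thread the caller-supplied accumulator
    // through the reduction; unordered ones start from the identity element
    // and use the reassociating `fast` float reductions.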
    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
    arith_red!(
        simd_reduce_add_unordered: vector_reduce_add,
        vector_reduce_fadd_fast,
        false,
        add,
        0.0
    );
    arith_red!(
        simd_reduce_mul_unordered: vector_reduce_mul,
        vector_reduce_fmul_fast,
        false,
        mul,
        1.0
    );

    macro_rules! minmax_red {
        ($name:ident: $int_red:ident, $float_red:ident) => {
            if name == sym::$name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind() {
                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);

    minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);

    macro_rules! bitwise_red {
        ($name:ident : $red:ident, $boolean:expr) => {
            if name == sym::$name {
                let input = if !$boolean {
                    require!(
                        ret_ty == in_elem,
                        "expected return type `{}` (element of input `{}`), found `{}`",
                        in_elem,
                        in_ty,
                        ret_ty
                    );
                    args[0].immediate()
                } else {
                    match in_elem.kind() {
                        ty::Int(_) | ty::Uint(_) => {}
                        _ => return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            sym::$name,
                            in_ty,
                            in_elem,
                            ret_ty
                        ),
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.kind() {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
    bitwise_red!(simd_reduce_any: vector_reduce_or, true);

    if name == sym::simd_cast {
        require_simd!(ret_ty, "return");
        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        // casting cares about nominal type, not just structural type
        if in_elem == out_elem {
            return Ok(args[0].immediate());
        }

        enum Style {
            Float,
            Int(/* is signed? */ bool),
            Unsupported,
        }

        let (in_style, in_width) = match in_elem.kind() {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };
        let (out_style, out_width) = match out_elem.kind() {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };
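
        // Pick the LLVM cast from the (input, output) style pair: trunc/sext/
        // zext between integers, sitofp/uitofp and fptosi/fptoui between
        // integers and floats, and fptrunc/fpext between floats.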
        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => {
                        if in_is_signed {
                            bx.sext(args[0].immediate(), llret_ty)
                        } else {
                            bx.zext(args[0].immediate(), llret_ty)
                        }
                    }
                });
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
                });
            }
            _ => { /* Unsupported. Fallthrough. */ }
        }
        require!(
            false,
            "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
            in_ty,
            in_elem,
            ret_ty,
            out_elem
        );
    }

    macro_rules! arith_binary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(
                    false,
                    "unsupported operation on `{}` with element `{}`",
                    in_ty,
                    in_elem
                )
            })*
        }
    }

    arith_binary! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }

    macro_rules! arith_unary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate()))
                    })*
                    _ => {},
                }
                require!(
                    false,
                    "unsupported operation on `{}` with element `{}`",
                    in_ty,
                    in_elem
                )
            })*
        }
    }

    arith_unary! {
        simd_neg: Int => neg, Float => fneg;
    }

    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == sym::simd_saturating_add;
        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
        let (signed, elem_width, elem_ty) = match *in_elem.kind() {
            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
            ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
            _ => {
                return_error!(
                    "expected element type `{}` of vector type `{}` \
                     to be a signed or unsigned integer type",
                    arg_tys[0].simd_size_and_type(bx.tcx()).1,
                    arg_tys[0]
                );
            }
        };
        let llvm_intrinsic = &format!(
            "llvm.{}{}.sat.v{}i{}",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
            in_len,
            elem_width
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

        let f = bx.declare_cfn(
            &llvm_intrinsic,
            llvm::UnnamedAddr::No,
            bx.type_func(&[vec_ty, vec_ty], vec_ty),
        );
        let v = bx.call(f, &[lhs, rhs], None);
        return Ok(v);
    }

    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int Ty, and whether it's signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function in the codebase;
// investigate sharing one of the existing helpers instead.
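//
// For example, `i32` maps to `Some((32, true))`, `usize` to
// `Some((pointer_width, false))`, and `bool` to `None`.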
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}