--- a/src/intrinsics/mod.rs
+++ b/src/intrinsics/mod.rs
- macro_rules! intrinsic_pat {
- (_) => {
- _
- };
- ($name:ident) => {
- sym::$name
- };
- (kw.$name:ident) => {
- kw::$name
- };
- ($name:literal) => {
- $name
- };
- }
-
- macro_rules! intrinsic_arg {
- (o $fx:expr, $arg:ident) => {};
- (c $fx:expr, $arg:ident) => {
- let $arg = codegen_operand($fx, $arg);
- };
- (v $fx:expr, $arg:ident) => {
- let $arg = codegen_operand($fx, $arg).load_scalar($fx);
- };
- }
-
- macro_rules! intrinsic_match {
- ($fx:expr, $intrinsic:expr, $args:expr,
- _ => $unknown:block;
- $(
- $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
- )*) => {
- match $intrinsic {
- $(
- $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
- if let [$($arg),*] = $args {
- $(intrinsic_arg!($a $fx, $arg);)*
- $content
- } else {
- bug!("wrong number of args for intrinsic {:?}", $intrinsic);
- }
- }
- )*
- _ => $unknown,
- }
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
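+//!
+//! SIMD intrinsics are handled in the `simd` module and LLVM intrinsics in the `llvm`
+//! module; float intrinsics are lowered to libcalls, and the remaining intrinsics are
+//! matched individually in `codegen_regular_intrinsic_call`.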
+
- intrinsic_match! {
- fx, intrinsic, args,
- _ => {
- fx.tcx.sess.span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
- };
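++// Destructures the intrinsic arguments into named `CValue`s via `codegen_operand`,
++// or bugs out when the argument count does not match the pattern.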
++macro_rules! intrinsic_args {
++ ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
++ #[allow(unused_parens)]
++ let ($($arg),*) = if let [$($arg),*] = $args {
++ ($(codegen_operand($fx, $arg)),*)
++ } else {
++ $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
++ };
+ }
+}
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
++fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
++ bug!("wrong number of args for intrinsic {}", intrinsic);
++}
++
+fn report_atomic_type_validation_error<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'tcx>,
+) {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ intrinsic, ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
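+// Call `f` once per lane of `val`, writing each result to the corresponding lane of
+// `ret`. `val` and `ret` must have the same lane count.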
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
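+// Call `f` once per pair of lanes from `x` and `y`, writing each result to the
+// corresponding lane of `ret`.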
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
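+// Fold all lanes of `val` into a single scalar using `f`, seeded with `acc` when given
+// and with the first lane otherwise.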
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
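+// Expand a boolean into all zeros (false) or all ones (true) of the given type,
+// bitcasting the result for float types.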
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ val: Value,
+) -> Value {
+ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ res
+}
+
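+// Entry point for intrinsic codegen. Diverging intrinsics (those without a return
+// target) are handled inline; everything else is dispatched to the SIMD, float or
+// regular intrinsic handlers below.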
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
+ let target = if let Some(target) = target {
+ target
+ } else {
+        // Insert non-returning intrinsics here
+ match intrinsic {
+ sym::abort => {
+ fx.bcx.ins().trap(TrapCode::User(0));
+ }
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
+ }
+            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
+ }
+ return;
+ };
+
+ if intrinsic.as_str().starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(
+ fx,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ source_info.span,
+ );
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ codegen_regular_intrinsic_call(
+ fx,
+ instance,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ Some(target),
+ source_info,
+ );
+ }
+}
+
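+// Lower float intrinsics to libcalls (libm functions, or compiler-builtins for the
+// `powi*` intrinsics). Returns false when the intrinsic is not handled here.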
+fn codegen_float_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+) -> bool {
+ let (name, arg_count, ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ _ => return false,
+ };
+
+ if args.len() != arg_count {
+ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
+ let (a, b, c);
+ let args = match args {
+ [x] => {
+ a = [codegen_operand(fx, x)];
+ &a as &[_]
+ }
+ [x, y] => {
+ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ &b
+ }
+ [x, y, z] => {
+ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ &c
+ }
+ _ => unreachable!(),
+ };
+
+ let res = fx.easy_call(name, &args, ty);
+ ret.write_cvalue(fx, res);
+
+ true
+}
+
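+// Codegen all remaining intrinsics, i.e. those that neither diverge nor have a
+// dedicated SIMD or float handler.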
+fn codegen_regular_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ intrinsic: Symbol,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ destination: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
- assume, (c _a) {};
- likely | unlikely, (c a) {
++ match intrinsic {
++ sym::assume => {
++ intrinsic_args!(fx, args => (_a); intrinsic);
++ }
++ sym::likely | sym::unlikely => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- };
- breakpoint, () {
+ ret.write_cvalue(fx, a);
- };
- copy | copy_nonoverlapping, (v src, v dst, v count) {
++ }
++ sym::breakpoint => {
++ intrinsic_args!(fx, args => (); intrinsic);
++
+ fx.bcx.ins().debugtrap();
- let byte_amount = if elem_size != 1 {
- fx.bcx.ins().imul_imm(count, elem_size as i64)
- } else {
- count
- };
++ }
++ sym::copy | sym::copy_nonoverlapping => {
++ intrinsic_args!(fx, args => (src, dst, count); intrinsic);
++ let src = src.load_scalar(fx);
++ let dst = dst.load_scalar(fx);
++ let count = count.load_scalar(fx);
++
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
- };
- // NOTE: the volatile variants have src and dst swapped
- volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
++ let byte_amount =
++ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
- let byte_amount = if elem_size != 1 {
- fx.bcx.ins().imul_imm(count, elem_size as i64)
- } else {
- count
- };
++ }
++ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
++ // NOTE: the volatile variants have src and dst swapped
++ intrinsic_args!(fx, args => (dst, src, count); intrinsic);
++ let dst = dst.load_scalar(fx);
++ let src = src.load_scalar(fx);
++ let count = count.load_scalar(fx);
++
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
- };
- size_of_val, (c ptr) {
++ let byte_amount =
++ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
- fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.size.bytes() as i64)
++ }
++ sym::size_of_val => {
++ intrinsic_args!(fx, args => (ptr); intrinsic);
++
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
- };
- min_align_of_val, (c ptr) {
++ fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
- fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
++ }
++ sym::min_align_of_val => {
++ intrinsic_args!(fx, args => (ptr); intrinsic);
++
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
- };
++ fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
- vtable_size, (v vtable) {
++ }
++
++ sym::vtable_size => {
++ intrinsic_args!(fx, args => (vtable); intrinsic);
++ let vtable = vtable.load_scalar(fx);
+
- };
+ let size = crate::vtable::size_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
- vtable_align, (v vtable) {
++ }
++
++ sym::vtable_align => {
++ intrinsic_args!(fx, args => (vtable); intrinsic);
++ let vtable = vtable.load_scalar(fx);
+
- };
+ let align = crate::vtable::min_align_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
- unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
- | unchecked_shl | unchecked_shr, (c x, c y) {
++ }
++
++ sym::unchecked_add
++ | sym::unchecked_sub
++ | sym::unchecked_mul
++ | sym::unchecked_div
++ | sym::exact_div
++ | sym::unchecked_rem
++ | sym::unchecked_shl
++ | sym::unchecked_shr => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
+
- };
- add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
- let res = crate::num::codegen_checked_int_binop(
- fx,
- bin_op,
- x,
- y,
- );
++ }
++ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
++
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
- };
- saturating_add | saturating_sub, (c lhs, c rhs) {
++ let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
- let checked_res = crate::num::codegen_checked_int_binop(
- fx,
- bin_op,
- lhs,
- rhs,
- );
++ }
++ sym::saturating_add | sym::saturating_sub => {
++ intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
++
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
+ let signed = type_sign(lhs.layout().ty);
+
- let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
++ let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
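+            // On overflow, select the saturation bound: unsigned add saturates to the
+            // maximum, unsigned sub to the minimum, and the signed cases pick the bound
+            // based on the sign of `rhs`.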
+ let val = match (intrinsic, signed) {
+ (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (sym::saturating_add, true) => {
+ let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
++ let rhs_ge_zero =
++ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ (sym::saturating_sub, true) => {
+ let rhs = rhs.load_scalar(fx);
- };
- rotate_left, (c x, v y) {
++ let rhs_ge_zero =
++ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ let res = CValue::by_val(val, lhs.layout());
+
+ ret.write_cvalue(fx, res);
- };
- rotate_right, (c x, v y) {
++ }
++ sym::rotate_left => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
++ let y = y.load_scalar(fx);
++
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
- };
++ }
++ sym::rotate_right => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
++ let y = y.load_scalar(fx);
++
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
- offset | arith_offset, (c base, v offset) {
++ }
+
+        // The only difference between offset and arith_offset is regarding UB. Because
+        // Cranelift doesn't have UB, both are codegen'ed the same way.
- };
++ sym::offset | sym::arith_offset => {
++ intrinsic_args!(fx, args => (base, offset); intrinsic);
++ let offset = offset.load_scalar(fx);
++
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
- transmute, (c from) {
++ }
++
++ sym::transmute => {
++ intrinsic_args!(fx, args => (from); intrinsic);
+
- };
- write_bytes | volatile_set_memory, (c dst, v val, v count) {
+ ret.write_cvalue_transmute(fx, from);
- };
- ctlz | ctlz_nonzero, (c arg) {
++ }
++ sym::write_bytes | sym::volatile_set_memory => {
++ intrinsic_args!(fx, args => (dst, val, count); intrinsic);
++ let val = val.load_scalar(fx);
++ let count = count.load_scalar(fx);
++
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
- };
- cttz | cttz_nonzero, (c arg) {
++ }
++ sym::ctlz | sym::ctlz_nonzero => {
++ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
++
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = fx.bcx.ins().clz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
- };
- ctpop, (c arg) {
++ }
++ sym::cttz | sym::cttz_nonzero => {
++ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
++
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = fx.bcx.ins().ctz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
- };
- bitreverse, (c arg) {
++ }
++ sym::ctpop => {
++ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
++
+ let res = fx.bcx.ins().popcnt(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
- };
- bswap, (c arg) {
++ }
++ sym::bitreverse => {
++ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
++
+ let res = fx.bcx.ins().bitrev(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
- };
- assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
++ }
++ sym::bswap => {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
++ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
++
+ let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+ ret.write_cvalue(fx, res);
- &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
++ }
++ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
++ intrinsic_args!(fx, args => (); intrinsic);
++
+ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ source_info,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
- &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
++ &format!(
++ "attempted to zero-initialize type `{}`, which is invalid",
++ layout.ty
++ ),
+ source_info,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
- };
++ &format!(
++ "attempted to leave type `{}` uninitialized, which is invalid",
++ layout.ty
++ ),
+ source_info,
+ )
+ });
+ return;
+ }
- volatile_load | unaligned_volatile_load, (c ptr) {
++ }
++
++ sym::volatile_load | sym::unaligned_volatile_load => {
++ intrinsic_args!(fx, args => (ptr); intrinsic);
+
- let inner_layout =
- fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
- };
- volatile_store | unaligned_volatile_store, (v ptr, c val) {
++ let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
- };
++ }
++ sym::volatile_store | sym::unaligned_volatile_store => {
++ intrinsic_args!(fx, args => (ptr, val); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
- pref_align_of | needs_drop | type_id | type_name | variant_count, () {
++ }
++
++ sym::pref_align_of
++ | sym::needs_drop
++ | sym::type_id
++ | sym::type_name
++ | sym::variant_count => {
++ intrinsic_args!(fx, args => (); intrinsic);
+
- let val = crate::constant::codegen_const_value(
- fx,
- const_val,
- ret.layout().ty,
- );
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
- };
++ let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
+ ret.write_cvalue(fx, val);
- ptr_offset_from | ptr_offset_from_unsigned, (v ptr, v base) {
++ }
+
- };
++ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
++ intrinsic_args!(fx, args => (ptr, base); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++ let base = base.load_scalar(fx);
+ let ty = substs.type_at(0);
+
+ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff_bytes = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = if intrinsic == sym::ptr_offset_from_unsigned {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+ // but unsigned is slightly easier to codegen, so might as well.
+ CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+ } else {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+ CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+ };
+ ret.write_cvalue(fx, val);
- ptr_guaranteed_eq, (c a, c b) {
++ }
++
++ sym::ptr_guaranteed_eq => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
+
- };
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
- ptr_guaranteed_ne, (c a, c b) {
++ }
++
++ sym::ptr_guaranteed_ne => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
+
- };
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
- caller_location, () {
++ }
++
++ sym::caller_location => {
++ intrinsic_args!(fx, args => (); intrinsic);
+
- };
+ let caller_location = fx.get_caller_location(source_info);
+ ret.write_cvalue(fx, caller_location);
- _ if intrinsic.as_str().starts_with("atomic_fence"), () {
++ }
++
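++        // All `atomic_*` intrinsics encode their memory ordering as a name suffix, so
++        // they are matched by prefix; the ordering suffix itself is not inspected here.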
++ _ if intrinsic.as_str().starts_with("atomic_fence") => {
++ intrinsic_args!(fx, args => (); intrinsic);
+
- };
- _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+ fx.bcx.ins().fence();
- };
- _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
++ intrinsic_args!(fx, args => (); intrinsic);
++
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
- fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
++ }
++ _ if intrinsic.as_str().starts_with("atomic_load") => {
++ intrinsic_args!(fx, args => (ptr); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
- };
- _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
++ fx.tcx
++ .sess
++ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+ let clif_ty = fx.clif_type(ty).unwrap();
+
+ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
- fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
++ }
++ _ if intrinsic.as_str().starts_with("atomic_store") => {
++ intrinsic_args!(fx, args => (ptr, val); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
- };
- _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
++ fx.tcx
++ .sess
++ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
- };
- _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
++ }
++ _ if intrinsic.as_str().starts_with("atomic_xchg") => {
++ intrinsic_args!(fx, args => (ptr, new); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
++ }
++ _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
++ // both atomic_cxchg_* and atomic_cxchgweak_*
++ intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
- };
++ let ret_val =
++ CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val)
- _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
++ }
++
++ _ if intrinsic.as_str().starts_with("atomic_xadd") => {
++ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
++ let ptr = ptr.load_scalar(fx);
+
- let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
- };
- _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
++ let old =
++ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
++ }
++ _ if intrinsic.as_str().starts_with("atomic_xsub") => {
++ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
- };
- _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
++ let old =
++ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_and") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_or") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_xor") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_nand") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_max") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_umax") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
- _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
++ }
++ _ if intrinsic.as_str().starts_with("atomic_min") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- };
++ }
++ _ if intrinsic.as_str().starts_with("atomic_umin") => {
++ intrinsic_args!(fx, args => (ptr, src); intrinsic);
++ let ptr = ptr.load_scalar(fx);
++
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
- minnumf32, (v a, v b) {
++ }
++
++ sym::minnumf32 => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
++ let a = a.load_scalar(fx);
++ let b = b.load_scalar(fx);
+
- };
- minnumf64, (v a, v b) {
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
- };
- maxnumf32, (v a, v b) {
++ }
++ sym::minnumf64 => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
++ let a = a.load_scalar(fx);
++ let b = b.load_scalar(fx);
++
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
- };
- maxnumf64, (v a, v b) {
++ }
++ sym::maxnumf32 => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
++ let a = a.load_scalar(fx);
++ let b = b.load_scalar(fx);
++
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
- };
++ }
++ sym::maxnumf64 => {
++ intrinsic_args!(fx, args => (a, b); intrinsic);
++ let a = a.load_scalar(fx);
++ let b = b.load_scalar(fx);
++
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
- kw.Try, (v f, v data, v _catch_fn) {
++ }
++
++ kw::Try => {
++ intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
++ let f = f.load_scalar(fx);
++ let data = data.load_scalar(fx);
++ let _catch_fn = catch_fn.load_scalar(fx);
+
- };
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
- fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
- let res = crate::num::codegen_float_binop(fx, match intrinsic {
- sym::fadd_fast => BinOp::Add,
- sym::fsub_fast => BinOp::Sub,
- sym::fmul_fast => BinOp::Mul,
- sym::fdiv_fast => BinOp::Div,
- sym::frem_fast => BinOp::Rem,
- _ => unreachable!(),
- }, x, y);
++ }
+
- };
- float_to_int_unchecked, (v f) {
++ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
++
++ let res = crate::num::codegen_float_binop(
++ fx,
++ match intrinsic {
++ sym::fadd_fast => BinOp::Add,
++ sym::fsub_fast => BinOp::Sub,
++ sym::fmul_fast => BinOp::Mul,
++ sym::fdiv_fast => BinOp::Div,
++ sym::frem_fast => BinOp::Rem,
++ _ => unreachable!(),
++ },
++ x,
++ y,
++ );
+ ret.write_cvalue(fx, res);
- };
++ }
++ sym::float_to_int_unchecked => {
++ intrinsic_args!(fx, args => (f); intrinsic);
++ let f = f.load_scalar(fx);
++
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
- raw_eq, (v lhs_ref, v rhs_ref) {
++ }
++
++ sym::raw_eq => {
++ intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
++ let lhs_ref = lhs_ref.load_scalar(fx);
++ let rhs_ref = rhs_ref.load_scalar(fx);
+
- let is_eq_value =
- if size == Size::ZERO {
- // No bytes means they're trivially equal
- fx.bcx.ins().iconst(types::I8, 1)
- } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
- // Can't use `trusted` for these loads; they could be unaligned.
- let mut flags = MemFlags::new();
- flags.set_notrap();
- let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
- let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
- let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
- fx.bcx.ins().bint(types::I8, eq)
- } else {
- // Just call `memcmp` (like slices do in core) when the
- // size is too large or it's not a power-of-two.
- let signed_bytes = i64::try_from(size.bytes()).unwrap();
- let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
- let params = vec![AbiParam::new(fx.pointer_type); 3];
- let returns = vec![AbiParam::new(types::I32)];
- let args = &[lhs_ref, rhs_ref, bytes_val];
- let cmp = fx.lib_call("memcmp", params, returns, args)[0];
- let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
- fx.bcx.ins().bint(types::I8, eq)
- };
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
+ // FIXME add and use emit_small_memcmp
- };
++ let is_eq_value = if size == Size::ZERO {
++ // No bytes means they're trivially equal
++ fx.bcx.ins().iconst(types::I8, 1)
++ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
++ // Can't use `trusted` for these loads; they could be unaligned.
++ let mut flags = MemFlags::new();
++ flags.set_notrap();
++ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
++ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
++ let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
++ fx.bcx.ins().bint(types::I8, eq)
++ } else {
++ // Just call `memcmp` (like slices do in core) when the
++ // size is too large or it's not a power-of-two.
++ let signed_bytes = i64::try_from(size.bytes()).unwrap();
++ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
++ let params = vec![AbiParam::new(fx.pointer_type); 3];
++ let returns = vec![AbiParam::new(types::I32)];
++ let args = &[lhs_ref, rhs_ref, bytes_val];
++ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
++ let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
++ fx.bcx.ins().bint(types::I8, eq)
++ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
- const_allocate, (c _size, c _align) {
++ }
++
++ sym::const_allocate => {
++ intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
- };
+ // returns a null pointer at runtime.
+ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
- const_deallocate, (c _ptr, c _size, c _align) {
++ }
+
- };
++ sym::const_deallocate => {
++ intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+ // nop at runtime.
- black_box, (c a) {
++ }
++
++ sym::black_box => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- };
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
- va_copy, (o _dest, o _src) {
++ }
+
+ // FIXME implement variadics in cranelift
- };
- va_arg | va_end, (o _valist) {
- fx.tcx.sess.span_fatal(
- source_info.span,
- "Defining variadic functions is not yet supported by Cranelift",
- );
- };
++ sym::va_copy | sym::va_arg | sym::va_end => {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
++ }
++
++ _ => {
++ fx.tcx
++ .sess
++ .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
++ }
+ }
+
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+}
--- a/src/intrinsics/simd.rs
+++ b/src/intrinsics/simd.rs
- intrinsic_match! {
- fx, intrinsic, args,
- _ => {
- fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
- };
+//! Codegen `extern "platform-intrinsic"` intrinsics.
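+//!
+//! Most of these are lowered to scalar per-lane operations using the
+//! `simd_for_each_lane` helpers from the parent module.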
+
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::Symbol;
+
+use super::*;
+use crate::prelude::*;
+
+fn report_simd_type_validation_error(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'_>,
+) {
+ fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
- simd_cast, (c a) {
++ match intrinsic {
++ sym::simd_cast => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- };
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
+ let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
+
+ let from_signed = type_sign(lane_ty);
+ let to_signed = type_sign(ret_lane_ty);
+
+ clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
+ });
- simd_eq | simd_ne | simd_lt | simd_le | simd_gt | simd_ge, (c x, c y) {
++ }
++
++ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
+
- (ty::Uint(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
- (ty::Int(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
- (ty::Int(_), sym::simd_lt) => fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane),
++ (ty::Uint(_), sym::simd_ne) => {
++ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
++ }
+ (ty::Uint(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
- (ty::Float(_), sym::simd_eq) => fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane),
- (ty::Float(_), sym::simd_ne) => fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane),
- (ty::Float(_), sym::simd_lt) => fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane),
++ (ty::Int(_), sym::simd_ne) => {
++ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
++ }
++ (ty::Int(_), sym::simd_lt) => {
++ fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
++ }
+ (ty::Int(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
- (ty::Float(_), sym::simd_gt) => fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane),
++ (ty::Float(_), sym::simd_eq) => {
++ fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
++ }
++ (ty::Float(_), sym::simd_ne) => {
++ fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
++ }
++ (ty::Float(_), sym::simd_lt) => {
++ fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
++ }
+ (ty::Float(_), sym::simd_le) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
+ }
- };
++ (ty::Float(_), sym::simd_gt) => {
++ fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
++ }
+ (ty::Float(_), sym::simd_ge) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ _ => unreachable!(),
+ };
+
+ let ty = fx.clif_type(res_lane_ty).unwrap();
+
+ let res_lane = fx.bcx.ins().bint(ty, res_lane);
+ fx.bcx.ins().ineg(res_lane)
+ });
- _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
++ }
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
- len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
++ _ if intrinsic.as_str().starts_with("simd_shuffle") => {
++ let (x, y, idx) = match args {
++ [x, y, idx] => (x, y, idx),
++ _ => {
++ bug!("wrong number of args for intrinsic {intrinsic}");
++ }
++ };
++ let x = codegen_operand(fx, x);
++ let y = codegen_operand(fx, y);
++
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+            // If there is no suffix, the index array length gives the lane count.
+            // Otherwise this is the older "simd_shuffleN" form and the lane count is
+            // parsed from the suffix.
+ let n: u16 = if intrinsic == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ match idx_ty.kind() {
- }).try_into().unwrap()
- }
++ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
++ .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
++ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
- let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
++ })
++ .try_into()
++ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ idx_ty,
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+ } else {
+ intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
+ };
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
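+            // Decode the constant index array from the MIR constant's allocation,
+            // reading each `u32` element as a lane index.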
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
- let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
++ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
++ .expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
- (0..ret_lane_count).map(|i| {
- let i = usize::try_from(i).unwrap();
- let idx = rustc_middle::mir::interpret::read_target_uint(
- fx.tcx.data_layout.endian,
- &idx_bytes[4*i.. 4*i + 4],
- ).expect("read_target_uint");
- u16::try_from(idx).expect("try_from u32")
- }).collect::<Vec<u16>>()
++ let size = Size::from_bytes(
++ 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
++ );
+ alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
- };
++ (0..ret_lane_count)
++ .map(|i| {
++ let i = usize::try_from(i).unwrap();
++ let idx = rustc_middle::mir::interpret::read_target_uint(
++ fx.tcx.data_layout.endian,
++ &idx_bytes[4 * i..4 * i + 4],
++ )
++ .expect("read_target_uint");
++ u16::try_from(idx).expect("try_from u32")
++ })
++ .collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
- simd_insert, (c base, o idx, c val) {
++ }
++
++ sym::simd_insert => {
++ let (base, idx, val) = match args {
++ [base, idx, val] => (base, idx, val),
++ _ => {
++ bug!("wrong number of args for intrinsic {intrinsic}");
++ }
++ };
++ let base = codegen_operand(fx, base);
++ let val = codegen_operand(fx, val);
+
- let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ // FIXME validate
- fx.tcx.sess.span_fatal(
- span,
- "Index argument for `simd_insert` is not a constant",
- );
++ let idx_const = if let Some(idx_const) =
++ crate::constant::mir_operand_get_const_val(fx, idx)
++ {
+ idx_const
+ } else {
- let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
++ fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
+ };
+
- fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
++ let idx = idx_const
++                .try_to_bits(Size::from_bytes(4 /* u32 */))
++ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
- };
++ fx.tcx.sess.span_fatal(
++ fx.mir.span,
++ &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
++ );
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
- simd_extract, (c v, o idx) {
++ }
++
++ sym::simd_extract => {
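++ // As with simd_insert, `idx` stays a bare MIR operand. Unlike there, a
++ // non-constant index is only a warning: the generated code traps at
++ // runtime instead of aborting compilation.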
++ let (v, idx) = match args {
++ [v, idx] => (v, idx),
++ _ => {
++ bug!("wrong number of args for intrinsic {intrinsic}");
++ }
++ };
++ let v = codegen_operand(fx, v);
+
- let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
- fx.tcx.sess.span_warn(
- span,
- "Index argument for `simd_extract` is not a constant",
- );
++ let idx_const = if let Some(idx_const) =
++ crate::constant::mir_operand_get_const_val(fx, idx)
++ {
+ idx_const
+ } else {
- let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
++ fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
- fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
++ let idx = idx_const
++ .try_to_bits(Size::from_bytes(4 /* u32 */))
++ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
- };
++ fx.tcx.sess.span_fatal(
++ fx.mir.span,
++ &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
++ );
+ }
+
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
+ ret.write_cvalue(fx, ret_lane);
- simd_neg, (c a) {
++ }
++
++ sym::simd_neg => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
- match lane_ty.kind() {
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
- }
- });
- };
-
- simd_add | simd_sub | simd_mul | simd_div | simd_rem
- | simd_shl | simd_shr | simd_and | simd_or | simd_xor, (c x, c y) {
- if !x.layout().ty.is_simd() {
- report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
- return;
- }
++ simd_for_each_lane(
++ fx,
++ a,
++ ret,
++ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
- simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| match (
- lane_ty.kind(),
- intrinsic,
- ) {
- (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
- (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
- (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
- (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
- (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
-
- (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
- (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
- (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
- (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
- (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
-
- (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
- (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
- (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
- (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
- (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
- "fmodf",
- vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
- vec![AbiParam::new(types::F32)],
- &[x_lane, y_lane],
- )[0],
- (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
- "fmod",
- vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
- vec![AbiParam::new(types::F64)],
- &[x_lane, y_lane],
- )[0],
-
- (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
- (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
- (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
- (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
- (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
-
- (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
- (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
- (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
- (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
- (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
-
- _ => unreachable!(),
++ },
++ );
++ }
++
++ sym::simd_add
++ | sym::simd_sub
++ | sym::simd_mul
++ | sym::simd_div
++ | sym::simd_rem
++ | sym::simd_shl
++ | sym::simd_shr
++ | sym::simd_and
++ | sym::simd_or
++ | sym::simd_xor => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME use vector instructions when possible
- };
++ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
++ match (lane_ty.kind(), intrinsic) {
++ (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
++
++ (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
++ (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
++ (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
++ (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
++ (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
++
++ (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
++ (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
++ (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
++ (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
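++ // Cranelift has no float remainder instruction, so float `simd_rem` is
++ // lowered to a libm call.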
++ (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
++ "fmodf",
++ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
++ vec![AbiParam::new(types::F32)],
++ &[x_lane, y_lane],
++ )[0],
++ (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
++ "fmod",
++ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
++ vec![AbiParam::new(types::F64)],
++ &[x_lane, y_lane],
++ )[0],
++
++ (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
++
++ (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
++ (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
++ (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
++ (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
++ (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
++
++ _ => unreachable!(),
++ }
+ });
- simd_fma, (c a, c b, c c) {
++ }
++
++ sym::simd_fma => {
++ intrinsic_args!(fx, args => (a, b, c); intrinsic);
+
- ty::Float(FloatTy::F32) => fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty),
- ty::Float(FloatTy::F64) => fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty),
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ assert_eq!(a.layout(), ret.layout());
+
+ let layout = a.layout();
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+
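++ // There is no vector FMA here; each lane is computed with a libm call
++ // (`fmaf` for f32, `fma` for f64).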
+ for lane in 0..lane_count {
+ let a_lane = a.value_lane(fx, lane);
+ let b_lane = b.value_lane(fx, lane);
+ let c_lane = c.value_lane(fx, lane);
+
+ let res_lane = match lane_ty.kind() {
- };
++ ty::Float(FloatTy::F32) => {
++ fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
++ }
++ ty::Float(FloatTy::F64) => {
++ fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
++ }
+ _ => unreachable!(),
+ };
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
- simd_fmin | simd_fmax, (c x, c y) {
++ }
++
++ sym::simd_fmin | sym::simd_fmax => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
+
- ty::Float(_) => {},
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match lane_ty.kind() {
- };
++ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
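++ // The shared scalar helpers keep NaN handling consistent with the
++ // non-SIMD min/max intrinsics.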
+ match intrinsic {
+ sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
+ sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
+ _ => unreachable!(),
+ }
+ });
- simd_round, (c a) {
++ }
++
++ sym::simd_round => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
- match lane_ty.kind() {
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
- }
- });
- };
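++ // `round` ties away from zero, which Cranelift's `nearest` (ties to even)
++ // does not, hence the libm calls.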
++ simd_for_each_lane(
++ fx,
++ a,
++ ret,
++ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_ty),
- simd_fabs | simd_fsqrt | simd_ceil | simd_floor | simd_trunc, (c a) {
++ },
++ );
++ }
++
++ sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
++ intrinsic_args!(fx, args => (a); intrinsic);
+
- ty::Float(_) => {},
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
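++ // Each of these maps directly onto a single Cranelift float instruction.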
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+ match lane_ty.kind() {
- };
++ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fabs => fx.bcx.ins().fabs(lane),
+ sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
+ sym::simd_ceil => fx.bcx.ins().ceil(lane),
+ sym::simd_floor => fx.bcx.ins().floor(lane),
+ sym::simd_trunc => fx.bcx.ins().trunc(lane),
+ _ => unreachable!(),
+ }
+ });
- simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
++ }
++
++ sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
++ intrinsic_args!(fx, args => (v, acc); intrinsic);
++ let acc = acc.load_scalar(fx);
+
- };
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
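++ // The ordered and unordered variants lower identically: a sequential fold
++ // over the lanes, seeded with `acc`.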
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
- simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
++ }
++
++ sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
++ intrinsic_args!(fx, args => (v, acc); intrinsic);
++ let acc = acc.load_scalar(fx);
+
- };
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
- simd_reduce_all, (c v) {
++ }
++
++ sym::simd_reduce_all => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
- simd_reduce_any, (c v) {
++ }
++
++ sym::simd_reduce_any => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
- simd_reduce_and, (c v) {
++ }
++
++ sym::simd_reduce_and => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
- simd_reduce_or, (c v) {
++ }
++
++ sym::simd_reduce_or => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
- simd_reduce_xor, (c v) {
++ }
++
++ sym::simd_reduce_xor => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
- simd_reduce_min, (c v) {
++ }
++
++ sym::simd_reduce_min => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
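++ // Integer min is a compare plus select; float lanes reuse the scalar
++ // float-min helper.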
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let lt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(lt, a, b)
+ });
- simd_reduce_max, (c v) {
++ }
++
++ sym::simd_reduce_max => {
++ intrinsic_args!(fx, args => (v); intrinsic);
+
- };
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let gt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(gt, a, b)
+ });
- simd_select, (c m, c a, c b) {
++ }
++
++ sym::simd_select => {
++ intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
- let res_lane = CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+ if !m.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
+ return;
+ }
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
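++ // A zero mask lane picks the `b` lane and any non-zero value picks `a`,
++ // hence the inverted comparison feeding `select`.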
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
- };
++ let res_lane =
++ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
++ }
+
+ // simd_saturating_*
+ // simd_bitmask
+ // simd_scatter
+ // simd_gather
++ _ => {
++ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
++ }
+ }
+}