-pub mod llvm;
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
mod simd;
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_span::symbol::{kw, sym};
+
use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
macro intrinsic_pat {
(_) => {
_
},
($name:ident) => {
- stringify!($name)
+ sym::$name
+ },
+ (kw.$name:ident) => {
+ kw::$name
},
($name:literal) => {
- stringify!($name)
+ $name
},
- ($x:ident . $($xs:tt).*) => {
- concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
- }
}
macro intrinsic_arg {
    (o $fx:expr, $arg:ident) => {
        $arg
},
(c $fx:expr, $arg:ident) => {
- trans_operand($fx, $arg)
+ codegen_operand($fx, $arg)
},
(v $fx:expr, $arg:ident) => {
- trans_operand($fx, $arg).load_scalar($fx)
+ codegen_operand($fx, $arg).load_scalar($fx)
}
}
)*) => {
match $intrinsic {
$(
- stringify!($name) => {
+ sym::$name => {
assert!($substs.is_noop());
if let [$(ref $arg),*] = *$args {
let ($($arg,)*) = (
- $(trans_operand($fx, $arg),)*
+ $(codegen_operand($fx, $arg),)*
);
let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
$ret.write_cvalue($fx, res);
}
}
-macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
- let new = $fx.bcx.ins().$op(old, $src);
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
- $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
-
- crate::atomic_shim::unlock_global_lock($fx);
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ match $ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ $fx.tcx.sess.span_err(
+ $span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ $intrinsic, $ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
+ }
}
-macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- // Read old
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-
- // Compare
- let is_eq = codegen_icmp($fx, IntCC::SignedGreaterThan, old, $src);
- let new = $fx.bcx.ins().select(is_eq, old, $src);
-
- // Write new
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-
- let ret_val = CValue::by_val(old, $ret.layout());
- $ret.write_cvalue($fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock($fx);
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ if !$ty.is_simd() {
+ $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
}
-fn lane_type_and_count<'tcx>(
- tcx: TyCtxt<'tcx>,
- layout: TyLayout<'tcx>,
-) -> (TyLayout<'tcx>, u32) {
- assert!(layout.ty.is_simd());
- let lane_count = match layout.fields {
- layout::FieldPlacement::Array { stride: _, count } => u32::try_from(count).unwrap(),
- _ => unreachable!("lane_type_and_count({:?})", layout),
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match &layout.abi {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
};
- let lane_layout = layout.field(&ty::layout::LayoutCx {
- tcx,
- param_env: ParamEnv::reveal_all(),
- }, 0).unwrap();
- (lane_layout, lane_count)
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
}
-fn simd_for_each_lane<'tcx, B: Backend>(
- fx: &mut FunctionCx<'_, 'tcx, B>,
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(
- &mut FunctionCx<'_, 'tcx, B>,
- TyLayout<'tcx>,
- TyLayout<'tcx>,
+ &mut FunctionCx<'_, '_, 'tcx>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
Value,
) -> CValue<'tcx>,
) {
let layout = val.layout();
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
assert_eq!(lane_count, ret_lane_count);
    for lane_idx in 0..lane_count {
        let lane = val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
        ret.place_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).write_cvalue(fx, res_lane);
    }
}
-fn simd_pair_for_each_lane<'tcx, B: Backend>(
- fx: &mut FunctionCx<'_, 'tcx, B>,
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
x: CValue<'tcx>,
y: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(
- &mut FunctionCx<'_, 'tcx, B>,
- TyLayout<'tcx>,
- TyLayout<'tcx>,
+ &mut FunctionCx<'_, '_, 'tcx>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
Value,
Value,
) -> CValue<'tcx>,
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
assert_eq!(lane_count, ret_lane_count);
    for lane in 0..lane_count {
        let x_lane = x.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
        let y_lane = y.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
        ret.place_field(fx, mir::Field::new(lane.try_into().unwrap())).write_cvalue(fx, res_lane);
    }
}
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
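+    // Fold the remaining lanes onto lane 0, left to right.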
+ let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ for lane_idx in 1..lane_count {
+ let lane =
+ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ res_val = f(fx, lane_layout, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane =
+ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
fn bool_to_zero_or_max_uint<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
- layout: TyLayout<'tcx>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
val: Value,
) -> CValue<'tcx> {
let ty = fx.clif_type(layout.ty).unwrap();
    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
};
- let zero = fx.bcx.ins().iconst(int_ty, 0);
- let max = fx
- .bcx
- .ins()
- .iconst(int_ty, (u64::max_value() >> (64 - int_ty.bits())) as i64);
- let mut res = fx.bcx.ins().select(val, max, zero);
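+    // `bint` yields 0 or 1; negating that gives 0 or all ones (the max unsigned value).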
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
if ty.is_float() {
res = fx.bcx.ins().bitcast(ty, res);
}
macro simd_cmp {
- ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind {
- ty::Uint(_) | ty::Int(_) => codegen_icmp(fx, IntCC::$cc, x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
- },
- );
+ ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ let vector_ty = clif_vector_type($fx.tcx, $x.layout());
+
+ if let Some(vector_ty) = vector_ty {
+ let x = $x.load_scalar($fx);
+ let y = $y.load_scalar($fx);
+ let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
+
+ // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
+ let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
+
+ $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
+ } else {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ },
+ );
+ }
},
- ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+ ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ // FIXME use vector icmp when possible
simd_pair_for_each_lane(
$fx,
$x,
$y,
$ret,
|fx, lane_layout, res_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind {
- ty::Uint(_) => codegen_icmp(fx, IntCC::$cc_u, x_lane, y_lane),
- ty::Int(_) => codegen_icmp(fx, IntCC::$cc_s, x_lane, y_lane),
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
_ => unreachable!("{:?}", lane_layout.ty),
};
bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
$y,
$ret,
|fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind {
+ let res_lane = match lane_layout.ty.kind() {
ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
_ => unreachable!("{:?}", lane_layout.ty),
$y,
$ret,
|fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind {
+ let res_lane = match lane_layout.ty.kind() {
ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
$y,
$ret,
|fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind {
+ let res_lane = match lane_layout.ty.kind() {
ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
_ => unreachable!("{:?}", lane_layout.ty),
};
);
}
-pub fn codegen_intrinsic_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
args: &[mir::Operand<'tcx>],
destination: Option<(CPlace<'tcx>, BasicBlock)>,
let def_id = instance.def_id();
let substs = instance.substs;
- let intrinsic = fx.tcx.item_name(def_id).as_str();
- let intrinsic = &intrinsic[..];
+ let intrinsic = fx.tcx.item_name(def_id);
let ret = match destination {
Some((place, _)) => place,
None => {
// Insert non returning intrinsics here
match intrinsic {
- "abort" => {
- trap_panic(fx, "Called intrinsic::abort.");
- }
- "unreachable" => {
- trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
+ sym::abort => {
+ trap_abort(fx, "Called intrinsic::abort.");
}
- "transmute" => {
- trap_unreachable(
- fx,
- "[corruption] Called intrinsic::transmute with uninhabited argument.",
- );
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
}
                _ => unimplemented!("unsupported intrinsic {}", intrinsic)
}
}
};
- if intrinsic.starts_with("simd_") {
+ if intrinsic.as_str().starts_with("simd_") {
self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
fx.bcx.ins().jump(ret_block, &[]);
sinf64(flt) -> f64 => sin,
cosf32(flt) -> f32 => cosf,
cosf64(flt) -> f64 => cos,
- tanf32(flt) -> f32 => tanf,
- tanf64(flt) -> f64 => tan,
}
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
- unimpl!("unsupported intrinsic {}", intrinsic)
+ fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
};
assume, (c _a) {};
};
copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
- let elem_size = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, elem_size as i64);
assert_eq!(args.len(), 3);
- let byte_amount = fx.bcx.ins().imul(count, elem_size);
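+    // Skip the multiplication when the element size is a single byte.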
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
- if intrinsic.ends_with("_nonoverlapping") {
+ if intrinsic == sym::copy_nonoverlapping {
// FIXME emit_small_memcpy
fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
} else {
fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
}
};
- discriminant_value, (c ptr) {
- let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
- let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
- ret.write_cvalue(fx, discr);
+ // NOTE: the volatile variants have src and dst swapped
+ volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+ }
};
size_of_val, <T> (c ptr) {
let layout = fx.layout_of(T);
ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
};
- _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
+ unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
+ | unchecked_shl | unchecked_shr, (c x, c y) {
// FIXME trap on overflow
let bin_op = match intrinsic {
- "unchecked_sub" => BinOp::Sub,
- "unchecked_div" | "exact_div" => BinOp::Div,
- "unchecked_rem" => BinOp::Rem,
- "unchecked_shl" => BinOp::Shl,
- "unchecked_shr" => BinOp::Shr,
- _ => unreachable!("intrinsic {}", intrinsic),
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
};
- let res = crate::num::trans_int_binop(fx, bin_op, x, y);
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
ret.write_cvalue(fx, res);
};
- _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
+ add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
assert_eq!(x.layout().ty, y.layout().ty);
let bin_op = match intrinsic {
- "add_with_overflow" => BinOp::Add,
- "sub_with_overflow" => BinOp::Sub,
- "mul_with_overflow" => BinOp::Mul,
- _ => unreachable!("intrinsic {}", intrinsic),
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
};
- let res = crate::num::trans_checked_int_binop(
- fx,
- bin_op,
- x,
- y,
- );
- ret.write_cvalue(fx, res);
- };
- _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
- assert_eq!(x.layout().ty, y.layout().ty);
- let bin_op = match intrinsic {
- "wrapping_add" => BinOp::Add,
- "wrapping_sub" => BinOp::Sub,
- "wrapping_mul" => BinOp::Mul,
- _ => unreachable!("intrinsic {}", intrinsic),
- };
- let res = crate::num::trans_int_binop(
+ let res = crate::num::codegen_checked_int_binop(
fx,
bin_op,
            x,
            y,
);
ret.write_cvalue(fx, res);
};
- _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
+ saturating_add | saturating_sub, <T> (c lhs, c rhs) {
assert_eq!(lhs.layout().ty, rhs.layout().ty);
let bin_op = match intrinsic {
- "saturating_add" => BinOp::Add,
- "saturating_sub" => BinOp::Sub,
- _ => unreachable!("intrinsic {}", intrinsic),
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
};
let signed = type_sign(T);
- let checked_res = crate::num::trans_checked_int_binop(
+ let checked_res = crate::num::codegen_checked_int_binop(
fx,
bin_op,
lhs,
// `select.i8` is not implemented by Cranelift.
let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
- let (min, max) = type_min_max_value(clif_ty, signed);
- let min = fx.bcx.ins().iconst(clif_ty, min);
- let max = fx.bcx.ins().iconst(clif_ty, max);
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
let val = match (intrinsic, signed) {
- ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
- ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
- ("saturating_add", true) => {
+ (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (sym::saturating_add, true) => {
let rhs = rhs.load_scalar(fx);
let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
fx.bcx.ins().select(has_overflow, sat_val, val)
}
- ("saturating_sub", true) => {
+ (sym::saturating_sub, true) => {
let rhs = rhs.load_scalar(fx);
let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
};
rotate_left, <T>(v x, v y) {
let layout = fx.layout_of(T);
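+        // Only the low bits of the rotation amount matter, so reduce an i128 amount to i64.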
+ let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
+ fx.bcx.ins().ireduce(types::I64, y)
+ } else {
+ y
+ };
let res = fx.bcx.ins().rotl(x, y);
ret.write_cvalue(fx, CValue::by_val(res, layout));
};
rotate_right, <T>(v x, v y) {
let layout = fx.layout_of(T);
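+        // As in `rotate_left`, reduce an i128 rotation amount to i64.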
+ let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
+ fx.bcx.ins().ireduce(types::I64, y)
+ } else {
+ y
+ };
let res = fx.bcx.ins().rotr(x, y);
ret.write_cvalue(fx, CValue::by_val(res, layout));
};
offset | arith_offset, (c base, v offset) {
let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
- let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
let base_val = base.load_scalar(fx);
let res = fx.bcx.ins().iadd(base_val, ptr_diff);
ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
};
- transmute, <src_ty, dst_ty> (c from) {
- assert_eq!(from.layout().ty, src_ty);
- let (addr, meta) = from.force_stack(fx);
- assert!(meta.is_none());
- let dst_layout = fx.layout_of(dst_ty);
- ret.write_cvalue(fx, CValue::by_ref(addr, dst_layout))
+ transmute, (c from) {
+ ret.write_cvalue_transmute(fx, from);
};
- init, () {
- let layout = ret.layout();
- if layout.abi == Abi::Uninhabited {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::init for uninhabited type.");
- return;
- }
-
- match *ret.inner() {
- CPlaceInner::NoPlace => {}
- CPlaceInner::Var(var) => {
- let clif_ty = fx.clif_type(layout.ty).unwrap();
- let val = match clif_ty {
- types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
- types::I128 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- fx.bcx.ins().iconcat(zero, zero)
- }
- types::F32 => {
- let zero = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx.ins().bitcast(types::F32, zero)
- }
- types::F64 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- fx.bcx.ins().bitcast(types::F64, zero)
- }
- _ => panic!("clif_type returned {}", clif_ty),
- };
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
- fx.bcx.def_var(mir_var(var), val);
- }
- _ => {
- let addr = ret.to_ptr(fx).get_addr(fx);
- let layout = ret.layout();
- fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
- }
- }
- };
- uninit, () {
- let layout = ret.layout();
- if layout.abi == Abi::Uninhabited {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::uninit for uninhabited type.");
- return;
- }
- match *ret.inner() {
- CPlaceInner::NoPlace => {},
- CPlaceInner::Var(var) => {
- let clif_ty = fx.clif_type(layout.ty).unwrap();
- let val = match clif_ty {
- types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 42),
- types::I128 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- let fourty_two = fx.bcx.ins().iconst(types::I64, 42);
- fx.bcx.ins().iconcat(fourty_two, zero)
- }
- types::F32 => {
- let zero = fx.bcx.ins().iconst(types::I32, 0xdeadbeef);
- fx.bcx.ins().bitcast(types::F32, zero)
- }
- types::F64 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0xcafebabedeadbeefu64 as i64);
- fx.bcx.ins().bitcast(types::F64, zero)
- }
- _ => panic!("clif_type returned {}", clif_ty),
- };
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
- fx.bcx.def_var(mir_var(var), val);
- }
- CPlaceInner::Addr(_, _) => {
- // Don't write to `ret`, as the destination memory is already uninitialized.
- }
- }
- };
- write_bytes, (c dst, v val, v count) {
+ write_bytes | volatile_set_memory, (c dst, v val, v count) {
let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
- let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
};
ctlz | ctlz_nonzero, <T> (v arg) {
};
bswap, <T> (v arg) {
// FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
- fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
match bcx.func.dfg.value_type(v) {
types::I8 => v,
}
ty => unreachable!("bswap {}", ty),
}
- };
+ }
let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
ret.write_cvalue(fx, res);
};
- panic_if_uninhabited, <T> () {
- if fx.layout_of(T).abi.is_uninhabited() {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::panic_if_uninhabited for uninhabited type.");
+ assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+ let layout = fx.layout_of(T);
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+ span,
+ ));
return;
}
};
- volatile_load, (c ptr) {
+ volatile_load | unaligned_volatile_load, (c ptr) {
// Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
};
- volatile_store, (v ptr, c val) {
+ volatile_store | unaligned_volatile_store, (v ptr, c val) {
// Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
};
- size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name, () {
+ pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
let const_val =
fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
- let val = crate::constant::trans_const_value(
+ let val = crate::constant::codegen_const_value(
fx,
- ty::Const::from_value(fx.tcx, const_val, ret.layout().ty),
+ const_val,
+ ret.layout().ty,
);
ret.write_cvalue(fx, val);
};
let pointee_size: u64 = fx.layout_of(T).size.bytes();
let diff = fx.bcx.ins().isub(ptr, base);
// FIXME this can be an exact division.
- let val = CValue::by_val(fx.bcx.ins().udiv_imm(diff, pointee_size as i64), isize_layout);
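+    // The pointer difference may be negative, so the division by the element size must be signed.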
+ let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_eq, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_ne, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
ret.write_cvalue(fx, val);
};
ret.write_cvalue(fx, caller_location);
};
- _ if intrinsic.starts_with("atomic_fence"), () {
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_fence"), () {
+ fx.bcx.ins().fence();
};
- _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
};
- _ if intrinsic.starts_with("atomic_load"), (c ptr) {
- crate::atomic_shim::lock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
+ validate_atomic_type!(fx, intrinsic, span, T);
+ let ty = fx.clif_type(T).unwrap();
- let inner_layout =
- fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
- ret.write_cvalue(fx, val);
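+    // `MemFlags::trusted()` marks the access as aligned and non-trapping.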
+ let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
- crate::atomic_shim::unlock_global_lock(fx);
+ let val = CValue::by_val(val, fx.layout_of(T));
+ ret.write_cvalue(fx, val);
};
- _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
- crate::atomic_shim::lock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
+ validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
- let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
- dest.write_cvalue(fx, val);
+ let val = val.load_scalar(fx);
- crate::atomic_shim::unlock_global_lock(fx);
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
};
- _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
- crate::atomic_shim::lock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+ let layout = new.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+ let new = new.load_scalar(fx);
- // Write new
- let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
- dest.write_cvalue(fx, src);
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
- crate::atomic_shim::unlock_global_lock(fx);
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
- crate::atomic_shim::lock_global_lock(fx);
-
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+ _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+ let layout = new.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
- // Compare
- let is_eq = codegen_icmp(fx, IntCC::Equal, old, test_old);
- let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
- // Write new
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
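+    // `atomic_cas` returns the previous value; the swap succeeded iff it equals `test_old`.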
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
- ret.write_cvalue(fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock(fx);
+ ret.write_cvalue(fx, ret_val)
};
- _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
- atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
+ let layout = amount.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
- atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+ let layout = amount.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
- atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
- crate::atomic_shim::lock_global_lock(fx);
+ _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- let and = fx.bcx.ins().band(old, src);
- let new = fx.bcx.ins().bnot(and);
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
- crate::atomic_shim::unlock_global_lock(fx);
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
- atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
- atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
+
+ // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
+ _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
+ _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
- atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
- atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
- atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
- _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
- atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
+ _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+ let layout = src.layout();
+ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
};
minnumf32, (v a, v b) {
ret.write_cvalue(fx, val);
};
- try, (v f, v data, v _local_ptr) {
+ kw.Try, (v f, v data, v _catch_fn) {
// FIXME once unwinding is supported, change this to actually catch panics
let f_sig = fx.bcx.func.import_signature(Signature {
call_conv: CallConv::triple_default(fx.triple()),
fx.bcx.ins().call_indirect(f_sig, f, &[data]);
- let ret_val = CValue::const_val(fx, ret.layout(), 0);
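+    // Return 0, meaning the closure completed without a caught panic.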
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
ret.write_cvalue(fx, ret_val);
};
+
+ fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
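+        // These lower to the regular float ops; no fast-math relaxations are applied.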
+ let res = crate::num::codegen_float_binop(fx, match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ }, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ float_to_int_unchecked, (v f) {
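+        // The `false` argument is the source-signedness flag, which is irrelevant for a float source.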
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ };
}
if let Some((_, dest)) = destination {