use rustc_apfloat::{Float, Round};
use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
-use rustc_target::abi::{Align, Integer};
+use rustc_target::abi::{Align, Endian, HasDataLayout, Integer, Size};
use crate::*;
-use helpers::{bool_to_simd_element, check_arg_count, simd_element_to_bool};
+use helpers::check_arg_count;
pub enum AtomicOp {
MirOp(mir::BinOp, bool),
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
- ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
+ dest: &PlaceTy<'tcx, Tag>,
+ ret: Option<mir::BasicBlock>,
_unwind: StackPopUnwind,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- if this.emulate_intrinsic(instance, args, ret)? {
+ if this.emulate_intrinsic(instance, args, dest, ret)? {
return Ok(());
}
// All supported intrinsics have a return place.
let intrinsic_name = this.tcx.item_name(instance.def_id());
let intrinsic_name = intrinsic_name.as_str();
- let (dest, ret) = match ret {
+ let ret = match ret {
None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
Some(p) => p,
};
match intrinsic_name {
// Miri overwriting CTFE intrinsics.
"ptr_guaranteed_eq" => {
- let &[ref left, ref right] = check_arg_count(args)?;
+ let [left, right] = check_arg_count(args)?;
let left = this.read_immediate(left)?;
let right = this.read_immediate(right)?;
this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
}
"ptr_guaranteed_ne" => {
- let &[ref left, ref right] = check_arg_count(args)?;
+ let [left, right] = check_arg_count(args)?;
let left = this.read_immediate(left)?;
let right = this.read_immediate(right)?;
this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
// Raw memory accesses
"volatile_load" => {
- let &[ref place] = check_arg_count(args)?;
+ let [place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
this.copy_op(&place.into(), dest)?;
}
"volatile_store" => {
- let &[ref place, ref dest] = check_arg_count(args)?;
+ let [place, dest] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
this.copy_op(dest, &place.into())?;
}
"write_bytes" | "volatile_set_memory" => {
- let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
+ let [ptr, val_byte, count] = check_arg_count(args)?;
let ty = instance.substs.type_at(0);
let ty_layout = this.layout_of(ty)?;
let val_byte = this.read_scalar(val_byte)?.to_u8()?;
let ptr = this.read_pointer(ptr)?;
let count = this.read_scalar(count)?.to_machine_usize(this)?;
+ // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
+ // but no actual allocation can be big enough for the difference to be noticeable.
let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
})?;
- this.memory
- .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
+ this.write_bytes_ptr(
+ ptr,
+ iter::repeat(val_byte).take(byte_count.bytes() as usize),
+ )?;
}
// Floating-point operations
"fabsf32" => {
- let &[ref f] = check_arg_count(args)?;
+ let [f] = check_arg_count(args)?;
let f = this.read_scalar(f)?.to_f32()?;
// Can be implemented in soft-floats.
this.write_scalar(Scalar::from_f32(f.abs()), dest)?;
}
"fabsf64" => {
- let &[ref f] = check_arg_count(args)?;
+ let [f] = check_arg_count(args)?;
let f = this.read_scalar(f)?.to_f64()?;
// Can be implemented in soft-floats.
this.write_scalar(Scalar::from_f64(f.abs()), dest)?;
| "truncf32"
| "roundf32"
=> {
- let &[ref f] = check_arg_count(args)?;
+ let [f] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
let f = match intrinsic_name {
| "truncf64"
| "roundf64"
=> {
- let &[ref f] = check_arg_count(args)?;
+ let [f] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
let f = match intrinsic_name {
| "fdiv_fast"
| "frem_fast"
=> {
- let &[ref a, ref b] = check_arg_count(args)?;
+ let [a, b] = check_arg_count(args)?;
let a = this.read_immediate(a)?;
let b = this.read_immediate(b)?;
let op = match intrinsic_name {
| "maxnumf32"
| "copysignf32"
=> {
- let &[ref a, ref b] = check_arg_count(args)?;
+ let [a, b] = check_arg_count(args)?;
let a = this.read_scalar(a)?.to_f32()?;
let b = this.read_scalar(b)?.to_f32()?;
let res = match intrinsic_name {
| "maxnumf64"
| "copysignf64"
=> {
- let &[ref a, ref b] = check_arg_count(args)?;
+ let [a, b] = check_arg_count(args)?;
let a = this.read_scalar(a)?.to_f64()?;
let b = this.read_scalar(b)?.to_f64()?;
let res = match intrinsic_name {
}
"powf32" => {
- let &[ref f, ref f2] = check_arg_count(args)?;
+ let [f, f2] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
}
"powf64" => {
- let &[ref f, ref f2] = check_arg_count(args)?;
+ let [f, f2] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
}
"fmaf32" => {
- let &[ref a, ref b, ref c] = check_arg_count(args)?;
+ let [a, b, c] = check_arg_count(args)?;
let a = this.read_scalar(a)?.to_f32()?;
let b = this.read_scalar(b)?.to_f32()?;
let c = this.read_scalar(c)?.to_f32()?;
}
"fmaf64" => {
- let &[ref a, ref b, ref c] = check_arg_count(args)?;
+ let [a, b, c] = check_arg_count(args)?;
let a = this.read_scalar(a)?.to_f64()?;
let b = this.read_scalar(b)?.to_f64()?;
let c = this.read_scalar(c)?.to_f64()?;
}
"powif32" => {
- let &[ref f, ref i] = check_arg_count(args)?;
+ let [f, i] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
let i = this.read_scalar(i)?.to_i32()?;
}
"powif64" => {
- let &[ref f, ref i] = check_arg_count(args)?;
+ let [f, i] = check_arg_count(args)?;
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
let i = this.read_scalar(i)?.to_i32()?;
}
"float_to_int_unchecked" => {
- let &[ref val] = check_arg_count(args)?;
+ let [val] = check_arg_count(args)?;
let val = this.read_immediate(val)?;
let res = match val.layout.ty.kind() {
| "simd_round"
| "simd_trunc"
| "simd_fsqrt" => {
- let &[ref op] = check_arg_count(args)?;
+ let [op] = check_arg_count(args)?;
let (op, op_len) = this.operand_to_simd(op)?;
let (dest, dest_len) = this.place_to_simd(dest)?;
| "simd_fmax"
| "simd_fmin"
| "simd_saturating_add"
- | "simd_saturating_sub" => {
+ | "simd_saturating_sub"
+ | "simd_arith_offset" => {
use mir::BinOp;
- let &[ref left, ref right] = check_arg_count(args)?;
+ let [left, right] = check_arg_count(args)?;
let (left, left_len) = this.operand_to_simd(left)?;
let (right, right_len) = this.operand_to_simd(right)?;
let (dest, dest_len) = this.place_to_simd(dest)?;
SaturatingOp(BinOp),
FMax,
FMin,
+ WrappingOffset,
}
let which = match intrinsic_name {
"simd_add" => Op::MirOp(BinOp::Add),
"simd_fmin" => Op::FMin,
"simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
"simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
+ "simd_arith_offset" => Op::WrappingOffset,
_ => unreachable!(),
};
val
}
}
+ Op::SaturatingOp(mir_op) => {
+ this.saturating_arith(mir_op, &left, &right)?
+ }
+ Op::WrappingOffset => {
+ let ptr = this.scalar_to_ptr(left.to_scalar()?)?;
+ let offset_count = right.to_scalar()?.to_machine_isize(this)?;
+ let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
+
+ let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
+ let offset_bytes = offset_count.wrapping_mul(pointee_size);
+ let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, this);
+ Scalar::from_maybe_pointer(offset_ptr, this)
+ }
Op::FMax => {
fmax_op(&left, &right)?
}
Op::FMin => {
fmin_op(&left, &right)?
}
- Op::SaturatingOp(mir_op) => {
- this.saturating_arith(mir_op, &left, &right)?
- }
};
this.write_scalar(val, &dest.into())?;
}
}
"simd_fma" => {
- let &[ref a, ref b, ref c] = check_arg_count(args)?;
+ let [a, b, c] = check_arg_count(args)?;
let (a, a_len) = this.operand_to_simd(a)?;
let (b, b_len) = this.operand_to_simd(b)?;
let (c, c_len) = this.operand_to_simd(c)?;
| "simd_reduce_min" => {
use mir::BinOp;
- let &[ref op] = check_arg_count(args)?;
+ let [op] = check_arg_count(args)?;
let (op, op_len) = this.operand_to_simd(op)?;
let imm_from_bool =
| "simd_reduce_mul_ordered" => {
use mir::BinOp;
- let &[ref op, ref init] = check_arg_count(args)?;
+ let [op, init] = check_arg_count(args)?;
let (op, op_len) = this.operand_to_simd(op)?;
let init = this.read_immediate(init)?;
this.write_immediate(*res, dest)?;
}
"simd_select" => {
- let &[ref mask, ref yes, ref no] = check_arg_count(args)?;
+ let [mask, yes, no] = check_arg_count(args)?;
let (mask, mask_len) = this.operand_to_simd(mask)?;
let (yes, yes_len) = this.operand_to_simd(yes)?;
let (no, no_len) = this.operand_to_simd(no)?;
this.write_immediate(*val, &dest.into())?;
}
}
+ "simd_select_bitmask" => {
+ let [mask, yes, no] = check_arg_count(args)?;
+ let (yes, yes_len) = this.operand_to_simd(yes)?;
+ let (no, no_len) = this.operand_to_simd(no)?;
+ let (dest, dest_len) = this.place_to_simd(dest)?;
+ let bitmask_len = dest_len.max(8);
+
+ assert!(mask.layout.ty.is_integral());
+ assert!(bitmask_len <= 64);
+ assert_eq!(bitmask_len, mask.layout.size.bits());
+ assert_eq!(dest_len, yes_len);
+ assert_eq!(dest_len, no_len);
+
+ let mask: u64 = this
+ .read_scalar(mask)?
+ .check_init()?
+ .to_bits(mask.layout.size)?
+ .try_into()
+ .unwrap();
+ for i in 0..dest_len {
+ let mask =
+ mask & (1 << simd_bitmask_index(i, dest_len, this.data_layout().endian));
+ let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
+ let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
+ let dest = this.mplace_index(&dest, i)?;
+
+ let val = if mask != 0 { yes } else { no };
+ this.write_immediate(*val, &dest.into())?;
+ }
+ for i in dest_len..bitmask_len {
+ // If the mask is "padded", ensure that padding is all-zero.
+ let mask = mask & (1 << i);
+ if mask != 0 {
+ throw_ub_format!(
+ "a SIMD bitmask less than 8 bits long must be filled with 0s for the remaining bits"
+ );
+ }
+ }
+ }
#[rustfmt::skip]
"simd_cast" | "simd_as" => {
- let &[ref op] = check_arg_count(args)?;
+ let [op] = check_arg_count(args)?;
let (op, op_len) = this.operand_to_simd(op)?;
let (dest, dest_len) = this.place_to_simd(dest)?;
}
}
"simd_shuffle" => {
- let &[ref left, ref right, ref index] = check_arg_count(args)?;
+ let [left, right, index] = check_arg_count(args)?;
let (left, left_len) = this.operand_to_simd(left)?;
let (right, right_len) = this.operand_to_simd(right)?;
let (dest, dest_len) = this.place_to_simd(dest)?;
for i in 0..dest_len {
let src_index: u64 = this
- .read_immediate(&this.operand_index(&index, i)?.into())?
+ .read_immediate(&this.operand_index(index, i)?)?
.to_scalar()?
.to_u32()?
.into();
}
}
"simd_gather" => {
- let &[ref passthru, ref ptrs, ref mask] = check_arg_count(args)?;
+ let [passthru, ptrs, mask] = check_arg_count(args)?;
let (passthru, passthru_len) = this.operand_to_simd(passthru)?;
let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
let (mask, mask_len) = this.operand_to_simd(mask)?;
}
}
"simd_scatter" => {
- let &[ref value, ref ptrs, ref mask] = check_arg_count(args)?;
+ let [value, ptrs, mask] = check_arg_count(args)?;
let (value, value_len) = this.operand_to_simd(value)?;
let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
let (mask, mask_len) = this.operand_to_simd(mask)?;
}
}
}
+ "simd_bitmask" => {
+ let [op] = check_arg_count(args)?;
+ let (op, op_len) = this.operand_to_simd(op)?;
+ let bitmask_len = op_len.max(8);
+
+ assert!(dest.layout.ty.is_integral());
+ assert!(bitmask_len <= 64);
+ assert_eq!(bitmask_len, dest.layout.size.bits());
+
+ let mut res = 0u64;
+ for i in 0..op_len {
+ let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
+ if simd_element_to_bool(op)? {
+ res |= 1 << simd_bitmask_index(i, op_len, this.data_layout().endian);
+ }
+ }
+ this.write_int(res, dest)?;
+ }
// Atomic operations
"atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
// Other
"exact_div" => {
- let &[ref num, ref denom] = check_arg_count(args)?;
+ let [num, denom] = check_arg_count(args)?;
this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
}
"try" => return this.handle_try(args, dest, ret),
"breakpoint" => {
- let &[] = check_arg_count(args)?;
+ let [] = check_arg_count(args)?;
// normally this would raise a SIGTRAP, which aborts if no debugger is connected
throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
}
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[ref place] = check_arg_count(args)?;
+ let [place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
// make sure it fits into a scalar; otherwise it cannot be atomic
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[ref place, ref val] = check_arg_count(args)?;
+ let [place, val] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOp,
) -> InterpResult<'tcx> {
- let &[] = check_arg_count(args)?;
+ let [] = check_arg_count(args)?;
let _ = atomic;
//FIXME: compiler fences are currently ignored
Ok(())
atomic: AtomicFenceOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[] = check_arg_count(args)?;
+ let [] = check_arg_count(args)?;
this.validate_atomic_fence(atomic)?;
Ok(())
}
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[ref place, ref rhs] = check_arg_count(args)?;
+ let [place, rhs] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
if !place.layout.ty.is_integral() {
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
match atomic_op {
AtomicOp::Min => {
let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
- this.write_immediate(*old, &dest)?; // old value is returned
+ this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::Max => {
let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
- this.write_immediate(*old, &dest)?; // old value is returned
+ this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::MirOp(op, neg) => {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[ref place, ref new] = check_arg_count(args)?;
+ let [place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
+ let [place, expect_old, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),
})
}
+
+// Encodes a bool as a SIMD mask element of the given size:
+// all-1-bits (-1 as a signed integer) for `true`, all-0-bits for `false`.
+// Inverse of `simd_element_to_bool` below.
+fn bool_to_simd_element(b: bool, size: Size) -> Scalar<Tag> {
+ // SIMD uses all-1 as pattern for "true"
+ let val = if b { -1 } else { 0 };
+ Scalar::from_int(val, size)
+}
+
+// Decodes a SIMD mask element into a bool: all-0-bits is `false`, all-1-bits
+// is `true`; any other bit pattern is reported as UB.
+// Inverse of `bool_to_simd_element` above.
+fn simd_element_to_bool<'tcx>(elem: ImmTy<'tcx, Tag>) -> InterpResult<'tcx, bool> {
+ // Read as a *signed* integer of the element's own size, so an all-1-bits
+ // lane reads back as -1 regardless of the element width.
+ let val = elem.to_scalar()?.to_int(elem.layout.size)?;
+ Ok(match val {
+ 0 => false,
+ -1 => true,
+ _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
+ })
+}
+
+// Maps a SIMD lane index to its bit position in the integer mask used by
+// `simd_bitmask` / `simd_select_bitmask` (see the intrinsic arms above):
+// on little-endian targets lane `i` is bit `i`; on big-endian targets the
+// bit order is reversed.
+// NOTE(review): parameter name "endianess" is a misspelling of "endianness";
+// left as-is here since this is a comment-only pass over a patch.
+fn simd_bitmask_index(idx: u64, vec_len: u64, endianess: Endian) -> u64 {
+ // Precondition: the lane index must be in range for the vector.
+ assert!(idx < vec_len);
+ match endianess {
+ Endian::Little => idx,
+ Endian::Big => vec_len - 1 - idx, // reverse order of bits
+ }
+}