| "simd_shl"
| "simd_shr"
| "simd_and"
- | "simd_or" => {
+ | "simd_or"
+ | "simd_eq" => {
let &[ref left, ref right] = check_arg_count(args)?;
let (left, left_len) = this.operand_to_simd(left)?;
let (right, right_len) = this.operand_to_simd(right)?;
"simd_shr" => mir::BinOp::Shr,
"simd_and" => mir::BinOp::BitAnd,
"simd_or" => mir::BinOp::BitOr,
+ "simd_eq" => mir::BinOp::Eq,
_ => unreachable!(),
};
let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
let dest = this.mplace_index(&dest, i)?;
let (val, overflowed, ty) = this.overflowing_binary_op(op, &left, &right)?;
- assert_eq!(ty, dest.layout.ty);
if matches!(op, mir::BinOp::Shl | mir::BinOp::Shr) {
    // Shifts have extra UB as SIMD operations that the MIR binop does not have.
    // See <https://github.com/rust-lang/rust/issues/91237>.
    if overflowed {
        let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
        throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
    }
}
- this.write_scalar(val, &dest.into())?;
+ if matches!(op, mir::BinOp::Eq) {
+ // Special handling for boolean-returning operations
+ assert_eq!(ty, this.tcx.types.bool);
+ let val = val.to_bool().unwrap();
+ let val = if val { -1 } else { 0 }; // SIMD uses all-1 as pattern for "true"
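+ // E.g. for an `i32` lane this makes "true" `0xFFFF_FFFF` and "false" `0x0000_0000`;
+ // `from_int` below sign-extends the `-1` to whatever the destination lane width is.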
+ let val = Scalar::from_int(val, dest.layout.size);
+ this.write_scalar(val, &dest.into())?;
+ } else {
+ assert_eq!(ty, dest.layout.ty);
+ this.write_scalar(val, &dest.into())?;
+ }
+ }
+ }
+ "simd_reduce_any" => {
+ let &[ref arg] = check_arg_count(args)?;
+ let (arg, arg_len) = this.operand_to_simd(arg)?;
+
+ let mut res = false; // the neutral element
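+ // (`false` is the identity of `|`: OR-ing it in changes nothing until some lane is `true`.)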
+ for i in 0..arg_len {
+ let op = this.read_immediate(&this.mplace_index(&arg, i)?.into())?;
+ // We convert it to a *signed* integer and expect either 0 or -1 (the latter means all bits were set).
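+ // (For any lane width, all-1-bits reads back as `-1` when interpreted as signed, so the check below is width-independent.)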
+ let val = op.to_scalar()?.to_int(op.layout.size)?;
+ let val = match val {
+ 0 => false,
+ -1 => true,
+ _ =>
+ throw_ub_format!(
+ "each element of a simd_reduce_any operand must be all-0-bits or all-1-bits"
+ ),
+ };
+ res = res | val;
}
+
+ this.write_scalar(Scalar::from_bool(res), dest)?;
}
// Atomic operations
--- /dev/null
+#![feature(platform_intrinsics, repr_simd)]
+
+extern "platform-intrinsic" {
+ pub(crate) fn simd_reduce_any<T>(x: T) -> bool;
+}
+
+#[repr(simd)]
+#[allow(non_camel_case_types)]
+struct i32x2(i32, i32);
+
+fn main() { unsafe {
+ let x = i32x2(0, 1);
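+ // The second lane is `1`: neither all-0-bits ("false") nor all-1-bits ("true"),
+ // so the reduction below is reported as UB.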
+ simd_reduce_any(x); //~ERROR must be all-0-bits or all-1-bits
+} }
-#![feature(portable_simd)]
+#![feature(portable_simd, platform_intrinsics)]
use std::simd::*;
fn simd_ops_i32() {
    let a = i32x4::splat(10);
    let b = i32x4::from_array([1, 2, 3, 4]);
assert_eq!(a + b, i32x4::from_array([11, 12, 13, 14]));
assert_eq!(a - b, i32x4::from_array([9, 8, 7, 6]));
assert_eq!(a * b, i32x4::from_array([10, 20, 30, 40]));
- //assert_eq!(a / b, i32x4::from_array([10, 5, 3, 2]));
- //assert_eq!(a / i32x4::splat(2), i32x4::splat(5));
- //assert_eq!(a % b, i32x4::from_array([0, 0, 1, 2]));
+ assert_eq!(a / b, i32x4::from_array([10, 5, 3, 2]));
+ assert_eq!(a / i32x4::splat(2), i32x4::splat(5));
+ assert_eq!(a % b, i32x4::from_array([0, 0, 1, 2]));
assert_eq!(b << i32x4::splat(2), i32x4::from_array([4, 8, 12, 16]));
assert_eq!(b >> i32x4::splat(1), i32x4::from_array([0, 1, 1, 2]));
assert_eq!(b & i32x4::splat(2), i32x4::from_array([0, 2, 2, 0]));
assert_eq!(b | i32x4::splat(2), i32x4::from_array([3, 2, 3, 6]));
}
+fn simd_intrinsics() {
+ extern "platform-intrinsic" {
+ pub(crate) fn simd_eq<T, U>(x: T, y: T) -> U;
+ pub(crate) fn simd_reduce_any<T>(x: T) -> bool;
+ }
+
+ // Make sure simd_eq returns all-1 for `true`
+ let a = i32x4::splat(10);
+ let b = i32x4::from_array([1, 2, 10, 4]);
+ let c: i32x4 = unsafe { simd_eq(a, b) };
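+ // Only the third lane compares equal (10 == 10); its "true" is all-1-bits, i.e. `-1` as an `i32`.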
+ assert_eq!(c, i32x4::from_array([0, 0, -1, 0]));
+
+ unsafe {
+ assert!(!simd_reduce_any(i32x4::splat(0)));
+ assert!(simd_reduce_any(i32x4::splat(-1)));
+ }
+}
+
fn main() {
simd_ops_f32();
simd_ops_i32();
+ simd_intrinsics();
}