mod assign;
mod deref;
+mod unary;
impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
where
fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
}
};
-
- // unary op
- {
- impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
- where
- LaneCount<$lanes2:ident>: SupportedLaneCount,
- {
- type Output = $output:ty;
- fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
- }
- } => {
- impl<const $lanes: usize> core::ops::$trait for $type
- where
- LaneCount<$lanes2>: SupportedLaneCount,
- {
- type Output = $output;
- fn $fn($self_tok) -> Self::Output $body
- }
- }
}
/// Automatically implements operators over vectors and scalars for a particular vector.
impl_op! { @binary $scalar, BitXor::bitxor, simd_xor }
};
- { impl Not for $scalar:ty } => {
- impl_ref_ops! {
- impl<const LANES: usize> core::ops::Not for Simd<$scalar, LANES>
- where
- LaneCount<LANES>: SupportedLaneCount,
- {
- type Output = Self;
- fn not(self) -> Self::Output {
- self ^ Self::splat(!<$scalar>::default())
- }
- }
- }
- };
-
- { impl Neg for $scalar:ty } => {
- impl_ref_ops! {
- impl<const LANES: usize> core::ops::Neg for Simd<$scalar, LANES>
- where
- LaneCount<LANES>: SupportedLaneCount,
- {
- type Output = Self;
- fn neg(self) -> Self::Output {
- unsafe { intrinsics::simd_neg(self) }
- }
- }
- }
- };
-
// generic binary op with assignment when output is `Self`
{ @binary $scalar:ty, $trait:ident :: $trait_fn:ident, $intrinsic:ident } => {
impl_ref_ops! {
impl_op! { impl Mul for $scalar }
impl_op! { impl Div for $scalar }
impl_op! { impl Rem for $scalar }
- impl_op! { impl Neg for $scalar }
)*
};
}
impl_op! { impl BitAnd for $scalar }
impl_op! { impl BitOr for $scalar }
impl_op! { impl BitXor for $scalar }
- impl_op! { impl Not for $scalar }
// Integers panic on divide by 0
impl_ref_ops! {
macro_rules! impl_signed_int_ops {
{ $($scalar:ty),* } => {
impl_unsigned_int_ops! { $($scalar),* }
- $( // scalar
- impl_op! { impl Neg for $scalar }
- )*
};
}
--- /dev/null
+use crate::simd::intrinsics;
+use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
+use core::ops::{Neg, Not}; // unary ops
+
+// Generates a `core::ops::Neg` impl for each listed `Simd<$scalar, LANES>`
+// type, forwarding lane-wise negation to the `simd_neg` platform intrinsic.
+macro_rules! neg {
+    ($(impl<const LANES: usize> Neg for Simd<$scalar:ty, LANES>)*) => {
+        $(impl<const LANES: usize> Neg for Simd<$scalar, LANES>
+        where
+            $scalar: SimdElement,
+            LaneCount<LANES>: SupportedLaneCount,
+        {
+            type Output = Self;
+
+            #[inline]
+            #[must_use = "operator returns a new vector without mutating the input"]
+            fn neg(self) -> Self::Output {
+                // SAFETY: this macro is only invoked (below) for float and
+                // signed-integer lane types, for which `simd_neg` is defined
+                // on every lane.
+                unsafe { intrinsics::simd_neg(self) }
+            }
+        })*
+    }
+}
+
+// `Neg` is implemented only for floating-point and signed-integer lane
+// types; unsigned integers have no negation operator.
+neg! {
+    impl<const LANES: usize> Neg for Simd<f32, LANES>
+
+    impl<const LANES: usize> Neg for Simd<f64, LANES>
+
+    impl<const LANES: usize> Neg for Simd<i8, LANES>
+
+    impl<const LANES: usize> Neg for Simd<i16, LANES>
+
+    impl<const LANES: usize> Neg for Simd<i32, LANES>
+
+    impl<const LANES: usize> Neg for Simd<i64, LANES>
+
+    impl<const LANES: usize> Neg for Simd<isize, LANES>
+}
+
+// Generates a `core::ops::Not` impl for each listed integer
+// `Simd<$scalar, LANES>` type. Bitwise NOT is expressed as XOR with an
+// all-ones splat, reusing the vector `BitXor` impl rather than a
+// dedicated intrinsic.
+macro_rules! not {
+    ($(impl<const LANES: usize> Not for Simd<$scalar:ty, LANES>)*) => {
+        $(impl<const LANES: usize> Not for Simd<$scalar, LANES>
+        where
+            $scalar: SimdElement,
+            LaneCount<LANES>: SupportedLaneCount,
+        {
+            type Output = Self;
+
+            #[inline]
+            #[must_use = "operator returns a new vector without mutating the input"]
+            fn not(self) -> Self::Output {
+                // `!(0 as $scalar)` is the all-ones bit pattern of the lane
+                // type, so XOR flips every bit of every lane.
+                self ^ (Simd::splat(!(0 as $scalar)))
+            }
+        })*
+    }
+}
+
+// `Not` is implemented for every integer lane type, signed and unsigned;
+// floating-point types have no bitwise NOT.
+not! {
+    impl<const LANES: usize> Not for Simd<i8, LANES>
+
+    impl<const LANES: usize> Not for Simd<i16, LANES>
+
+    impl<const LANES: usize> Not for Simd<i32, LANES>
+
+    impl<const LANES: usize> Not for Simd<i64, LANES>
+
+    impl<const LANES: usize> Not for Simd<isize, LANES>
+
+    impl<const LANES: usize> Not for Simd<u8, LANES>
+
+    impl<const LANES: usize> Not for Simd<u16, LANES>
+
+    impl<const LANES: usize> Not for Simd<u32, LANES>
+
+    impl<const LANES: usize> Not for Simd<u64, LANES>
+
+    impl<const LANES: usize> Not for Simd<usize, LANES>
+}