#![no_std]
#![allow(incomplete_features)]
-#![feature(
- repr_simd,
- platform_intrinsics,
- link_llvm_intrinsics,
- simd_ffi,
- const_generics
-)]
+#![feature(repr_simd, platform_intrinsics, simd_ffi, const_generics)]
#![warn(missing_docs)]
//! Portable SIMD module.
--- /dev/null
+/// Implemented for bitmask sizes that are supported by the implementation.
+pub trait LanesAtMost64 {}
+impl LanesAtMost64 for BitMask<1> {}
+impl LanesAtMost64 for BitMask<2> {}
+impl LanesAtMost64 for BitMask<4> {}
+impl LanesAtMost64 for BitMask<8> {}
+impl LanesAtMost64 for BitMask<16> {}
+impl LanesAtMost64 for BitMask<32> {}
+impl LanesAtMost64 for BitMask<64> {}
+
+/// A mask where each lane is represented by a single bit.
+#[derive(Copy, Clone, Debug)]
+#[repr(transparent)]
+pub struct BitMask<const LANES: usize>(u64)
+where
+ BitMask<LANES>: LanesAtMost64;
+
+impl<const LANES: usize> BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ /// Construct a mask by setting all lanes to the given value.
+ pub fn splat(value: bool) -> Self {
+ if value {
+ Self(u64::MAX)
+ } else {
+ Self(u64::MIN)
+ }
+ }
+
+ /// Tests the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn test(&self, lane: usize) -> bool {
+ assert!(lane < LANES, "lane index out of range");
+ (self.0 >> lane) & 0x1 > 0
+ }
+
+ /// Sets the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn set(&mut self, lane: usize, value: bool) {
+ assert!(lane < LANES, "lane index out of range");
+ self.0 ^= ((value ^ self.test(lane)) as u64) << lane
+ }
+}
+
+impl<const LANES: usize> core::ops::BitAnd for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: Self) -> Self {
+ Self(self.0 & rhs.0)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitAnd<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: bool) -> Self {
+ self & Self::splat(rhs)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitAnd<BitMask<LANES>> for bool
+where
+ BitMask<LANES>: LanesAtMost64,
+{
+ type Output = BitMask<LANES>;
+ #[inline]
+ fn bitand(self, rhs: BitMask<LANES>) -> BitMask<LANES> {
+ BitMask::<LANES>::splat(self) & rhs
+ }
+}
+
+impl<const LANES: usize> core::ops::BitOr for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self {
+ Self(self.0 | rhs.0)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitOr<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: bool) -> Self {
+ self | Self::splat(rhs)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitOr<BitMask<LANES>> for bool
+where
+ BitMask<LANES>: LanesAtMost64,
+{
+ type Output = BitMask<LANES>;
+ #[inline]
+ fn bitor(self, rhs: BitMask<LANES>) -> BitMask<LANES> {
+ BitMask::<LANES>::splat(self) | rhs
+ }
+}
+
+impl<const LANES: usize> core::ops::BitXor for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ Self(self.0 ^ rhs.0)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitXor<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: bool) -> Self::Output {
+ self ^ Self::splat(rhs)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitXor<BitMask<LANES>> for bool
+where
+ BitMask<LANES>: LanesAtMost64,
+{
+ type Output = BitMask<LANES>;
+ #[inline]
+ fn bitxor(self, rhs: BitMask<LANES>) -> Self::Output {
+ BitMask::<LANES>::splat(self) ^ rhs
+ }
+}
+
+impl<const LANES: usize> core::ops::Not for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ type Output = BitMask<LANES>;
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+}
+
+impl<const LANES: usize> core::ops::BitAndAssign for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ self.0 &= rhs.0;
+ }
+}
+
+impl<const LANES: usize> core::ops::BitAndAssign<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: bool) {
+ *self &= Self::splat(rhs);
+ }
+}
+
+impl<const LANES: usize> core::ops::BitOrAssign for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ self.0 |= rhs.0;
+ }
+}
+
+impl<const LANES: usize> core::ops::BitOrAssign<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: bool) {
+ *self |= Self::splat(rhs);
+ }
+}
+
+impl<const LANES: usize> core::ops::BitXorAssign for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ self.0 ^= rhs.0;
+ }
+}
+
+impl<const LANES: usize> core::ops::BitXorAssign<bool> for BitMask<LANES>
+where
+ Self: LanesAtMost64,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: bool) {
+ *self ^= Self::splat(rhs);
+ }
+}
--- /dev/null
+//! Masks that take up full SIMD vector registers.
+
+/// The error type returned when converting an integer to a mask fails.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromMaskError(());
+
+impl core::fmt::Display for TryFromMaskError {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ write!(
+ f,
+ "mask vector must have all bits set or unset in each lane"
+ )
+ }
+}
+
+macro_rules! define_mask {
+ { $(#[$attr:meta])* struct $name:ident<const $lanes:ident: usize>($type:ty); } => {
+ $(#[$attr])*
+ #[derive(Copy, Clone, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
+ #[repr(transparent)]
+ pub struct $name<const $lanes: usize>($type);
+
+ impl<const $lanes: usize> $name<$lanes> {
+ /// Construct a mask by setting all lanes to the given value.
+ pub fn splat(value: bool) -> Self {
+ Self(<$type>::splat(
+ if value {
+ -1
+ } else {
+ 0
+ }
+ ))
+ }
+
+ /// Tests the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn test(&self, lane: usize) -> bool {
+ assert!(lane < LANES, "lane index out of range");
+ self.0[lane] == -1
+ }
+
+ /// Sets the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn set(&mut self, lane: usize, value: bool) {
+ assert!(lane < LANES, "lane index out of range");
+ self.0[lane] = if value {
+ -1
+ } else {
+ 0
+ }
+ }
+ }
+
+ impl<const $lanes: usize> core::convert::From<bool> for $name<$lanes> {
+ fn from(value: bool) -> Self {
+ Self::splat(value)
+ }
+ }
+
+ impl<const $lanes: usize> core::convert::TryFrom<$type> for $name<$lanes> {
+ type Error = TryFromMaskError;
+ fn try_from(value: $type) -> Result<Self, Self::Error> {
+ if value.as_slice().iter().all(|x| *x == 0 || *x == -1) {
+ Ok(Self(value))
+ } else {
+ Err(TryFromMaskError(()))
+ }
+ }
+ }
+
+ impl<const $lanes: usize> core::convert::From<$name<$lanes>> for $type {
+ fn from(value: $name<$lanes>) -> Self {
+ value.0
+ }
+ }
+
+ impl<const $lanes: usize> core::convert::From<crate::BitMask<$lanes>> for $name<$lanes>
+ where
+ crate::BitMask<$lanes>: crate::LanesAtMost64,
+ {
+ fn from(value: crate::BitMask<$lanes>) -> Self {
+ // TODO use an intrinsic to do this efficiently (with LLVM's sext instruction)
+ let mut mask = Self::splat(false);
+ for lane in 0..LANES {
+ mask.set(lane, value.test(lane));
+ }
+ mask
+ }
+ }
+
+ impl<const $lanes: usize> core::convert::From<$name<$lanes>> for crate::BitMask<$lanes>
+ where
+ crate::BitMask<$lanes>: crate::LanesAtMost64,
+ {
+ fn from(value: $name<$lanes>) -> Self {
+ // TODO use an intrinsic to do this efficiently (with LLVM's trunc instruction)
+ let mut mask = Self::splat(false);
+ for lane in 0..LANES {
+ mask.set(lane, value.test(lane));
+ }
+ mask
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ f.debug_list()
+ .entries((0..LANES).map(|lane| self.test(lane)))
+ .finish()
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::Binary for $name<$lanes> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::Binary::fmt(&self.0, f)
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::Octal for $name<$lanes> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::Octal::fmt(&self.0, f)
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::LowerHex for $name<$lanes> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::LowerHex::fmt(&self.0, f)
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::UpperHex for $name<$lanes> {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::UpperHex::fmt(&self.0, f)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAnd for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: Self) -> Self {
+ Self(self.0 & rhs.0)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: bool) -> Self {
+ self & Self::splat(rhs)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool {
+ type Output = $name<LANES>;
+ #[inline]
+ fn bitand(self, rhs: $name<LANES>) -> $name<LANES> {
+ $name::<LANES>::splat(self) & rhs
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitOr for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self {
+ Self(self.0 | rhs.0)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: bool) -> Self {
+ self | Self::splat(rhs)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool {
+ type Output = $name<LANES>;
+ #[inline]
+ fn bitor(self, rhs: $name<LANES>) -> $name<LANES> {
+ $name::<LANES>::splat(self) | rhs
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitXor for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ Self(self.0 ^ rhs.0)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES> {
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: bool) -> Self::Output {
+ self ^ Self::splat(rhs)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool {
+ type Output = $name<LANES>;
+ #[inline]
+ fn bitxor(self, rhs: $name<LANES>) -> Self::Output {
+ $name::<LANES>::splat(self) ^ rhs
+ }
+ }
+
+ impl<const LANES: usize> core::ops::Not for $name<LANES> {
+ type Output = $name<LANES>;
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self(!self.0)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES> {
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ self.0 &= rhs.0;
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES> {
+ #[inline]
+ fn bitand_assign(&mut self, rhs: bool) {
+ *self &= Self::splat(rhs);
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES> {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ self.0 |= rhs.0;
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES> {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: bool) {
+ *self |= Self::splat(rhs);
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES> {
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ self.0 ^= rhs.0;
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES> {
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: bool) {
+ *self ^= Self::splat(rhs);
+ }
+ }
+ }
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdI8](crate::SimdI8), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMask8<const LANES: usize>(crate::SimdI8<LANES>);
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdI16](crate::SimdI16), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMask16<const LANES: usize>(crate::SimdI16<LANES>);
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdI32](crate::SimdI32), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMask32<const LANES: usize>(crate::SimdI32<LANES>);
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdI64](crate::SimdI64), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMask64<const LANES: usize>(crate::SimdI64<LANES>);
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdI128](crate::SimdI128), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMask128<const LANES: usize>(crate::SimdI128<LANES>);
+}
+
+define_mask! {
+ /// A mask equivalent to [SimdIsize](crate::SimdIsize), where all bits in the lane must be either set
+ /// or unset.
+ struct SimdMaskSize<const LANES: usize>(crate::SimdIsize<LANES>);
+}
+++ /dev/null
-//! Masks that take up full SIMD vector registers.
-
-/// The error type returned when converting an integer to a mask fails.
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub struct TryFromMaskError(());
-
-impl core::fmt::Display for TryFromMaskError {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- write!(f, "mask vector must have all bits set or unset in each lane")
- }
-}
-
-macro_rules! define_mask {
- { $(#[$attr:meta])* struct $name:ident<const $lanes:ident: usize>($type:ty); } => {
- $(#[$attr])*
- #[derive(Copy, Clone, Default, PartialEq, PartialOrd, Eq, Ord, Hash)]
- #[repr(transparent)]
- pub struct $name<const $lanes: usize>($type);
-
- delegate_ops_to_inner! { $name }
-
- impl<const $lanes: usize> $name<$lanes> {
- /// Construct a mask by setting all lanes to the given value.
- pub fn splat(value: bool) -> Self {
- Self(<$type>::splat(
- if value {
- -1
- } else {
- 0
- }
- ))
- }
-
- /// Tests the value of the specified lane.
- ///
- /// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
- #[inline]
- pub fn test(&self, lane: usize) -> bool {
- self.0[lane] == -1
- }
-
- /// Sets the value of the specified lane.
- ///
- /// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
- #[inline]
- pub fn set(&mut self, lane: usize, value: bool) {
- self.0[lane] = if value {
- -1
- } else {
- 0
- }
- }
- }
-
- impl<const $lanes: usize> core::convert::From<bool> for $name<$lanes> {
- fn from(value: bool) -> Self {
- Self::splat(value)
- }
- }
-
- impl<const $lanes: usize> core::convert::TryFrom<$type> for $name<$lanes> {
- type Error = TryFromMaskError;
- fn try_from(value: $type) -> Result<Self, Self::Error> {
- if value.as_slice().iter().all(|x| *x == 0 || *x == -1) {
- Ok(Self(value))
- } else {
- Err(TryFromMaskError(()))
- }
- }
- }
-
- impl<const $lanes: usize> core::convert::From<$name<$lanes>> for $type {
- fn from(value: $name<$lanes>) -> Self {
- value.0
- }
- }
-
- impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- f.debug_list()
- .entries((0..LANES).map(|lane| self.test(lane)))
- .finish()
- }
- }
-
- impl<const $lanes: usize> core::fmt::Binary for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- core::fmt::Binary::fmt(&self.0, f)
- }
- }
-
- impl<const $lanes: usize> core::fmt::Octal for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- core::fmt::Octal::fmt(&self.0, f)
- }
- }
-
- impl<const $lanes: usize> core::fmt::LowerHex for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- core::fmt::LowerHex::fmt(&self.0, f)
- }
- }
-
- impl<const $lanes: usize> core::fmt::UpperHex for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- core::fmt::UpperHex::fmt(&self.0, f)
- }
- }
- }
-}
-
-define_mask! {
- /// A mask equivalent to [SimdI8](crate::SimdI8), where all bits in the lane must be either set
- /// or unset.
- struct SimdI8Mask<const LANES: usize>(crate::SimdI8<LANES>);
-}
-
-define_mask! {
- /// A mask equivalent to [SimdI16](crate::SimdI16), where all bits in the lane must be either set
- /// or unset.
- struct SimdI16Mask<const LANES: usize>(crate::SimdI16<LANES>);
-}
-
-define_mask! {
- /// A mask equivalent to [SimdI32](crate::SimdI32), where all bits in the lane must be either set
- /// or unset.
- struct SimdI32Mask<const LANES: usize>(crate::SimdI32<LANES>);
-}
-
-define_mask! {
- /// A mask equivalent to [SimdI64](crate::SimdI64), where all bits in the lane must be either set
- /// or unset.
- struct SimdI64Mask<const LANES: usize>(crate::SimdI64<LANES>);
-}
-
-define_mask! {
- /// A mask equivalent to [SimdI128](crate::SimdI128), where all bits in the lane must be either set
- /// or unset.
- struct SimdI128Mask<const LANES: usize>(crate::SimdI64<LANES>);
-}
-
-define_mask! {
- /// A mask equivalent to [SimdIsize](crate::SimdIsize), where all bits in the lane must be either set
- /// or unset.
- struct SimdIsizeMask<const LANES: usize>(crate::SimdI64<LANES>);
-}
-
-macro_rules! implement_mask_ext {
- { $($vector:ident => $mask:ident,)* } => {
- $(
- impl<const LANES: usize> crate::masks::MaskExt<$mask<LANES>> for crate::$vector<LANES> {
- #[inline]
- fn lanes_eq(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_eq(self, other) }
- }
-
- #[inline]
- fn lanes_ne(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_ne(self, other) }
- }
-
- #[inline]
- fn lanes_lt(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_lt(self, other) }
- }
-
- #[inline]
- fn lanes_gt(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_gt(self, other) }
- }
-
- #[inline]
- fn lanes_le(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_le(self, other) }
- }
-
- #[inline]
- fn lanes_ge(&self, other: &Self) -> $mask<LANES> {
- unsafe { crate::intrinsics::simd_ge(self, other) }
- }
- }
- )*
- }
-}
-
-implement_mask_ext! {
- SimdI8 => SimdI8Mask,
- SimdI16 => SimdI16Mask,
- SimdI32 => SimdI32Mask,
- SimdI64 => SimdI64Mask,
- SimdI128 => SimdI128Mask,
- SimdIsize => SimdIsizeMask,
-
- SimdU8 => SimdI8Mask,
- SimdU16 => SimdI16Mask,
- SimdU32 => SimdI32Mask,
- SimdU64 => SimdI64Mask,
- SimdU128 => SimdI128Mask,
- SimdUsize => SimdIsizeMask,
-
- SimdF32 => SimdI32Mask,
- SimdF64 => SimdI64Mask,
-}
//! Types and traits associated with masking lanes of vectors.
#![allow(non_camel_case_types)]
-/// Implements bitwise ops on mask types by delegating the operators to the inner type.
-macro_rules! delegate_ops_to_inner {
- { $name:ident } => {
- impl<const LANES: usize> core::ops::BitAnd for $name<LANES> {
+mod full_masks;
+pub use full_masks::*;
+
+mod bitmask;
+pub use bitmask::*;
+
+macro_rules! define_opaque_mask {
+ {
+ $(#[$attr:meta])*
+ struct $name:ident<const $lanes:ident: usize>($inner_ty:ty);
+ } => {
+ $(#[$attr])*
+ #[allow(non_camel_case_types)]
+ pub struct $name<const $lanes: usize>($inner_ty) where BitMask<$lanes>: LanesAtMost64;
+
+ impl<const $lanes: usize> $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ /// Construct a mask by setting all lanes to the given value.
+ pub fn splat(value: bool) -> Self {
+ Self(<$inner_ty>::splat(value))
+ }
+
+ /// Tests the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn test(&self, lane: usize) -> bool {
+ self.0.test(lane)
+ }
+
+ /// Sets the value of the specified lane.
+ ///
+ /// # Panics
+ /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
+ #[inline]
+ pub fn set(&mut self, lane: usize, value: bool) {
+ self.0.set(lane, value);
+ }
+ }
+
+ impl<const $lanes: usize> From<BitMask<$lanes>> for $name<$lanes>
+ where
+ BitMask<$lanes>: LanesAtMost64,
+ {
+ fn from(value: BitMask<$lanes>) -> Self {
+ Self(value.into())
+ }
+ }
+
+ impl<const $lanes: usize> From<$name<$lanes>> for crate::BitMask<$lanes>
+ where
+ BitMask<$lanes>: LanesAtMost64,
+ {
+ fn from(value: $name<$lanes>) -> Self {
+ value.0.into()
+ }
+ }
+
+ impl<const $lanes: usize> Copy for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {}
+
+ impl<const $lanes: usize> Clone for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ impl<const $lanes: usize> Default for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ #[inline]
+ fn default() -> Self {
+ Self::splat(false)
+ }
+ }
+
+ impl<const $lanes: usize> PartialEq for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ impl<const $lanes: usize> PartialOrd for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+ }
+
+ impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> where BitMask<$lanes>: LanesAtMost64 {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ core::fmt::Debug::fmt(&self.0, f)
+ }
+ }
+
+ impl<const LANES: usize> core::ops::BitAnd for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitand(self, rhs: Self) -> Self {
    Self(self.0 & rhs.0)
}
}
- impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitAnd<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitand(self, rhs: bool) -> Self {
    self & Self::splat(rhs)
}
}
- impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool {
+ impl<const LANES: usize> core::ops::BitAnd<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
type Output = $name<LANES>;
#[inline]
fn bitand(self, rhs: $name<LANES>) -> $name<LANES> {
    $name::<LANES>::splat(self) & rhs
}
}
- impl<const LANES: usize> core::ops::BitOr for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitOr for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self {
    Self(self.0 | rhs.0)
}
}
- impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitOr<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitor(self, rhs: bool) -> Self {
    self | Self::splat(rhs)
}
}
- impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool {
+ impl<const LANES: usize> core::ops::BitOr<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
type Output = $name<LANES>;
#[inline]
fn bitor(self, rhs: $name<LANES>) -> $name<LANES> {
    $name::<LANES>::splat(self) | rhs
}
}
- impl<const LANES: usize> core::ops::BitXor for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitXor for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitxor(self, rhs: Self) -> Self::Output {
    Self(self.0 ^ rhs.0)
}
}
- impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitXor<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = Self;
#[inline]
fn bitxor(self, rhs: bool) -> Self::Output {
    self ^ Self::splat(rhs)
}
}
- impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool {
+ impl<const LANES: usize> core::ops::BitXor<$name<LANES>> for bool where BitMask<LANES>: LanesAtMost64 {
type Output = $name<LANES>;
#[inline]
fn bitxor(self, rhs: $name<LANES>) -> Self::Output {
    $name::<LANES>::splat(self) ^ rhs
}
}
- impl<const LANES: usize> core::ops::Not for $name<LANES> {
+ impl<const LANES: usize> core::ops::Not for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
type Output = $name<LANES>;
#[inline]
fn not(self) -> Self::Output {
    Self(!self.0)
}
}
- impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitAndAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
self.0 &= rhs.0;
}
}
- impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitAndAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
*self &= Self::splat(rhs);
}
}
- impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitOrAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0;
}
}
- impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitOrAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
*self |= Self::splat(rhs);
}
}
- impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitXorAssign for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
self.0 ^= rhs.0;
}
}
- impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES> {
+ impl<const LANES: usize> core::ops::BitXorAssign<bool> for $name<LANES> where BitMask<LANES>: LanesAtMost64 {
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
*self ^= Self::splat(rhs);
}
}
- }
-}
-
-pub mod full_masks;
-
-macro_rules! define_opaque_mask {
- {
- $(#[$attr:meta])*
- struct $name:ident<const $lanes:ident: usize>($inner_ty:ty);
- } => {
- $(#[$attr])*
- #[allow(non_camel_case_types)]
- pub struct $name<const $lanes: usize>($inner_ty);
-
- delegate_ops_to_inner! { $name }
-
- impl<const $lanes: usize> $name<$lanes> {
- /// Construct a mask by setting all lanes to the given value.
- pub fn splat(value: bool) -> Self {
- Self(<$inner_ty>::splat(value))
- }
-
- /// Tests the value of the specified lane.
- ///
- /// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
- #[inline]
- pub fn test(&self, lane: usize) -> bool {
- self.0.test(lane)
- }
-
- /// Sets the value of the specified lane.
- ///
- /// # Panics
- /// Panics if `lane` is greater than or equal to the number of lanes in the vector.
- #[inline]
- pub fn set(&mut self, lane: usize, value: bool) {
- self.0.set(lane, value);
- }
- }
-
- impl<const $lanes: usize> Copy for $name<$lanes> {}
-
- impl<const $lanes: usize> Clone for $name<$lanes> {
- #[inline]
- fn clone(&self) -> Self {
- *self
- }
- }
-
- impl<const $lanes: usize> Default for $name<$lanes> {
- #[inline]
- fn default() -> Self {
- Self::splat(false)
- }
- }
-
- impl<const $lanes: usize> PartialEq for $name<$lanes> {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- self.0 == other.0
- }
- }
-
- impl<const $lanes: usize> PartialOrd for $name<$lanes> {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
- self.0.partial_cmp(&other.0)
- }
- }
-
- impl<const $lanes: usize> core::fmt::Debug for $name<$lanes> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
- core::fmt::Debug::fmt(&self.0, f)
- }
- }
};
}
define_opaque_mask! {
    /// Mask for vectors with `LANES` 8-bit elements.
///
/// The layout of this type is unspecified.
- struct Mask8<const LANES: usize>(full_masks::SimdI8Mask<LANES>);
+ struct Mask8<const LANES: usize>(SimdMask8<LANES>);
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 16-bit elements.
///
/// The layout of this type is unspecified.
- struct Mask16<const LANES: usize>(full_masks::SimdI16Mask<LANES>);
+ struct Mask16<const LANES: usize>(SimdMask16<LANES>);
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 32-bit elements.
///
/// The layout of this type is unspecified.
- struct Mask32<const LANES: usize>(full_masks::SimdI32Mask<LANES>);
+ struct Mask32<const LANES: usize>(SimdMask32<LANES>);
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 64-bit elements.
///
/// The layout of this type is unspecified.
- struct Mask64<const LANES: usize>(full_masks::SimdI64Mask<LANES>);
+ struct Mask64<const LANES: usize>(SimdMask64<LANES>);
}
define_opaque_mask! {
/// Mask for vectors with `LANES` 128-bit elements.
///
/// The layout of this type is unspecified.
- struct Mask128<const LANES: usize>(full_masks::SimdI128Mask<LANES>);
+ struct Mask128<const LANES: usize>(SimdMask128<LANES>);
}
define_opaque_mask! {
/// Mask for vectors with `LANES` pointer-width elements.
///
/// The layout of this type is unspecified.
- struct MaskSize<const LANES: usize>(full_masks::SimdIsizeMask<LANES>);
-}
-
-/// Mask-related operations using a particular mask layout.
-pub trait MaskExt<Mask> {
- /// Test if each lane is equal to the corresponding lane in `other`.
- fn lanes_eq(&self, other: &Self) -> Mask;
-
- /// Test if each lane is not equal to the corresponding lane in `other`.
- fn lanes_ne(&self, other: &Self) -> Mask;
-
- /// Test if each lane is less than the corresponding lane in `other`.
- fn lanes_lt(&self, other: &Self) -> Mask;
-
- /// Test if each lane is greater than the corresponding lane in `other`.
- fn lanes_gt(&self, other: &Self) -> Mask;
-
- /// Test if each lane is less than or equal to the corresponding lane in `other`.
- fn lanes_le(&self, other: &Self) -> Mask;
-
- /// Test if each lane is greater than or equal to the corresponding lane in `other`.
- fn lanes_ge(&self, other: &Self) -> Mask;
+ struct MaskSize<const LANES: usize>(SimdMaskSize<LANES>);
}
macro_rules! implement_mask_ops {
{ $($vector:ident => $mask:ident,)* } => {
$(
- impl<const LANES: usize> crate::$vector<LANES> {
+ impl<const LANES: usize> crate::$vector<LANES> where BitMask<LANES>: LanesAtMost64 {
/// Test if each lane is equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_eq(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_eq(self, other))
+ unsafe { $mask(crate::intrinsics::simd_eq(self, other)) }
}
/// Test if each lane is not equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_ne(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_ne(self, other))
+ unsafe { $mask(crate::intrinsics::simd_ne(self, other)) }
}
/// Test if each lane is less than the corresponding lane in `other`.
#[inline]
pub fn lanes_lt(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_lt(self, other))
+ unsafe { $mask(crate::intrinsics::simd_lt(self, other)) }
}
/// Test if each lane is greater than the corresponding lane in `other`.
#[inline]
pub fn lanes_gt(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_gt(self, other))
+ unsafe { $mask(crate::intrinsics::simd_gt(self, other)) }
}
/// Test if each lane is less than or equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_le(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_le(self, other))
+ unsafe { $mask(crate::intrinsics::simd_le(self, other)) }
}
/// Test if each lane is greater than or equal to the corresponding lane in `other`.
#[inline]
pub fn lanes_ge(&self, other: &Self) -> $mask<LANES> {
- $mask(MaskExt::lanes_ge(self, other))
+ unsafe { $mask(crate::intrinsics::simd_ge(self, other)) }
}
}
)*