+/// Lane-wise XOR between a mask and a scalar `bool` broadcast to every lane.
+impl<T, const LANES: usize> core::ops::BitXor<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: bool) -> Self::Output {
+ // Splat the scalar into a full mask, then delegate to the Mask ^ Mask impl.
+ self ^ Self::splat(rhs)
+ }
+}
+
+/// Lane-wise XOR with the scalar `bool` on the left-hand side; the bool is
+/// broadcast to every lane before combining.
+impl<T, const LANES: usize> core::ops::BitXor<Mask<T, LANES>> for bool
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ fn bitxor(self, rhs: Mask<T, LANES>) -> Self::Output {
+ // Splat the scalar into a full mask, then delegate to the Mask ^ Mask impl.
+ Mask::splat(self) ^ rhs
+ }
+}
+
+/// Lane-wise logical negation of a mask (`!m` flips every lane).
+impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ type Output = Mask<T, LANES>;
+ #[inline]
+ fn not(self) -> Self::Output {
+ // Delegate to the inner representation's Not impl and rewrap.
+ Self(!self.0)
+ }
+}
+
+/// Lane-wise `&=` between two masks.
+impl<T, const LANES: usize> core::ops::BitAndAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ // Combine the inner representations and store the result back in place.
+ self.0 = self.0 & rhs.0;
+ }
+}
+
+/// Lane-wise `&=` with a scalar `bool` broadcast to every lane.
+impl<T, const LANES: usize> core::ops::BitAndAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitand_assign(&mut self, rhs: bool) {
+ // Splat the scalar, then delegate to the Mask &= Mask impl above.
+ *self &= Self::splat(rhs);
+ }
+}
+
+/// Lane-wise `|=` between two masks.
+impl<T, const LANES: usize> core::ops::BitOrAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ // Combine the inner representations and store the result back in place.
+ self.0 = self.0 | rhs.0;
+ }
+}
+
+/// Lane-wise `|=` with a scalar `bool` broadcast to every lane.
+impl<T, const LANES: usize> core::ops::BitOrAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitor_assign(&mut self, rhs: bool) {
+ // Splat the scalar, then delegate to the Mask |= Mask impl above.
+ *self |= Self::splat(rhs);
+ }
+}
+
+/// Lane-wise `^=` between two masks.
+impl<T, const LANES: usize> core::ops::BitXorAssign for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ // Combine the inner representations and store the result back in place.
+ self.0 = self.0 ^ rhs.0;
+ }
+}
+
+/// Lane-wise `^=` with a scalar `bool` broadcast to every lane.
+impl<T, const LANES: usize> core::ops::BitXorAssign<bool> for Mask<T, LANES>
+where
+ T: MaskElement,
+ LaneCount<LANES>: SupportedLaneCount,
+{
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: bool) {
+ // Splat the scalar, then delegate to the Mask ^= Mask impl above.
+ *self ^= Self::splat(rhs);
+ }
+}
+
+/// Vector of eight 8-bit masks
+pub type mask8x8 = Mask<i8, 8>;
+
+/// Vector of 16 8-bit masks
+pub type mask8x16 = Mask<i8, 16>;
+
+/// Vector of 32 8-bit masks
+pub type mask8x32 = Mask<i8, 32>;
+
+/// Vector of 64 8-bit masks
+pub type mask8x64 = Mask<i8, 64>;
+
+/// Vector of four 16-bit masks
+pub type mask16x4 = Mask<i16, 4>;
+
+/// Vector of eight 16-bit masks
+pub type mask16x8 = Mask<i16, 8>;
+
+/// Vector of 16 16-bit masks
+pub type mask16x16 = Mask<i16, 16>;
+
+/// Vector of 32 16-bit masks
+pub type mask16x32 = Mask<i16, 32>;
+
+/// Vector of two 32-bit masks
+pub type mask32x2 = Mask<i32, 2>;
+
+/// Vector of four 32-bit masks
+pub type mask32x4 = Mask<i32, 4>;
+
+/// Vector of eight 32-bit masks
+pub type mask32x8 = Mask<i32, 8>;
+
+/// Vector of 16 32-bit masks
+pub type mask32x16 = Mask<i32, 16>;
+
+/// Vector of two 64-bit masks
+pub type mask64x2 = Mask<i64, 2>;
+
+/// Vector of four 64-bit masks
+pub type mask64x4 = Mask<i64, 4>;
+
+/// Vector of eight 64-bit masks
+pub type mask64x8 = Mask<i64, 8>;
+
+/// Vector of two pointer-width masks
+pub type masksizex2 = Mask<isize, 2>;
+
+/// Vector of four pointer-width masks
+pub type masksizex4 = Mask<isize, 4>;
+
+/// Vector of eight pointer-width masks
+pub type masksizex8 = Mask<isize, 8>;
+
+macro_rules! impl_from {
+ { $from:ty => $($to:ty),* } => {
+ $(
+ impl<const LANES: usize> From<Mask<$from, LANES>> for Mask<$to, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ fn from(value: Mask<$from, LANES>) -> Self {
+ Self(value.0.convert())
+ }
+ }
+ )*
+ }