1 use crate::LanesAtMost32;
3 /// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
///
/// A shift amount is invalid when it is negative or at least as large as the bit
/// width of `T`, mirroring the overflow rules for scalar shifts.
fn invalid_shift_rhs<T>(rhs: T) -> bool
where
    T: Default + PartialOrd + core::convert::TryFrom<usize>,
    <T as core::convert::TryFrom<usize>>::Error: core::fmt::Debug,
{
    // Bit width of `T`. The conversion cannot fail for the integer types this
    // helper is instantiated with (the width always fits), hence the `unwrap`.
    let bits_in_type = T::try_from(8 * core::mem::size_of::<T>()).unwrap();
    // `T::default()` is zero for all integer types, so this rejects negative
    // amounts (signed types only) and amounts >= the bit width.
    rhs < T::default() || rhs >= bits_in_type
}
/// Automatically implements operators over references in addition to the provided operator.
///
/// Three arms are provided:
/// * binary op    — forwards `T op U` to `&T op U`, `T op &U`, and `&T op &U`
/// * assignment op — forwards `T op= U` to `T op= &U`
/// * unary op     — forwards `op T` to `op &T`
///
/// The reference impls simply dereference and delegate to the by-value impl,
/// so only the by-value body has to be written at the call site.
macro_rules! impl_ref_ops {
    // binary op
    {
        impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
        where
            $($bound:path: LanesAtMost32,)*
        {
            type Output = $output:ty;

            $(#[$attrs:meta])*
            fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt
        }
    } => {
        impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = $output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
        }

        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
                core::ops::$trait::$fn($self_tok, *$rhs_arg)
            }
        }

        impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: $rhs) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok, $rhs_arg)
            }
        }

        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok, *$rhs_arg)
            }
        }
    };

    // binary assignment op
    {
        impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty
        where
            $($bound:path: LanesAtMost32,)*
        {
            $(#[$attrs:meta])*
            fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
        }
    } => {
        impl<const $lanes: usize> core::ops::$trait<$rhs> for $type
        where
            $($bound: LanesAtMost32,)*
        {
            $(#[$attrs])*
            fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
        }

        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
        where
            $($bound: LanesAtMost32,)*
        {
            $(#[$attrs])*
            fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
                core::ops::$trait::$fn($self_tok, *$rhs_arg)
            }
        }
    };

    // unary op
    {
        impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty
        where
            $($bound:path: LanesAtMost32,)*
        {
            type Output = $output:ty;
            fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
        }
    } => {
        impl<const $lanes: usize> core::ops::$trait for $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = $output;
            fn $fn($self_tok) -> Self::Output $body
        }

        impl<const $lanes: usize> core::ops::$trait for &'_ $type
        where
            $($bound: LanesAtMost32,)*
        {
            type Output = <$type as core::ops::$trait>::Output;
            fn $fn($self_tok) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok)
            }
        }
    };
}
/// Automatically implements operators over vectors and scalars for a particular vector.
///
/// The named arms (`impl Add for ...`, etc.) dispatch to the internal `@binary`
/// arm, which generates vector-vector, vector-scalar, and scalar-vector impls
/// (plus the corresponding assignment impls), all wrapped in `impl_ref_ops!` so
/// reference forms are covered too. `Not`, `Neg`, and `Index` get bespoke impls.
macro_rules! impl_op {
    { impl Add for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Add::add, AddAssign::add_assign, simd_add }
    };
    { impl Sub for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Sub::sub, SubAssign::sub_assign, simd_sub }
    };
    { impl Mul for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Mul::mul, MulAssign::mul_assign, simd_mul }
    };
    { impl Div for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Div::div, DivAssign::div_assign, simd_div }
    };
    { impl Rem for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Rem::rem, RemAssign::rem_assign, simd_rem }
    };
    { impl Shl for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Shl::shl, ShlAssign::shl_assign, simd_shl }
    };
    { impl Shr for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Shr::shr, ShrAssign::shr_assign, simd_shr }
    };
    { impl BitAnd for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitAnd::bitand, BitAndAssign::bitand_assign, simd_and }
    };
    { impl BitOr for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitOr::bitor, BitOrAssign::bitor_assign, simd_or }
    };
    { impl BitXor for $type:ident, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitXor::bitxor, BitXorAssign::bitxor_assign, simd_xor }
    };

    { impl Not for $type:ident, $scalar:ty } => {
        impl_ref_ops! {
            impl<const LANES: usize> core::ops::Not for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                type Output = Self;
                fn not(self) -> Self::Output {
                    // `!x` == `x ^ !0` lane-wise; `!<$scalar>::default()` is all-ones.
                    self ^ Self::splat(!<$scalar>::default())
                }
            }
        }
    };

    { impl Neg for $type:ident, $scalar:ty } => {
        impl_ref_ops! {
            impl<const LANES: usize> core::ops::Neg for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                type Output = Self;
                fn neg(self) -> Self::Output {
                    unsafe { crate::intrinsics::simd_neg(self) }
                }
            }
        }
    };

    { impl Index for $type:ident, $scalar:ty } => {
        impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
        where
            crate::$type<LANES>: LanesAtMost32,
            I: core::slice::SliceIndex<[$scalar]>,
        {
            type Output = I::Output;
            fn index(&self, index: I) -> &Self::Output {
                // Index the vector through its slice view.
                let slice: &[_] = self.as_ref();
                &slice[index]
            }
        }

        impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
        where
            crate::$type<LANES>: LanesAtMost32,
            I: core::slice::SliceIndex<[$scalar]>,
        {
            fn index_mut(&mut self, index: I) -> &mut Self::Output {
                let slice: &mut [_] = self.as_mut();
                &mut slice[index]
            }
        }
    };

    // generic binary op with assignment when output is `Self`
    { @binary $type:ident, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
        impl_ref_ops! {
            impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                type Output = Self;

                #[inline]
                fn $trait_fn(self, rhs: Self) -> Self::Output {
                    unsafe {
                        crate::intrinsics::$intrinsic(self, rhs)
                    }
                }
            }
        }

        impl_ref_ops! {
            impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                type Output = Self;

                #[inline]
                fn $trait_fn(self, rhs: $scalar) -> Self::Output {
                    // Splat the scalar and reuse the vector-vector impl.
                    core::ops::$trait::$trait_fn(self, Self::splat(rhs))
                }
            }
        }

        impl_ref_ops! {
            impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                type Output = crate::$type<LANES>;

                #[inline]
                fn $trait_fn(self, rhs: crate::$type<LANES>) -> Self::Output {
                    core::ops::$trait::$trait_fn(crate::$type::splat(self), rhs)
                }
            }
        }

        impl_ref_ops! {
            impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                #[inline]
                fn $assign_trait_fn(&mut self, rhs: Self) {
                    unsafe {
                        *self = crate::intrinsics::$intrinsic(*self, rhs);
                    }
                }
            }
        }

        impl_ref_ops! {
            impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES>
            where
                crate::$type<LANES>: LanesAtMost32,
            {
                #[inline]
                fn $assign_trait_fn(&mut self, rhs: $scalar) {
                    core::ops::$assign_trait::$assign_trait_fn(self, Self::splat(rhs));
                }
            }
        }
    };
}
/// Implements floating-point operators for the provided types.
///
/// Invoked as `impl_float_ops! { f32 => SimdF32a, SimdF32b; f64 => ...; }` —
/// for each scalar, every listed vector gets the full set of float operators.
macro_rules! impl_float_ops {
    { $($scalar:ty => $($vector:ident),*;)* } => {
        $( // scalar
            $( // vector
                impl_op! { impl Add for $vector, $scalar }
                impl_op! { impl Sub for $vector, $scalar }
                impl_op! { impl Mul for $vector, $scalar }
                impl_op! { impl Div for $vector, $scalar }
                impl_op! { impl Rem for $vector, $scalar }
                impl_op! { impl Neg for $vector, $scalar }
                impl_op! { impl Index for $vector, $scalar }
            )*
        )*
    };
}
/// Implements unsigned integer operators for the provided types.
///
/// Besides the straightforward operators delegated to `impl_op!`, division,
/// remainder, and shifts get hand-written impls that panic on the same
/// conditions scalar integer arithmetic panics on (divide by zero, overflowing
/// `MIN / -1`, and shift amounts >= the bit width), instead of invoking the
/// intrinsics with undefined behavior.
macro_rules! impl_unsigned_int_ops {
    { $($scalar:ty => $($vector:ident),*;)* } => {
        $( // scalar
            $( // vector
                impl_op! { impl Add for $vector, $scalar }
                impl_op! { impl Sub for $vector, $scalar }
                impl_op! { impl Mul for $vector, $scalar }
                impl_op! { impl BitAnd for $vector, $scalar }
                impl_op! { impl BitOr for $vector, $scalar }
                impl_op! { impl BitXor for $vector, $scalar }
                impl_op! { impl Not for $vector, $scalar }
                impl_op! { impl Index for $vector, $scalar }

                // Integers panic on divide by 0
                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn div(self, rhs: Self) -> Self::Output {
                            if rhs.as_slice()
                                .iter()
                                .any(|x| *x == 0)
                            {
                                panic!("attempt to divide by zero");
                            }

                            // Guards for div(MIN, -1),
                            // this check only applies to signed ints
                            // (`MIN != 0` is false for unsigned scalars, so the
                            // rest short-circuits; `-1 as _` wraps for unsigned).
                            if <$scalar>::MIN != 0 && self.as_slice().iter()
                                    .zip(rhs.as_slice().iter())
                                    .any(|(x,y)| *x == <$scalar>::MIN && *y == -1 as _) {
                                panic!("attempt to divide with overflow");
                            }
                            unsafe { crate::intrinsics::simd_div(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn div(self, rhs: $scalar) -> Self::Output {
                            if rhs == 0 {
                                panic!("attempt to divide by zero");
                            }
                            if <$scalar>::MIN != 0 &&
                                self.as_slice().iter().any(|x| *x == <$scalar>::MIN) &&
                                rhs == -1 as _ {
                                panic!("attempt to divide with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_div(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = crate::$vector<LANES>;

                        #[inline]
                        fn div(self, rhs: crate::$vector<LANES>) -> Self::Output {
                            crate::$vector::splat(self) / rhs
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn div_assign(&mut self, rhs: Self) {
                            *self = *self / rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn div_assign(&mut self, rhs: $scalar) {
                            *self = *self / rhs;
                        }
                    }
                }

                // remainder panics on zero divisor
                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn rem(self, rhs: Self) -> Self::Output {
                            if rhs.as_slice()
                                .iter()
                                .any(|x| *x == 0)
                            {
                                panic!("attempt to calculate the remainder with a divisor of zero");
                            }

                            // Guards for rem(MIN, -1)
                            // this branch applies the check only to signed ints
                            if <$scalar>::MIN != 0 && self.as_slice().iter()
                                    .zip(rhs.as_slice().iter())
                                    .any(|(x,y)| *x == <$scalar>::MIN && *y == -1 as _) {
                                panic!("attempt to calculate the remainder with overflow");
                            }
                            unsafe { crate::intrinsics::simd_rem(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn rem(self, rhs: $scalar) -> Self::Output {
                            if rhs == 0 {
                                panic!("attempt to calculate the remainder with a divisor of zero");
                            }
                            if <$scalar>::MIN != 0 &&
                                self.as_slice().iter().any(|x| *x == <$scalar>::MIN) &&
                                rhs == -1 as _ {
                                panic!("attempt to calculate the remainder with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_rem(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = crate::$vector<LANES>;

                        #[inline]
                        fn rem(self, rhs: crate::$vector<LANES>) -> Self::Output {
                            crate::$vector::splat(self) % rhs
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn rem_assign(&mut self, rhs: Self) {
                            *self = *self % rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn rem_assign(&mut self, rhs: $scalar) {
                            *self = *self % rhs;
                        }
                    }
                }

                // shifts panic on overflow
                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn shl(self, rhs: Self) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            if rhs.as_slice()
                                .iter()
                                .copied()
                                .any(invalid_shift_rhs)
                            {
                                panic!("attempt to shift left with overflow");
                            }
                            unsafe { crate::intrinsics::simd_shl(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn shl(self, rhs: $scalar) -> Self::Output {
                            if invalid_shift_rhs(rhs) {
                                panic!("attempt to shift left with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_shl(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn shl_assign(&mut self, rhs: Self) {
                            *self = *self << rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn shl_assign(&mut self, rhs: $scalar) {
                            *self = *self << rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn shr(self, rhs: Self) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            if rhs.as_slice()
                                .iter()
                                .copied()
                                .any(invalid_shift_rhs)
                            {
                                panic!("attempt to shift with overflow");
                            }
                            unsafe { crate::intrinsics::simd_shr(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        type Output = Self;

                        #[inline]
                        fn shr(self, rhs: $scalar) -> Self::Output {
                            if invalid_shift_rhs(rhs) {
                                panic!("attempt to shift with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_shr(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn shr_assign(&mut self, rhs: Self) {
                            *self = *self >> rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES>
                    where
                        crate::$vector<LANES>: LanesAtMost32,
                    {
                        #[inline]
                        fn shr_assign(&mut self, rhs: $scalar) {
                            *self = *self >> rhs;
                        }
                    }
                }
            )*
        )*
    };
}
/// Implements signed integer operators for the provided types.
///
/// Delegates to `impl_unsigned_int_ops!` for the shared operator set (whose
/// div/rem guards already handle the signed `MIN / -1` overflow case), then
/// additionally implements `Neg` for each vector.
macro_rules! impl_signed_int_ops {
    { $($scalar:ty => $($vector:ident),*;)* } => {
        impl_unsigned_int_ops! { $($scalar => $($vector),*;)* }
        $( // scalar
            $( // vector
                impl_op! { impl Neg for $vector, $scalar }
            )*
        )*
    };
}
651 impl_unsigned_int_ops! {
659 impl_signed_int_ops! {