9 // Vectors of pointers are not for public use at the current time.
13 intrinsics, LaneCount, Mask, MaskElement, SimdPartialOrd, SupportedLaneCount, Swizzle,
16 /// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
18 /// Two vectors of the same type and length will, by convention, support the operators (+, *, etc.) that `T` does.
19 /// These take the lanes at each index on the left-hand side and right-hand side, perform the operation,
20 /// and return the result in the same lane in a vector of equal size. For a given operator, this is equivalent to zipping
21 /// the two arrays together and mapping the operator over each lane.
24 /// # #![feature(array_zip, portable_simd)]
25 /// # use core::simd::{Simd};
26 /// let a0: [i32; 4] = [-2, 0, 2, 4];
27 /// let a1 = [10, 9, 8, 7];
28 /// let zm_add = a0.zip(a1).map(|(lhs, rhs)| lhs + rhs);
29 /// let zm_mul = a0.zip(a1).map(|(lhs, rhs)| lhs * rhs);
31 /// // `Simd<T, N>` implements `From<[T; N]>`.
32 /// let (v0, v1) = (Simd::from(a0), Simd::from(a1));
33 /// // Which means arrays implement `Into<Simd<T, N>>`.
34 /// assert_eq!(v0 + v1, zm_add.into());
35 /// assert_eq!(v0 * v1, zm_mul.into());
38 /// `Simd` with integers has the quirk that these operations are also inherently wrapping, as if `T` were [`Wrapping<T>`].
39 /// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
40 /// This means there is no warning on overflows, even in "debug" builds.
41 /// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
42 /// and even "debug builds" are unlikely to tolerate the loss of performance.
43 /// You may want to consider using explicitly checked arithmetic if such is required.
44 /// Division by zero still causes a panic, so you may want to consider using floating point numbers if that is unacceptable.
46 /// [`Wrapping<T>`]: core::num::Wrapping
49 /// `Simd<T, N>` has a layout similar to `[T; N]` (identical "shapes"), but with a greater alignment.
50 /// `[T; N]` is aligned to `T`, but `Simd<T, N>` will have an alignment based on both `T` and `N`.
51 /// It is thus sound to [`transmute`] `Simd<T, N>` to `[T; N]`, and will typically optimize to zero cost,
52 /// but the reverse transmutation is more likely to require a copy the compiler cannot simply elide.
55 /// Due to Rust's safety guarantees, `Simd<T, N>` is currently passed to and from functions via memory, not SIMD registers,
56 /// except as an optimization. `#[inline]` hints are recommended on functions that accept `Simd<T, N>` or return it.
57 /// The need for this may be corrected in the future.
59 /// # Safe SIMD with Unsafe Rust
61 /// Operations with `Simd` are typically safe, but there are many reasons to want to combine SIMD with `unsafe` code.
62 /// Care must be taken to respect differences between `Simd` and other types it may be transformed into or derived from.
63 /// In particular, the layout of `Simd<T, N>` may be similar to `[T; N]`, and may allow some transmutations,
64 /// but references to `[T; N]` are not interchangeable with those to `Simd<T, N>`.
65 /// Thus, when using `unsafe` Rust to read and write `Simd<T, N>` through [raw pointers], it is a good idea to first try with
66 /// [`read_unaligned`] and [`write_unaligned`]. This is because:
67 /// - [`read`] and [`write`] require full alignment (in this case, `Simd<T, N>`'s alignment)
68 /// - the likely source for reading or destination for writing `Simd<T, N>` is [`[T]`](slice) and similar types, aligned to `T`
69 /// - combining these actions would violate the `unsafe` contract and explode the program into a puff of **undefined behavior**
70 /// - the compiler can implicitly adjust layouts to make unaligned reads or writes fully aligned if it sees the optimization
71 /// - most contemporary processors suffer no performance penalty for "unaligned" reads and writes that are aligned at runtime
73 /// By imposing fewer obligations, unaligned functions are less likely to make the program unsound,
74 /// and may be just as fast as stricter alternatives.
75 /// When trying to guarantee alignment, [`[T]::as_simd`][as_simd] is an option for converting `[T]` to `[Simd<T, N>]`,
76 /// and allows soundly operating on an aligned SIMD body, but it may cost more time when handling the scalar head and tail.
77 /// If these are not sufficient, then it is most ideal to design data structures to be already aligned
78 /// to the `Simd<T, N>` you wish to use before using `unsafe` Rust to read or write.
79 /// More conventional ways to compensate for these facts, like materializing `Simd` to or from an array first,
80 /// are handled by safe methods like [`Simd::from_array`] and [`Simd::from_slice`].
82 /// [`transmute`]: core::mem::transmute
83 /// [raw pointers]: pointer
84 /// [`read_unaligned`]: pointer::read_unaligned
85 /// [`write_unaligned`]: pointer::write_unaligned
86 /// [`read`]: pointer::read
87 /// [`write`]: pointer::write
88 /// [as_simd]: slice::as_simd
90 pub struct Simd<T, const LANES: usize>([T; LANES])
93 LaneCount<LANES>: SupportedLaneCount;
95 impl<T, const LANES: usize> Simd<T, LANES>
97 LaneCount<LANES>: SupportedLaneCount,
100 /// Number of lanes in this vector.
101 pub const LANES: usize = LANES;
103 /// Returns the number of lanes in this SIMD vector.
108 /// # #![feature(portable_simd)]
109 /// # use core::simd::u32x4;
110 /// let v = u32x4::splat(0);
111 /// assert_eq!(v.lanes(), 4);
113 pub const fn lanes(&self) -> usize {
117 /// Constructs a new SIMD vector with all lanes set to the given value.
122 /// # #![feature(portable_simd)]
123 /// # use core::simd::u32x4;
124 /// let v = u32x4::splat(8);
125 /// assert_eq!(v.as_array(), &[8, 8, 8, 8]);
127 pub fn splat(value: T) -> Self {
128 // This is preferred over `[value; LANES]`, since it's explicitly a splat:
129 // https://github.com/rust-lang/rust/issues/97804
131 impl<const LANES: usize> Swizzle<1, LANES> for Splat {
132 const INDEX: [usize; LANES] = [0; LANES];
134 Splat::swizzle(Simd::<T, 1>::from([value]))
137 /// Returns an array reference containing the entire SIMD vector.
142 /// # #![feature(portable_simd)]
143 /// # use core::simd::{Simd, u64x4};
144 /// let v: u64x4 = Simd::from_array([0, 1, 2, 3]);
145 /// assert_eq!(v.as_array(), &[0, 1, 2, 3]);
147 pub const fn as_array(&self) -> &[T; LANES] {
151 /// Returns a mutable array reference containing the entire SIMD vector.
152 pub fn as_mut_array(&mut self) -> &mut [T; LANES] {
156 /// Converts an array to a SIMD vector.
157 pub const fn from_array(array: [T; LANES]) -> Self {
161 /// Converts a SIMD vector to an array.
162 pub const fn to_array(self) -> [T; LANES] {
166 /// Converts a slice to a SIMD vector containing `slice[..LANES]`.
170 /// Panics if the slice's length is less than the vector's `Simd::LANES`.
175 /// # #![feature(portable_simd)]
176 /// # use core::simd::u32x4;
177 /// let source = vec![1, 2, 3, 4, 5, 6];
178 /// let v = u32x4::from_slice(&source);
179 /// assert_eq!(v.as_array(), &[1, 2, 3, 4]);
182 pub const fn from_slice(slice: &[T]) -> Self {
183 assert!(slice.len() >= LANES, "slice length must be at least the number of lanes");
184 let mut array = [slice[0]; LANES];
193 /// Performs lanewise conversion of a SIMD vector's elements to another SIMD-valid type.
195 /// This follows the semantics of Rust's `as` conversion for casting
196 /// integers to unsigned integers (interpreting as the other type, so `-1` to `MAX`),
197 /// and from floats to integers (truncating, or saturating at the limits) for each lane,
202 /// # #![feature(portable_simd)]
203 /// # use core::simd::Simd;
204 /// let floats: Simd<f32, 4> = Simd::from_array([1.9, -4.5, f32::INFINITY, f32::NAN]);
205 /// let ints = floats.cast::<i32>();
206 /// assert_eq!(ints, Simd::from_array([1, -4, i32::MAX, 0]));
208 /// // Formally equivalent, but `Simd::cast` can optimize better.
209 /// assert_eq!(ints, Simd::from_array(floats.to_array().map(|x| x as i32)));
211 /// // The float conversion does not round-trip.
212 /// let floats_again = ints.cast();
213 /// assert_ne!(floats, floats_again);
214 /// assert_eq!(floats_again, Simd::from_array([1.0, -4.0, 2147483647.0, 0.0]));
218 pub fn cast<U: SimdElement>(self) -> Simd<U, LANES> {
219 // Safety: The input argument is a vector of a valid SIMD element type.
220 unsafe { intrinsics::simd_as(self) }
223 /// Rounds toward zero and converts to the same-width integer type, assuming that
224 /// the value is finite and fits in that type.
230 /// * Not be infinite
231 /// * Be representable in the return type, after truncating off its fractional part
233 /// If these requirements are infeasible or costly, consider using the safe function [cast],
234 /// which saturates on conversion.
236 /// [cast]: Simd::cast
238 pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, LANES>
240 T: core::convert::FloatToInt<I>,
243 // Safety: `self` is a vector, and `FloatToInt` ensures the type can be casted to
245 unsafe { intrinsics::simd_cast(self) }
248 /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
249 /// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
253 /// # #![feature(portable_simd)]
254 /// # use core::simd::Simd;
255 /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
256 /// let idxs = Simd::from_array([9, 3, 0, 5]);
257 /// let alt = Simd::from_array([-5, -4, -3, -2]);
259 /// let result = Simd::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
260 /// assert_eq!(result, Simd::from_array([-5, 13, 10, 15]));
264 pub fn gather_or(slice: &[T], idxs: Simd<usize, LANES>, or: Self) -> Self {
265 Self::gather_select(slice, Mask::splat(true), idxs, or)
268 /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
269 /// If an index is out-of-bounds, the lane is set to the default value for the type.
273 /// # #![feature(portable_simd)]
274 /// # use core::simd::Simd;
275 /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
276 /// let idxs = Simd::from_array([9, 3, 0, 5]);
278 /// let result = Simd::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds.
279 /// assert_eq!(result, Simd::from_array([0, 13, 10, 15]));
283 pub fn gather_or_default(slice: &[T], idxs: Simd<usize, LANES>) -> Self
287 Self::gather_or(slice, idxs, Self::splat(T::default()))
290 /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
291 /// The mask `enable`s all `true` lanes and disables all `false` lanes.
292 /// If an index is disabled or is out-of-bounds, the lane is selected from the `or` vector.
296 /// # #![feature(portable_simd)]
297 /// # use core::simd::{Simd, Mask};
298 /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
299 /// let idxs = Simd::from_array([9, 3, 0, 5]);
300 /// let alt = Simd::from_array([-5, -4, -3, -2]);
301 /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
303 /// let result = Simd::gather_select(&vec, enable, idxs, alt); // Note the lane that is out-of-bounds.
304 /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
308 pub fn gather_select(
310 enable: Mask<isize, LANES>,
311 idxs: Simd<usize, LANES>,
314 let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
315 // Safety: We have masked-off out-of-bounds lanes.
316 unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) }
319 /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
320 /// The mask `enable`s all `true` lanes and disables all `false` lanes.
321 /// If an index is disabled, the lane is selected from the `or` vector.
325 /// Calling this function with an `enable`d out-of-bounds index is *[undefined behavior]*
326 /// even if the resulting value is not used.
330 /// # #![feature(portable_simd)]
331 /// # #[cfg(feature = "as_crate")] use core_simd::simd;
332 /// # #[cfg(not(feature = "as_crate"))] use core::simd;
333 /// # use simd::{Simd, SimdPartialOrd, Mask};
334 /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
335 /// let idxs = Simd::from_array([9, 3, 0, 5]);
336 /// let alt = Simd::from_array([-5, -4, -3, -2]);
337 /// let enable = Mask::from_array([true, true, true, false]); // Note the final mask lane.
338 /// // If this mask was used to gather, it would be unsound. Let's fix that.
339 /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
341 /// // We have masked the OOB lane, so it's safe to gather now.
342 /// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) };
343 /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2]));
345 /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
348 pub unsafe fn gather_select_unchecked(
350 enable: Mask<isize, LANES>,
351 idxs: Simd<usize, LANES>,
354 let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr());
355 // Ferris forgive me, I have done pointer arithmetic here.
356 let ptrs = base_ptr.wrapping_add(idxs);
357 // Safety: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
358 unsafe { intrinsics::simd_gather(or, ptrs, enable.to_int()) }
361 /// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`.
362 /// If two lanes in the scattered vector would write to the same index,
363 /// only the last lane is guaranteed to actually be written.
367 /// # #![feature(portable_simd)]
368 /// # use core::simd::Simd;
369 /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
370 /// let idxs = Simd::from_array([9, 3, 0, 0]);
371 /// let vals = Simd::from_array([-27, 82, -41, 124]);
373 /// vals.scatter(&mut vec, idxs); // index 0 receives two writes.
374 /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]);
377 pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, LANES>) {
378 self.scatter_select(slice, Mask::splat(true), idxs)
381 /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
382 /// The mask `enable`s all `true` lanes and disables all `false` lanes.
383 /// If an enabled index is out-of-bounds, the lane is not written.
384 /// If two enabled lanes in the scattered vector would write to the same index,
385 /// only the last lane is guaranteed to actually be written.
389 /// # #![feature(portable_simd)]
390 /// # #[cfg(feature = "as_crate")] use core_simd::simd;
391 /// # #[cfg(not(feature = "as_crate"))] use core::simd;
392 /// # use simd::{Simd, Mask};
393 /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
394 /// let idxs = Simd::from_array([9, 3, 0, 0]);
395 /// let vals = Simd::from_array([-27, 82, -41, 124]);
396 /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
398 /// vals.scatter_select(&mut vec, enable, idxs); // index 0's second write is masked, thus omitted.
399 /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
402 pub fn scatter_select(
405 enable: Mask<isize, LANES>,
406 idxs: Simd<usize, LANES>,
408 let enable: Mask<isize, LANES> = enable & idxs.simd_lt(Simd::splat(slice.len()));
409 // Safety: We have masked-off out-of-bounds lanes.
410 unsafe { self.scatter_select_unchecked(slice, enable, idxs) }
413 /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`.
414 /// The mask `enable`s all `true` lanes and disables all `false` lanes.
415 /// If two enabled lanes in the scattered vector would write to the same index,
416 /// only the last lane is guaranteed to actually be written.
420 /// Calling this function with an enabled out-of-bounds index is *[undefined behavior]*,
421 /// and may lead to memory corruption.
425 /// # #![feature(portable_simd)]
426 /// # #[cfg(feature = "as_crate")] use core_simd::simd;
427 /// # #[cfg(not(feature = "as_crate"))] use core::simd;
428 /// # use simd::{Simd, SimdPartialOrd, Mask};
429 /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
430 /// let idxs = Simd::from_array([9, 3, 0, 0]);
431 /// let vals = Simd::from_array([-27, 82, -41, 124]);
432 /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane.
433 /// // If this mask was used to scatter, it would be unsound. Let's fix that.
434 /// let enable = enable & idxs.simd_lt(Simd::splat(vec.len()));
436 /// // We have masked the OOB lane, so it's safe to scatter now.
437 /// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); }
438 /// // index 0's second write is masked, thus was omitted.
439 /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]);
441 /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
443 pub unsafe fn scatter_select_unchecked(
446 enable: Mask<isize, LANES>,
447 idxs: Simd<usize, LANES>,
449 // Safety: This block works with *mut T derived from &mut 'a [T],
450 // which means it is delicate in Rust's borrowing model, circa 2021:
451 // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts!
452 // Even though this block is largely safe methods, it must be exactly this way
453 // to prevent invalidating the raw ptrs while they're live.
454 // Thus, entering this block requires all values to use being already ready:
455 // 0. idxs we want to write to, which are used to construct the mask.
456 // 1. enable, which depends on an initial &'a [T] and the idxs.
457 // 2. actual values to scatter (self).
458 // 3. &mut [T] which will become our base ptr.
460 // Now Entering ☢️ *mut T Zone
461 let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr());
462 // Ferris forgive me, I have done pointer arithmetic here.
463 let ptrs = base_ptr.wrapping_add(idxs);
464 // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
465 intrinsics::simd_scatter(self, ptrs, enable.to_int())
466 // Cleared ☢️ *mut T Zone
471 impl<T, const LANES: usize> Copy for Simd<T, LANES>
474 LaneCount<LANES>: SupportedLaneCount,
478 impl<T, const LANES: usize> Clone for Simd<T, LANES>
481 LaneCount<LANES>: SupportedLaneCount,
483 fn clone(&self) -> Self {
488 impl<T, const LANES: usize> Default for Simd<T, LANES>
490 LaneCount<LANES>: SupportedLaneCount,
491 T: SimdElement + Default,
494 fn default() -> Self {
495 Self::splat(T::default())
499 impl<T, const LANES: usize> PartialEq for Simd<T, LANES>
501 LaneCount<LANES>: SupportedLaneCount,
502 T: SimdElement + PartialEq,
505 fn eq(&self, other: &Self) -> bool {
506 // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
508 let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_eq(*self, *other);
509 Mask::from_int_unchecked(tfvec)
512 // Two vectors are equal if all lanes tested true for vertical equality.
516 #[allow(clippy::partialeq_ne_impl)]
518 fn ne(&self, other: &Self) -> bool {
519 // Safety: All SIMD vectors are SimdPartialEq, and the comparison produces a valid mask.
521 let tfvec: Simd<<T as SimdElement>::Mask, LANES> = intrinsics::simd_ne(*self, *other);
522 Mask::from_int_unchecked(tfvec)
525 // Two vectors are non-equal if any lane tested true for vertical non-equality.
530 impl<T, const LANES: usize> PartialOrd for Simd<T, LANES>
532 LaneCount<LANES>: SupportedLaneCount,
533 T: SimdElement + PartialOrd,
536 fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
537 // TODO use SIMD equality
538 self.to_array().partial_cmp(other.as_ref())
542 impl<T, const LANES: usize> Eq for Simd<T, LANES>
544 LaneCount<LANES>: SupportedLaneCount,
549 impl<T, const LANES: usize> Ord for Simd<T, LANES>
551 LaneCount<LANES>: SupportedLaneCount,
552 T: SimdElement + Ord,
555 fn cmp(&self, other: &Self) -> core::cmp::Ordering {
556 // TODO use SIMD equality
557 self.to_array().cmp(other.as_ref())
561 impl<T, const LANES: usize> core::hash::Hash for Simd<T, LANES>
563 LaneCount<LANES>: SupportedLaneCount,
564 T: SimdElement + core::hash::Hash,
567 fn hash<H>(&self, state: &mut H)
569 H: core::hash::Hasher,
571 self.as_array().hash(state)
576 impl<T, const LANES: usize> AsRef<[T; LANES]> for Simd<T, LANES>
578 LaneCount<LANES>: SupportedLaneCount,
582 fn as_ref(&self) -> &[T; LANES] {
587 impl<T, const LANES: usize> AsMut<[T; LANES]> for Simd<T, LANES>
589 LaneCount<LANES>: SupportedLaneCount,
593 fn as_mut(&mut self) -> &mut [T; LANES] {
599 impl<T, const LANES: usize> AsRef<[T]> for Simd<T, LANES>
601 LaneCount<LANES>: SupportedLaneCount,
605 fn as_ref(&self) -> &[T] {
610 impl<T, const LANES: usize> AsMut<[T]> for Simd<T, LANES>
612 LaneCount<LANES>: SupportedLaneCount,
616 fn as_mut(&mut self) -> &mut [T] {
621 // vector/array conversion
622 impl<T, const LANES: usize> From<[T; LANES]> for Simd<T, LANES>
624 LaneCount<LANES>: SupportedLaneCount,
627 fn from(array: [T; LANES]) -> Self {
632 impl<T, const LANES: usize> From<Simd<T, LANES>> for [T; LANES]
634 LaneCount<LANES>: SupportedLaneCount,
637 fn from(vector: Simd<T, LANES>) -> Self {
647 /// Marker trait for types that may be used as SIMD vector elements.
650 /// This trait, when implemented, asserts the compiler can monomorphize
651 /// `#[repr(simd)]` structs with the marked type as an element.
652 /// Strictly, it is valid to impl if the vector will not be miscompiled.
653 /// Practically, it is user-unfriendly to impl it if the vector won't compile,
654 /// even when no soundness guarantees are broken by allowing the user to try.
655 pub unsafe trait SimdElement: Sealed + Copy {
656 /// The mask element type corresponding to this element type.
657 type Mask: MaskElement;
660 impl Sealed for u8 {}
662 // Safety: u8 is a valid SIMD element type, and is supported by this API
663 unsafe impl SimdElement for u8 {
667 impl Sealed for u16 {}
669 // Safety: u16 is a valid SIMD element type, and is supported by this API
670 unsafe impl SimdElement for u16 {
674 impl Sealed for u32 {}
676 // Safety: u32 is a valid SIMD element type, and is supported by this API
677 unsafe impl SimdElement for u32 {
681 impl Sealed for u64 {}
683 // Safety: u64 is a valid SIMD element type, and is supported by this API
684 unsafe impl SimdElement for u64 {
688 impl Sealed for usize {}
690 // Safety: usize is a valid SIMD element type, and is supported by this API
691 unsafe impl SimdElement for usize {
695 impl Sealed for i8 {}
697 // Safety: i8 is a valid SIMD element type, and is supported by this API
698 unsafe impl SimdElement for i8 {
702 impl Sealed for i16 {}
704 // Safety: i16 is a valid SIMD element type, and is supported by this API
705 unsafe impl SimdElement for i16 {
709 impl Sealed for i32 {}
711 // Safety: i32 is a valid SIMD element type, and is supported by this API
712 unsafe impl SimdElement for i32 {
716 impl Sealed for i64 {}
718 // Safety: i64 is a valid SIMD element type, and is supported by this API
719 unsafe impl SimdElement for i64 {
723 impl Sealed for isize {}
725 // Safety: isize is a valid SIMD element type, and is supported by this API
726 unsafe impl SimdElement for isize {
730 impl Sealed for f32 {}
732 // Safety: f32 is a valid SIMD element type, and is supported by this API
733 unsafe impl SimdElement for f32 {
737 impl Sealed for f64 {}
739 // Safety: f64 is a valid SIMD element type, and is supported by this API
740 unsafe impl SimdElement for f64 {