2 use crate::cmp::Ordering::{self, Equal, Greater, Less};
3 use crate::intrinsics::{self, const_eval_select};
5 use crate::slice::{self, SliceIndex};
7 impl<T: ?Sized> *const T {
8 /// Returns `true` if the pointer is null.
10 /// Note that unsized types have many possible null pointers, as only the
11 /// raw data pointer is considered, not their length, vtable, etc.
12 /// Therefore, two pointers that are null may still not compare equal to each other.
15 /// ## Behavior during const evaluation
17 /// When this function is used during const evaluation, it may return `false` for pointers
18 /// that turn out to be null at runtime. Specifically, when a pointer to some memory
19 /// is offset beyond its bounds in such a way that the resulting pointer is null,
20 /// the function will still return `false`. There is no way for CTFE to know
21 /// the absolute position of that memory, so we cannot tell if the pointer is null or not.
29 /// let s: &str = "Follow the rabbit";
30 /// let ptr: *const u8 = s.as_ptr();
31 /// assert!(!ptr.is_null());
33 #[stable(feature = "rust1", since = "1.0.0")]
34 #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
36 pub const fn is_null(self) -> bool {
38 fn runtime_impl(ptr: *const u8) -> bool {
43 const fn const_impl(ptr: *const u8) -> bool {
44 // Compare via a cast to a thin pointer, so that for fat pointers only
45 // the "data" part is considered for null-ness.
46 match (ptr).guaranteed_eq(null_mut()) {
52 // SAFETY: The two versions are equivalent at runtime.
53 unsafe { const_eval_select((self as *const u8,), const_impl, runtime_impl) }
56 /// Casts to a pointer of another type.
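///
/// A small usage sketch; the local names below are illustrative. The cast changes only the
/// pointee type, leaving the address untouched:
///
/// ```
/// let v = [1u16, 2, 3];
/// let p: *const u16 = v.as_ptr();
/// // Reinterpret as a byte pointer; the address stays the same.
/// let bytes: *const u8 = p.cast::<u8>();
/// assert_eq!(bytes as usize, p as usize);
/// ```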
57 #[stable(feature = "ptr_cast", since = "1.38.0")]
58 #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
60 pub const fn cast<U>(self) -> *const U {
64 /// Use the pointer value in a new pointer of another type.
66 /// In case `meta` is a (fat) pointer to an unsized type, this operation
67 /// will ignore the pointer part, whereas for (thin) pointers to sized
68 /// types, this has the same effect as a simple cast.
70 /// The resulting pointer will have provenance of `self`, i.e., for a fat
71 /// pointer, this operation is semantically the same as creating a new
72 /// fat pointer with the data pointer value of `self` but the metadata of `meta`.
77 /// This function is primarily useful for allowing byte-wise pointer
78 /// arithmetic on potentially fat pointers:
81 /// #![feature(set_ptr_value)]
82 /// # use core::fmt::Debug;
83 /// let arr: [i32; 3] = [1, 2, 3];
84 /// let mut ptr = arr.as_ptr() as *const dyn Debug;
85 /// let thin = ptr as *const u8;
87 /// ptr = thin.add(8).with_metadata_of(ptr);
88 /// # assert_eq!(*(ptr as *const i32), 3);
89 /// println!("{:?}", &*ptr); // will print "3"
92 #[unstable(feature = "set_ptr_value", issue = "75091")]
93 #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
94 #[must_use = "returns a new pointer rather than modifying its argument"]
96 pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
100 from_raw_parts::<U>(self as *const (), metadata(meta))
103 /// Changes constness without changing the type.
105 /// This is a bit safer than `as` because it wouldn't silently change the type if the code is refactored.
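///
/// A minimal sketch; note that obtaining a `*mut T` this way does not by itself make
/// writes legal, that still depends on how the pointer was originally created:
///
/// ```
/// let x = 5u8;
/// let shared: *const u8 = &x;
/// let raw_mut: *mut u8 = shared.cast_mut();
/// // Reading through the pointer is still fine; writing would not be, because it
/// // was derived from a shared reference.
/// assert_eq!(unsafe { *raw_mut }, 5);
/// ```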
107 #[stable(feature = "ptr_const_cast", since = "1.65.0")]
108 #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
110 pub const fn cast_mut(self) -> *mut T {
114 /// Casts a pointer to its raw bits.
116 /// This is equivalent to `as usize`, but is more specific to enhance readability.
117 /// The inverse method is [`from_bits`](#method.from_bits).
119 /// In particular, `*p as usize` and `p as usize` will both compile for
120 /// pointers to numeric types but do very different things, so using this
121 /// helps emphasize that reading the bits was intentional.
126 /// #![feature(ptr_to_from_bits)]
127 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
128 /// let array = [13, 42];
129 /// let p0: *const i32 = &array[0];
130 /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
131 /// let p1: *const i32 = &array[1];
132 /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
135 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
138 note = "replaced by the `exposed_addr` method, or update your code \
139 to follow the strict provenance rules using its APIs"
142 pub fn to_bits(self) -> usize
149 /// Creates a pointer from its raw bits.
151 /// This is equivalent to `as *const T`, but is more specific to enhance readability.
152 /// The inverse method is [`to_bits`](#method.to_bits).
157 /// #![feature(ptr_to_from_bits)]
158 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
159 /// use std::ptr::NonNull;
160 /// let dangling: *const u8 = NonNull::dangling().as_ptr();
161 /// assert_eq!(<*const u8>::from_bits(1), dangling);
164 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
167 note = "replaced by the `ptr::from_exposed_addr` function, or update \
168 your code to follow the strict provenance rules using its APIs"
170 #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
172 pub fn from_bits(bits: usize) -> Self
179 /// Gets the "address" portion of the pointer.
181 /// This is similar to `self as usize`, which semantically discards *provenance* and
182 /// *address-space* information. However, unlike `self as usize`, casting the returned address
183 /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
184 /// properly restore the lost information and obtain a dereferenceable pointer, use
185 /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
187 /// If using those APIs is not possible because there is no way to preserve a pointer with the
188 /// required provenance, use [`expose_addr`][pointer::expose_addr] and
189 /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
190 /// your code less portable and less amenable to tools that check for compliance with the Rust memory model.
193 /// On most platforms this will produce a value with the same bytes as the original
194 /// pointer, because all the bytes are dedicated to describing the address.
195 /// Platforms which need to store additional information in the pointer may
196 /// perform a change of representation to produce a value containing only the address
197 /// portion of the pointer. What that means is up to the platform to define.
199 /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
200 /// might change in the future (including possibly weakening this so it becomes wholly
201 /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
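///
/// A brief sketch of how the returned address is typically used together with
/// [`with_addr`][pointer::with_addr]; the local names are illustrative:
///
/// ```
/// #![feature(strict_provenance)]
/// let x = [5u32, 6];
/// let p: *const u32 = &x[0];
/// // Inspect the address without discarding provenance...
/// let addr = p.addr();
/// // ...and build a pointer to the next element from it, keeping the provenance of `p`.
/// let q = p.with_addr(addr + core::mem::size_of::<u32>());
/// assert_eq!(unsafe { *q }, 6);
/// ```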
204 #[unstable(feature = "strict_provenance", issue = "95228")]
205 pub fn addr(self) -> usize {
206 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
207 // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the provenance).
209 unsafe { mem::transmute(self.cast::<()>()) }
212 /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
213 /// use in [`from_exposed_addr`][].
215 /// This is equivalent to `self as usize`, which semantically discards *provenance* and
216 /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
217 /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
218 /// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
219 /// provenance. (Reconstructing address space information, if required, is your responsibility.)
221 /// Using this method means that code is *not* following Strict Provenance rules. Supporting
222 /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
223 /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
224 /// use [`addr`][pointer::addr] wherever possible.
226 /// On most platforms this will produce a value with the same bytes as the original pointer,
227 /// because all the bytes are dedicated to describing the address. Platforms which need to store
228 /// additional information in the pointer may not support this operation, since the 'expose'
229 /// side-effect which is required for [`from_exposed_addr`][] to work is typically not available.
232 /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
233 /// [module documentation][crate::ptr] for details.
235 /// [`from_exposed_addr`]: from_exposed_addr
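///
/// A hedged sketch of the expose/reconstruct round trip, assuming a platform where
/// provenance exposure is supported (which mainstream platforms are):
///
/// ```
/// #![feature(strict_provenance)]
/// use std::ptr;
///
/// let x = 3u8;
/// let p: *const u8 = &x;
/// // Expose the provenance and keep only the address...
/// let addr = p.expose_addr();
/// // ...then later reconstitute a usable pointer from that address.
/// let q = ptr::from_exposed_addr::<u8>(addr);
/// assert_eq!(unsafe { *q }, 3);
/// ```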
238 #[unstable(feature = "strict_provenance", issue = "95228")]
239 pub fn expose_addr(self) -> usize {
240 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
241 self.cast::<()>() as usize
244 /// Creates a new pointer with the given address.
246 /// This performs the same operation as an `addr as ptr` cast, but copies
247 /// the *address-space* and *provenance* of `self` to the new pointer.
248 /// This allows us to dynamically preserve and propagate this important
249 /// information in a way that is otherwise impossible with a unary cast.
251 /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
252 /// `self` to the given address, and therefore has all the same capabilities and restrictions.
254 /// This API and its claimed semantics are part of the Strict Provenance experiment,
255 /// see the [module documentation][crate::ptr] for details.
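///
/// A small sketch of carrying provenance over to a manually computed address; the
/// round-up to 4 bytes below is only illustrative:
///
/// ```
/// #![feature(strict_provenance)]
/// let data = [0u8; 8];
/// let p: *const u8 = data.as_ptr();
/// // Round the address up to the next multiple of 4 while keeping `p`'s provenance.
/// let aligned = p.with_addr((p.addr() + 3) & !3);
/// assert_eq!(aligned.addr() % 4, 0);
/// ```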
258 #[unstable(feature = "strict_provenance", issue = "95228")]
259 pub fn with_addr(self, addr: usize) -> Self {
260 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
262 // In the mean-time, this operation is defined to be "as if" it was
263 // a wrapping_offset, so we can emulate it as such. This should properly
264 // restore pointer provenance even under today's compiler.
265 let self_addr = self.addr() as isize;
266 let dest_addr = addr as isize;
267 let offset = dest_addr.wrapping_sub(self_addr);
269 // This is the canonical desugaring of this operation
270 self.wrapping_byte_offset(offset)
273 /// Creates a new pointer by mapping `self`'s address to a new one.
275 /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
277 /// This API and its claimed semantics are part of the Strict Provenance experiment,
278 /// see the [module documentation][crate::ptr] for details.
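///
/// A minimal sketch; this is just [`with_addr`][pointer::with_addr] driven by a closure.
/// The tagging below relies on `u16` being at least 2-aligned:
///
/// ```
/// #![feature(strict_provenance)]
/// let x = 1u16;
/// let p: *const u16 = &x;
/// // Set the lowest bit as a tag, then clear it again.
/// let tagged = p.map_addr(|a| a | 1);
/// let untagged = tagged.map_addr(|a| a & !1);
/// assert_eq!(untagged, p);
/// ```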
281 #[unstable(feature = "strict_provenance", issue = "95228")]
282 pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
283 self.with_addr(f(self.addr()))
286 /// Decompose a (possibly wide) pointer into its data pointer and metadata components.
288 /// The pointer can be later reconstructed with [`from_raw_parts`].
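///
/// A short sketch with a slice pointer, whose metadata is its length:
///
/// ```
/// #![feature(ptr_metadata)]
/// let a = [1i32, 2, 3];
/// let slice_ptr: *const [i32] = &a[..];
/// let (data, len) = slice_ptr.to_raw_parts();
/// assert_eq!(len, 3);
/// assert_eq!(data, slice_ptr.cast::<()>());
/// ```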
289 #[unstable(feature = "ptr_metadata", issue = "81513")]
290 #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
292 pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
293 (self.cast(), metadata(self))
296 /// Returns `None` if the pointer is null, or else returns a shared reference to
297 /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
298 /// must be used instead.
300 /// [`as_uninit_ref`]: #method.as_uninit_ref
304 /// When calling this method, you have to ensure that *either* the pointer is null *or*
305 /// all of the following is true:
307 /// * The pointer must be properly aligned.
309 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
311 /// * The pointer must point to an initialized instance of `T`.
313 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
314 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
315 /// In particular, while this reference exists, the memory the pointer points to must
316 /// not get mutated (except inside `UnsafeCell`).
318 /// This applies even if the result of this method is unused!
319 /// (The part about being initialized is not yet fully decided, but until
320 /// it is, the only safe approach is to ensure that the value is indeed initialized.)
322 /// [the module documentation]: crate::ptr#safety
329 /// let ptr: *const u8 = &10u8 as *const u8;
332 /// if let Some(val_back) = ptr.as_ref() {
333 /// println!("We got back the value: {val_back}!");
338 /// # Null-unchecked version
340 /// If you are sure the pointer can never be null and are looking for some kind of
341 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
342 /// dereference the pointer directly.
345 /// let ptr: *const u8 = &10u8 as *const u8;
348 /// let val_back = &*ptr;
349 /// println!("We got back the value: {val_back}!");
352 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
353 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
355 pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
356 // SAFETY: the caller must guarantee that `self` is valid
357 // for a reference if it isn't null.
358 if self.is_null() { None } else { unsafe { Some(&*self) } }
361 /// Returns `None` if the pointer is null, or else returns a shared reference to
362 /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
363 /// the value to be initialized.
365 /// [`as_ref`]: #method.as_ref
369 /// When calling this method, you have to ensure that *either* the pointer is null *or*
370 /// all of the following is true:
372 /// * The pointer must be properly aligned.
374 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
376 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
377 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
378 /// In particular, while this reference exists, the memory the pointer points to must
379 /// not get mutated (except inside `UnsafeCell`).
381 /// This applies even if the result of this method is unused!
383 /// [the module documentation]: crate::ptr#safety
390 /// #![feature(ptr_as_uninit)]
392 /// let ptr: *const u8 = &10u8 as *const u8;
395 /// if let Some(val_back) = ptr.as_uninit_ref() {
396 /// println!("We got back the value: {}!", val_back.assume_init());
401 #[unstable(feature = "ptr_as_uninit", issue = "75402")]
402 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
403 pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
407 // SAFETY: the caller must guarantee that `self` meets all the
408 // requirements for a reference.
409 if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
412 /// Calculates the offset from a pointer.
414 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
415 /// offset of `3 * size_of::<T>()` bytes.
419 /// If any of the following conditions are violated, the result is Undefined Behavior:
422 /// * Both the starting and resulting pointer must be either in bounds or one
423 /// byte past the end of the same [allocated object].
425 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
427 /// * The offset being in bounds cannot rely on "wrapping around" the address
428 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
430 /// The compiler and standard library generally try to ensure allocations
431 /// never reach a size where an offset is a concern. For instance, `Vec`
432 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
433 /// `vec.as_ptr().add(vec.len())` is always safe.
435 /// Most platforms fundamentally can't even construct such an allocation.
436 /// For instance, no known 64-bit platform can ever serve a request
437 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
438 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
439 /// more than `isize::MAX` bytes with things like Physical Address
440 /// Extension. As such, memory acquired directly from allocators or memory
441 /// mapped files *may* be too large to handle with this function.
443 /// Consider using [`wrapping_offset`] instead if these constraints are
444 /// difficult to satisfy. The only advantage of this method is that it
445 /// enables more aggressive compiler optimizations.
447 /// [`wrapping_offset`]: #method.wrapping_offset
448 /// [allocated object]: crate::ptr#allocated-object
455 /// let s: &str = "123";
456 /// let ptr: *const u8 = s.as_ptr();
459 /// println!("{}", *ptr.offset(1) as char);
460 /// println!("{}", *ptr.offset(2) as char);
463 #[stable(feature = "rust1", since = "1.0.0")]
464 #[must_use = "returns a new pointer rather than modifying its argument"]
465 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
467 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
468 pub const unsafe fn offset(self, count: isize) -> *const T
472 // SAFETY: the caller must uphold the safety contract for `offset`.
473 unsafe { intrinsics::offset(self, count) }
476 /// Calculates the offset from a pointer in bytes.
478 /// `count` is in units of **bytes**.
480 /// This is purely a convenience for casting to a `u8` pointer and
481 /// using [offset][pointer::offset] on it. See that method for documentation
482 /// and safety requirements.
484 /// For non-`Sized` pointees this operation changes only the data pointer,
485 /// leaving the metadata untouched.
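///
/// A small sketch; stepping by `size_of::<u16>()` bytes lands on the next element:
///
/// ```
/// #![feature(pointer_byte_offsets)]
/// let a = [5u16, 6, 7];
/// let p: *const u16 = a.as_ptr();
/// // SAFETY: two bytes forward is still in bounds of `a` and properly aligned.
/// let q = unsafe { p.byte_offset(2) };
/// assert_eq!(unsafe { *q }, 6);
/// ```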
488 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
489 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
490 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
491 pub const unsafe fn byte_offset(self, count: isize) -> Self {
492 // SAFETY: the caller must uphold the safety contract for `offset`.
493 unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
496 /// Calculates the offset from a pointer using wrapping arithmetic.
498 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
499 /// offset of `3 * size_of::<T>()` bytes.
503 /// This operation itself is always safe, but using the resulting pointer is not.
505 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
506 /// be used to read or write other allocated objects.
508 /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
509 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
510 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
511 /// `x` and `y` point into the same allocated object.
513 /// Compared to [`offset`], this method basically delays the requirement of staying within the
514 /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
515 /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
516 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
517 /// can be optimized better and is thus preferable in performance-sensitive code.
519 /// The delayed check only considers the value of the pointer that was dereferenced, not the
520 /// intermediate values used during the computation of the final result. For example,
521 /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
522 /// words, leaving the allocated object and then re-entering it later is permitted.
524 /// [`offset`]: #method.offset
525 /// [allocated object]: crate::ptr#allocated-object
532 /// // Iterate using a raw pointer in increments of two elements
533 /// let data = [1u8, 2, 3, 4, 5];
534 /// let mut ptr: *const u8 = data.as_ptr();
536 /// let end_rounded_up = ptr.wrapping_offset(6);
538 /// // This loop prints "1, 3, 5, "
539 /// while ptr != end_rounded_up {
541 /// print!("{}, ", *ptr);
543 /// ptr = ptr.wrapping_offset(step);
546 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
547 #[must_use = "returns a new pointer rather than modifying its argument"]
548 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
550 pub const fn wrapping_offset(self, count: isize) -> *const T
554 // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
555 unsafe { intrinsics::arith_offset(self, count) }
558 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
560 /// `count` is in units of **bytes**.
562 /// This is purely a convenience for casting to a `u8` pointer and
563 /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
564 /// for documentation.
566 /// For non-`Sized` pointees this operation changes only the data pointer,
567 /// leaving the metadata untouched.
570 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
571 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
572 pub const fn wrapping_byte_offset(self, count: isize) -> Self {
573 self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
576 /// Masks out bits of the pointer according to a mask.
578 /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
580 /// For non-`Sized` pointees this operation changes only the data pointer,
581 /// leaving the metadata untouched.
586 /// #![feature(ptr_mask, strict_provenance)]
588 /// let ptr: *const u32 = &v;
590 /// // `u32` is 4 bytes aligned,
591 /// // which means that lower 2 bits are always 0.
592 /// let tag_mask = 0b11;
593 /// let ptr_mask = !tag_mask;
595 /// // We can store something in these lower bits
596 /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
598 /// // Get the "tag" back
599 /// let tag = tagged_ptr.addr() & tag_mask;
600 /// assert_eq!(tag, 0b10);
602 /// // Note that `tagged_ptr` is unaligned, it's UB to read from it.
603 /// // To get original pointer `mask` can be used:
604 /// let masked_ptr = tagged_ptr.mask(ptr_mask);
605 /// assert_eq!(unsafe { *masked_ptr }, 17);
607 #[unstable(feature = "ptr_mask", issue = "98290")]
608 #[must_use = "returns a new pointer rather than modifying its argument"]
610 pub fn mask(self, mask: usize) -> *const T {
611 intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
614 /// Calculates the distance between two pointers. The returned value is in
615 /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
617 /// This function is the inverse of [`offset`].
619 /// [`offset`]: #method.offset
623 /// If any of the following conditions are violated, the result is Undefined Behavior:
626 /// * Both the starting and other pointer must be either in bounds or one
627 /// byte past the end of the same [allocated object].
629 /// * Both pointers must be *derived from* a pointer to the same object.
630 /// (See below for an example.)
632 /// * The distance between the pointers, in bytes, must be an exact multiple
633 /// of the size of `T`.
635 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
637 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
639 /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
640 /// address space, so two pointers within some value of any Rust type `T` will always satisfy
641 /// the last two conditions. The standard library also generally ensures that allocations
642 /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
643 /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
644 /// always satisfies the last two conditions.
646 /// Most platforms fundamentally can't even construct such a large allocation.
647 /// For instance, no known 64-bit platform can ever serve a request
648 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
649 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
650 /// more than `isize::MAX` bytes with things like Physical Address
651 /// Extension. As such, memory acquired directly from allocators or memory
652 /// mapped files *may* be too large to handle with this function.
653 /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
654 /// such large allocations either.)
656 /// [`add`]: #method.add
657 /// [allocated object]: crate::ptr#allocated-object
661 /// This function panics if `T` is a Zero-Sized Type ("ZST").
669 /// let ptr1: *const i32 = &a[1];
670 /// let ptr2: *const i32 = &a[3];
672 /// assert_eq!(ptr2.offset_from(ptr1), 2);
673 /// assert_eq!(ptr1.offset_from(ptr2), -2);
674 /// assert_eq!(ptr1.offset(2), ptr2);
675 /// assert_eq!(ptr2.offset(-2), ptr1);
679 /// *Incorrect* usage:
682 /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
683 /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
684 /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
685 /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
686 /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
687 /// assert_eq!(ptr2 as usize, ptr2_other as usize);
688 /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
689 /// // computing their offset is undefined behavior, even though
690 /// // they point to the same address!
692 /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
695 #[stable(feature = "ptr_offset_from", since = "1.47.0")]
696 #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
698 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
699 pub const unsafe fn offset_from(self, origin: *const T) -> isize
703 let pointee_size = mem::size_of::<T>();
704 assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
705 // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
706 unsafe { intrinsics::ptr_offset_from(self, origin) }
709 /// Calculates the distance between two pointers. The returned value is in
710 /// units of **bytes**.
712 /// This is purely a convenience for casting to a `u8` pointer and
713 /// using [offset_from][pointer::offset_from] on it. See that method for
714 /// documentation and safety requirements.
716 /// For non-`Sized` pointees this operation considers only the data pointers,
717 /// ignoring the metadata.
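///
/// A brief sketch; the distance comes back in bytes even though the elements are wider:
///
/// ```
/// #![feature(pointer_byte_offsets)]
/// let a = [0u32; 4];
/// let first: *const u32 = &a[0];
/// let third: *const u32 = &a[2];
/// // SAFETY: both pointers are in bounds of the same array.
/// assert_eq!(unsafe { third.byte_offset_from(first) }, 8);
/// assert_eq!(unsafe { first.byte_offset_from(third) }, -8);
/// ```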
719 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
720 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
721 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
722 pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
723 // SAFETY: the caller must uphold the safety contract for `offset_from`.
724 unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
727 /// Calculates the distance between two pointers, *where it's known that
728 /// `self` is equal to or greater than `origin`*. The returned value is in
729 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
731 /// This computes the same value that [`offset_from`](#method.offset_from)
732 /// would compute, but with the added precondition that the offset is
733 /// guaranteed to be non-negative. This method is equivalent to
734 /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
735 /// but it provides slightly more information to the optimizer, which can
736 /// sometimes allow it to optimize slightly better with some backends.
738 /// This method can be thought of as recovering the `count` that was passed
739 /// to [`add`](#method.add) (or, with the parameters in the other order,
740 /// to [`sub`](#method.sub)). The following are all equivalent, assuming
741 /// that their safety preconditions are met:
743 /// # #![feature(ptr_sub_ptr)]
744 /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool {
745 /// ptr.sub_ptr(origin) == count
747 /// origin.add(count) == ptr
749 /// ptr.sub(count) == origin
755 /// - The distance between the pointers must be non-negative (`self >= origin`)
757 /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
758 /// apply to this method as well; see it for the full details.
760 /// Importantly, despite the return type of this method being able to represent
761 /// a larger offset, it's still *not permitted* to pass pointers which differ
762 /// by more than `isize::MAX` *bytes*. As such, the result of this method will
763 /// always be less than or equal to `isize::MAX as usize`.
767 /// This function panics if `T` is a Zero-Sized Type ("ZST").
772 /// #![feature(ptr_sub_ptr)]
775 /// let ptr1: *const i32 = &a[1];
776 /// let ptr2: *const i32 = &a[3];
778 /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
779 /// assert_eq!(ptr1.add(2), ptr2);
780 /// assert_eq!(ptr2.sub(2), ptr1);
781 /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
784 /// // This would be incorrect, as the pointers are not correctly ordered:
785 /// // ptr1.sub_ptr(ptr2)
787 #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
788 #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
790 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
791 pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
796 // SAFETY: The comparison has no side-effects, and the intrinsic
797 // does this check internally in the CTFE implementation.
799 assert_unsafe_precondition!(
800 "ptr::sub_ptr requires `this >= origin`",
801 [T](this: *const T, origin: *const T) => this >= origin
805 let pointee_size = mem::size_of::<T>();
806 assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
807 // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
808 unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
811 /// Returns whether two pointers are guaranteed to be equal.
813 /// At runtime this function behaves like `Some(self == other)`.
814 /// However, in some contexts (e.g., compile-time evaluation),
815 /// it is not always possible to determine equality of two pointers, so this function may
816 /// spuriously return `None` for pointers that later actually turn out to have their equality known.
817 /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
819 /// The return value may change from `Some` to `None` and vice versa depending on the compiler
820 /// version, and unsafe code must not
821 /// rely on the result of this function for soundness. It is suggested to only use this function
822 /// for performance optimizations where spurious `None` return values by this function do not
823 /// affect the outcome, but just the performance.
824 /// The consequences of using this method to make runtime and compile-time code behave
825 /// differently have not been explored. This method should not be used to introduce such
826 /// differences, and it should also not be stabilized before we have a better understanding of this issue.
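///
/// A hedged sketch of the intended "optimization only" usage pattern, where `None`
/// simply falls back to a conservative path:
///
/// ```
/// #![feature(const_raw_ptr_comparison)]
/// let a = [1i32, 2, 3];
/// let p: *const i32 = &a[0];
/// let fast_path = match p.guaranteed_eq(p) {
///     Some(true) => "known equal",
///     Some(false) => "known unequal",
///     None => "unknown; take the conservative path",
/// };
/// // At runtime the comparison is always resolved, so the fast path is taken here.
/// assert_eq!(fast_path, "known equal");
/// ```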
828 #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
829 #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
831 pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
835 match intrinsics::ptr_guaranteed_cmp(self as _, other as _) {
837 other => Some(other == 1),
841 /// Returns whether two pointers are guaranteed to be unequal.
843 /// At runtime this function behaves like `Some(self != other)`.
844 /// However, in some contexts (e.g., compile-time evaluation),
845 /// it is not always possible to determine inequality of two pointers, so this function may
846 /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
847 /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
849 /// The return value may change from `Some` to `None` and vice versa depending on the compiler
850 /// version, and unsafe code must not
851 /// rely on the result of this function for soundness. It is suggested to only use this function
852 /// for performance optimizations where spurious `None` return values by this function do not
853 /// affect the outcome, but just the performance.
854 /// The consequences of using this method to make runtime and compile-time code behave
855 /// differently have not been explored. This method should not be used to introduce such
856 /// differences, and it should also not be stabilized before we have a better understanding of this issue.
858 #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
859 #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
861 pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
865 match self.guaranteed_eq(other) {
867 Some(eq) => Some(!eq),
871 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
873 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
874 /// offset of `3 * size_of::<T>()` bytes.
878 /// If any of the following conditions are violated, the result is Undefined Behavior:
881 /// * Both the starting and resulting pointer must be either in bounds or one
882 /// byte past the end of the same [allocated object].
884 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
886 /// * The offset being in bounds cannot rely on "wrapping around" the address
887 /// space. That is, the infinite-precision sum must fit in a `usize`.
889 /// The compiler and standard library generally try to ensure allocations
890 /// never reach a size where an offset is a concern. For instance, `Vec`
891 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
892 /// `vec.as_ptr().add(vec.len())` is always safe.
894 /// Most platforms fundamentally can't even construct such an allocation.
895 /// For instance, no known 64-bit platform can ever serve a request
896 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
897 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
898 /// more than `isize::MAX` bytes with things like Physical Address
899 /// Extension. As such, memory acquired directly from allocators or memory
900 /// mapped files *may* be too large to handle with this function.
902 /// Consider using [`wrapping_add`] instead if these constraints are
903 /// difficult to satisfy. The only advantage of this method is that it
904 /// enables more aggressive compiler optimizations.
906 /// [`wrapping_add`]: #method.wrapping_add
907 /// [allocated object]: crate::ptr#allocated-object
914 /// let s: &str = "123";
915 /// let ptr: *const u8 = s.as_ptr();
918 /// println!("{}", *ptr.add(1) as char);
919 /// println!("{}", *ptr.add(2) as char);
922 #[stable(feature = "pointer_methods", since = "1.26.0")]
923 #[must_use = "returns a new pointer rather than modifying its argument"]
924 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
926 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
927 pub const unsafe fn add(self, count: usize) -> Self
931 // SAFETY: the caller must uphold the safety contract for `offset`.
932 unsafe { self.offset(count as isize) }
935 /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
937 /// `count` is in units of bytes.
939 /// This is purely a convenience for casting to a `u8` pointer and
940 /// using [add][pointer::add] on it. See that method for documentation
941 /// and safety requirements.
943 /// For non-`Sized` pointees this operation changes only the data pointer,
944 /// leaving the metadata untouched.
947 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
948 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
949 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
950 pub const unsafe fn byte_add(self, count: usize) -> Self {
951 // SAFETY: the caller must uphold the safety contract for `add`.
952 unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
955 /// Calculates the offset from a pointer (convenience for
956 /// `.offset((count as isize).wrapping_neg())`).
958 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
959 /// offset of `3 * size_of::<T>()` bytes.
963 /// If any of the following conditions are violated, the result is Undefined Behavior:
966 /// * Both the starting and resulting pointer must be either in bounds or one
967 /// byte past the end of the same [allocated object].
969 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
971 /// * The offset being in bounds cannot rely on "wrapping around" the address
972 /// space. That is, the infinite-precision sum must fit in a `usize`.
974 /// The compiler and standard library generally try to ensure allocations
975 /// never reach a size where an offset is a concern. For instance, `Vec`
976 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
977 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
979 /// Most platforms fundamentally can't even construct such an allocation.
980 /// For instance, no known 64-bit platform can ever serve a request
981 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
982 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
983 /// more than `isize::MAX` bytes with things like Physical Address
984 /// Extension. As such, memory acquired directly from allocators or memory
985 /// mapped files *may* be too large to handle with this function.
987 /// Consider using [`wrapping_sub`] instead if these constraints are
988 /// difficult to satisfy. The only advantage of this method is that it
989 /// enables more aggressive compiler optimizations.
991 /// [`wrapping_sub`]: #method.wrapping_sub
992 /// [allocated object]: crate::ptr#allocated-object
999 /// let s: &str = "123";
1002 /// let end: *const u8 = s.as_ptr().add(3);
1003 /// println!("{}", *end.sub(1) as char);
1004 /// println!("{}", *end.sub(2) as char);
1007 #[stable(feature = "pointer_methods", since = "1.26.0")]
1008 #[must_use = "returns a new pointer rather than modifying its argument"]
1009 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1011 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1012 pub const unsafe fn sub(self, count: usize) -> Self
1016 // SAFETY: the caller must uphold the safety contract for `offset`.
1017 unsafe { self.offset((count as isize).wrapping_neg()) }
1020 /// Calculates the offset from a pointer in bytes (convenience for
1021 /// `.byte_offset((count as isize).wrapping_neg())`).
1023 /// `count` is in units of bytes.
1025 /// This is purely a convenience for casting to a `u8` pointer and
1026 /// using [sub][pointer::sub] on it. See that method for documentation
1027 /// and safety requirements.
1029 /// For non-`Sized` pointees this operation changes only the data pointer,
1030 /// leaving the metadata untouched.
1033 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1034 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1035 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1036 pub const unsafe fn byte_sub(self, count: usize) -> Self {
1037 // SAFETY: the caller must uphold the safety contract for `sub`.
1038 unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
1041 /// Calculates the offset from a pointer using wrapping arithmetic.
1042 /// (convenience for `.wrapping_offset(count as isize)`)
1044 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1045 /// offset of `3 * size_of::<T>()` bytes.
1049 /// This operation itself is always safe, but using the resulting pointer is not.
1051 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
1052 /// be used to read or write other allocated objects.
1054 /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
1055 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
1056 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
1057 /// `x` and `y` point into the same allocated object.
1059 /// Compared to [`add`], this method basically delays the requirement of staying within the
1060 /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
1061 /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
1062 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
1063 /// can be optimized better and is thus preferable in performance-sensitive code.
1065 /// The delayed check only considers the value of the pointer that was dereferenced, not the
1066 /// intermediate values used during the computation of the final result. For example,
1067 /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
1068 /// allocated object and then re-entering it later is permitted.
1070 /// [`add`]: #method.add
1071 /// [allocated object]: crate::ptr#allocated-object
1078 /// // Iterate using a raw pointer in increments of two elements
1079 /// let data = [1u8, 2, 3, 4, 5];
1080 /// let mut ptr: *const u8 = data.as_ptr();
1082 /// let end_rounded_up = ptr.wrapping_add(6);
1084 /// // This loop prints "1, 3, 5, "
1085 /// while ptr != end_rounded_up {
1087 /// print!("{}, ", *ptr);
1089 /// ptr = ptr.wrapping_add(step);
1092 #[stable(feature = "pointer_methods", since = "1.26.0")]
1093 #[must_use = "returns a new pointer rather than modifying its argument"]
1094 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1096 pub const fn wrapping_add(self, count: usize) -> Self
1100 self.wrapping_offset(count as isize)
1103 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
1104 /// (convenience for `.wrapping_byte_offset(count as isize)`)
1106 /// `count` is in units of bytes.
1108 /// This is purely a convenience for casting to a `u8` pointer and
1109 /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
1111 /// For non-`Sized` pointees this operation changes only the data pointer,
1112 /// leaving the metadata untouched.
1115 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1116 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1117 pub const fn wrapping_byte_add(self, count: usize) -> Self {
1118 self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
1121 /// Calculates the offset from a pointer using wrapping arithmetic.
1122 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1124 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1125 /// offset of `3 * size_of::<T>()` bytes.
1129 /// This operation itself is always safe, but using the resulting pointer is not.
1131 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
1132 /// be used to read or write other allocated objects.
1134 /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
1135 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
1136 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
1137 /// `x` and `y` point into the same allocated object.
1139 /// Compared to [`sub`], this method basically delays the requirement of staying within the
1140 /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
1141 /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
1142 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
1143 /// can be optimized better and is thus preferable in performance-sensitive code.
1145 /// The delayed check only considers the value of the pointer that was dereferenced, not the
1146 /// intermediate values used during the computation of the final result. For example,
1147 /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
1148 /// allocated object and then re-entering it later is permitted.
1150 /// [`sub`]: #method.sub
1151 /// [allocated object]: crate::ptr#allocated-object
1158 /// // Iterate using a raw pointer in increments of two elements (backwards)
1159 /// let data = [1u8, 2, 3, 4, 5];
1160 /// let mut ptr: *const u8 = data.as_ptr();
1161 /// let start_rounded_down = ptr.wrapping_sub(2);
1162 /// ptr = ptr.wrapping_add(4);
1164 /// // This loop prints "5, 3, 1, "
1165 /// while ptr != start_rounded_down {
1167 /// print!("{}, ", *ptr);
1169 /// ptr = ptr.wrapping_sub(step);
1172 #[stable(feature = "pointer_methods", since = "1.26.0")]
1173 #[must_use = "returns a new pointer rather than modifying its argument"]
1174 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1176 pub const fn wrapping_sub(self, count: usize) -> Self
1180 self.wrapping_offset((count as isize).wrapping_neg())
1183 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
1184 /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
1186 /// `count` is in units of bytes.
1188 /// This is purely a convenience for casting to a `u8` pointer and
1189 /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
1191 /// For non-`Sized` pointees this operation changes only the data pointer,
1192 /// leaving the metadata untouched.
1195 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1196 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1197 pub const fn wrapping_byte_sub(self, count: usize) -> Self {
1198 self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
1201 /// Reads the value from `self` without moving it. This leaves the
1202 /// memory in `self` unchanged.
1204 /// See [`ptr::read`] for safety concerns and examples.
1206 /// [`ptr::read`]: crate::ptr::read()
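///
/// A very small sketch; for `Copy` types this is just a raw, unchecked read:
///
/// ```
/// let x = 12u32;
/// let p: *const u32 = &x;
/// // SAFETY: `p` is valid, properly aligned, and points to an initialized `u32`.
/// assert_eq!(unsafe { p.read() }, 12);
/// ```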
1207 #[stable(feature = "pointer_methods", since = "1.26.0")]
1208 #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
1210 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1211 pub const unsafe fn read(self) -> T
1215 // SAFETY: the caller must uphold the safety contract for `read`.
1216 unsafe { read(self) }
1219 /// Performs a volatile read of the value from `self` without moving it. This
1220 /// leaves the memory in `self` unchanged.
1222 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1223 /// to not be elided or reordered by the compiler across other volatile operations.
1226 /// See [`ptr::read_volatile`] for safety concerns and examples.
1228 /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
1229 #[stable(feature = "pointer_methods", since = "1.26.0")]
1231 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1232 pub unsafe fn read_volatile(self) -> T
1236 // SAFETY: the caller must uphold the safety contract for `read_volatile`.
1237 unsafe { read_volatile(self) }
1240 /// Reads the value from `self` without moving it. This leaves the
1241 /// memory in `self` unchanged.
1243 /// Unlike `read`, the pointer may be unaligned.
1245 /// See [`ptr::read_unaligned`] for safety concerns and examples.
1247 /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
1248 #[stable(feature = "pointer_methods", since = "1.26.0")]
1249 #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
1251 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1252 pub const unsafe fn read_unaligned(self) -> T
1256 // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
1257 unsafe { read_unaligned(self) }
1260 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1261 /// and destination may overlap.
1263 /// NOTE: this has the *same* argument order as [`ptr::copy`].
1265 /// See [`ptr::copy`] for safety concerns and examples.
1267 /// [`ptr::copy`]: crate::ptr::copy()
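///
/// A brief sketch emphasizing the argument order (`self` is the *source*):
///
/// ```
/// let src = [1u8, 2, 3, 4];
/// let mut dst = [0u8; 4];
/// // SAFETY: both regions are valid for 4 elements and properly aligned.
/// unsafe { src.as_ptr().copy_to(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, src);
/// ```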
1268 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1269 #[stable(feature = "pointer_methods", since = "1.26.0")]
1271 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1272 pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
1276 // SAFETY: the caller must uphold the safety contract for `copy`.
1277 unsafe { copy(self, dest, count) }
1280 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1281 /// and destination may *not* overlap.
1283 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1285 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1287 /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
1288 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1289 #[stable(feature = "pointer_methods", since = "1.26.0")]
1291 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1292 pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1296 // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
1297 unsafe { copy_nonoverlapping(self, dest, count) }
1300 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to `align`.
1303 /// If it is not possible to align the pointer, the implementation returns
1304 /// `usize::MAX`. It is permissible for the implementation to *always*
1305 /// return `usize::MAX`. Only your algorithm's performance can depend
1306 /// on getting a usable offset here, not its correctness.
1308 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1309 /// used with the `wrapping_add` method.
1311 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1312 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1313 /// the returned offset is correct in all terms other than alignment.
1317 /// The function panics if `align` is not a power-of-two.
1321 /// Accessing adjacent `u8` as `u16`
1324 /// use std::mem::align_of;
1327 /// let x = [5_u8, 6, 7, 8, 9];
1328 /// let ptr = x.as_ptr();
1329 /// let offset = ptr.align_offset(align_of::<u16>());
1331 /// if offset < x.len() - 1 {
1332 /// let u16_ptr = ptr.add(offset).cast::<u16>();
1333 /// assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
1335 /// // while the pointer can be aligned via `offset`, it would point
1336 /// // outside the allocation
1342 #[stable(feature = "align_offset", since = "1.36.0")]
1343 #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
1344 pub const fn align_offset(self, align: usize) -> usize
1348 if !align.is_power_of_two() {
1349 panic!("align_offset: align is not a power-of-two");
1353 // SAFETY: `align` has been checked to be a power of 2 above
1354 unsafe { align_offset(self, align) }
1358 /// Returns whether the pointer is properly aligned for `T`.
1364 /// #![feature(pointer_is_aligned)]
1365 /// #![feature(pointer_byte_offsets)]
1367 /// // On some platforms, the alignment of i32 is less than 4.
1368 /// #[repr(align(4))]
1369 /// struct AlignedI32(i32);
1371 /// let data = AlignedI32(42);
1372 /// let ptr = &data as *const AlignedI32;
1374 /// assert!(ptr.is_aligned());
1375 /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
1378 /// # At compiletime
1379 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1380 /// [tracking issue] for details.**
1382 /// At compiletime, the compiler may not know where a value will end up in memory.
1383 /// Calling this function on a pointer created from a reference at compiletime will only
1384 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1385 /// is never aligned if cast to a type with a stricter alignment than the reference's
1386 /// underlying allocation.
1389 /// #![feature(pointer_is_aligned)]
1390 /// #![feature(const_pointer_is_aligned)]
1392 /// // On some platforms, the alignment of primitives is less than their size.
1393 /// #[repr(align(4))]
1394 /// struct AlignedI32(i32);
1395 /// #[repr(align(8))]
1396 /// struct AlignedI64(i64);
1399 /// let data = AlignedI32(42);
1400 /// let ptr = &data as *const AlignedI32;
1401 /// assert!(ptr.is_aligned());
1403 /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
1404 /// let ptr1 = ptr.cast::<AlignedI64>();
1405 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1406 /// assert!(!ptr1.is_aligned());
1407 /// assert!(!ptr2.is_aligned());
1411 /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
1412 /// pointer is aligned, even if the compiletime pointer wasn't aligned.
1415 /// #![feature(pointer_is_aligned)]
1416 /// #![feature(const_pointer_is_aligned)]
1418 /// // On some platforms, the alignment of primitives is less than their size.
1419 /// #[repr(align(4))]
1420 /// struct AlignedI32(i32);
1421 /// #[repr(align(8))]
1422 /// struct AlignedI64(i64);
1424 /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
1425 /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
1426 /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
1427 /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
1429 /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
1430 /// let runtime_ptr = COMPTIME_PTR;
1432 /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
1433 /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
1437 /// If a pointer is created from a fixed address, this function behaves the same during
1438 /// runtime and compiletime.
1441 /// #![feature(pointer_is_aligned)]
1442 /// #![feature(const_pointer_is_aligned)]
1444 /// // On some platforms, the alignment of primitives is less than their size.
1445 /// #[repr(align(4))]
1446 /// struct AlignedI32(i32);
1447 /// #[repr(align(8))]
1448 /// struct AlignedI64(i64);
1451 /// let ptr = 40 as *const AlignedI32;
1452 /// assert!(ptr.is_aligned());
1454 /// // For pointers with a known address, runtime and compiletime behavior are identical.
1455 /// let ptr1 = ptr.cast::<AlignedI64>();
1456 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1457 /// assert!(ptr1.is_aligned());
1458 /// assert!(!ptr2.is_aligned());
1462 /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
1465 #[unstable(feature = "pointer_is_aligned", issue = "96284")]
1466 #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
1467 pub const fn is_aligned(self) -> bool
1471 self.is_aligned_to(mem::align_of::<T>())
1474 /// Returns whether the pointer is aligned to `align`.
1476 /// For non-`Sized` pointees this operation considers only the data pointer,
1477 /// ignoring the metadata.
1481 /// The function panics if `align` is not a power-of-two (this includes 0).
1487 /// #![feature(pointer_is_aligned)]
1488 /// #![feature(pointer_byte_offsets)]
1490 /// // On some platforms, the alignment of i32 is less than 4.
1491 /// #[repr(align(4))]
1492 /// struct AlignedI32(i32);
1494 /// let data = AlignedI32(42);
1495 /// let ptr = &data as *const AlignedI32;
1497 /// assert!(ptr.is_aligned_to(1));
1498 /// assert!(ptr.is_aligned_to(2));
1499 /// assert!(ptr.is_aligned_to(4));
1501 /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
1502 /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
1504 /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
1507 /// # At compiletime
1508 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1509 /// [tracking issue] for details.**
1511 /// At compiletime, the compiler may not know where a value will end up in memory.
1512 /// Calling this function on a pointer created from a reference at compiletime will only
1513 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1514 /// cannot be more strictly aligned than the reference's underlying allocation.
1517 /// #![feature(pointer_is_aligned)]
1518 /// #![feature(const_pointer_is_aligned)]
1520 /// // On some platforms, the alignment of i32 is less than 4.
1521 /// #[repr(align(4))]
1522 /// struct AlignedI32(i32);
1525 /// let data = AlignedI32(42);
1526 /// let ptr = &data as *const AlignedI32;
1528 /// assert!(ptr.is_aligned_to(1));
1529 /// assert!(ptr.is_aligned_to(2));
1530 /// assert!(ptr.is_aligned_to(4));
1532 /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
1533 /// assert!(!ptr.is_aligned_to(8));
1534 /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
/// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
/// // On some platforms, the alignment of i32 is less than 4.
/// #[repr(align(4))]
/// struct AlignedI32(i32);
///
/// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
/// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
/// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
/// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
///
/// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
/// let runtime_ptr = COMPTIME_PTR;
/// assert_ne!(
///     runtime_ptr.is_aligned_to(8),
///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
/// );
/// ```
///
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
/// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
/// let ptr = 40 as *const u8;
/// assert!(ptr.is_aligned_to(1));
/// assert!(ptr.is_aligned_to(2));
/// assert!(ptr.is_aligned_to(4));
/// assert!(ptr.is_aligned_to(8));
/// assert!(!ptr.is_aligned_to(16));
/// ```
///
/// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
#[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
pub const fn is_aligned_to(self, align: usize) -> bool {
    if !align.is_power_of_two() {
        panic!("is_aligned_to: align is not a power-of-two");
    }

    fn runtime_impl(ptr: *const (), align: usize) -> bool {
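        // `align` is a power of two (checked above), so `align - 1` is a mask of the
        // bits below it; the address is aligned exactly when all of those bits are zero.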
        ptr.addr() & (align - 1) == 0
    }

    const fn const_impl(ptr: *const (), align: usize) -> bool {
        // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
        // The cast to `()` is used to
        // 1. deal with fat pointers; and
        // 2. ensure that `align_offset` doesn't actually try to compute an offset.
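        // During const evaluation, `align_offset` is allowed to return `usize::MAX`
        // whenever it cannot guarantee the requested alignment, which is what makes
        // this check conservative at compiletime (see the documentation above).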
        ptr.align_offset(align) == 0
    }

    // SAFETY: The two versions are equivalent at runtime.
    unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
}
}

impl<T> *const [T] {
    /// Returns the length of a raw slice.
    ///
    /// The returned value is the number of **elements**, not the number of bytes.
    ///
    /// This function is safe, even when the raw slice cannot be cast to a slice
    /// reference because the pointer is null or unaligned.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_len)]
    ///
    /// use std::ptr;
    ///
    /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// assert_eq!(slice.len(), 3);
    /// ```
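    ///
    /// The count is independent of the element size; as a brief sketch with a
    /// wider element type:
    ///
    /// ```rust
    /// #![feature(slice_ptr_len)]
    ///
    /// use std::ptr;
    ///
    /// let slice: *const [i64] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// // Still three elements, even though they would occupy 24 bytes.
    /// assert_eq!(slice.len(), 3);
    /// ```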
    #[unstable(feature = "slice_ptr_len", issue = "71146")]
    #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
    pub const fn len(self) -> usize {
        metadata(self)
    }
    /// Returns a raw pointer to the slice's buffer.
    ///
    /// This is equivalent to casting `self` to `*const T`, but more type-safe.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_get)]
    ///
    /// use std::ptr;
    ///
    /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// assert_eq!(slice.as_ptr(), ptr::null());
    /// ```
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
    pub const fn as_ptr(self) -> *const T {
        self as *const T
    }
    /// Returns a raw pointer to an element or subslice, without doing bounds
    /// checking.
    ///
    /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
    /// is *[undefined behavior]* even if the resulting pointer is not used.
    ///
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_ptr_get)]
    ///
    /// let x = &[1, 2, 4] as *const [i32];
    ///
    /// unsafe {
    ///     assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
    /// }
    /// ```
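    ///
    /// Range indices work as well and yield a raw subslice; a minimal sketch:
    ///
    /// ```
    /// #![feature(slice_ptr_get)]
    /// #![feature(slice_ptr_len)]
    ///
    /// let x = &[1, 2, 4] as *const [i32];
    ///
    /// unsafe {
    ///     let sub: *const [i32] = x.get_unchecked(1..3);
    ///     assert_eq!(sub.as_ptr(), x.as_ptr().add(1));
    ///     assert_eq!(sub.len(), 2);
    /// }
    /// ```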
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
    pub const unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
    where
        I: ~const SliceIndex<[T]>,
    {
        // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
        unsafe { index.get_unchecked(self) }
    }
    /// Returns `None` if the pointer is null, or else returns a shared slice to
    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
    /// the value to be initialized.
    ///
    /// [`as_ref`]: #method.as_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following are true:
    ///
    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
    ///   and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single [allocated object]!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts`][].
    ///
    /// [valid]: crate::ptr#safety
    /// [allocated object]: crate::ptr#allocated-object
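    ///
    /// # Examples
    ///
    /// As a minimal sketch, the null case needs none of the conditions above:
    ///
    /// ```
    /// #![feature(ptr_as_uninit)]
    ///
    /// use std::ptr;
    ///
    /// let p: *const [u8] = ptr::slice_from_raw_parts(ptr::null(), 0);
    /// // SAFETY: `p` is null, which the safety contract explicitly allows.
    /// assert!(unsafe { p.as_uninit_slice() }.is_none());
    /// ```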
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
        if self.is_null() {
            None
        } else {
            // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
            Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
        }
    }
}

// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    #[inline]
    fn eq(&self, other: &*const T) -> bool {
        *self == *other
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
    #[inline]
    fn cmp(&self, other: &*const T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    #[inline]
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    #[inline]
    fn lt(&self, other: &*const T) -> bool {
        *self < *other
    }

    #[inline]
    fn le(&self, other: &*const T) -> bool {
        *self <= *other
    }

    #[inline]
    fn gt(&self, other: &*const T) -> bool {
        *self > *other
    }

    #[inline]
    fn ge(&self, other: &*const T) -> bool {