2 use crate::cmp::Ordering::{self, Equal, Greater, Less};
5 use crate::slice::{self, SliceIndex};
7 impl<T: ?Sized> *const T {
8 /// Returns `true` if the pointer is null.
10 /// Note that unsized types have many possible null pointers, as only the
11 /// raw data pointer is considered, not their length, vtable, etc.
12 /// Therefore, two pointers that are null may still not compare equal to
15 /// ## Behavior during const evaluation
17 /// When this function is used during const evaluation, it may return `false` for pointers
18 /// that turn out to be null at runtime. Specifically, when a pointer to some memory
19 /// is offset beyond its bounds in such a way that the resulting pointer is null,
20 /// the function will still return `false`. There is no way for CTFE to know
20 /// the absolute position of that memory, so we cannot tell if the pointer is null or not.
29 /// let s: &str = "Follow the rabbit";
30 /// let ptr: *const u8 = s.as_ptr();
31 /// assert!(!ptr.is_null());
33 #[stable(feature = "rust1", since = "1.0.0")]
34 #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
36 pub const fn is_null(self) -> bool {
37 // Compare via a cast to a thin pointer, so fat pointers only have
38 // their "data" part considered for null-ness.
39 match (self as *const u8).guaranteed_eq(null()) {
45 /// Casts to a pointer of another type.
46 #[stable(feature = "ptr_cast", since = "1.38.0")]
47 #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
49 pub const fn cast<U>(self) -> *const U {
53 /// Use the pointer value in a new pointer of another type.
55 /// In case `meta` is a (fat) pointer to an unsized type, this operation
56 /// will ignore the pointer part, whereas for (thin) pointers to sized
57 /// types, this has the same effect as a simple cast.
59 /// The resulting pointer will have the provenance of `self`, i.e., for a fat
60 /// pointer, this operation is semantically the same as creating a new
61 /// fat pointer with the data pointer value of `self` but the metadata of `meta`.
66 /// This function is primarily useful for allowing byte-wise pointer
67 /// arithmetic on potentially fat pointers:
70 /// #![feature(set_ptr_value)]
71 /// # use core::fmt::Debug;
72 /// let arr: [i32; 3] = [1, 2, 3];
73 /// let mut ptr = arr.as_ptr() as *const dyn Debug;
74 /// let thin = ptr as *const u8;
76 /// ptr = thin.add(8).with_metadata_of(ptr);
77 /// # assert_eq!(*(ptr as *const i32), 3);
78 /// println!("{:?}", &*ptr); // will print "3"
81 #[unstable(feature = "set_ptr_value", issue = "75091")]
82 #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
83 #[must_use = "returns a new pointer rather than modifying its argument"]
85 pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
89 from_raw_parts::<U>(self as *const (), metadata(meta))
92 /// Changes constness without changing the type.
94 /// This is a bit safer than `as` because it wouldn't silently change the type if the code is refactored.
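/// A minimal usage sketch (illustrative only):
///
/// ```
/// let p: *const u8 = &10u8;
/// let m: *mut u8 = p.cast_mut();
/// assert_eq!(m as *const u8, p);
/// ```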
96 #[stable(feature = "ptr_const_cast", since = "1.65.0")]
97 #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
99 pub const fn cast_mut(self) -> *mut T {
103 /// Casts a pointer to its raw bits.
105 /// This is equivalent to `as usize`, but is more specific to enhance readability.
106 /// The inverse method is [`from_bits`](#method.from_bits).
108 /// In particular, `*p as usize` and `p as usize` will both compile for
109 /// pointers to numeric types but do very different things, so using this
110 /// helps emphasize that reading the bits was intentional.
115 /// #![feature(ptr_to_from_bits)]
116 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
117 /// let array = [13, 42];
118 /// let p0: *const i32 = &array[0];
119 /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
120 /// let p1: *const i32 = &array[1];
121 /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
124 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
127 note = "replaced by the `exposed_addr` method, or update your code \
128 to follow the strict provenance rules using its APIs"
131 pub fn to_bits(self) -> usize
138 /// Creates a pointer from its raw bits.
140 /// This is equivalent to `as *const T`, but is more specific to enhance readability.
141 /// The inverse method is [`to_bits`](#method.to_bits).
146 /// #![feature(ptr_to_from_bits)]
147 /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
148 /// use std::ptr::NonNull;
149 /// let dangling: *const u8 = NonNull::dangling().as_ptr();
150 /// assert_eq!(<*const u8>::from_bits(1), dangling);
153 #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
156 note = "replaced by the `ptr::from_exposed_addr` function, or update \
157 your code to follow the strict provenance rules using its APIs"
159 #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
161 pub fn from_bits(bits: usize) -> Self
168 /// Gets the "address" portion of the pointer.
170 /// This is similar to `self as usize`, which semantically discards *provenance* and
171 /// *address-space* information. However, unlike `self as usize`, casting the returned address
172 /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
173 /// properly restore the lost information and obtain a dereferenceable pointer, use
174 /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
176 /// If using those APIs is not possible because there is no way to preserve a pointer with the
177 /// required provenance, use [`expose_addr`][pointer::expose_addr] and
178 /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
179 /// your code less portable and less amenable to tools that check for compliance with the Rust
182 /// On most platforms this will produce a value with the same bytes as the original
183 /// pointer, because all the bytes are dedicated to describing the address.
184 /// Platforms which need to store additional information in the pointer may
185 /// perform a change of representation to produce a value containing only the address
186 /// portion of the pointer. What that means is up to the platform to define.
188 /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
189 /// might change in the future (including possibly weakening this so it becomes wholly
190 /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
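/// A small illustrative sketch (assumes the unstable `strict_provenance` feature):
///
/// ```
/// #![feature(strict_provenance)]
/// let x = 0u8;
/// let p: *const u8 = &x;
/// // Re-attaching the same address to the same provenance yields the same pointer.
/// assert_eq!(p.with_addr(p.addr()), p);
/// ```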
193 #[unstable(feature = "strict_provenance", issue = "95228")]
194 pub fn addr(self) -> usize
198 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
199 // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the provenance).
201 unsafe { mem::transmute(self) }
204 /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
205 /// use in [`from_exposed_addr`][].
207 /// This is equivalent to `self as usize`, which semantically discards *provenance* and
208 /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
209 /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
210 /// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
211 /// provenance. (Reconstructing address space information, if required, is your responsibility.)
213 /// Using this method means that code is *not* following Strict Provenance rules. Supporting
214 /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
215 /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
216 /// use [`addr`][pointer::addr] wherever possible.
218 /// On most platforms this will produce a value with the same bytes as the original pointer,
219 /// because all the bytes are dedicated to describing the address. Platforms which need to store
220 /// additional information in the pointer may not support this operation, since the 'expose'
221 /// side-effect which is required for [`from_exposed_addr`][] to work is typically not available.
224 /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
225 /// [module documentation][crate::ptr] for details.
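/// A round-trip sketch (illustrative; assumes the unstable `strict_provenance` feature):
///
/// ```
/// #![feature(strict_provenance)]
/// let x = 5u32;
/// let p: *const u32 = &x;
/// let addr = p.expose_addr();
/// // On platforms that support it, the exposed address can later be turned back into a pointer.
/// let q = std::ptr::from_exposed_addr::<u32>(addr);
/// assert_eq!(unsafe { *q }, 5);
/// ```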
227 /// [`from_exposed_addr`]: from_exposed_addr
230 #[unstable(feature = "strict_provenance", issue = "95228")]
231 pub fn expose_addr(self) -> usize
235 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
239 /// Creates a new pointer with the given address.
241 /// This performs the same operation as an `addr as ptr` cast, but copies
242 /// the *address-space* and *provenance* of `self` to the new pointer.
243 /// This allows us to dynamically preserve and propagate this important
244 /// information in a way that is otherwise impossible with a unary cast.
246 /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
247 /// `self` to the given address, and therefore has all the same capabilities and restrictions.
249 /// This API and its claimed semantics are part of the Strict Provenance experiment,
250 /// see the [module documentation][crate::ptr] for details.
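/// A minimal sketch (illustrative; assumes the unstable `strict_provenance` feature):
///
/// ```
/// #![feature(strict_provenance)]
/// let data = [1u8, 2, 3];
/// let p: *const u8 = data.as_ptr();
/// // Move to the second element while keeping the provenance of `p`.
/// let q = p.with_addr(p.addr() + 1);
/// assert_eq!(unsafe { *q }, 2);
/// ```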
253 #[unstable(feature = "strict_provenance", issue = "95228")]
254 pub fn with_addr(self, addr: usize) -> Self
258 // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
260 // In the meantime, this operation is defined to be "as if" it were
261 // a wrapping_offset, so we can emulate it as such. This should properly
262 // restore pointer provenance even under today's compiler.
263 let self_addr = self.addr() as isize;
264 let dest_addr = addr as isize;
265 let offset = dest_addr.wrapping_sub(self_addr);
267 // This is the canonical desugaring of this operation
268 self.wrapping_byte_offset(offset)
271 /// Creates a new pointer by mapping `self`'s address to a new one.
273 /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
275 /// This API and its claimed semantics are part of the Strict Provenance experiment,
276 /// see the [module documentation][crate::ptr] for details.
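/// For example, one might align an address down to a 4-byte boundary
/// (illustrative; assumes the unstable `strict_provenance` feature):
///
/// ```
/// #![feature(strict_provenance)]
/// let x = 0u32;
/// let p: *const u32 = &x;
/// // Clear the low bits of the address while preserving provenance.
/// let aligned = p.map_addr(|a| a & !0b11);
/// assert!(aligned.addr() % 4 == 0);
/// ```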
279 #[unstable(feature = "strict_provenance", issue = "95228")]
280 pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
284 self.with_addr(f(self.addr()))
287 /// Decomposes a (possibly wide) pointer into its address and metadata components.
289 /// The pointer can be later reconstructed with [`from_raw_parts`].
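/// A small sketch (illustrative; assumes the unstable `ptr_metadata` feature):
///
/// ```
/// #![feature(ptr_metadata)]
/// let arr = [1i32, 2, 3];
/// let slice_ptr: *const [i32] = &arr[..];
/// // For a slice pointer, the metadata is its length.
/// let (data, len) = slice_ptr.to_raw_parts();
/// assert_eq!(len, 3);
/// assert_eq!(data, arr.as_ptr().cast::<()>());
/// ```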
290 #[unstable(feature = "ptr_metadata", issue = "81513")]
291 #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
293 pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
294 (self.cast(), metadata(self))
297 /// Returns `None` if the pointer is null, or else returns a shared reference to
298 /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
299 /// must be used instead.
301 /// [`as_uninit_ref`]: #method.as_uninit_ref
305 /// When calling this method, you have to ensure that *either* the pointer is null *or*
306 /// all of the following are true:
308 /// * The pointer must be properly aligned.
310 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
312 /// * The pointer must point to an initialized instance of `T`.
314 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
315 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
316 /// In particular, while this reference exists, the memory the pointer points to must
317 /// not get mutated (except inside `UnsafeCell`).
319 /// This applies even if the result of this method is unused!
320 /// (The part about being initialized is not yet fully decided, but until
321 /// it is, the only safe approach is to ensure that they are indeed initialized.)
323 /// [the module documentation]: crate::ptr#safety
330 /// let ptr: *const u8 = &10u8 as *const u8;
333 /// if let Some(val_back) = ptr.as_ref() {
334 /// println!("We got back the value: {val_back}!");
339 /// # Null-unchecked version
341 /// If you are sure the pointer can never be null and are looking for some kind of
342 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
343 /// dereference the pointer directly.
346 /// let ptr: *const u8 = &10u8 as *const u8;
349 /// let val_back = &*ptr;
350 /// println!("We got back the value: {val_back}!");
353 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
354 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
356 pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
357 // SAFETY: the caller must guarantee that `self` is valid
358 // for a reference if it isn't null.
359 if self.is_null() { None } else { unsafe { Some(&*self) } }
362 /// Returns `None` if the pointer is null, or else returns a shared reference to
363 /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
364 /// that the value be initialized.
366 /// [`as_ref`]: #method.as_ref
370 /// When calling this method, you have to ensure that *either* the pointer is null *or*
371 /// all of the following are true:
373 /// * The pointer must be properly aligned.
375 /// * It must be "dereferenceable" in the sense defined in [the module documentation].
377 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
378 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
379 /// In particular, while this reference exists, the memory the pointer points to must
380 /// not get mutated (except inside `UnsafeCell`).
382 /// This applies even if the result of this method is unused!
384 /// [the module documentation]: crate::ptr#safety
391 /// #![feature(ptr_as_uninit)]
393 /// let ptr: *const u8 = &10u8 as *const u8;
396 /// if let Some(val_back) = ptr.as_uninit_ref() {
397 /// println!("We got back the value: {}!", val_back.assume_init());
402 #[unstable(feature = "ptr_as_uninit", issue = "75402")]
403 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
404 pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
408 // SAFETY: the caller must guarantee that `self` meets all the
409 // requirements for a reference.
410 if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
413 /// Calculates the offset from a pointer.
415 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
416 /// offset of `3 * size_of::<T>()` bytes.
420 /// If any of the following conditions are violated, the result is Undefined Behavior:
423 /// * Both the starting and resulting pointer must be either in bounds or one
424 /// byte past the end of the same [allocated object].
426 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
428 /// * The offset being in bounds cannot rely on "wrapping around" the address
429 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
431 /// The compiler and standard library generally try to ensure allocations
432 /// never reach a size where an offset is a concern. For instance, `Vec`
433 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
434 /// `vec.as_ptr().add(vec.len())` is always safe.
436 /// Most platforms fundamentally can't even construct such an allocation.
437 /// For instance, no known 64-bit platform can ever serve a request
438 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
439 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
440 /// more than `isize::MAX` bytes with things like Physical Address
441 /// Extension. As such, memory acquired directly from allocators or memory
442 /// mapped files *may* be too large to handle with this function.
444 /// Consider using [`wrapping_offset`] instead if these constraints are
445 /// difficult to satisfy. The only advantage of this method is that it
446 /// enables more aggressive compiler optimizations.
448 /// [`wrapping_offset`]: #method.wrapping_offset
449 /// [allocated object]: crate::ptr#allocated-object
456 /// let s: &str = "123";
457 /// let ptr: *const u8 = s.as_ptr();
460 /// println!("{}", *ptr.offset(1) as char);
461 /// println!("{}", *ptr.offset(2) as char);
464 #[stable(feature = "rust1", since = "1.0.0")]
465 #[must_use = "returns a new pointer rather than modifying its argument"]
466 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
468 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
469 pub const unsafe fn offset(self, count: isize) -> *const T
473 // SAFETY: the caller must uphold the safety contract for `offset`.
474 unsafe { intrinsics::offset(self, count) }
477 /// Calculates the offset from a pointer in bytes.
479 /// `count` is in units of **bytes**.
481 /// This is purely a convenience for casting to a `u8` pointer and
482 /// using [offset][pointer::offset] on it. See that method for documentation
483 /// and safety requirements.
485 /// For non-`Sized` pointees this operation changes only the data pointer,
486 /// leaving the metadata untouched.
489 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
490 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
491 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
492 pub const unsafe fn byte_offset(self, count: isize) -> Self {
493 // SAFETY: the caller must uphold the safety contract for `offset`.
494 unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
497 /// Calculates the offset from a pointer using wrapping arithmetic.
499 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
500 /// offset of `3 * size_of::<T>()` bytes.
504 /// This operation itself is always safe, but using the resulting pointer is not.
506 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
507 /// be used to read or write other allocated objects.
509 /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
510 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
511 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
512 /// `x` and `y` point into the same allocated object.
514 /// Compared to [`offset`], this method basically delays the requirement of staying within the
515 /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
516 /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
517 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
518 /// can be optimized better and is thus preferable in performance-sensitive code.
520 /// The delayed check only considers the value of the pointer that was dereferenced, not the
521 /// intermediate values used during the computation of the final result. For example,
522 /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
523 /// words, leaving the allocated object and then re-entering it later is permitted.
525 /// [`offset`]: #method.offset
526 /// [allocated object]: crate::ptr#allocated-object
533 /// // Iterate using a raw pointer in increments of two elements
534 /// let data = [1u8, 2, 3, 4, 5];
535 /// let mut ptr: *const u8 = data.as_ptr();
537 /// let end_rounded_up = ptr.wrapping_offset(6);
539 /// // This loop prints "1, 3, 5, "
540 /// while ptr != end_rounded_up {
542 /// print!("{}, ", *ptr);
544 /// ptr = ptr.wrapping_offset(step);
547 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
548 #[must_use = "returns a new pointer rather than modifying its argument"]
549 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
551 pub const fn wrapping_offset(self, count: isize) -> *const T
555 // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
556 unsafe { intrinsics::arith_offset(self, count) }
559 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
561 /// `count` is in units of **bytes**.
563 /// This is purely a convenience for casting to a `u8` pointer and
564 /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
565 /// for documentation.
567 /// For non-`Sized` pointees this operation changes only the data pointer,
568 /// leaving the metadata untouched.
571 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
572 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
573 pub const fn wrapping_byte_offset(self, count: isize) -> Self {
574 self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
577 /// Masks out bits of the pointer according to a mask.
579 /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
581 /// For non-`Sized` pointees this operation changes only the data pointer,
582 /// leaving the metadata untouched.
587 /// #![feature(ptr_mask, strict_provenance)]
589 /// let ptr: *const u32 = &v;
591 /// // `u32` is 4-byte aligned,
592 /// // which means that the lower 2 bits are always 0.
593 /// let tag_mask = 0b11;
594 /// let ptr_mask = !tag_mask;
596 /// // We can store something in these lower bits
597 /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
599 /// // Get the "tag" back
600 /// let tag = tagged_ptr.addr() & tag_mask;
601 /// assert_eq!(tag, 0b10);
603 /// // Note that `tagged_ptr` is unaligned; it's UB to read from it.
604 /// // To get the original pointer, `mask` can be used:
605 /// let masked_ptr = tagged_ptr.mask(ptr_mask);
606 /// assert_eq!(unsafe { *masked_ptr }, 17);
608 #[unstable(feature = "ptr_mask", issue = "98290")]
609 #[must_use = "returns a new pointer rather than modifying its argument"]
611 pub fn mask(self, mask: usize) -> *const T {
612 intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
615 /// Calculates the distance between two pointers. The returned value is in
616 /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
618 /// This function is the inverse of [`offset`].
620 /// [`offset`]: #method.offset
624 /// If any of the following conditions are violated, the result is Undefined Behavior:
627 /// * Both the starting and other pointer must be either in bounds or one
628 /// byte past the end of the same [allocated object].
630 /// * Both pointers must be *derived from* a pointer to the same object.
631 /// (See below for an example.)
633 /// * The distance between the pointers, in bytes, must be an exact multiple
634 /// of the size of `T`.
636 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
638 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
640 /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
641 /// address space, so two pointers within some value of any Rust type `T` will always satisfy
642 /// the last two conditions. The standard library also generally ensures that allocations
643 /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
644 /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
645 /// always satisfies the last two conditions.
647 /// Most platforms fundamentally can't even construct such a large allocation.
648 /// For instance, no known 64-bit platform can ever serve a request
649 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
650 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
651 /// more than `isize::MAX` bytes with things like Physical Address
652 /// Extension. As such, memory acquired directly from allocators or memory
653 /// mapped files *may* be too large to handle with this function.
654 /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
655 /// such large allocations either.)
657 /// [`add`]: #method.add
658 /// [allocated object]: crate::ptr#allocated-object
662 /// This function panics if `T` is a Zero-Sized Type ("ZST").
670 /// let ptr1: *const i32 = &a[1];
671 /// let ptr2: *const i32 = &a[3];
673 /// assert_eq!(ptr2.offset_from(ptr1), 2);
674 /// assert_eq!(ptr1.offset_from(ptr2), -2);
675 /// assert_eq!(ptr1.offset(2), ptr2);
676 /// assert_eq!(ptr2.offset(-2), ptr1);
680 /// *Incorrect* usage:
683 /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
684 /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
685 /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
686 /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
687 /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
688 /// assert_eq!(ptr2 as usize, ptr2_other as usize);
689 /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
690 /// // computing their offset is undefined behavior, even though
691 /// // they point to the same address!
693 /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
696 #[stable(feature = "ptr_offset_from", since = "1.47.0")]
697 #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
699 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
700 pub const unsafe fn offset_from(self, origin: *const T) -> isize
704 let pointee_size = mem::size_of::<T>();
705 assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
706 // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
707 unsafe { intrinsics::ptr_offset_from(self, origin) }
710 /// Calculates the distance between two pointers. The returned value is in
711 /// units of **bytes**.
713 /// This is purely a convenience for casting to a `u8` pointer and
714 /// using [offset_from][pointer::offset_from] on it. See that method for
715 /// documentation and safety requirements.
717 /// For non-`Sized` pointees this operation considers only the data pointers,
718 /// ignoring the metadata.
720 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
721 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
722 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
723 pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
724 // SAFETY: the caller must uphold the safety contract for `offset_from`.
725 unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
728 /// Calculates the distance between two pointers, *where it's known that
729 /// `self` is equal to or greater than `origin`*. The returned value is in
730 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
732 /// This computes the same value that [`offset_from`](#method.offset_from)
733 /// would compute, but with the added precondition that the offset is
734 /// guaranteed to be non-negative. This method is equivalent to
735 /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
736 /// but it provides slightly more information to the optimizer, which can
737 /// sometimes allow it to optimize slightly better with some backends.
739 /// This method can be thought of as recovering the `count` that was passed
740 /// to [`add`](#method.add) (or, with the parameters in the other order,
741 /// to [`sub`](#method.sub)). The following are all equivalent, assuming
742 /// that their safety preconditions are met:
744 /// # #![feature(ptr_sub_ptr)]
745 /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool {
746 /// ptr.sub_ptr(origin) == count
748 /// origin.add(count) == ptr
750 /// ptr.sub(count) == origin
756 /// - The distance between the pointers must be non-negative (`self >= origin`)
758 /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
759 /// apply to this method as well; see it for the full details.
761 /// Importantly, despite the return type of this method being able to represent
762 /// a larger offset, it's still *not permitted* to pass pointers which differ
763 /// by more than `isize::MAX` *bytes*. As such, the result of this method will
764 /// always be less than or equal to `isize::MAX as usize`.
768 /// This function panics if `T` is a Zero-Sized Type ("ZST").
773 /// #![feature(ptr_sub_ptr)]
776 /// let ptr1: *const i32 = &a[1];
777 /// let ptr2: *const i32 = &a[3];
779 /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
780 /// assert_eq!(ptr1.add(2), ptr2);
781 /// assert_eq!(ptr2.sub(2), ptr1);
782 /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
785 /// // This would be incorrect, as the pointers are not correctly ordered:
786 /// // ptr1.sub_ptr(ptr2)
788 #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
789 #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
791 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
792 pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
797 // SAFETY: The comparison has no side-effects, and the intrinsic
798 // does this check internally in the CTFE implementation.
800 assert_unsafe_precondition!(
801 "ptr::sub_ptr requires `this >= origin`",
802 [T](this: *const T, origin: *const T) => this >= origin
806 let pointee_size = mem::size_of::<T>();
807 assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
808 // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
809 unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
812 /// Returns whether two pointers are guaranteed to be equal.
814 /// At runtime this function behaves like `Some(self == other)`.
815 /// However, in some contexts (e.g., compile-time evaluation),
816 /// it is not always possible to determine equality of two pointers, so this function may
817 /// spuriously return `None` for pointers that later actually turn out to have their equality known.
818 /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
820 /// The return value may change from `Some` to `None` and vice versa depending on the compiler
821 /// version, and unsafe code must not
822 /// rely on the result of this function for soundness. It is suggested to only use this function
823 /// for performance optimizations where spurious `None` return values by this function do not
824 /// affect the outcome, but just the performance.
825 /// The consequences of using this method to make runtime and compile-time code behave
826 /// differently have not been explored. This method should not be used to introduce such
827 /// differences, and it should also not be stabilized before we have a better understanding of this issue.
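/// A sketch of how this might be used for an optimization (illustrative; assumes the
/// unstable `const_raw_ptr_comparison` feature):
///
/// ```
/// #![feature(const_raw_ptr_comparison)]
/// let x = [1, 2, 3];
/// let p: *const i32 = &x[0];
/// match p.guaranteed_eq(p) {
///     // At runtime the answer is known, so this arm is taken.
///     Some(eq) => assert!(eq),
///     // A `None` result would only mean the comparison could not be decided.
///     None => {}
/// }
/// ```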
829 #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
830 #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
832 pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
836 match intrinsics::ptr_guaranteed_cmp(self as _, other as _) {
838 other => Some(other == 1),
842 /// Returns whether two pointers are guaranteed to be unequal.
844 /// At runtime this function behaves like `Some(self != other)`.
845 /// However, in some contexts (e.g., compile-time evaluation),
846 /// it is not always possible to determine inequality of two pointers, so this function may
847 /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
848 /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
850 /// The return value may change from `Some` to `None` and vice versa depending on the compiler
851 /// version, and unsafe code must not
852 /// rely on the result of this function for soundness. It is suggested to only use this function
853 /// for performance optimizations where spurious `None` return values by this function do not
854 /// affect the outcome, but just the performance.
855 /// The consequences of using this method to make runtime and compile-time code behave
856 /// differently have not been explored. This method should not be used to introduce such
857 /// differences, and it should also not be stabilized before we have a better understanding of this issue.
859 #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
860 #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
862 pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
866 match self.guaranteed_eq(other) {
868 Some(eq) => Some(!eq),
872 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
874 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
875 /// offset of `3 * size_of::<T>()` bytes.
879 /// If any of the following conditions are violated, the result is Undefined Behavior:
882 /// * Both the starting and resulting pointer must be either in bounds or one
883 /// byte past the end of the same [allocated object].
885 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
887 /// * The offset being in bounds cannot rely on "wrapping around" the address
888 /// space. That is, the infinite-precision sum must fit in a `usize`.
890 /// The compiler and standard library generally try to ensure allocations
891 /// never reach a size where an offset is a concern. For instance, `Vec`
892 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
893 /// `vec.as_ptr().add(vec.len())` is always safe.
895 /// Most platforms fundamentally can't even construct such an allocation.
896 /// For instance, no known 64-bit platform can ever serve a request
897 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
898 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
899 /// more than `isize::MAX` bytes with things like Physical Address
900 /// Extension. As such, memory acquired directly from allocators or memory
901 /// mapped files *may* be too large to handle with this function.
903 /// Consider using [`wrapping_add`] instead if these constraints are
904 /// difficult to satisfy. The only advantage of this method is that it
905 /// enables more aggressive compiler optimizations.
907 /// [`wrapping_add`]: #method.wrapping_add
908 /// [allocated object]: crate::ptr#allocated-object
915 /// let s: &str = "123";
916 /// let ptr: *const u8 = s.as_ptr();
919 /// println!("{}", *ptr.add(1) as char);
920 /// println!("{}", *ptr.add(2) as char);
923 #[stable(feature = "pointer_methods", since = "1.26.0")]
924 #[must_use = "returns a new pointer rather than modifying its argument"]
925 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
927 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
928 pub const unsafe fn add(self, count: usize) -> Self
932 // SAFETY: the caller must uphold the safety contract for `offset`.
933 unsafe { self.offset(count as isize) }
936 /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
938 /// `count` is in units of bytes.
940 /// This is purely a convenience for casting to a `u8` pointer and
941 /// using [add][pointer::add] on it. See that method for documentation
942 /// and safety requirements.
944 /// For non-`Sized` pointees this operation changes only the data pointer,
945 /// leaving the metadata untouched.
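/// A small sketch (illustrative; assumes the unstable `pointer_byte_offsets` feature):
///
/// ```
/// #![feature(pointer_byte_offsets)]
/// let arr = [1u16, 2, 3];
/// let p: *const u16 = arr.as_ptr();
/// // Advance by one element's worth of bytes.
/// let q = unsafe { p.byte_add(std::mem::size_of::<u16>()) };
/// assert_eq!(unsafe { *q }, 2);
/// ```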
948 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
949 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
950 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
951 pub const unsafe fn byte_add(self, count: usize) -> Self {
952 // SAFETY: the caller must uphold the safety contract for `add`.
953 unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
956 /// Calculates the offset from a pointer (convenience for
957 /// `.offset((count as isize).wrapping_neg())`).
959 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
960 /// offset of `3 * size_of::<T>()` bytes.
964 /// If any of the following conditions are violated, the result is Undefined Behavior:
967 /// * Both the starting and resulting pointer must be either in bounds or one
968 /// byte past the end of the same [allocated object].
970 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
972 /// * The offset being in bounds cannot rely on "wrapping around" the address
973 /// space. That is, the infinite-precision sum must fit in a `usize`.
975 /// The compiler and standard library generally try to ensure allocations
976 /// never reach a size where an offset is a concern. For instance, `Vec`
977 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
978 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
980 /// Most platforms fundamentally can't even construct such an allocation.
981 /// For instance, no known 64-bit platform can ever serve a request
982 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
983 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
984 /// more than `isize::MAX` bytes with things like Physical Address
985 /// Extension. As such, memory acquired directly from allocators or memory
986 /// mapped files *may* be too large to handle with this function.
988 /// Consider using [`wrapping_sub`] instead if these constraints are
989 /// difficult to satisfy. The only advantage of this method is that it
990 /// enables more aggressive compiler optimizations.
992 /// [`wrapping_sub`]: #method.wrapping_sub
993 /// [allocated object]: crate::ptr#allocated-object
1000 /// let s: &str = "123";
1003 /// let end: *const u8 = s.as_ptr().add(3);
1004 /// println!("{}", *end.sub(1) as char);
1005 /// println!("{}", *end.sub(2) as char);
1008 #[stable(feature = "pointer_methods", since = "1.26.0")]
1009 #[must_use = "returns a new pointer rather than modifying its argument"]
1010 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1012 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1013 pub const unsafe fn sub(self, count: usize) -> Self
1017 // SAFETY: the caller must uphold the safety contract for `offset`.
1018 unsafe { self.offset((count as isize).wrapping_neg()) }
1021 /// Calculates the offset from a pointer in bytes (convenience for
1022 /// `.byte_offset((count as isize).wrapping_neg())`).
1024 /// `count` is in units of bytes.
1026 /// This is purely a convenience for casting to a `u8` pointer and
1027 /// using [sub][pointer::sub] on it. See that method for documentation
1028 /// and safety requirements.
1030 /// For non-`Sized` pointees this operation changes only the data pointer,
1031 /// leaving the metadata untouched.
1034 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1035 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1036 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1037 pub const unsafe fn byte_sub(self, count: usize) -> Self {
1038 // SAFETY: the caller must uphold the safety contract for `sub`.
1039 unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
1042 /// Calculates the offset from a pointer using wrapping arithmetic.
1043 /// (convenience for `.wrapping_offset(count as isize)`)
1045 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1046 /// offset of `3 * size_of::<T>()` bytes.
1050 /// This operation itself is always safe, but using the resulting pointer is not.
1052 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
1053 /// be used to read or write other allocated objects.
1055 /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
1056 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
1057 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
1058 /// `x` and `y` point into the same allocated object.
1060 /// Compared to [`add`], this method basically delays the requirement of staying within the
1061 /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
1062 /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
1063 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
1064 /// can be optimized better and is thus preferable in performance-sensitive code.
1066 /// The delayed check only considers the value of the pointer that was dereferenced, not the
1067 /// intermediate values used during the computation of the final result. For example,
1068 /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
1069 /// allocated object and then re-entering it later is permitted.
1071 /// [`add`]: #method.add
1072 /// [allocated object]: crate::ptr#allocated-object
1079 /// // Iterate using a raw pointer in increments of two elements
1080 /// let data = [1u8, 2, 3, 4, 5];
1081 /// let mut ptr: *const u8 = data.as_ptr();
1083 /// let end_rounded_up = ptr.wrapping_add(6);
1085 /// // This loop prints "1, 3, 5, "
1086 /// while ptr != end_rounded_up {
1088 /// print!("{}, ", *ptr);
1090 /// ptr = ptr.wrapping_add(step);
1093 #[stable(feature = "pointer_methods", since = "1.26.0")]
1094 #[must_use = "returns a new pointer rather than modifying its argument"]
1095 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1097 pub const fn wrapping_add(self, count: usize) -> Self
1101 self.wrapping_offset(count as isize)
1104 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
1105 /// (convenience for `.wrapping_byte_offset(count as isize)`)
1107 /// `count` is in units of bytes.
1109 /// This is purely a convenience for casting to a `u8` pointer and
1110 /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
1112 /// For non-`Sized` pointees this operation changes only the data pointer,
1113 /// leaving the metadata untouched.
1116 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1117 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1118 pub const fn wrapping_byte_add(self, count: usize) -> Self {
1119 self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
1122 /// Calculates the offset from a pointer using wrapping arithmetic.
1123 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1125 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1126 /// offset of `3 * size_of::<T>()` bytes.
1130 /// This operation itself is always safe, but using the resulting pointer is not.
1132 /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
1133 /// be used to read or write other allocated objects.
1135 /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
1136 /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
1137 /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
1138 /// `x` and `y` point into the same allocated object.
1140 /// Compared to [`sub`], this method basically delays the requirement of staying within the
1141 /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
1142 /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
1143 /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
1144 /// can be optimized better and is thus preferable in performance-sensitive code.
1146 /// The delayed check only considers the value of the pointer that was dereferenced, not the
1147 /// intermediate values used during the computation of the final result. For example,
1148 /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
1149 /// allocated object and then re-entering it later is permitted.
1151 /// [`sub`]: #method.sub
1152 /// [allocated object]: crate::ptr#allocated-object
1159 /// // Iterate using a raw pointer in increments of two elements (backwards)
1160 /// let data = [1u8, 2, 3, 4, 5];
1161 /// let mut ptr: *const u8 = data.as_ptr();
1162 /// let start_rounded_down = ptr.wrapping_sub(2);
1163 /// ptr = ptr.wrapping_add(4);
1165 /// // This loop prints "5, 3, 1, "
1166 /// while ptr != start_rounded_down {
1168 /// print!("{}, ", *ptr);
1170 /// ptr = ptr.wrapping_sub(step);
1173 #[stable(feature = "pointer_methods", since = "1.26.0")]
1174 #[must_use = "returns a new pointer rather than modifying its argument"]
1175 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1177 pub const fn wrapping_sub(self, count: usize) -> Self
1181 self.wrapping_offset((count as isize).wrapping_neg())
1184 /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
1185 /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
1187 /// `count` is in units of bytes.
1189 /// This is purely a convenience for casting to a `u8` pointer and
1190 /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
1192 /// For non-`Sized` pointees this operation changes only the data pointer,
1193 /// leaving the metadata untouched.
1196 #[unstable(feature = "pointer_byte_offsets", issue = "96283")]
1197 #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
1198 pub const fn wrapping_byte_sub(self, count: usize) -> Self {
1199 self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
1202 /// Reads the value from `self` without moving it. This leaves the
1203 /// memory in `self` unchanged.
1205 /// See [`ptr::read`] for safety concerns and examples.
1207 /// [`ptr::read`]: crate::ptr::read()
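/// A minimal sketch (illustrative):
///
/// ```
/// let x = 12u32;
/// let p: *const u32 = &x;
/// assert_eq!(unsafe { p.read() }, 12);
/// ```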
1208 #[stable(feature = "pointer_methods", since = "1.26.0")]
1209 #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
1211 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1212 pub const unsafe fn read(self) -> T
1216 // SAFETY: the caller must uphold the safety contract for `read`.
1217 unsafe { read(self) }
1220 /// Performs a volatile read of the value from `self` without moving it. This
1221 /// leaves the memory in `self` unchanged.
1223 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1224 /// to not be elided or reordered by the compiler across other volatile operations.
1227 /// See [`ptr::read_volatile`] for safety concerns and examples.
1229 /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
1230 #[stable(feature = "pointer_methods", since = "1.26.0")]
1232 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1233 pub unsafe fn read_volatile(self) -> T
1237 // SAFETY: the caller must uphold the safety contract for `read_volatile`.
1238 unsafe { read_volatile(self) }
1241 /// Reads the value from `self` without moving it. This leaves the
1242 /// memory in `self` unchanged.
1244 /// Unlike `read`, the pointer may be unaligned.
1246 /// See [`ptr::read_unaligned`] for safety concerns and examples.
1248 /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
1249 #[stable(feature = "pointer_methods", since = "1.26.0")]
1250 #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
1252 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1253 pub const unsafe fn read_unaligned(self) -> T
1257 // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
1258 unsafe { read_unaligned(self) }
1261 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1262 /// and destination may overlap.
1264 /// NOTE: this has the *same* argument order as [`ptr::copy`].
1266 /// See [`ptr::copy`] for safety concerns and examples.
1268 /// [`ptr::copy`]: crate::ptr::copy()
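/// A minimal sketch (illustrative):
///
/// ```
/// let src = [1u8, 2, 3];
/// let mut dst = [0u8; 3];
/// // Copy three elements from `src` into `dst`.
/// unsafe { src.as_ptr().copy_to(dst.as_mut_ptr(), 3) };
/// assert_eq!(dst, [1, 2, 3]);
/// ```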
1269 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1270 #[stable(feature = "pointer_methods", since = "1.26.0")]
1272 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1273 pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
1277 // SAFETY: the caller must uphold the safety contract for `copy`.
1278 unsafe { copy(self, dest, count) }
1281 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1282 /// and destination may *not* overlap.
1284 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1286 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1288 /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
1289 #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
1290 #[stable(feature = "pointer_methods", since = "1.26.0")]
1292 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1293 pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1297 // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
1298 unsafe { copy_nonoverlapping(self, dest, count) }
1301 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to `align`.
1304 /// If it is not possible to align the pointer, the implementation returns
1305 /// `usize::MAX`. It is permissible for the implementation to *always*
1306 /// return `usize::MAX`. Only your algorithm's performance can depend
1307 /// on getting a usable offset here, not its correctness.
1309 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1310 /// used with the `wrapping_add` method.
1312 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1313 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1314 /// the returned offset is correct in all terms other than alignment.
1318 /// The function panics if `align` is not a power-of-two.
1322 /// Accessing adjacent `u8` as `u16`
1325 /// use std::mem::align_of;
1328 /// let x = [5_u8, 6, 7, 8, 9];
1329 /// let ptr = x.as_ptr();
1330 /// let offset = ptr.align_offset(align_of::<u16>());
1332 /// if offset < x.len() - 1 {
1333 /// let u16_ptr = ptr.add(offset).cast::<u16>();
1334 /// assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
1336 /// // while the pointer can be aligned via `offset`, it would point
1337 /// // outside the allocation
1343 #[stable(feature = "align_offset", since = "1.36.0")]
1344 #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
1345 pub const fn align_offset(self, align: usize) -> usize
1349 if !align.is_power_of_two() {
1350 panic!("align_offset: align is not a power-of-two");
1355 fn rt_impl<T>(p: *const T, align: usize) -> usize {
1356 // SAFETY: `align` has been checked to be a power of 2 above
1357 unsafe { align_offset(p, align) }
1360 const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
1365 // It is permissible for `align_offset` to always return `usize::MAX`;
1366 // algorithm correctness cannot depend on `align_offset` returning non-max values.
1368 // As such, the behaviour can't change after replacing `align_offset` with `usize::MAX`; only performance can.
1369 unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
1372 #[cfg(not(bootstrap))]
1374 // SAFETY: `align` has been checked to be a power of 2 above
1375 unsafe { align_offset(self, align) }
1379 /// Returns whether the pointer is properly aligned for `T`.
1385 /// #![feature(pointer_is_aligned)]
1386 /// #![feature(pointer_byte_offsets)]
1388 /// // On some platforms, the alignment of i32 is less than 4.
1389 /// #[repr(align(4))]
1390 /// struct AlignedI32(i32);
1392 /// let data = AlignedI32(42);
1393 /// let ptr = &data as *const AlignedI32;
1395 /// assert!(ptr.is_aligned());
1396 /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
1399 /// # At compiletime
1400 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1401 /// [tracking issue] for details.**
1403 /// At compiletime, the compiler may not know where a value will end up in memory.
1404 /// Calling this function on a pointer created from a reference at compiletime will only
1405 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1406 /// is never aligned if cast to a type with a stricter alignment than the reference's
1407 /// underlying allocation.
1409 #[cfg_attr(bootstrap, doc = "```ignore")]
1410 #[cfg_attr(not(bootstrap), doc = "```")]
1411 /// #![feature(pointer_is_aligned)]
1412 /// #![feature(const_pointer_is_aligned)]
1414 /// // On some platforms, the alignment of primitives is less than their size.
1415 /// #[repr(align(4))]
1416 /// struct AlignedI32(i32);
1417 /// #[repr(align(8))]
1418 /// struct AlignedI64(i64);
1421 /// let data = AlignedI32(42);
1422 /// let ptr = &data as *const AlignedI32;
1423 /// assert!(ptr.is_aligned());
1425 /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
1426 /// let ptr1 = ptr.cast::<AlignedI64>();
1427 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1428 /// assert!(!ptr1.is_aligned());
1429 /// assert!(!ptr2.is_aligned());
1433 /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
1434 /// pointer is aligned, even if the compiletime pointer wasn't aligned.
1436 #[cfg_attr(bootstrap, doc = "```ignore")]
1437 #[cfg_attr(not(bootstrap), doc = "```")]
1438 /// #![feature(pointer_is_aligned)]
1439 /// #![feature(const_pointer_is_aligned)]
1441 /// // On some platforms, the alignment of primitives is less than their size.
1442 /// #[repr(align(4))]
1443 /// struct AlignedI32(i32);
1444 /// #[repr(align(8))]
1445 /// struct AlignedI64(i64);
1447 /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
1448 /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
1449 /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
1450 /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
1452 /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
1453 /// let runtime_ptr = COMPTIME_PTR;
1455 /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
1456 /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
1460 /// If a pointer is created from a fixed address, this function behaves the same during
1461 /// runtime and compiletime.
1463 #[cfg_attr(bootstrap, doc = "```ignore")]
1464 #[cfg_attr(not(bootstrap), doc = "```")]
1465 /// #![feature(pointer_is_aligned)]
1466 /// #![feature(const_pointer_is_aligned)]
1468 /// // On some platforms, the alignment of primitives is less than their size.
1469 /// #[repr(align(4))]
1470 /// struct AlignedI32(i32);
1471 /// #[repr(align(8))]
1472 /// struct AlignedI64(i64);
1475 /// let ptr = 40 as *const AlignedI32;
1476 /// assert!(ptr.is_aligned());
1478 /// // For pointers with a known address, runtime and compiletime behavior are identical.
1479 /// let ptr1 = ptr.cast::<AlignedI64>();
1480 /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
1481 /// assert!(ptr1.is_aligned());
1482 /// assert!(!ptr2.is_aligned());
1486 /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
1489 #[unstable(feature = "pointer_is_aligned", issue = "96284")]
1490 #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
1491 pub const fn is_aligned(self) -> bool
1495 self.is_aligned_to(mem::align_of::<T>())
1498 /// Returns whether the pointer is aligned to `align`.
1500 /// For non-`Sized` pointees this operation considers only the data pointer,
1501 /// ignoring the metadata.
1505 /// The function panics if `align` is not a power-of-two (this includes 0).
1511 /// #![feature(pointer_is_aligned)]
1512 /// #![feature(pointer_byte_offsets)]
1514 /// // On some platforms, the alignment of i32 is less than 4.
1515 /// #[repr(align(4))]
1516 /// struct AlignedI32(i32);
1518 /// let data = AlignedI32(42);
1519 /// let ptr = &data as *const AlignedI32;
1521 /// assert!(ptr.is_aligned_to(1));
1522 /// assert!(ptr.is_aligned_to(2));
1523 /// assert!(ptr.is_aligned_to(4));
1525 /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
1526 /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
1528 /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
1531 /// # At compiletime
1532 /// **Note: Alignment at compiletime is experimental and subject to change. See the
1533 /// [tracking issue] for details.**
1535 /// At compiletime, the compiler may not know where a value will end up in memory.
1536 /// Calling this function on a pointer created from a reference at compiletime will only
1537 /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
1538 /// cannot be more strictly aligned than the reference's underlying allocation.
1540 #[cfg_attr(bootstrap, doc = "```ignore")]
1541 #[cfg_attr(not(bootstrap), doc = "```")]
1542 /// #![feature(pointer_is_aligned)]
1543 /// #![feature(const_pointer_is_aligned)]
1545 /// // On some platforms, the alignment of i32 is less than 4.
1546 /// #[repr(align(4))]
1547 /// struct AlignedI32(i32);
1549 /// const _: () = {
1550 /// let data = AlignedI32(42);
1551 /// let ptr = &data as *const AlignedI32;
1553 /// assert!(ptr.is_aligned_to(1));
1554 /// assert!(ptr.is_aligned_to(2));
1555 /// assert!(ptr.is_aligned_to(4));
1557 /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
1558 /// assert!(!ptr.is_aligned_to(8));
1559 /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
1560 /// };
1561 /// ```
1563 /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
1564 /// pointer is aligned, even if the compiletime pointer wasn't aligned.
1566 #[cfg_attr(bootstrap, doc = "```ignore")]
1567 #[cfg_attr(not(bootstrap), doc = "```")]
1568 /// #![feature(pointer_is_aligned)]
1569 /// #![feature(const_pointer_is_aligned)]
1571 /// // On some platforms, the alignment of i32 is less than 4.
1572 /// #[repr(align(4))]
1573 /// struct AlignedI32(i32);
1575 /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
1576 /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
1577 /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
1578 /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
1580 /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
1581 /// let runtime_ptr = COMPTIME_PTR;
1582 /// assert_ne!(
1583 /// runtime_ptr.is_aligned_to(8),
1584 /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
1585 /// );
1586 /// ```
1588 /// If a pointer is created from a fixed address, this function behaves the same during
1589 /// runtime and compiletime.
1591 #[cfg_attr(bootstrap, doc = "```ignore")]
1592 #[cfg_attr(not(bootstrap), doc = "```")]
1593 /// #![feature(pointer_is_aligned)]
1594 /// #![feature(const_pointer_is_aligned)]
1596 /// const _: () = {
1597 /// let ptr = 40 as *const u8;
1598 /// assert!(ptr.is_aligned_to(1));
1599 /// assert!(ptr.is_aligned_to(2));
1600 /// assert!(ptr.is_aligned_to(4));
1601 /// assert!(ptr.is_aligned_to(8));
1602 /// assert!(!ptr.is_aligned_to(16));
1603 /// };
1604 /// ```
1606 /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
1609 #[unstable(feature = "pointer_is_aligned", issue = "96284")]
1610 #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
1611 pub const fn is_aligned_to(self, align: usize) -> bool {
1612 if !align.is_power_of_two() {
1613 panic!("is_aligned_to: align is not a power-of-two");
1614 }
1616 // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
1617 // The cast to `()` is used to
1618 // 1. deal with fat pointers; and
1619 // 2. ensure that `align_offset` doesn't actually try to compute an offset.
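// Illustrative sketch (an addition, not part of the upstream sources): at runtime the check
// below is equivalent to testing the address bits directly, e.g. with a hypothetical helper
//
//     fn is_aligned_to_runtime(p: *const u8, align: usize) -> bool {
//         assert!(align.is_power_of_two());
//         // a power-of-two alignment is satisfied exactly when the low bits are zero
//         (p as usize) & (align - 1) == 0
//     }
//
// but that formulation cannot be used here, because casting the pointer to `usize` is not
// possible in a `const fn`.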
1620 self.cast::<()>().align_offset(align) == 0
1621 }
1622 }
1624 impl<T> *const [T] {
1625 /// Returns the length of a raw slice.
1627 /// The returned value is the number of **elements**, not the number of bytes.
1629 /// This function is safe, even when the raw slice cannot be cast to a slice
1630 /// reference because the pointer is null or unaligned.
1632 /// # Examples
1634 /// ```
1635 /// #![feature(slice_ptr_len)]
1637 /// use std::ptr;
1639 /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
1640 /// assert_eq!(slice.len(), 3);
1641 /// ```
1643 #[unstable(feature = "slice_ptr_len", issue = "71146")]
1644 #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
1645 pub const fn len(self) -> usize {
1646 metadata(self)
1647 }
1649 /// Returns a raw pointer to the slice's buffer.
1651 /// This is equivalent to casting `self` to `*const T`, but more type-safe.
1653 /// # Examples
1655 /// ```
1656 /// #![feature(slice_ptr_get)]
1657 /// use std::ptr;
1659 /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
1660 /// assert_eq!(slice.as_ptr(), ptr::null());
1661 /// ```
1663 #[unstable(feature = "slice_ptr_get", issue = "74265")]
1664 #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
1665 pub const fn as_ptr(self) -> *const T {
1666 self as *const T
1667 }
1669 /// Returns a raw pointer to an element or subslice, without doing bounds
1670 /// checking.
1672 /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
1673 /// is *[undefined behavior]* even if the resulting pointer is not used.
1675 /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1677 /// # Examples
1679 /// ```
1680 /// #![feature(slice_ptr_get)]
1682 /// let x = &[1, 2, 4] as *const [i32];
1684 /// unsafe {
1685 /// assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
1686 /// }
1687 /// ```
1688 #[unstable(feature = "slice_ptr_get", issue = "74265")]
1689 #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
1691 pub const unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
1692 where
1693 I: ~const SliceIndex<[T]>,
1694 {
1695 // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
1696 unsafe { index.get_unchecked(self) }
1697 }
1699 /// Returns `None` if the pointer is null, or else returns a shared slice to
1700 /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
1701 /// that the value be initialized.
1703 /// [`as_ref`]: #method.as_ref
1707 /// When calling this method, you have to ensure that *either* the pointer is null *or*
1708 /// all of the following are true:
1710 /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
1711 /// and it must be properly aligned. This means in particular:
1713 /// * The entire memory range of this slice must be contained within a single [allocated object]!
1714 /// Slices can never span across multiple allocated objects.
1716 /// * The pointer must be aligned even for zero-length slices. One
1717 /// reason for this is that enum layout optimizations may rely on references
1718 /// (including slices of any length) being aligned and non-null to distinguish
1719 /// them from other data. You can obtain a pointer that is usable as `data`
1720 /// for zero-length slices using [`NonNull::dangling()`].
1722 /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
1723 /// See the safety documentation of [`pointer::offset`].
1725 /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
1726 /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
1727 /// In particular, while this reference exists, the memory the pointer points to must
1728 /// not get mutated (except inside `UnsafeCell`).
1730 /// This applies even if the result of this method is unused!
1732 /// See also [`slice::from_raw_parts`][].
1734 /// [valid]: crate::ptr#safety
1735 /// [allocated object]: crate::ptr#allocated-object
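/// # Examples
///
/// A minimal illustrative sketch (added here, not taken from the existing docs), assuming a
/// pointer derived from a live, initialized array:
///
/// ```
/// #![feature(ptr_as_uninit)]
///
/// let data = [1u8, 2, 3];
/// let ptr = &data as *const [u8];
/// // SAFETY: `ptr` comes from a reference to an initialized array, so it is non-null,
/// // aligned, in-bounds, and nothing mutates the array while the returned slice is alive.
/// let uninit = unsafe { ptr.as_uninit_slice() }.unwrap();
/// assert_eq!(uninit.len(), 3);
/// ```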
1737 #[unstable(feature = "ptr_as_uninit", issue = "75402")]
1738 #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
1739 pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
1740 if self.is_null() {
1741 None
1742 } else {
1743 // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
1744 Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
1745 }
1746 }
1747 }
1749 // Equality for pointers
1750 #[stable(feature = "rust1", since = "1.0.0")]
1751 impl<T: ?Sized> PartialEq for *const T {
1753 fn eq(&self, other: &*const T) -> bool {
1754 *self == *other
1755 }
1756 }
1758 #[stable(feature = "rust1", since = "1.0.0")]
1759 impl<T: ?Sized> Eq for *const T {}
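// Illustrative note (an addition, not part of the upstream sources): equality on raw pointers
// compares addresses (and metadata, for fat pointers) without dereferencing, e.g.:
//
//     let x = 5i32;
//     let a: *const i32 = &x;
//     let b: *const i32 = &x;
//     assert!(a == b);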
1761 // Comparison for pointers
1762 #[stable(feature = "rust1", since = "1.0.0")]
1763 impl<T: ?Sized> Ord for *const T {
1765 fn cmp(&self, other: &*const T) -> Ordering {
1766 if self < other {
1767 Less
1768 } else if self == other {
1769 Equal
1770 } else {
1771 Greater
1772 }
1773 }
1774 }
1776 #[stable(feature = "rust1", since = "1.0.0")]
1777 impl<T: ?Sized> PartialOrd for *const T {
1779 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
1780 Some(self.cmp(other))
1781 }
1784 fn lt(&self, other: &*const T) -> bool {
1785 *self < *other
1786 }
1789 fn le(&self, other: &*const T) -> bool {
1790 *self <= *other
1791 }
1794 fn gt(&self, other: &*const T) -> bool {
1795 *self > *other
1796 }
1799 fn ge(&self, other: &*const T) -> bool {
1800 *self >= *other
1801 }
1802 }
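// Illustrative note (an addition, not part of the upstream sources): the ordering impls above
// compare by address, so pointers to elements of the same array are ordered by index, e.g.:
//
//     let arr = [1u8, 2, 3];
//     let first: *const u8 = &arr[0];
//     let last: *const u8 = &arr[2];
//     assert!(first < last);
//     assert_eq!(first.cmp(&last), core::cmp::Ordering::Less);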