1 //! Manually manage memory through raw pointers.
3 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
7 //! Many functions in this module take raw pointers as arguments and read from
8 //! or write to them. For this to be safe, these pointers must be *valid*.
9 //! Whether a pointer is valid depends on the operation it is used for
10 //! (read or write), and the extent of the memory that is accessed (i.e.,
11 //! how many bytes are read/written). Most functions use `*mut T` and `*const T`
12 //! to access only a single value, in which case the documentation omits the size
13 //! and implicitly assumes it to be `size_of::<T>()` bytes.
15 //! The precise rules for validity are not determined yet. The guarantees that are
16 //! provided at this point are very minimal:
18 //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
19 //! * All pointers (except for the null pointer) are valid for all operations of
20 //!   [size zero][zst].
21 //! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
22 //!   be *dereferenceable*: the memory range of the given size starting at the pointer must all be
23 //! within the bounds of a single allocated object. Note that in Rust,
24 //! every (stack-allocated) variable is considered a separate allocated object.
25 //! * All accesses performed by functions in this module are *non-atomic* in the sense
26 //! of [atomic operations] used to synchronize between threads. This means it is
27 //! undefined behavior to perform two concurrent accesses to the same location from different
28 //! threads unless both accesses only read from memory. Notice that this explicitly
29 //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
30 //! be used for inter-thread synchronization.
31 //! * The result of casting a reference to a pointer is valid for as long as the
32 //! underlying object is live and no reference (just raw pointers) is used to
33 //! access the same memory.
35 //! These axioms, along with careful use of [`offset`] for pointer arithmetic,
36 //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
37 //! will be provided eventually, as the [aliasing] rules are being determined. For more
38 //! information, see the [book] as well as the section in the reference devoted
39 //! to [undefined behavior][ub].
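//!
//! A minimal sketch of the last rule above (all names here are purely illustrative):
//!
//! ```
//! let mut x = 7u32;
//! let p = &mut x as *mut u32;   // pointer derived from a reference
//! unsafe { *p = 8 };            // ok: `x` is live and not accessed through a reference here
//! assert_eq!(x, 8);             // the raw-pointer accesses are over, so `x` may be used again
//! ```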
43 //! Valid raw pointers as defined above are not necessarily properly aligned (where
44 //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
45 //! aligned to `mem::align_of::<T>()`). However, most functions require their
46 //! arguments to be properly aligned, and will explicitly state
47 //! this requirement in their documentation. Notable exceptions to this are
48 //! [`read_unaligned`] and [`write_unaligned`].
50 //! When a function requires proper alignment, it does so even if the access
51 //! has size 0, i.e., even if memory is not actually touched. Consider using
52 //! [`NonNull::dangling`] in such cases.
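//!
//! For instance (a minimal sketch), a zero-sized access can use a dangling but
//! well-aligned pointer obtained from [`NonNull::dangling`]:
//!
//! ```
//! use std::ptr::{self, NonNull};
//!
//! let p = NonNull::<u32>::dangling().as_ptr();
//! // A `count` of zero means no memory is touched, but the pointer must still be
//! // non-null and properly aligned for `u32`.
//! unsafe { ptr::copy_nonoverlapping::<u32>(p, p, 0) };
//! ```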
54 //! [aliasing]: ../../nomicon/aliasing.html
55 //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
56 //! [ub]: ../../reference/behavior-considered-undefined.html
57 //! [null]: ./fn.null.html
58 //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
59 //! [atomic operations]: ../../std/sync/atomic/index.html
60 //! [`copy`]: ../../std/ptr/fn.copy.html
61 //! [`offset`]: ../../std/primitive.pointer.html#method.offset
62 //! [`read_unaligned`]: ./fn.read_unaligned.html
63 //! [`write_unaligned`]: ./fn.write_unaligned.html
64 //! [`read_volatile`]: ./fn.read_volatile.html
65 //! [`write_volatile`]: ./fn.write_volatile.html
66 //! [`NonNull::dangling`]: ./struct.NonNull.html#method.dangling
68 // ignore-tidy-undocumented-unsafe
70 #![stable(feature = "rust1", since = "1.0.0")]
72 use crate::intrinsics;
75 use crate::mem::{self, MaybeUninit};
76 use crate::cmp::Ordering::{self, Less, Equal, Greater};
78 #[stable(feature = "rust1", since = "1.0.0")]
79 pub use crate::intrinsics::copy_nonoverlapping;
81 #[stable(feature = "rust1", since = "1.0.0")]
82 pub use crate::intrinsics::copy;
84 #[stable(feature = "rust1", since = "1.0.0")]
85 pub use crate::intrinsics::write_bytes;
88 #[stable(feature = "nonnull", since = "1.25.0")]
89 pub use non_null::NonNull;
92 #[unstable(feature = "ptr_internals", issue = "0")]
93 pub use unique::Unique;
95 /// Executes the destructor (if any) of the pointed-to value.
97 /// This is semantically equivalent to calling [`ptr::read`] and discarding
98 /// the result, but has the following advantages:
100 /// * It is *required* to use `drop_in_place` to drop unsized types like
101 /// trait objects, because they can't be read out onto the stack and
102 /// dropped normally.
104 /// * It is friendlier to the optimizer to do this over [`ptr::read`] when
105 /// dropping manually allocated memory (e.g., when writing Box/Rc/Vec),
106 ///   as the compiler doesn't need to prove that it's sound to elide the
107 ///   copy.
109 /// Unaligned values cannot be dropped in place; they must be copied to an aligned
110 /// location first using [`ptr::read_unaligned`].
112 /// [`ptr::read`]: ../ptr/fn.read.html
113 /// [`ptr::read_unaligned`]: ../ptr/fn.read_unaligned.html
117 /// Behavior is undefined if any of the following conditions are violated:
119 /// * `to_drop` must be [valid] for reads.
121 /// * `to_drop` must be properly aligned.
123 /// Additionally, if `T` is not [`Copy`], using the pointed-to value after
124 /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
125 /// foo` counts as a use because it will cause the value to be dropped
126 /// again. [`write`] can be used to overwrite data without causing it to be
127 /// dropped.
129 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
131 /// [valid]: ../ptr/index.html#safety
132 /// [`Copy`]: ../marker/trait.Copy.html
133 /// [`write`]: ../ptr/fn.write.html
137 /// Manually remove the last item from a vector:
143 /// let last = Rc::new(1);
144 /// let weak = Rc::downgrade(&last);
146 /// let mut v = vec![Rc::new(0), last];
149 /// // Get a raw pointer to the last element in `v`.
150 /// let ptr = &mut v[1] as *mut _;
151 /// // Shorten `v` to prevent the last item from being dropped. We do that first,
152 /// // to prevent issues if the `drop_in_place` below panics.
153 /// v.set_len(1);
154 /// // Without a call to `drop_in_place`, the last item would never be dropped,
155 /// // and the memory it manages would be leaked.
156 /// ptr::drop_in_place(ptr);
159 /// assert_eq!(v, &[0.into()]);
161 /// // Ensure that the last item was dropped.
162 /// assert!(weak.upgrade().is_none());
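///
/// A second sketch, for the unsized case mentioned above: dropping a boxed trait
/// object in place. `Box::into_raw` plus a manual `dealloc` is just one way to set
/// this situation up:
///
/// ```
/// use std::alloc::{dealloc, Layout};
/// use std::fmt::Debug;
/// use std::ptr;
///
/// let boxed: Box<dyn Debug> = Box::new(vec![1, 2, 3]);
/// // Take over the allocation so the box no longer drops it automatically.
/// let raw: *mut dyn Debug = Box::into_raw(boxed);
///
/// unsafe {
///     // Run the destructor of the `Vec` through the trait object pointer...
///     ptr::drop_in_place(raw);
///     // ...then free the allocation itself, without dropping the value again.
///     dealloc(raw as *mut u8, Layout::new::<Vec<i32>>());
/// }
/// ```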
165 /// Notice that the compiler performs this copy automatically when dropping packed structs,
166 /// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
167 /// manually.
168 #[stable(feature = "drop_in_place", since = "1.8.0")]
170 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
171 real_drop_in_place(&mut *to_drop)
174 // The real `drop_in_place` -- the one that gets called implicitly when variables go
175 // out of scope -- should have a safe reference and not a raw pointer as argument
176 // type. When we drop a local variable, we access it with a pointer that behaves
177 // like a safe reference; transmuting that to a raw pointer does not mean we can
178 // actually access it with raw pointers.
179 #[lang = "drop_in_place"]
180 #[allow(unconditional_recursion)]
181 unsafe fn real_drop_in_place<T: ?Sized>(to_drop: &mut T) {
182 // Code here does not matter - this is replaced by the
183 // real drop glue by the compiler.
184 real_drop_in_place(to_drop)
187 /// Creates a null raw pointer.
194 /// let p: *const i32 = ptr::null();
195 /// assert!(p.is_null());
198 #[stable(feature = "rust1", since = "1.0.0")]
200 pub const fn null<T>() -> *const T { 0 as *const T }
202 /// Creates a null mutable raw pointer.
209 /// let p: *mut i32 = ptr::null_mut();
210 /// assert!(p.is_null());
213 #[stable(feature = "rust1", since = "1.0.0")]
215 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
217 #[repr(C)]
218 pub(crate) union Repr<T> {
219 pub(crate) rust: *const [T],
220 rust_mut: *mut [T],
221 pub(crate) raw: FatPtr<T>,
222 }
224 #[repr(C)]
225 pub(crate) struct FatPtr<T> {
226 data: *const T,
227 pub(crate) len: usize,
230 /// Forms a raw slice from a pointer and a length.
232 /// The `len` argument is the number of **elements**, not the number of bytes.
234 /// This function is safe, but actually using the return value is unsafe.
235 /// See the documentation of [`from_raw_parts`] for slice safety requirements.
237 /// [`from_raw_parts`]: ../../std/slice/fn.from_raw_parts.html
242 /// #![feature(slice_from_raw_parts)]
245 /// // create a slice pointer when starting out with a pointer to the first element
246 /// let mut x = [5, 6, 7];
247 /// let ptr = &mut x[0] as *mut _;
248 /// let slice = ptr::slice_from_raw_parts_mut(ptr, 3);
249 /// assert_eq!(unsafe { &*slice }[2], 7);
252 #[unstable(feature = "slice_from_raw_parts", reason = "recently added", issue = "36925")]
253 pub fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
254 unsafe { Repr { raw: FatPtr { data, len } }.rust }
257 /// Performs the same functionality as [`slice_from_raw_parts`], except that a
258 /// raw mutable slice is returned, as opposed to a raw immutable slice.
260 /// See the documentation of [`slice_from_raw_parts`] for more details.
262 /// This function is safe, but actually using the return value is unsafe.
263 /// See the documentation of [`from_raw_parts_mut`] for slice safety requirements.
265 /// [`slice_from_raw_parts`]: fn.slice_from_raw_parts.html
266 /// [`from_raw_parts_mut`]: ../../std/slice/fn.from_raw_parts_mut.html
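///
/// # Examples
///
/// A sketch mirroring the [`slice_from_raw_parts`] example, but writing through the
/// returned raw mutable slice:
///
/// ```
/// #![feature(slice_from_raw_parts)]
/// use std::ptr;
///
/// let mut x = [5, 6, 7];
/// let ptr = x.as_mut_ptr();
/// let slice = ptr::slice_from_raw_parts_mut(ptr, 3);
/// // Writing through the raw slice is unsafe, just like reading from it.
/// unsafe { (*slice)[2] = 99 };
/// assert_eq!(unsafe { &*slice }[2], 99);
/// ```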
268 #[unstable(feature = "slice_from_raw_parts", reason = "recently added", issue = "36925")]
269 pub fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
270 unsafe { Repr { raw: FatPtr { data, len } }.rust_mut }
273 /// Swaps the values at two mutable locations of the same type, without
274 /// deinitializing either.
276 /// Apart from the following two exceptions, this function is semantically
277 /// equivalent to [`mem::swap`]:
279 /// * It operates on raw pointers instead of references. When references are
280 /// available, [`mem::swap`] should be preferred.
282 /// * The two pointed-to values may overlap. If the values do overlap, then the
283 /// overlapping region of memory from `x` will be used. This is demonstrated
284 /// in the second example below.
286 /// [`mem::swap`]: ../mem/fn.swap.html
290 /// Behavior is undefined if any of the following conditions are violated:
292 /// * Both `x` and `y` must be [valid] for reads and writes.
294 /// * Both `x` and `y` must be properly aligned.
296 /// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
298 /// [valid]: ../ptr/index.html#safety
302 /// Swapping two non-overlapping regions:
307 /// let mut array = [0, 1, 2, 3];
309 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
310 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
312 /// unsafe {
313 ///     ptr::swap(x, y);
314 ///     assert_eq!([2, 3, 0, 1], array);
318 /// Swapping two overlapping regions:
323 /// let mut array = [0, 1, 2, 3];
325 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
326 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
328 /// unsafe {
329 ///     ptr::swap(x, y);
330 ///     // The indices `1..3` of the slice overlap between `x` and `y`.
331 ///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
332 /// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
333 /// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
334 /// // This implementation is defined to make the latter choice.
335 /// assert_eq!([1, 0, 1, 2], array);
339 #[stable(feature = "rust1", since = "1.0.0")]
340 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
341 // Give ourselves some scratch space to work with.
342 // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
343 let mut tmp = MaybeUninit::<T>::uninit();
346 copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
347 copy(y, x, 1); // `x` and `y` may overlap
348 copy_nonoverlapping(tmp.as_ptr(), y, 1);
351 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
352 /// beginning at `x` and `y`. The two regions must *not* overlap.
356 /// Behavior is undefined if any of the following conditions are violated:
358 /// * Both `x` and `y` must be [valid] for reads and writes of `count *
359 /// size_of::<T>()` bytes.
361 /// * Both `x` and `y` must be properly aligned.
363 /// * The region of memory beginning at `x` with a size of `count *
364 /// size_of::<T>()` bytes must *not* overlap with the region of memory
365 /// beginning at `y` with the same size.
367 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
368 /// the pointers must be non-NULL and properly aligned.
370 /// [valid]: ../ptr/index.html#safety
379 /// let mut x = [1, 2, 3, 4];
380 /// let mut y = [7, 8, 9];
382 /// unsafe {
383 ///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
386 /// assert_eq!(x, [7, 8, 3, 4]);
387 /// assert_eq!(y, [1, 2, 9]);
390 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
391 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
392 let x = x as *mut u8;
393 let y = y as *mut u8;
394 let len = mem::size_of::<T>() * count;
395 swap_nonoverlapping_bytes(x, y, len)
399 pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
400 // For types smaller than the block optimization below,
401 // just swap directly to avoid pessimizing codegen.
402 if mem::size_of::<T>() < 32 {
403 let z = read(x);
404 copy_nonoverlapping(y, x, 1);
405 write(y, z);
406 } else {
407 swap_nonoverlapping(x, y, 1);
412 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
413 // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
414 // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
415 // Haswell E processors. LLVM is more able to optimize if we give a struct a
416 // #[repr(simd)], even if we don't actually use this struct directly.
418 // FIXME repr(simd) broken on emscripten and redox
419 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
420 struct Block(u64, u64, u64, u64);
421 struct UnalignedBlock(u64, u64, u64, u64);
423 let block_size = mem::size_of::<Block>();
425 // Loop through x & y, copying them `Block` at a time
426 // The optimizer should unroll the loop fully for most types
427 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
428 let mut i = 0;
429 while i + block_size <= len {
430 // Create some uninitialized memory as scratch space
431 // Declaring `t` here avoids aligning the stack when this loop is unused
432 let mut t = mem::MaybeUninit::<Block>::uninit();
433 let t = t.as_mut_ptr() as *mut u8;
434 let x = x.add(i);
435 let y = y.add(i);
437 // Swap a block of bytes of x & y, using t as a temporary buffer
438 // This should be optimized into efficient SIMD operations where available
439 copy_nonoverlapping(x, t, block_size);
440 copy_nonoverlapping(y, x, block_size);
441 copy_nonoverlapping(t, y, block_size);
442 i += block_size;
443 }
445 if i < len {
446 // Swap any remaining bytes
447 let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
448 let rem = len - i;
450 let t = t.as_mut_ptr() as *mut u8;
451 let x = x.add(i);
452 let y = y.add(i);
454 copy_nonoverlapping(x, t, rem);
455 copy_nonoverlapping(y, x, rem);
456 copy_nonoverlapping(t, y, rem);
460 /// Moves `src` into the location pointed to by `dst`, returning the previous `dst` value.
462 /// Neither value is dropped.
464 /// This function is semantically equivalent to [`mem::replace`] except that it
465 /// operates on raw pointers instead of references. When references are
466 /// available, [`mem::replace`] should be preferred.
468 /// [`mem::replace`]: ../mem/fn.replace.html
472 /// Behavior is undefined if any of the following conditions are violated:
474 /// * `dst` must be [valid] for writes.
476 /// * `dst` must be properly aligned.
478 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
480 /// [valid]: ../ptr/index.html#safety
487 /// let mut rust = vec!['b', 'u', 's', 't'];
489 /// // `mem::replace` would have the same effect without requiring the unsafe
490 /// // block.
491 /// let b = unsafe {
492 ///     ptr::replace(&mut rust[0], 'r')
493 /// };
495 /// assert_eq!(b, 'b');
496 /// assert_eq!(rust, &['r', 'u', 's', 't']);
499 #[stable(feature = "rust1", since = "1.0.0")]
500 pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
501 mem::swap(&mut *dst, &mut src); // cannot overlap
502 src
505 /// Reads the value from `src` without moving it. This leaves the
506 /// memory in `src` unchanged.
510 /// Behavior is undefined if any of the following conditions are violated:
512 /// * `src` must be [valid] for reads.
514 /// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
515 ///   case.
517 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
524 /// let x = 12;
525 /// let y = &x as *const i32;
527 /// unsafe {
528 ///     assert_eq!(std::ptr::read(y), 12);
532 /// Manually implement [`mem::swap`]:
537 /// fn swap<T>(a: &mut T, b: &mut T) {
539 /// // Create a bitwise copy of the value at `a` in `tmp`.
540 /// let tmp = ptr::read(a);
542 /// // Exiting at this point (either by explicitly returning or by
543 /// // calling a function which panics) would cause the value in `tmp` to
544 /// // be dropped while the same value is still referenced by `a`. This
545 /// // could trigger undefined behavior if `T` is not `Copy`.
547 /// // Create a bitwise copy of the value at `b` in `a`.
548 /// // This is safe because mutable references cannot alias.
549 /// ptr::copy_nonoverlapping(b, a, 1);
551 /// // As above, exiting here could trigger undefined behavior because
552 /// // the same value is referenced by `a` and `b`.
554 /// // Move `tmp` into `b`.
555 /// ptr::write(b, tmp);
557 /// // `tmp` has been moved (`write` takes ownership of its second argument),
558 /// // so nothing is dropped implicitly here.
562 /// let mut foo = "foo".to_owned();
563 /// let mut bar = "bar".to_owned();
565 /// swap(&mut foo, &mut bar);
567 /// assert_eq!(foo, "bar");
568 /// assert_eq!(bar, "foo");
571 /// ## Ownership of the Returned Value
573 /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
574 /// If `T` is not [`Copy`], using both the returned value and the value at
575 /// `*src` can violate memory safety. Note that assigning to `*src` counts as a
576 /// use because it will attempt to drop the value at `*src`.
578 /// [`write`] can be used to overwrite data without causing it to be dropped.
583 /// let mut s = String::from("foo");
585 /// // `s2` now points to the same underlying memory as `s`.
586 /// let mut s2: String = ptr::read(&s);
588 /// assert_eq!(s2, "foo");
590 /// // Assigning to `s2` causes its original value to be dropped. Beyond
591 /// // this point, `s` must no longer be used, as the underlying memory has
592 /// // been deallocated.
593 /// s2 = String::default();
594 /// assert_eq!(s2, "");
596 /// // Assigning to `s` would cause the old value to be dropped again,
597 /// // resulting in undefined behavior.
598 /// // s = String::from("bar"); // ERROR
600 /// // `ptr::write` can be used to overwrite a value without dropping it.
601 /// ptr::write(&mut s, String::from("bar"));
604 /// assert_eq!(s, "bar");
607 /// [`mem::swap`]: ../mem/fn.swap.html
608 /// [valid]: ../ptr/index.html#safety
609 /// [`Copy`]: ../marker/trait.Copy.html
610 /// [`read_unaligned`]: ./fn.read_unaligned.html
611 /// [`write`]: ./fn.write.html
613 #[stable(feature = "rust1", since = "1.0.0")]
614 pub unsafe fn read<T>(src: *const T) -> T {
615 let mut tmp = MaybeUninit::<T>::uninit();
616 copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
617 tmp.assume_init()
620 /// Reads the value from `src` without moving it. This leaves the
621 /// memory in `src` unchanged.
623 /// Unlike [`read`], `read_unaligned` works with unaligned pointers.
627 /// Behavior is undefined if any of the following conditions are violated:
629 /// * `src` must be [valid] for reads.
631 /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
632 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
633 /// value and the value at `*src` can [violate memory safety][read-ownership].
635 /// Note that even if `T` has size `0`, the pointer must be non-NULL.
637 /// [`Copy`]: ../marker/trait.Copy.html
638 /// [`read`]: ./fn.read.html
639 /// [`write_unaligned`]: ./fn.write_unaligned.html
640 /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
641 /// [valid]: ../ptr/index.html#safety
643 /// ## On `packed` structs
645 /// It is currently impossible to create raw pointers to unaligned fields
646 /// of a packed struct.
648 /// Attempting to create a raw pointer to an `unaligned` struct field with
649 /// an expression such as `&packed.unaligned as *const FieldType` creates an
650 /// intermediate unaligned reference before converting that to a raw pointer.
651 /// That this reference is temporary and immediately cast is inconsequential
652 /// as the compiler always expects references to be properly aligned.
653 /// As a result, using `&packed.unaligned as *const FieldType` causes immediate
654 /// *undefined behavior* in your program.
656 /// An example of what not to do and how this relates to `read_unaligned` is:
659 /// #[repr(packed, C)]
665 /// let packed = Packed {
667 /// unaligned: 0x01020304,
671 /// // Here we attempt to take the address of a 32-bit integer which is not aligned.
672 /// let unaligned =
673 ///     // A temporary unaligned reference is created here which results in
674 ///     // undefined behavior regardless of whether the reference is used or not.
675 ///     &packed.unaligned
676 ///     // Casting to a raw pointer doesn't help; the mistake already happened.
677 ///     as *const u32;
679 /// let v = std::ptr::read_unaligned(unaligned);
685 /// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
686 // FIXME: Update docs based on outcome of RFC #2582 and friends.
690 /// Read a usize value from a byte buffer:
695 /// fn read_usize(x: &[u8]) -> usize {
696 /// assert!(x.len() >= mem::size_of::<usize>());
698 /// let ptr = x.as_ptr() as *const usize;
700 /// unsafe { ptr.read_unaligned() }
704 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
705 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
706 let mut tmp = MaybeUninit::<T>::uninit();
707 copy_nonoverlapping(src as *const u8,
708 tmp.as_mut_ptr() as *mut u8,
709 mem::size_of::<T>());
710 tmp.assume_init()
713 /// Overwrites a memory location with the given value without reading or
714 /// dropping the old value.
716 /// `write` does not drop the contents of `dst`. This is safe, but it could leak
717 /// allocations or resources, so care should be taken not to overwrite an object
718 /// that should be dropped.
720 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
721 /// location pointed to by `dst`.
723 /// This is appropriate for initializing uninitialized memory, or overwriting
724 /// memory that has previously been [`read`] from.
726 /// [`read`]: ./fn.read.html
730 /// Behavior is undefined if any of the following conditions are violated:
732 /// * `dst` must be [valid] for writes.
734 /// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
735 ///   case.
737 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
739 /// [valid]: ../ptr/index.html#safety
740 /// [`write_unaligned`]: ./fn.write_unaligned.html
747 /// let mut x = 0;
748 /// let y = &mut x as *mut i32;
749 /// let z = 12;
751 /// unsafe {
752 ///     std::ptr::write(y, z);
753 ///     assert_eq!(std::ptr::read(y), 12);
757 /// Manually implement [`mem::swap`]:
762 /// fn swap<T>(a: &mut T, b: &mut T) {
764 /// // Create a bitwise copy of the value at `a` in `tmp`.
765 /// let tmp = ptr::read(a);
767 /// // Exiting at this point (either by explicitly returning or by
768 /// // calling a function which panics) would cause the value in `tmp` to
769 /// // be dropped while the same value is still referenced by `a`. This
770 /// // could trigger undefined behavior if `T` is not `Copy`.
772 /// // Create a bitwise copy of the value at `b` in `a`.
773 /// // This is safe because mutable references cannot alias.
774 /// ptr::copy_nonoverlapping(b, a, 1);
776 /// // As above, exiting here could trigger undefined behavior because
777 /// // the same value is referenced by `a` and `b`.
779 /// // Move `tmp` into `b`.
780 /// ptr::write(b, tmp);
782 /// // `tmp` has been moved (`write` takes ownership of its second argument),
783 /// // so nothing is dropped implicitly here.
787 /// let mut foo = "foo".to_owned();
788 /// let mut bar = "bar".to_owned();
790 /// swap(&mut foo, &mut bar);
792 /// assert_eq!(foo, "bar");
793 /// assert_eq!(bar, "foo");
796 /// [`mem::swap`]: ../mem/fn.swap.html
798 #[stable(feature = "rust1", since = "1.0.0")]
799 pub unsafe fn write<T>(dst: *mut T, src: T) {
800 intrinsics::move_val_init(&mut *dst, src)
803 /// Overwrites a memory location with the given value without reading or
804 /// dropping the old value.
806 /// Unlike [`write`], the pointer may be unaligned.
808 /// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
809 /// could leak allocations or resources, so care should be taken not to overwrite
810 /// an object that should be dropped.
812 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
813 /// location pointed to by `dst`.
815 /// This is appropriate for initializing uninitialized memory, or overwriting
816 /// memory that has previously been read with [`read_unaligned`].
818 /// [`write`]: ./fn.write.html
819 /// [`read_unaligned`]: ./fn.read_unaligned.html
823 /// Behavior is undefined if any of the following conditions are violated:
825 /// * `dst` must be [valid] for writes.
827 /// Note that even if `T` has size `0`, the pointer must be non-NULL.
829 /// [valid]: ../ptr/index.html#safety
831 /// ## On `packed` structs
833 /// It is currently impossible to create raw pointers to unaligned fields
834 /// of a packed struct.
836 /// Attempting to create a raw pointer to an `unaligned` struct field with
837 /// an expression such as `&packed.unaligned as *const FieldType` creates an
838 /// intermediate unaligned reference before converting that to a raw pointer.
839 /// That this reference is temporary and immediately cast is inconsequential
840 /// as the compiler always expects references to be properly aligned.
841 /// As a result, using `&packed.unaligned as *const FieldType` causes immediate
842 /// *undefined behavior* in your program.
844 /// An example of what not to do and how this relates to `write_unaligned` is:
847 /// #[repr(packed, C)]
853 /// let v = 0x01020304;
854 /// let mut packed: Packed = unsafe { std::mem::zeroed() };
857 /// // Here we attempt to take the address of a 32-bit integer which is not aligned.
858 /// let unaligned =
859 ///     // A temporary unaligned reference is created here which results in
860 ///     // undefined behavior regardless of whether the reference is used or not.
861 ///     &mut packed.unaligned
862 ///     // Casting to a raw pointer doesn't help; the mistake already happened.
863 ///     as *mut u32;
865 /// std::ptr::write_unaligned(unaligned, v);
871 /// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
872 // FIXME: Update docs based on outcome of RFC #2582 and friends.
876 /// Write a usize value to a byte buffer:
881 /// fn write_usize(x: &mut [u8], val: usize) {
882 /// assert!(x.len() >= mem::size_of::<usize>());
884 /// let ptr = x.as_mut_ptr() as *mut usize;
886 /// unsafe { ptr.write_unaligned(val) }
890 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
891 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
892 copy_nonoverlapping(&src as *const T as *const u8,
893 dst as *mut u8,
894 mem::size_of::<T>());
895 mem::forget(src);
898 /// Performs a volatile read of the value from `src` without moving it. This
899 /// leaves the memory in `src` unchanged.
901 /// Volatile operations are intended to act on I/O memory, and are guaranteed
902 /// to not be elided or reordered by the compiler across other volatile
903 /// operations.
905 /// [`write_volatile`]: ./fn.write_volatile.html
909 /// Rust does not currently have a rigorously and formally defined memory model,
910 /// so the precise semantics of what "volatile" means here is subject to change
911 /// over time. That being said, the semantics will almost always end up pretty
912 /// similar to [C11's definition of volatile][c11].
914 /// The compiler shouldn't change the relative order or number of volatile
915 /// memory operations. However, volatile memory operations on zero-sized types
916 /// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
917 /// and may be ignored.
919 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
923 /// Behavior is undefined if any of the following conditions are violated:
925 /// * `src` must be [valid] for reads.
927 /// * `src` must be properly aligned.
929 /// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
930 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
931 /// value and the value at `*src` can [violate memory safety][read-ownership].
932 /// However, storing non-[`Copy`] types in volatile memory is almost certainly
933 /// incorrect.
935 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
937 /// [valid]: ../ptr/index.html#safety
938 /// [`Copy`]: ../marker/trait.Copy.html
939 /// [`read`]: ./fn.read.html
940 /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
942 /// Just like in C, whether an operation is volatile has no bearing whatsoever
943 /// on questions involving concurrent access from multiple threads. Volatile
944 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
945 /// a race between a `read_volatile` and any write operation to the same location
946 /// is undefined behavior.
953 /// let x = 12;
954 /// let y = &x as *const i32;
956 /// unsafe {
957 ///     assert_eq!(std::ptr::read_volatile(y), 12);
961 #[stable(feature = "volatile", since = "1.9.0")]
962 pub unsafe fn read_volatile<T>(src: *const T) -> T {
963 intrinsics::volatile_load(src)
966 /// Performs a volatile write of a memory location with the given value without
967 /// reading or dropping the old value.
969 /// Volatile operations are intended to act on I/O memory, and are guaranteed
970 /// to not be elided or reordered by the compiler across other volatile
971 /// operations.
973 /// `write_volatile` does not drop the contents of `dst`. This is safe, but it
974 /// could leak allocations or resources, so care should be taken not to overwrite
975 /// an object that should be dropped.
977 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
978 /// location pointed to by `dst`.
980 /// [`read_volatile`]: ./fn.read_volatile.html
984 /// Rust does not currently have a rigorously and formally defined memory model,
985 /// so the precise semantics of what "volatile" means here is subject to change
986 /// over time. That being said, the semantics will almost always end up pretty
987 /// similar to [C11's definition of volatile][c11].
989 /// The compiler shouldn't change the relative order or number of volatile
990 /// memory operations. However, volatile memory operations on zero-sized types
991 /// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
992 /// and may be ignored.
994 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
998 /// Behavior is undefined if any of the following conditions are violated:
1000 /// * `dst` must be [valid] for writes.
1002 /// * `dst` must be properly aligned.
1004 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
1006 /// [valid]: ../ptr/index.html#safety
1008 /// Just like in C, whether an operation is volatile has no bearing whatsoever
1009 /// on questions involving concurrent access from multiple threads. Volatile
1010 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
1011 /// a race between a `write_volatile` and any other operation (reading or writing)
1012 /// on the same location is undefined behavior.
1019 /// let mut x = 0;
1020 /// let y = &mut x as *mut i32;
1021 /// let z = 12;
1023 /// unsafe {
1024 ///     std::ptr::write_volatile(y, z);
1025 ///     assert_eq!(std::ptr::read_volatile(y), 12);
1029 #[stable(feature = "volatile", since = "1.9.0")]
1030 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
1031 intrinsics::volatile_store(dst, src);
1034 #[lang = "const_ptr"]
1035 impl<T: ?Sized> *const T {
1036 /// Returns `true` if the pointer is null.
1038 /// Note that unsized types have many possible null pointers, as only the
1039 /// raw data pointer is considered, not their length, vtable, etc.
1040 /// Therefore, two pointers that are null may still not compare equal to
1041 /// each other.
1048 /// let s: &str = "Follow the rabbit";
1049 /// let ptr: *const u8 = s.as_ptr();
1050 /// assert!(!ptr.is_null());
1052 #[stable(feature = "rust1", since = "1.0.0")]
1054 pub fn is_null(self) -> bool {
1055 // Compare via a cast to a thin pointer, so fat pointers are only
1056 // considering their "data" part for null-ness.
1057 (self as *const u8) == null()
1060 /// Casts to a pointer of another type.
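///
/// A minimal sketch (the names here are illustrative only):
///
/// ```
/// let vals = [1u32, 2];
/// let p: *const u32 = vals.as_ptr();
/// // Only the pointee type changes; the address stays the same.
/// let q: *const u8 = p.cast();
/// assert_eq!(p as usize, q as usize);
/// ```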
1061 #[stable(feature = "ptr_cast", since = "1.38.0")]
1063 pub const fn cast<U>(self) -> *const U {
1064 self as _
1067 /// Returns `None` if the pointer is null, or else returns a reference to
1068 /// the value wrapped in `Some`.
1072 /// While this method and its mutable counterpart are useful for
1073 /// null-safety, it is important to note that this is still an unsafe
1074 /// operation because the returned value could be pointing to invalid
1075 /// memory.
1077 /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
1078 /// all of the following is true:
1079 /// - it is properly aligned
1080 /// - it must point to an initialized instance of T; in particular, the pointer must be
1081 ///   "dereferenceable" in the sense defined [here].
1083 /// This applies even if the result of this method is unused!
1084 /// (The part about being initialized is not yet fully decided, but until
1085 /// it is, the only safe approach is to ensure that they are indeed initialized.)
1087 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1088 /// not necessarily reflect the actual lifetime of the data. *You* must enforce
1089 /// Rust's aliasing rules. In particular, for the duration of this lifetime,
1090 /// the memory the pointer points to must not get mutated (except inside `UnsafeCell`).
1092 /// [here]: https://doc.rust-lang.org/std/ptr/index.html#safety
1099 /// let ptr: *const u8 = &10u8 as *const u8;
1102 /// if let Some(val_back) = ptr.as_ref() {
1103 /// println!("We got back the value: {}!", val_back);
1108 /// # Null-unchecked version
1110 /// If you are sure the pointer can never be null and are looking for some kind of
1111 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1112 /// dereference the pointer directly.
1115 /// let ptr: *const u8 = &10u8 as *const u8;
1118 /// let val_back = &*ptr;
1119 /// println!("We got back the value: {}!", val_back);
1122 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1124 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1132 /// Calculates the offset from a pointer.
1134 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1135 /// offset of `3 * size_of::<T>()` bytes.
1139 /// If any of the following conditions are violated, the result is Undefined
1140 /// Behavior:
1142 /// * Both the starting and resulting pointer must be either in bounds or one
1143 /// byte past the end of the same allocated object. Note that in Rust,
1144 /// every (stack-allocated) variable is considered a separate allocated object.
1146 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1148 /// * The offset being in bounds cannot rely on "wrapping around" the address
1149 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a usize.
1151 /// The compiler and standard library generally try to ensure allocations
1152 /// never reach a size where an offset is a concern. For instance, `Vec`
1153 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1154 /// `vec.as_ptr().add(vec.len())` is always safe.
1156 /// Most platforms fundamentally can't even construct such an allocation.
1157 /// For instance, no known 64-bit platform can ever serve a request
1158 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1159 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1160 /// more than `isize::MAX` bytes with things like Physical Address
1161 /// Extension. As such, memory acquired directly from allocators or memory
1162 /// mapped files *may* be too large to handle with this function.
1164 /// Consider using [`wrapping_offset`] instead if these constraints are
1165 /// difficult to satisfy. The only advantage of this method is that it
1166 /// enables more aggressive compiler optimizations.
1168 /// [`wrapping_offset`]: #method.wrapping_offset
1175 /// let s: &str = "123";
1176 /// let ptr: *const u8 = s.as_ptr();
1179 /// println!("{}", *ptr.offset(1) as char);
1180 /// println!("{}", *ptr.offset(2) as char);
1183 #[stable(feature = "rust1", since = "1.0.0")]
1185 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
1186 intrinsics::offset(self, count)
1189 /// Calculates the offset from a pointer using wrapping arithmetic.
1191 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1192 /// offset of `3 * size_of::<T>()` bytes.
1196 /// The resulting pointer does not need to be in bounds, but it is
1197 /// potentially hazardous to dereference (which requires `unsafe`).
1199 /// In particular, the resulting pointer remains attached to the same allocated
1200 /// object that `self` points to. It may *not* be used to access a
1201 /// different allocated object. Note that in Rust,
1202 /// every (stack-allocated) variable is considered a separate allocated object.
1204 /// In other words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1205 /// *not* the same as `y`, and dereferencing it is undefined behavior
1206 /// unless `x` and `y` point into the same allocated object.
1208 /// Compared to [`offset`], this method basically delays the requirement of staying
1209 /// within the same allocated object: [`offset`] is immediate Undefined Behavior when
1210 /// crossing object boundaries; `wrapping_offset` produces a pointer but still leads
1211 /// to Undefined Behavior if that pointer is dereferenced. [`offset`] can be optimized
1212 /// better and is thus preferable in performance-sensitive code.
1214 /// If you need to cross object boundaries, cast the pointer to an integer and
1215 /// do the arithmetic there.
1217 /// [`offset`]: #method.offset
1224 /// // Iterate using a raw pointer in increments of two elements
1225 /// let data = [1u8, 2, 3, 4, 5];
1226 /// let mut ptr: *const u8 = data.as_ptr();
1227 /// let step = 2;
1228 /// let end_rounded_up = ptr.wrapping_offset(6);
1230 /// // This loop prints "1, 3, 5, "
1231 /// while ptr != end_rounded_up {
1233 /// print!("{}, ", *ptr);
1235 /// ptr = ptr.wrapping_offset(step);
1238 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1240 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
1242 intrinsics::arith_offset(self, count)
1246 /// Calculates the distance between two pointers. The returned value is in
1247 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1249 /// This function is the inverse of [`offset`].
1251 /// [`offset`]: #method.offset
1252 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
1256 /// If any of the following conditions are violated, the result is Undefined
1257 /// Behavior:
1259 /// * Both the starting and other pointer must be either in bounds or one
1260 /// byte past the end of the same allocated object. Note that in Rust,
1261 /// every (stack-allocated) variable is considered a separate allocated object.
1263 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1265 /// * The distance between the pointers, in bytes, must be an exact multiple
1266 /// of the size of `T`.
1268 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1270 /// The compiler and standard library generally try to ensure allocations
1271 /// never reach a size where an offset is a concern. For instance, `Vec`
1272 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1273 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1275 /// Most platforms fundamentally can't even construct such an allocation.
1276 /// For instance, no known 64-bit platform can ever serve a request
1277 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1278 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1279 /// more than `isize::MAX` bytes with things like Physical Address
1280 /// Extension. As such, memory acquired directly from allocators or memory
1281 /// mapped files *may* be too large to handle with this function.
1283 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1284 /// difficult to satisfy. The only advantage of this method is that it
1285 /// enables more aggressive compiler optimizations.
1289 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1296 /// #![feature(ptr_offset_from)]
1298 /// let a = [0; 5];
1299 /// let ptr1: *const i32 = &a[1];
1300 /// let ptr2: *const i32 = &a[3];
1302 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1303 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1304 /// assert_eq!(ptr1.offset(2), ptr2);
1305 /// assert_eq!(ptr2.offset(-2), ptr1);
1308 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1309 #[rustc_const_unstable(feature = "const_ptr_offset_from")]
1311 pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1312 let pointee_size = mem::size_of::<T>();
1313 let ok = 0 < pointee_size && pointee_size <= isize::max_value() as usize;
1314 // assert that the pointee size is valid in a const eval compatible way
1315 // FIXME: do this with a real assert at some point
1316 [()][(!ok) as usize];
1317 intrinsics::ptr_offset_from(self, origin)
1320 /// Calculates the distance between two pointers. The returned value is in
1321 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1323 /// If the address difference between the two pointers is not a multiple of
1324 /// `mem::size_of::<T>()` then the result of the division is rounded towards
1325 /// zero.
1327 /// Though this method is safe for any two pointers, note that its result
1328 /// will be mostly useless if the two pointers don't point into the same allocated
1329 /// object, for example if they point to two different local variables.
1333 /// This function panics if `T` is a zero-sized type.
1340 /// #![feature(ptr_wrapping_offset_from)]
1342 /// let a = [0; 5];
1343 /// let ptr1: *const i32 = &a[1];
1344 /// let ptr2: *const i32 = &a[3];
1345 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1346 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1347 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1348 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1350 /// let ptr1: *const i32 = 3 as _;
1351 /// let ptr2: *const i32 = 13 as _;
1352 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1354 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1356 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1357 let pointee_size = mem::size_of::<T>();
1358 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
1360 let d = isize::wrapping_sub(self as _, origin as _);
1361 d.wrapping_div(pointee_size as _)
1364 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1366 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1367 /// offset of `3 * size_of::<T>()` bytes.
1371 /// If any of the following conditions are violated, the result is Undefined
1372 /// Behavior:
1374 /// * Both the starting and resulting pointer must be either in bounds or one
1375 /// byte past the end of the same allocated object. Note that in Rust,
1376 /// every (stack-allocated) variable is considered a separate allocated object.
1378 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1380 /// * The offset being in bounds cannot rely on "wrapping around" the address
1381 /// space. That is, the infinite-precision sum must fit in a `usize`.
1383 /// The compiler and standard library generally try to ensure allocations
1384 /// never reach a size where an offset is a concern. For instance, `Vec`
1385 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1386 /// `vec.as_ptr().add(vec.len())` is always safe.
1388 /// Most platforms fundamentally can't even construct such an allocation.
1389 /// For instance, no known 64-bit platform can ever serve a request
1390 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1391 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1392 /// more than `isize::MAX` bytes with things like Physical Address
1393 /// Extension. As such, memory acquired directly from allocators or memory
1394 /// mapped files *may* be too large to handle with this function.
1396 /// Consider using [`wrapping_add`] instead if these constraints are
1397 /// difficult to satisfy. The only advantage of this method is that it
1398 /// enables more aggressive compiler optimizations.
1400 /// [`wrapping_add`]: #method.wrapping_add
1407 /// let s: &str = "123";
1408 /// let ptr: *const u8 = s.as_ptr();
1411 /// println!("{}", *ptr.add(1) as char);
1412 /// println!("{}", *ptr.add(2) as char);
1415 #[stable(feature = "pointer_methods", since = "1.26.0")]
1417 pub unsafe fn add(self, count: usize) -> Self
1420 self.offset(count as isize)
1423 /// Calculates the offset from a pointer (convenience for
1424 /// `.offset((count as isize).wrapping_neg())`).
1426 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1427 /// offset of `3 * size_of::<T>()` bytes.
1431 /// If any of the following conditions are violated, the result is Undefined
1432 /// Behavior:
1434 /// * Both the starting and resulting pointer must be either in bounds or one
1435 /// byte past the end of the same allocated object. Note that in Rust,
1436 /// every (stack-allocated) variable is considered a separate allocated object.
1438 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1440 /// * The offset being in bounds cannot rely on "wrapping around" the address
1441 /// space. That is, the infinite-precision sum must fit in a usize.
1443 /// The compiler and standard library generally try to ensure allocations
1444 /// never reach a size where an offset is a concern. For instance, `Vec`
1445 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1446 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1448 /// Most platforms fundamentally can't even construct such an allocation.
1449 /// For instance, no known 64-bit platform can ever serve a request
1450 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1451 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1452 /// more than `isize::MAX` bytes with things like Physical Address
1453 /// Extension. As such, memory acquired directly from allocators or memory
1454 /// mapped files *may* be too large to handle with this function.
1456 /// Consider using [`wrapping_sub`] instead if these constraints are
1457 /// difficult to satisfy. The only advantage of this method is that it
1458 /// enables more aggressive compiler optimizations.
1460 /// [`wrapping_sub`]: #method.wrapping_sub
1467 /// let s: &str = "123";
1470 /// let end: *const u8 = s.as_ptr().add(3);
1471 /// println!("{}", *end.sub(1) as char);
1472 /// println!("{}", *end.sub(2) as char);
1475 #[stable(feature = "pointer_methods", since = "1.26.0")]
1477 pub unsafe fn sub(self, count: usize) -> Self
1480 self.offset((count as isize).wrapping_neg())
1483 /// Calculates the offset from a pointer using wrapping arithmetic.
1484 /// (convenience for `.wrapping_offset(count as isize)`)
1486 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1487 /// offset of `3 * size_of::<T>()` bytes.
1491 /// The resulting pointer does not need to be in bounds, but it is
1492 /// potentially hazardous to dereference (which requires `unsafe`).
1494 /// In particular, the resulting pointer remains attached to the same allocated
1495 /// object that `self` points to. It may *not* be used to access a
1496 /// different allocated object. Note that in Rust,
1497 /// every (stack-allocated) variable is considered a separate allocated object.
1499 /// Compared to [`add`], this method basically delays the requirement of staying
1500 /// within the same allocated object: [`add`] is immediate Undefined Behavior when
1501 /// crossing object boundaries; `wrapping_add` produces a pointer but still leads
1502 /// to Undefined Behavior if that pointer is dereferenced. [`add`] can be optimized
1503 /// better and is thus preferable in performance-sensitive code.
1505 /// If you need to cross object boundaries, cast the pointer to an integer and
1506 /// do the arithmetic there.
1508 /// [`add`]: #method.add
1515 /// // Iterate using a raw pointer in increments of two elements
1516 /// let data = [1u8, 2, 3, 4, 5];
1517 /// let mut ptr: *const u8 = data.as_ptr();
1518 /// let step = 2;
1519 /// let end_rounded_up = ptr.wrapping_add(6);
1521 /// // This loop prints "1, 3, 5, "
1522 /// while ptr != end_rounded_up {
1524 /// print!("{}, ", *ptr);
1526 /// ptr = ptr.wrapping_add(step);
1529 #[stable(feature = "pointer_methods", since = "1.26.0")]
1531 pub fn wrapping_add(self, count: usize) -> Self
1534 self.wrapping_offset(count as isize)
1537 /// Calculates the offset from a pointer using wrapping arithmetic.
1538 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1540 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1541 /// offset of `3 * size_of::<T>()` bytes.
1545 /// The resulting pointer does not need to be in bounds, but it is
1546 /// potentially hazardous to dereference (which requires `unsafe`).
1548 /// In particular, the resulting pointer remains attached to the same allocated
1549 /// object that `self` points to. It may *not* be used to access a
1550 /// different allocated object. Note that in Rust,
1551 /// every (stack-allocated) variable is considered a separate allocated object.
1553 /// Compared to [`sub`], this method basically delays the requirement of staying
1554 /// within the same allocated object: [`sub`] is immediate Undefined Behavior when
1555 /// crossing object boundaries; `wrapping_sub` produces a pointer but still leads
1556 /// to Undefined Behavior if that pointer is dereferenced. [`sub`] can be optimized
1557 /// better and is thus preferable in performance-sensitive code.
1559 /// If you need to cross object boundaries, cast the pointer to an integer and
1560 /// do the arithmetic there.
1562 /// [`sub`]: #method.sub
1569 /// // Iterate using a raw pointer in increments of two elements (backwards)
1570 /// let data = [1u8, 2, 3, 4, 5];
1571 /// let mut ptr: *const u8 = data.as_ptr();
1572 /// let start_rounded_down = ptr.wrapping_sub(2);
1573 /// ptr = ptr.wrapping_add(4);
1574 /// let step = 2;
1575 /// // This loop prints "5, 3, 1, "
1576 /// while ptr != start_rounded_down {
1578 /// print!("{}, ", *ptr);
1580 /// ptr = ptr.wrapping_sub(step);
1583 #[stable(feature = "pointer_methods", since = "1.26.0")]
1585 pub fn wrapping_sub(self, count: usize) -> Self
1588 self.wrapping_offset((count as isize).wrapping_neg())
1591 /// Reads the value from `self` without moving it. This leaves the
1592 /// memory in `self` unchanged.
1594 /// See [`ptr::read`] for safety concerns and examples.
1596 /// [`ptr::read`]: ./ptr/fn.read.html
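///
/// A short sketch of the method-call form:
///
/// ```
/// let x = 12;
/// let ptr = &x as *const i32;
/// // Same as `std::ptr::read(ptr)`.
/// unsafe { assert_eq!(ptr.read(), 12) };
/// ```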
1597 #[stable(feature = "pointer_methods", since = "1.26.0")]
1599 pub unsafe fn read(self) -> T
1605 /// Performs a volatile read of the value from `self` without moving it. This
1606 /// leaves the memory in `self` unchanged.
1608 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1609 /// to not be elided or reordered by the compiler across other volatile
1610 /// operations.
1612 /// See [`ptr::read_volatile`] for safety concerns and examples.
1614 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
1615 #[stable(feature = "pointer_methods", since = "1.26.0")]
1617 pub unsafe fn read_volatile(self) -> T
1623 /// Reads the value from `self` without moving it. This leaves the
1624 /// memory in `self` unchanged.
1626 /// Unlike `read`, the pointer may be unaligned.
1628 /// See [`ptr::read_unaligned`] for safety concerns and examples.
1630 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
1631 #[stable(feature = "pointer_methods", since = "1.26.0")]
1633 pub unsafe fn read_unaligned(self) -> T
1636 read_unaligned(self)
1639 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1640 /// and destination may overlap.
1642 /// NOTE: this has the *same* argument order as [`ptr::copy`].
1644 /// See [`ptr::copy`] for safety concerns and examples.
1646 /// [`ptr::copy`]: ./ptr/fn.copy.html
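///
/// A small sketch of the argument order (`self` is the source):
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0; 4];
/// let src_ptr = src.as_ptr();
/// // Copies 4 elements from `src` into `dst`; same order as `ptr::copy(src, dst, 4)`.
/// unsafe { src_ptr.copy_to(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, [1, 2, 3, 4]);
/// ```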
1647 #[stable(feature = "pointer_methods", since = "1.26.0")]
1649 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1652 copy(self, dest, count)
1655 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1656 /// and destination may *not* overlap.
1658 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1660 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1662 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
1663 #[stable(feature = "pointer_methods", since = "1.26.0")]
1665 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1668 copy_nonoverlapping(self, dest, count)
1671 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
1674 /// If it is not possible to align the pointer, the implementation returns
1675 /// `usize::max_value()`. It is permissible for the implementation to *always*
1676 /// return `usize::max_value()`. Only your algorithm's performance can depend
1677 /// on getting a usable offset here, not its correctness.
1679 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1680 /// used with the `wrapping_add` method.
1682 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1683 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1684 /// the returned offset is correct in all terms other than alignment.
1688 /// The function panics if `align` is not a power-of-two.
1692 /// Accessing adjacent `u8` as `u16`
1695 /// # fn foo(n: usize) {
1696 /// # use std::mem::align_of;
1698 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1699 /// let ptr = &x[n] as *const u8;
1700 /// let offset = ptr.align_offset(align_of::<u16>());
1701 /// if offset < x.len() - n - 1 {
1702 /// let u16_ptr = ptr.add(offset) as *const u16;
1703 /// assert_ne!(*u16_ptr, 500);
1704 /// } else {
1705 /// // while the pointer can be aligned via `offset`, it would point
1706 /// // outside the allocation
1710 #[stable(feature = "align_offset", since = "1.36.0")]
1711 pub fn align_offset(self, align: usize) -> usize where T: Sized {
1712 if !align.is_power_of_two() {
1713 panic!("align_offset: align is not a power-of-two");
1716 align_offset(self, align)
1722 #[lang = "mut_ptr"]
1723 impl<T: ?Sized> *mut T {
1724 /// Returns `true` if the pointer is null.
1726 /// Note that unsized types have many possible null pointers, as only the
1727 /// raw data pointer is considered, not their length, vtable, etc.
1728 /// Therefore, two pointers that are null may still not compare equal to
1729 /// each other.
1736 /// let mut s = [1, 2, 3];
1737 /// let ptr: *mut u32 = s.as_mut_ptr();
1738 /// assert!(!ptr.is_null());
1740 #[stable(feature = "rust1", since = "1.0.0")]
1742 pub fn is_null(self) -> bool {
1743 // Compare via a cast to a thin pointer, so fat pointers are only
1744 // considering their "data" part for null-ness.
1745 (self as *mut u8) == null_mut()
1748 /// Casts to a pointer of another type.
1749 #[stable(feature = "ptr_cast", since = "1.38.0")]
1751 pub const fn cast<U>(self) -> *mut U {
1752 self as _
1755 /// Returns `None` if the pointer is null, or else returns a reference to
1756 /// the value wrapped in `Some`.
1760 /// While this method and its mutable counterpart are useful for
1761 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid memory.
1765 /// When calling this method, you have to ensure that if the pointer is
1766 /// non-NULL, then it is properly aligned, dereferencable (for the whole
1767 /// size of `T`) and points to an initialized instance of `T`. This applies
1768 /// even if the result of this method is unused!
1769 /// (The part about being initialized is not yet fully decided, but until
1770 /// it is, the only safe approach is to ensure that they are indeed initialized.)
1772 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1773 /// not necessarily reflect the actual lifetime of the data. It is up to the
1774 /// caller to ensure that for the duration of this lifetime, the memory this
1775 /// pointer points to does not get written to outside of `UnsafeCell<U>`.
1782 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1785 /// if let Some(val_back) = ptr.as_ref() {
1786 /// println!("We got back the value: {}!", val_back);
1791 /// # Null-unchecked version
1793 /// If you are sure the pointer can never be null and are looking for some kind of
1794 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1795 /// dereference the pointer directly.
1798 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1801 /// let val_back = &*ptr;
1802 /// println!("We got back the value: {}!", val_back);
1805 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1807 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1815 /// Calculates the offset from a pointer.
1817 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1818 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1825 /// * Both the starting and resulting pointer must be either in bounds or one
1826 /// byte past the end of the same allocated object. Note that in Rust,
1827 /// every (stack-allocated) variable is considered a separate allocated object.
1829 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1831 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1835 /// never reach a size where an offset is a concern. For instance, `Vec`
1836 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1837 /// `vec.as_ptr().add(vec.len())` is always safe.
1839 /// Most platforms fundamentally can't even construct such an allocation.
1840 /// For instance, no known 64-bit platform can ever serve a request
1841 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1842 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1843 /// more than `isize::MAX` bytes with things like Physical Address
1844 /// Extension. As such, memory acquired directly from allocators or memory
1845 /// mapped files *may* be too large to handle with this function.
1847 /// Consider using [`wrapping_offset`] instead if these constraints are
1848 /// difficult to satisfy. The only advantage of this method is that it
1849 /// enables more aggressive compiler optimizations.
1851 /// [`wrapping_offset`]: #method.wrapping_offset
1858 /// let mut s = [1, 2, 3];
1859 /// let ptr: *mut u32 = s.as_mut_ptr();
1862 /// println!("{}", *ptr.offset(1));
1863 /// println!("{}", *ptr.offset(2));
1866 #[stable(feature = "rust1", since = "1.0.0")]
1868 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1869 intrinsics::offset(self, count) as *mut T
1872 /// Calculates the offset from a pointer using wrapping arithmetic.
1873 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1874 /// offset of `3 * size_of::<T>()` bytes.
1878 /// The resulting pointer does not need to be in bounds, but it is
1879 /// potentially hazardous to dereference (which requires `unsafe`).
1881 /// In particular, the resulting pointer remains attached to the same allocated
1882 /// object that `self` points to. It may *not* be used to access a
1883 /// different allocated object. Note that in Rust,
1884 /// every (stack-allocated) variable is considered a separate allocated object.
1886 /// In other words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1887 /// *not* the same as `y`, and dereferencing it is undefined behavior
1888 /// unless `x` and `y` point into the same allocated object.
1890 /// Compared to [`offset`], this method basically delays the requirement of staying
1891 /// within the same allocated object: [`offset`] is immediate Undefined Behavior when
1892 /// crossing object boundaries; `wrapping_offset` produces a pointer but still leads
1893 /// to Undefined Behavior if that pointer is dereferenced. [`offset`] can be optimized
/// better and is thus preferable in performance-sensitive code.
1896 /// If you need to cross object boundaries, cast the pointer to an integer and
1897 /// do the arithmetic there.
1899 /// [`offset`]: #method.offset
1906 /// // Iterate using a raw pointer in increments of two elements
1907 /// let mut data = [1u8, 2, 3, 4, 5];
1908 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1910 /// let end_rounded_up = ptr.wrapping_offset(6);
1912 /// while ptr != end_rounded_up {
1916 /// ptr = ptr.wrapping_offset(step);
1918 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1920 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1922 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1924 intrinsics::arith_offset(self, count) as *mut T
1928 /// Returns `None` if the pointer is null, or else returns a mutable
1929 /// reference to the value wrapped in `Some`.
1933 /// As with [`as_ref`], this is unsafe because it cannot verify the validity
1934 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1935 /// returned is indeed a valid lifetime for the contained data.
1937 /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
1938 /// all of the following is true:
1939 /// - it is properly aligned
1940 /// - it must point to an initialized instance of T; in particular, the pointer must be
1941 /// "dereferencable" in the sense defined [here].
1943 /// This applies even if the result of this method is unused!
1944 /// (The part about being initialized is not yet fully decided, but until
/// it is, the only safe approach is to ensure that they are indeed initialized.)
1947 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1948 /// not necessarily reflect the actual lifetime of the data. *You* must enforce
1949 /// Rust's aliasing rules. In particular, for the duration of this lifetime,
1950 /// the memory this pointer points to must not get accessed (read or written)
1951 /// through any other pointer.
1953 /// [here]: https://doc.rust-lang.org/std/ptr/index.html#safety
1954 /// [`as_ref`]: #method.as_ref
1961 /// let mut s = [1, 2, 3];
1962 /// let ptr: *mut u32 = s.as_mut_ptr();
1963 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1964 /// *first_value = 4;
1965 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1967 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1969 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1977 /// Calculates the distance between two pointers. The returned value is in
1978 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1980 /// This function is the inverse of [`offset`].
1982 /// [`offset`]: #method.offset-1
1983 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
/// If any of the following conditions are violated, the result is Undefined Behavior:
1990 /// * Both the starting and other pointer must be either in bounds or one
1991 /// byte past the end of the same allocated object. Note that in Rust,
1992 /// every (stack-allocated) variable is considered a separate allocated object.
1994 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1996 /// * The distance between the pointers, in bytes, must be an exact multiple
1997 /// of the size of `T`.
1999 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
2001 /// The compiler and standard library generally try to ensure allocations
2002 /// never reach a size where an offset is a concern. For instance, `Vec`
2003 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
2004 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
2006 /// Most platforms fundamentally can't even construct such an allocation.
2007 /// For instance, no known 64-bit platform can ever serve a request
2008 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
2009 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
2010 /// more than `isize::MAX` bytes with things like Physical Address
2011 /// Extension. As such, memory acquired directly from allocators or memory
2012 /// mapped files *may* be too large to handle with this function.
2014 /// Consider using [`wrapping_offset_from`] instead if these constraints are
2015 /// difficult to satisfy. The only advantage of this method is that it
2016 /// enables more aggressive compiler optimizations.
2020 /// This function panics if `T` is a Zero-Sized Type ("ZST").
2027 /// #![feature(ptr_offset_from)]
2029 /// let mut a = [0; 5];
2030 /// let ptr1: *mut i32 = &mut a[1];
2031 /// let ptr2: *mut i32 = &mut a[3];
2033 /// assert_eq!(ptr2.offset_from(ptr1), 2);
2034 /// assert_eq!(ptr1.offset_from(ptr2), -2);
2035 /// assert_eq!(ptr1.offset(2), ptr2);
2036 /// assert_eq!(ptr2.offset(-2), ptr1);
2039 #[unstable(feature = "ptr_offset_from", issue = "41079")]
2040 #[rustc_const_unstable(feature = "const_ptr_offset_from")]
2042 pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
2043 (self as *const T).offset_from(origin)
2046 /// Calculates the distance between two pointers. The returned value is in
2047 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
2053 /// Though this method is safe for any two pointers, note that its result
/// will be mostly useless if the two pointers do not point into the same allocated
2055 /// object, for example if they point to two different local variables.
2059 /// This function panics if `T` is a zero-sized type.
2066 /// #![feature(ptr_wrapping_offset_from)]
2068 /// let mut a = [0; 5];
2069 /// let ptr1: *mut i32 = &mut a[1];
2070 /// let ptr2: *mut i32 = &mut a[3];
2071 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
2072 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
2073 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
2074 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
2076 /// let ptr1: *mut i32 = 3 as _;
2077 /// let ptr2: *mut i32 = 13 as _;
2078 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
2080 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
2082 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
2083 (self as *const T).wrapping_offset_from(origin)
2086 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
2088 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
2089 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
2096 /// * Both the starting and resulting pointer must be either in bounds or one
2097 /// byte past the end of the same allocated object. Note that in Rust,
2098 /// every (stack-allocated) variable is considered a separate allocated object.
2100 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
2102 /// * The offset being in bounds cannot rely on "wrapping around" the address
2103 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
2106 /// never reach a size where an offset is a concern. For instance, `Vec`
2107 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
2108 /// `vec.as_ptr().add(vec.len())` is always safe.
2110 /// Most platforms fundamentally can't even construct such an allocation.
2111 /// For instance, no known 64-bit platform can ever serve a request
2112 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
2113 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
2114 /// more than `isize::MAX` bytes with things like Physical Address
2115 /// Extension. As such, memory acquired directly from allocators or memory
2116 /// mapped files *may* be too large to handle with this function.
2118 /// Consider using [`wrapping_add`] instead if these constraints are
2119 /// difficult to satisfy. The only advantage of this method is that it
2120 /// enables more aggressive compiler optimizations.
2122 /// [`wrapping_add`]: #method.wrapping_add
2129 /// let s: &str = "123";
2130 /// let ptr: *const u8 = s.as_ptr();
2133 /// println!("{}", *ptr.add(1) as char);
2134 /// println!("{}", *ptr.add(2) as char);
2137 #[stable(feature = "pointer_methods", since = "1.26.0")]
2139 pub unsafe fn add(self, count: usize) -> Self
2142 self.offset(count as isize)
2145 /// Calculates the offset from a pointer (convenience for
2146 /// `.offset((count as isize).wrapping_neg())`).
2148 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
2149 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
2156 /// * Both the starting and resulting pointer must be either in bounds or one
2157 /// byte past the end of the same allocated object. Note that in Rust,
2158 /// every (stack-allocated) variable is considered a separate allocated object.
2160 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
2162 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
2166 /// never reach a size where an offset is a concern. For instance, `Vec`
2167 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
2168 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
2170 /// Most platforms fundamentally can't even construct such an allocation.
2171 /// For instance, no known 64-bit platform can ever serve a request
2172 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
2173 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
2174 /// more than `isize::MAX` bytes with things like Physical Address
2175 /// Extension. As such, memory acquired directly from allocators or memory
2176 /// mapped files *may* be too large to handle with this function.
2178 /// Consider using [`wrapping_sub`] instead if these constraints are
2179 /// difficult to satisfy. The only advantage of this method is that it
2180 /// enables more aggressive compiler optimizations.
2182 /// [`wrapping_sub`]: #method.wrapping_sub
2189 /// let s: &str = "123";
2192 /// let end: *const u8 = s.as_ptr().add(3);
2193 /// println!("{}", *end.sub(1) as char);
2194 /// println!("{}", *end.sub(2) as char);
2197 #[stable(feature = "pointer_methods", since = "1.26.0")]
2199 pub unsafe fn sub(self, count: usize) -> Self
2202 self.offset((count as isize).wrapping_neg())
2205 /// Calculates the offset from a pointer using wrapping arithmetic.
2206 /// (convenience for `.wrapping_offset(count as isize)`)
2208 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
2209 /// offset of `3 * size_of::<T>()` bytes.
2213 /// The resulting pointer does not need to be in bounds, but it is
2214 /// potentially hazardous to dereference (which requires `unsafe`).
2216 /// In particular, the resulting pointer remains attached to the same allocated
2217 /// object that `self` points to. It may *not* be used to access a
2218 /// different allocated object. Note that in Rust,
2219 /// every (stack-allocated) variable is considered a separate allocated object.
2221 /// Compared to [`add`], this method basically delays the requirement of staying
2222 /// within the same allocated object: [`add`] is immediate Undefined Behavior when
2223 /// crossing object boundaries; `wrapping_add` produces a pointer but still leads
2224 /// to Undefined Behavior if that pointer is dereferenced. [`add`] can be optimized
/// better and is thus preferable in performance-sensitive code.
2227 /// If you need to cross object boundaries, cast the pointer to an integer and
2228 /// do the arithmetic there.
2230 /// [`add`]: #method.add
2237 /// // Iterate using a raw pointer in increments of two elements
2238 /// let data = [1u8, 2, 3, 4, 5];
2239 /// let mut ptr: *const u8 = data.as_ptr();
2241 /// let end_rounded_up = ptr.wrapping_add(6);
2243 /// // This loop prints "1, 3, 5, "
2244 /// while ptr != end_rounded_up {
2246 /// print!("{}, ", *ptr);
2248 /// ptr = ptr.wrapping_add(step);
2251 #[stable(feature = "pointer_methods", since = "1.26.0")]
2253 pub fn wrapping_add(self, count: usize) -> Self
2256 self.wrapping_offset(count as isize)
2259 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
2262 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
2263 /// offset of `3 * size_of::<T>()` bytes.
2267 /// The resulting pointer does not need to be in bounds, but it is
2268 /// potentially hazardous to dereference (which requires `unsafe`).
2270 /// In particular, the resulting pointer remains attached to the same allocated
2271 /// object that `self` points to. It may *not* be used to access a
2272 /// different allocated object. Note that in Rust,
2273 /// every (stack-allocated) variable is considered a separate allocated object.
2275 /// Compared to [`sub`], this method basically delays the requirement of staying
2276 /// within the same allocated object: [`sub`] is immediate Undefined Behavior when
2277 /// crossing object boundaries; `wrapping_sub` produces a pointer but still leads
2278 /// to Undefined Behavior if that pointer is dereferenced. [`sub`] can be optimized
/// better and is thus preferable in performance-sensitive code.
2281 /// If you need to cross object boundaries, cast the pointer to an integer and
2282 /// do the arithmetic there.
2284 /// [`sub`]: #method.sub
2291 /// // Iterate using a raw pointer in increments of two elements (backwards)
2292 /// let data = [1u8, 2, 3, 4, 5];
2293 /// let mut ptr: *const u8 = data.as_ptr();
2294 /// let start_rounded_down = ptr.wrapping_sub(2);
2295 /// ptr = ptr.wrapping_add(4);
2297 /// // This loop prints "5, 3, 1, "
2298 /// while ptr != start_rounded_down {
2300 /// print!("{}, ", *ptr);
2302 /// ptr = ptr.wrapping_sub(step);
2305 #[stable(feature = "pointer_methods", since = "1.26.0")]
2307 pub fn wrapping_sub(self, count: usize) -> Self
2310 self.wrapping_offset((count as isize).wrapping_neg())
2313 /// Reads the value from `self` without moving it. This leaves the
2314 /// memory in `self` unchanged.
2316 /// See [`ptr::read`] for safety concerns and examples.
2318 /// [`ptr::read`]: ./ptr/fn.read.html
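///
/// # Examples
///
/// A minimal sketch reading a value back through a raw pointer:
///
/// ```
/// let mut x = 12u32;
/// let ptr: *mut u32 = &mut x;
/// // Reads the pointee without moving or modifying it.
/// assert_eq!(unsafe { ptr.read() }, 12);
/// ```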
2319 #[stable(feature = "pointer_methods", since = "1.26.0")]
2321 pub unsafe fn read(self) -> T
2327 /// Performs a volatile read of the value from `self` without moving it. This
2328 /// leaves the memory in `self` unchanged.
2330 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
2334 /// See [`ptr::read_volatile`] for safety concerns and examples.
2336 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
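///
/// # Examples
///
/// A minimal sketch; ordinary memory is used here only for illustration, since
/// volatile reads are primarily intended for memory-mapped I/O:
///
/// ```
/// let mut x = 7u32;
/// let ptr: *mut u32 = &mut x;
/// // This read will not be elided or reordered across other volatile operations.
/// assert_eq!(unsafe { ptr.read_volatile() }, 7);
/// ```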
2337 #[stable(feature = "pointer_methods", since = "1.26.0")]
2339 pub unsafe fn read_volatile(self) -> T
2345 /// Reads the value from `self` without moving it. This leaves the
2346 /// memory in `self` unchanged.
2348 /// Unlike `read`, the pointer may be unaligned.
2350 /// See [`ptr::read_unaligned`] for safety concerns and examples.
2352 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
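///
/// # Examples
///
/// A brief sketch reading a `u32` from an odd (hence possibly unaligned)
/// offset inside a byte buffer:
///
/// ```
/// let mut bytes = [0u8, 1, 2, 3, 4];
/// // A `*mut u32` at offset 1 need not be aligned to 4 bytes.
/// let unaligned = bytes.as_mut_ptr().wrapping_add(1) as *mut u32;
/// let v = unsafe { unaligned.read_unaligned() };
/// assert_eq!(v, u32::from_ne_bytes([1, 2, 3, 4]));
/// ```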
2353 #[stable(feature = "pointer_methods", since = "1.26.0")]
2355 pub unsafe fn read_unaligned(self) -> T
2358 read_unaligned(self)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2362 /// and destination may overlap.
2364 /// NOTE: this has the *same* argument order as [`ptr::copy`].
2366 /// See [`ptr::copy`] for safety concerns and examples.
2368 /// [`ptr::copy`]: ./ptr/fn.copy.html
2369 #[stable(feature = "pointer_methods", since = "1.26.0")]
2371 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
2374 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2378 /// and destination may *not* overlap.
2380 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
2382 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2384 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
2385 #[stable(feature = "pointer_methods", since = "1.26.0")]
2387 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
2390 copy_nonoverlapping(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2394 /// and destination may overlap.
2396 /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
2398 /// See [`ptr::copy`] for safety concerns and examples.
2400 /// [`ptr::copy`]: ./ptr/fn.copy.html
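///
/// # Examples
///
/// A brief sketch filling one buffer from another:
///
/// ```
/// let src = [1u16, 2, 3, 4];
/// let mut dst = [0u16; 4];
/// let dst_ptr: *mut u16 = dst.as_mut_ptr();
/// unsafe {
///     // Copy all four elements of `src` into `dst`.
///     dst_ptr.copy_from(src.as_ptr(), 4);
/// }
/// assert_eq!(dst, src);
/// ```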
2401 #[stable(feature = "pointer_methods", since = "1.26.0")]
2403 pub unsafe fn copy_from(self, src: *const T, count: usize)
2406 copy(src, self, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2410 /// and destination may *not* overlap.
2412 /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
2414 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2416 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
2417 #[stable(feature = "pointer_methods", since = "1.26.0")]
2419 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2422 copy_nonoverlapping(src, self, count)
2425 /// Executes the destructor (if any) of the pointed-to value.
2427 /// See [`ptr::drop_in_place`] for safety concerns and examples.
2429 /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html
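///
/// # Examples
///
/// A brief sketch running the destructor of an `Rc` in place; `ManuallyDrop`
/// prevents a second, automatic drop of the same value:
///
/// ```
/// use std::mem::ManuallyDrop;
/// use std::rc::Rc;
///
/// let rc = Rc::new(7);
/// let weak = Rc::downgrade(&rc);
/// let mut slot = ManuallyDrop::new(rc);
/// let ptr: *mut Rc<i32> = &mut *slot;
/// unsafe {
///     // Executes `Rc`'s destructor; `slot` itself is never dropped again.
///     ptr.drop_in_place();
/// }
/// // The last strong reference is gone, so the weak pointer cannot upgrade.
/// assert!(weak.upgrade().is_none());
/// ```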
2430 #[stable(feature = "pointer_methods", since = "1.26.0")]
2432 pub unsafe fn drop_in_place(self) {
2436 /// Overwrites a memory location with the given value without reading or
2437 /// dropping the old value.
2439 /// See [`ptr::write`] for safety concerns and examples.
2441 /// [`ptr::write`]: ./ptr/fn.write.html
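///
/// # Examples
///
/// A minimal sketch overwriting an integer through a raw pointer:
///
/// ```
/// let mut x = 0u32;
/// let ptr: *mut u32 = &mut x;
/// unsafe {
///     // Overwrites `x` without reading or dropping the old value.
///     ptr.write(5);
/// }
/// assert_eq!(x, 5);
/// ```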
2442 #[stable(feature = "pointer_methods", since = "1.26.0")]
2444 pub unsafe fn write(self, val: T)
2450 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2451 /// bytes of memory starting at `self` to `val`.
2453 /// See [`ptr::write_bytes`] for safety concerns and examples.
2455 /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html
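///
/// # Examples
///
/// A brief sketch zeroing the first two `u32` elements (eight bytes) of a
/// buffer:
///
/// ```
/// let mut buf = [1u32, 2, 3, 4];
/// let ptr: *mut u32 = buf.as_mut_ptr();
/// unsafe {
///     // `count` is in units of `T`, so this sets 2 * 4 = 8 bytes to zero.
///     ptr.write_bytes(0u8, 2);
/// }
/// assert_eq!(buf, [0, 0, 3, 4]);
/// ```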
2456 #[stable(feature = "pointer_methods", since = "1.26.0")]
2458 pub unsafe fn write_bytes(self, val: u8, count: usize)
2461 write_bytes(self, val, count)
2464 /// Performs a volatile write of a memory location with the given value without
2465 /// reading or dropping the old value.
2467 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
2471 /// See [`ptr::write_volatile`] for safety concerns and examples.
2473 /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html
2474 #[stable(feature = "pointer_methods", since = "1.26.0")]
2476 pub unsafe fn write_volatile(self, val: T)
2479 write_volatile(self, val)
2482 /// Overwrites a memory location with the given value without reading or
2483 /// dropping the old value.
2485 /// Unlike `write`, the pointer may be unaligned.
2487 /// See [`ptr::write_unaligned`] for safety concerns and examples.
2489 /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html
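///
/// # Examples
///
/// A brief sketch writing a `u32` to an odd (hence possibly unaligned) offset
/// inside a byte buffer:
///
/// ```
/// let mut bytes = [0u8; 5];
/// // A `*mut u32` at offset 1 need not be aligned to 4 bytes.
/// let unaligned = bytes.as_mut_ptr().wrapping_add(1) as *mut u32;
/// unsafe {
///     unaligned.write_unaligned(u32::from_ne_bytes([1, 2, 3, 4]));
/// }
/// assert_eq!(bytes, [0, 1, 2, 3, 4]);
/// ```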
2490 #[stable(feature = "pointer_methods", since = "1.26.0")]
2492 pub unsafe fn write_unaligned(self, val: T)
2495 write_unaligned(self, val)
2498 /// Replaces the value at `self` with `src`, returning the old
2499 /// value, without dropping either.
2501 /// See [`ptr::replace`] for safety concerns and examples.
2503 /// [`ptr::replace`]: ./ptr/fn.replace.html
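///
/// # Examples
///
/// A minimal sketch swapping in a new value and getting the old one back:
///
/// ```
/// let mut x = 1u32;
/// let ptr: *mut u32 = &mut x;
/// let old = unsafe { ptr.replace(2) };
/// assert_eq!(old, 1);
/// assert_eq!(x, 2);
/// ```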
2504 #[stable(feature = "pointer_methods", since = "1.26.0")]
2506 pub unsafe fn replace(self, src: T) -> T
2512 /// Swaps the values at two mutable locations of the same type, without
2513 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2514 /// otherwise equivalent.
2516 /// See [`ptr::swap`] for safety concerns and examples.
2518 /// [`ptr::swap`]: ./ptr/fn.swap.html
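///
/// # Examples
///
/// A brief sketch swapping two elements of an array through raw pointers:
///
/// ```
/// let mut a = [1u32, 2];
/// let ptr: *mut u32 = a.as_mut_ptr();
/// unsafe {
///     // The two locations are distinct here, but `swap` would also
///     // tolerate overlapping ones.
///     ptr.swap(ptr.add(1));
/// }
/// assert_eq!(a, [2, 1]);
/// ```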
2519 #[stable(feature = "pointer_methods", since = "1.26.0")]
2521 pub unsafe fn swap(self, with: *mut T)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to `align`.
2530 /// If it is not possible to align the pointer, the implementation returns
2531 /// `usize::max_value()`. It is permissible for the implementation to *always*
2532 /// return `usize::max_value()`. Only your algorithm's performance can depend
2533 /// on getting a usable offset here, not its correctness.
2535 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
2536 /// used with the `wrapping_add` method.
2538 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
2539 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
2540 /// the returned offset is correct in all terms other than alignment.
2544 /// The function panics if `align` is not a power-of-two.
2548 /// Accessing adjacent `u8` as `u16`
2551 /// # fn foo(n: usize) {
2552 /// # use std::mem::align_of;
2554 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
2555 /// let ptr = &x[n] as *const u8;
2556 /// let offset = ptr.align_offset(align_of::<u16>());
2557 /// if offset < x.len() - n - 1 {
2558 /// let u16_ptr = ptr.add(offset) as *const u16;
2559 /// assert_ne!(*u16_ptr, 500);
2561 /// // while the pointer can be aligned via `offset`, it would point
2562 /// // outside the allocation
2566 #[stable(feature = "align_offset", since = "1.36.0")]
2567 pub fn align_offset(self, align: usize) -> usize where T: Sized {
2568 if !align.is_power_of_two() {
2569 panic!("align_offset: align is not a power-of-two");
2572 align_offset(self, align)
2577 /// Align pointer `p`.
/// Calculates the offset (in terms of elements of size `stride`) that has to be applied
/// to pointer `p` so that `p` becomes aligned to `a`.
2582 /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is a change of `INV_TABLE_MOD_16` and its associated constants.
2586 /// If we ever decide to make it possible to call the intrinsic with `a` that is not a
2587 /// power-of-two, it will probably be more prudent to just change to a naive implementation rather
2588 /// than trying to adapt this to accommodate that change.
2590 /// Any questions go to @nagisa.
2591 #[lang="align_offset"]
2592 pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
2593 /// Calculate multiplicative modular inverse of `x` modulo `m`.
/// This implementation is tailored for `align_offset` and has the following preconditions:
2597 /// * `m` is a power-of-two;
2598 /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
2600 /// Implementation of this function shall not panic. Ever.
2602 fn mod_inv(x: usize, m: usize) -> usize {
2603 /// Multiplicative modular inverse table modulo 2⁴ = 16.
/// Note that this table does not contain values where the inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
2607 const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
2608 /// Modulo for which the `INV_TABLE_MOD_16` is intended.
2609 const INV_TABLE_MOD: usize = 16;
2611 const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
2613 let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
2614 if m <= INV_TABLE_MOD {
2615 table_inverse & (m - 1)
2617 // We iterate "up" using the following formula:
2619 // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
2621 // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
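//
// For example, for x = 7 the table gives 7⁻¹ ≡ 7 (mod 16), since 7 * 7 = 49 ≡ 1 (mod 16);
// one lifting step then gives 7 * (2 - 7 * 7) ≡ 183 (mod 256), and indeed
// 7 * 183 = 1281 ≡ 1 (mod 256).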
2622 let mut inverse = table_inverse;
2623 let mut going_mod = INV_TABLE_MOD_SQUARED;
2625 // y = y * (2 - xy) mod n
// Note that we use wrapping operations here intentionally – the original formula
2628 // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
2629 // usize::max_value()` instead, because we take the result `mod n` at the end
2631 inverse = inverse.wrapping_mul(
2632 2usize.wrapping_sub(x.wrapping_mul(inverse))
2633 ) & (going_mod - 1);
2635 return inverse & (m - 1);
2637 going_mod = going_mod.wrapping_mul(going_mod);
2642 let stride = mem::size_of::<T>();
2643 let a_minus_one = a.wrapping_sub(1);
2644 let pmoda = p as usize & a_minus_one;
2647 // Already aligned. Yay!
2652 return if stride == 0 {
2653 // If the pointer is not aligned, and the element is zero-sized, then no amount of
2654 // elements will ever align the pointer.
2657 a.wrapping_sub(pmoda)
2661 let smoda = stride & a_minus_one;
// `a` is a power-of-two, so it cannot be 0. `stride == 0` is handled above.
2663 let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
2664 let gcd = 1usize << gcdpow;
2666 if p as usize & (gcd - 1) == 0 {
2667 // This branch solves for the following linear congruence equation:
2669 // $$ p + so ≡ 0 mod a $$
2671 // $p$ here is the pointer value, $s$ – stride of `T`, $o$ offset in `T`s, and $a$ – the
2672 // requested alignment.
2675 // o = (a - (p mod a))/g * ((s/g)⁻¹ mod a)
2677 // The first term is “the relative alignment of p to a”, the second term is “how does
2678 // incrementing p by s bytes change the relative alignment of p”. Division by `g` is
2679 // necessary to make this equation well formed if $a$ and $s$ are not co-prime.
2681 // Furthermore, the result produced by this solution is not “minimal”, so it is necessary
// to take the result $o mod lcm(s, a)$. We can replace $lcm(s, a)$ with just $a / g$.
2683 let j = a.wrapping_sub(pmoda) >> gcdpow;
2684 let k = smoda >> gcdpow;
2685 return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow);
2688 // Cannot be aligned at all.
2694 // Equality for pointers
2695 #[stable(feature = "rust1", since = "1.0.0")]
2696 impl<T: ?Sized> PartialEq for *const T {
2698 fn eq(&self, other: &*const T) -> bool { *self == *other }
2701 #[stable(feature = "rust1", since = "1.0.0")]
2702 impl<T: ?Sized> Eq for *const T {}
2704 #[stable(feature = "rust1", since = "1.0.0")]
2705 impl<T: ?Sized> PartialEq for *mut T {
2707 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2710 #[stable(feature = "rust1", since = "1.0.0")]
2711 impl<T: ?Sized> Eq for *mut T {}
2713 /// Compares raw pointers for equality.
2715 /// This is the same as using the `==` operator, but less generic:
2716 /// the arguments have to be `*const T` raw pointers,
2717 /// not anything that implements `PartialEq`.
2719 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2720 /// by their address rather than comparing the values they point to
2721 /// (which is what the `PartialEq for &T` implementation does).
2729 /// let other_five = 5;
2730 /// let five_ref = &five;
2731 /// let same_five_ref = &five;
2732 /// let other_five_ref = &other_five;
2734 /// assert!(five_ref == same_five_ref);
2735 /// assert!(ptr::eq(five_ref, same_five_ref));
2737 /// assert!(five_ref == other_five_ref);
2738 /// assert!(!ptr::eq(five_ref, other_five_ref));
2741 /// Slices are also compared by their length (fat pointers):
2744 /// let a = [1, 2, 3];
2745 /// assert!(std::ptr::eq(&a[..3], &a[..3]));
2746 /// assert!(!std::ptr::eq(&a[..2], &a[..3]));
2747 /// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
2750 /// Traits are also compared by their implementation:
2753 /// #[repr(transparent)]
2754 /// struct Wrapper { member: i32 }
2757 /// impl Trait for Wrapper {}
2758 /// impl Trait for i32 {}
2760 /// let wrapper = Wrapper { member: 10 };
2762 /// // Pointers have equal addresses.
2763 /// assert!(std::ptr::eq(
2764 /// &wrapper as *const Wrapper as *const u8,
2765 /// &wrapper.member as *const i32 as *const u8
2768 /// // Objects have equal addresses, but `Trait` has different implementations.
2769 /// assert!(!std::ptr::eq(
2770 /// &wrapper as &dyn Trait,
2771 /// &wrapper.member as &dyn Trait,
2773 /// assert!(!std::ptr::eq(
2774 /// &wrapper as &dyn Trait as *const dyn Trait,
2775 /// &wrapper.member as &dyn Trait as *const dyn Trait,
2778 /// // Converting the reference to a `*const u8` compares by address.
2779 /// assert!(std::ptr::eq(
2780 /// &wrapper as &dyn Trait as *const dyn Trait as *const u8,
2781 /// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
2784 #[stable(feature = "ptr_eq", since = "1.17.0")]
2786 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2790 /// Hash a raw pointer.
2792 /// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
2793 /// by its address rather than the value it points to
2794 /// (which is what the `Hash for &T` implementation does).
2799 /// use std::collections::hash_map::DefaultHasher;
2800 /// use std::hash::{Hash, Hasher};
2804 /// let five_ref = &five;
2806 /// let mut hasher = DefaultHasher::new();
2807 /// ptr::hash(five_ref, &mut hasher);
2808 /// let actual = hasher.finish();
2810 /// let mut hasher = DefaultHasher::new();
2811 /// (five_ref as *const i32).hash(&mut hasher);
2812 /// let expected = hasher.finish();
2814 /// assert_eq!(actual, expected);
2816 #[stable(feature = "ptr_hash", since = "1.35.0")]
2817 pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
2818 use crate::hash::Hash;
2822 // Impls for function pointers
2823 macro_rules! fnptr_impls_safety_abi {
2824 ($FnTy: ty, $($Arg: ident),*) => {
2825 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2826 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2828 fn eq(&self, other: &Self) -> bool {
2829 *self as usize == *other as usize
2833 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2834 impl<Ret, $($Arg),*> Eq for $FnTy {}
2836 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2837 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2839 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2840 (*self as usize).partial_cmp(&(*other as usize))
2844 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2845 impl<Ret, $($Arg),*> Ord for $FnTy {
2847 fn cmp(&self, other: &Self) -> Ordering {
2848 (*self as usize).cmp(&(*other as usize))
2852 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2853 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2854 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2855 state.write_usize(*self as usize)
2859 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2860 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2861 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2862 fmt::Pointer::fmt(&(*self as *const ()), f)
2866 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2867 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2868 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2869 fmt::Pointer::fmt(&(*self as *const ()), f)
2875 macro_rules! fnptr_impls_args {
2876 ($($Arg: ident),+) => {
2877 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
2878 fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
2879 fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
2880 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
2881 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
2882 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
2885 // No variadic functions with 0 parameters
2886 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2887 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2888 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2889 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2893 fnptr_impls_args! { }
2894 fnptr_impls_args! { A }
2895 fnptr_impls_args! { A, B }
2896 fnptr_impls_args! { A, B, C }
2897 fnptr_impls_args! { A, B, C, D }
2898 fnptr_impls_args! { A, B, C, D, E }
2899 fnptr_impls_args! { A, B, C, D, E, F }
2900 fnptr_impls_args! { A, B, C, D, E, F, G }
2901 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2902 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2903 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2904 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2905 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2907 // Comparison for pointers
2908 #[stable(feature = "rust1", since = "1.0.0")]
2909 impl<T: ?Sized> Ord for *const T {
2911 fn cmp(&self, other: &*const T) -> Ordering {
2914 } else if self == other {
2922 #[stable(feature = "rust1", since = "1.0.0")]
2923 impl<T: ?Sized> PartialOrd for *const T {
2925 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2926 Some(self.cmp(other))
2930 fn lt(&self, other: &*const T) -> bool { *self < *other }
2933 fn le(&self, other: &*const T) -> bool { *self <= *other }
2936 fn gt(&self, other: &*const T) -> bool { *self > *other }
2939 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2942 #[stable(feature = "rust1", since = "1.0.0")]
2943 impl<T: ?Sized> Ord for *mut T {
2945 fn cmp(&self, other: &*mut T) -> Ordering {
2948 } else if self == other {
2956 #[stable(feature = "rust1", since = "1.0.0")]
2957 impl<T: ?Sized> PartialOrd for *mut T {
2959 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2960 Some(self.cmp(other))
2964 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2967 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2970 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2973 fn ge(&self, other: &*mut T) -> bool { *self >= *other }