1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Manually manage memory through raw pointers.
13 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
17 //! Many functions in this module take raw pointers as arguments and read from
18 //! or write to them. For this to be safe, these pointers must be *valid*.
19 //! Whether a pointer is valid depends on the operation it is used for
20 //! (read or write), and the extent of the memory that is accessed (i.e.,
21 //! how many bytes are read/written). Most functions use `*mut T` and `*const T`
22 //! to access only a single value, in which case the documentation omits the size
23 //! and implicitly assumes it to be `size_of::<T>()` bytes.
25 //! The precise rules for validity are not determined yet. The guarantees that are
26 //! provided at this point are very minimal:
28 //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
//! * All pointers (except for the null pointer) are valid for all operations of [size zero][zst].
31 //! * All accesses performed by functions in this module are *non-atomic* in the sense
32 //! of [atomic operations] used to synchronize between threads. This means it is
33 //! undefined behavior to perform two concurrent accesses to the same location from different
34 //! threads unless both accesses only read from memory. Notice that this explicitly
35 //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
36 //! be used for inter-thread synchronization.
37 //! * The result of casting a reference to a pointer is valid for as long as the
38 //! underlying object is live and no reference (just raw pointers) is used to
39 //! access the same memory.
41 //! These axioms, along with careful use of [`offset`] for pointer arithmetic,
42 //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
43 //! will be provided eventually, as the [aliasing] rules are being determined. For more
44 //! information, see the [book] as well as the section in the reference devoted
45 //! to [undefined behavior][ub].
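//!
//! For example, by the rules above a pointer obtained by casting a reference is
//! valid for reads while the referent is live (a minimal illustrative sketch):
//!
//! ```
//! use std::ptr;
//!
//! let value = 42u32;
//! let p = &value as *const u32;
//! // `value` is live and the access is a plain read, so `p` is valid here.
//! assert_eq!(unsafe { ptr::read(p) }, 42);
//! ```
//!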
49 //! Valid raw pointers as defined above are not necessarily properly aligned (where
50 //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
51 //! aligned to `mem::align_of::<T>()`). However, most functions require their
52 //! arguments to be properly aligned, and will explicitly state
53 //! this requirement in their documentation. Notable exceptions to this are
54 //! [`read_unaligned`] and [`write_unaligned`].
56 //! When a function requires proper alignment, it does so even if the access
57 //! has size 0, i.e., even if memory is not actually touched. Consider using
58 //! [`NonNull::dangling`] in such cases.
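//!
//! For example, a zero-sized access still needs a non-null, well-aligned pointer even
//! though no memory is touched; [`NonNull::dangling`] provides one (illustrative sketch):
//!
//! ```
//! use std::ptr::{self, NonNull};
//!
//! // `NonNull::dangling` is non-null and aligned for the pointee type, so a
//! // zero-sized read through it is allowed.
//! let p = NonNull::<()>::dangling().as_ptr();
//! let _unit: () = unsafe { ptr::read(p) };
//! ```
//!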
60 //! [aliasing]: ../../nomicon/aliasing.html
61 //! [book]: ../../book/second-edition/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
62 //! [ub]: ../../reference/behavior-considered-undefined.html
63 //! [null]: ./fn.null.html
64 //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
65 //! [atomic operations]: ../../std/sync/atomic/index.html
66 //! [`copy`]: ../../std/ptr/fn.copy.html
67 //! [`offset`]: ../../std/primitive.pointer.html#method.offset
68 //! [`read_unaligned`]: ./fn.read_unaligned.html
69 //! [`write_unaligned`]: ./fn.write_unaligned.html
70 //! [`read_volatile`]: ./fn.read_volatile.html
71 //! [`write_volatile`]: ./fn.write_volatile.html
72 //! [`NonNull::dangling`]: ./struct.NonNull.html#method.dangling
74 #![stable(feature = "rust1", since = "1.0.0")]
78 use ops::CoerceUnsized;
81 use marker::{PhantomData, Unsize};
85 use cmp::Ordering::{self, Less, Equal, Greater};
87 #[stable(feature = "rust1", since = "1.0.0")]
88 pub use intrinsics::copy_nonoverlapping;
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub use intrinsics::copy;
93 #[stable(feature = "rust1", since = "1.0.0")]
94 pub use intrinsics::write_bytes;
96 /// Executes the destructor (if any) of the pointed-to value.
98 /// This is semantically equivalent to calling [`ptr::read`] and discarding
99 /// the result, but has the following advantages:
101 /// * It is *required* to use `drop_in_place` to drop unsized types like
102 /// trait objects, because they can't be read out onto the stack and
103 /// dropped normally.
105 /// * It is friendlier to the optimizer to do this over [`ptr::read`] when
106 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the copy.
110 /// [`ptr::read`]: ../ptr/fn.read.html
114 /// Behavior is undefined if any of the following conditions are violated:
116 /// * `to_drop` must be [valid] for reads.
118 /// * `to_drop` must be properly aligned. See the example below for how to drop
119 /// an unaligned pointer.
121 /// Additionally, if `T` is not [`Copy`], using the pointed-to value after
122 /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write`] can be used to overwrite data without causing it to be dropped.
127 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
129 /// [valid]: ../ptr/index.html#safety
130 /// [`Copy`]: ../marker/trait.Copy.html
131 /// [`write`]: ../ptr/fn.write.html
135 /// Manually remove the last item from a vector:
141 /// let last = Rc::new(1);
142 /// let weak = Rc::downgrade(&last);
144 /// let mut v = vec![Rc::new(0), last];
147 /// // Get a raw pointer to the last element in `v`.
148 /// let ptr = &mut v[1] as *mut _;
149 /// // Shorten `v` to prevent the last item from being dropped. We do that first,
150 /// // to prevent issues if the `drop_in_place` below panics.
/// // Without a call to `drop_in_place`, the last item would never be dropped,
153 /// // and the memory it manages would be leaked.
154 /// ptr::drop_in_place(ptr);
157 /// assert_eq!(v, &[0.into()]);
159 /// // Ensure that the last item was dropped.
160 /// assert!(weak.upgrade().is_none());
/// Unaligned values cannot be dropped in place; they must be copied to an aligned location first:
169 /// unsafe fn drop_after_copy<T>(to_drop: *mut T) {
170 /// let mut copy: T = mem::uninitialized();
171 /// ptr::copy(to_drop, &mut copy, 1);
175 /// #[repr(packed, C)]
178 /// unaligned: Vec<i32>,
181 /// let mut p = Packed { _padding: 0, unaligned: vec![42] };
183 /// drop_after_copy(&mut p.unaligned as *mut _);
188 /// Notice that the compiler performs this copy automatically when dropping packed structs,
/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place` manually.
191 #[stable(feature = "drop_in_place", since = "1.8.0")]
192 #[lang = "drop_in_place"]
193 #[allow(unconditional_recursion)]
194 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
195 // Code here does not matter - this is replaced by the
196 // real drop glue by the compiler.
197 drop_in_place(to_drop);
200 /// Creates a null raw pointer.
207 /// let p: *const i32 = ptr::null();
208 /// assert!(p.is_null());
211 #[stable(feature = "rust1", since = "1.0.0")]
213 pub const fn null<T>() -> *const T { 0 as *const T }
215 /// Creates a null mutable raw pointer.
222 /// let p: *mut i32 = ptr::null_mut();
223 /// assert!(p.is_null());
226 #[stable(feature = "rust1", since = "1.0.0")]
228 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
230 /// Swaps the values at two mutable locations of the same type, without
231 /// deinitializing either.
/// Aside from the following two exceptions, this function is semantically
234 /// equivalent to [`mem::swap`]:
236 /// * It operates on raw pointers instead of references. When references are
237 /// available, [`mem::swap`] should be preferred.
239 /// * The two pointed-to values may overlap. If the values do overlap, then the
240 /// overlapping region of memory from `x` will be used. This is demonstrated
241 /// in the second example below.
243 /// [`mem::swap`]: ../mem/fn.swap.html
247 /// Behavior is undefined if any of the following conditions are violated:
249 /// * Both `x` and `y` must be [valid] for reads and writes.
251 /// * Both `x` and `y` must be properly aligned.
253 /// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
255 /// [valid]: ../ptr/index.html#safety
259 /// Swapping two non-overlapping regions:
264 /// let mut array = [0, 1, 2, 3];
266 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
267 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
271 /// assert_eq!([2, 3, 0, 1], array);
275 /// Swapping two overlapping regions:
280 /// let mut array = [0, 1, 2, 3];
282 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
283 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
287 /// // The indices `1..3` of the slice overlap between `x` and `y`.
/// // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
289 /// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
290 /// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
291 /// // This implementation is defined to make the latter choice.
292 /// assert_eq!([1, 0, 1, 2], array);
296 #[stable(feature = "rust1", since = "1.0.0")]
297 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
298 // Give ourselves some scratch space to work with
299 let mut tmp: T = mem::uninitialized();
302 copy_nonoverlapping(x, &mut tmp, 1);
303 copy(y, x, 1); // `x` and `y` may overlap
304 copy_nonoverlapping(&tmp, y, 1);
// `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
307 // because it's no longer relevant.
311 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
312 /// beginning at `x` and `y`. The two regions must *not* overlap.
316 /// Behavior is undefined if any of the following conditions are violated:
318 /// * Both `x` and `y` must be [valid] for reads and writes of `count *
319 /// size_of::<T>()` bytes.
321 /// * Both `x` and `y` must be properly aligned.
323 /// * The region of memory beginning at `x` with a size of `count *
324 /// size_of::<T>()` bytes must *not* overlap with the region of memory
325 /// beginning at `y` with the same size.
327 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
328 /// the pointers must be non-NULL and properly aligned.
330 /// [valid]: ../ptr/index.html#safety
339 /// let mut x = [1, 2, 3, 4];
340 /// let mut y = [7, 8, 9];
343 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
346 /// assert_eq!(x, [7, 8, 3, 4]);
347 /// assert_eq!(y, [1, 2, 9]);
350 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
351 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
352 let x = x as *mut u8;
353 let y = y as *mut u8;
354 let len = mem::size_of::<T>() * count;
355 swap_nonoverlapping_bytes(x, y, len)
359 pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
360 // For types smaller than the block optimization below,
361 // just swap directly to avoid pessimizing codegen.
362 if mem::size_of::<T>() < 32 {
364 copy_nonoverlapping(y, x, 1);
367 swap_nonoverlapping(x, y, 1);
372 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
// The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
// that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
// Haswell E processors. LLVM can optimize better if we give a struct a
376 // #[repr(simd)], even if we don't actually use this struct directly.
378 // FIXME repr(simd) broken on emscripten and redox
379 // It's also broken on big-endian powerpc64 and s390x. #42778
#[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                   target_endian = "big")),
           repr(simd))]
struct Block(u64, u64, u64, u64);
384 struct UnalignedBlock(u64, u64, u64, u64);
386 let block_size = mem::size_of::<Block>();
388 // Loop through x & y, copying them `Block` at a time
389 // The optimizer should unroll the loop fully for most types
390 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
392 while i + block_size <= len {
393 // Create some uninitialized memory as scratch space
394 // Declaring `t` here avoids aligning the stack when this loop is unused
395 let mut t: Block = mem::uninitialized();
396 let t = &mut t as *mut _ as *mut u8;
400 // Swap a block of bytes of x & y, using t as a temporary buffer
401 // This should be optimized into efficient SIMD operations where available
402 copy_nonoverlapping(x, t, block_size);
403 copy_nonoverlapping(y, x, block_size);
404 copy_nonoverlapping(t, y, block_size);
409 // Swap any remaining bytes
410 let mut t: UnalignedBlock = mem::uninitialized();
413 let t = &mut t as *mut _ as *mut u8;
417 copy_nonoverlapping(x, t, rem);
418 copy_nonoverlapping(y, x, rem);
419 copy_nonoverlapping(t, y, rem);
423 /// Moves `src` into the pointed `dst`, returning the previous `dst` value.
425 /// Neither value is dropped.
427 /// This function is semantically equivalent to [`mem::replace`] except that it
428 /// operates on raw pointers instead of references. When references are
429 /// available, [`mem::replace`] should be preferred.
431 /// [`mem::replace`]: ../mem/fn.replace.html
435 /// Behavior is undefined if any of the following conditions are violated:
437 /// * `dst` must be [valid] for writes.
439 /// * `dst` must be properly aligned.
441 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
443 /// [valid]: ../ptr/index.html#safety
450 /// let mut rust = vec!['b', 'u', 's', 't'];
452 /// // `mem::replace` would have the same effect without requiring the unsafe
455 /// ptr::replace(&mut rust[0], 'r')
458 /// assert_eq!(b, 'b');
459 /// assert_eq!(rust, &['r', 'u', 's', 't']);
462 #[stable(feature = "rust1", since = "1.0.0")]
463 pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
464 mem::swap(&mut *dst, &mut src); // cannot overlap
468 /// Reads the value from `src` without moving it. This leaves the
469 /// memory in `src` unchanged.
473 /// Behavior is undefined if any of the following conditions are violated:
475 /// * `src` must be [valid] for reads.
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the case.
480 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
488 /// let y = &x as *const i32;
491 /// assert_eq!(std::ptr::read(y), 12);
495 /// Manually implement [`mem::swap`]:
500 /// fn swap<T>(a: &mut T, b: &mut T) {
502 /// // Create a bitwise copy of the value at `a` in `tmp`.
503 /// let tmp = ptr::read(a);
505 /// // Exiting at this point (either by explicitly returning or by
506 /// // calling a function which panics) would cause the value in `tmp` to
507 /// // be dropped while the same value is still referenced by `a`. This
508 /// // could trigger undefined behavior if `T` is not `Copy`.
510 /// // Create a bitwise copy of the value at `b` in `a`.
511 /// // This is safe because mutable references cannot alias.
512 /// ptr::copy_nonoverlapping(b, a, 1);
514 /// // As above, exiting here could trigger undefined behavior because
515 /// // the same value is referenced by `a` and `b`.
517 /// // Move `tmp` into `b`.
518 /// ptr::write(b, tmp);
520 /// // `tmp` has been moved (`write` takes ownership of its second argument),
521 /// // so nothing is dropped implicitly here.
525 /// let mut foo = "foo".to_owned();
526 /// let mut bar = "bar".to_owned();
528 /// swap(&mut foo, &mut bar);
530 /// assert_eq!(foo, "bar");
531 /// assert_eq!(bar, "foo");
534 /// ## Ownership of the Returned Value
536 /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
537 /// If `T` is not [`Copy`], using both the returned value and the value at
538 /// `*src` can violate memory safety. Note that assigning to `*src` counts as a
539 /// use because it will attempt to drop the value at `*src`.
541 /// [`write`] can be used to overwrite data without causing it to be dropped.
546 /// let mut s = String::from("foo");
548 /// // `s2` now points to the same underlying memory as `s`.
549 /// let mut s2: String = ptr::read(&s);
551 /// assert_eq!(s2, "foo");
553 /// // Assigning to `s2` causes its original value to be dropped. Beyond
554 /// // this point, `s` must no longer be used, as the underlying memory has
556 /// s2 = String::default();
557 /// assert_eq!(s2, "");
559 /// // Assigning to `s` would cause the old value to be dropped again,
560 /// // resulting in undefined behavior.
561 /// // s = String::from("bar"); // ERROR
563 /// // `ptr::write` can be used to overwrite a value without dropping it.
564 /// ptr::write(&mut s, String::from("bar"));
567 /// assert_eq!(s, "bar");
570 /// [`mem::swap`]: ../mem/fn.swap.html
571 /// [valid]: ../ptr/index.html#safety
572 /// [`Copy`]: ../marker/trait.Copy.html
573 /// [`read_unaligned`]: ./fn.read_unaligned.html
574 /// [`write`]: ./fn.write.html
576 #[stable(feature = "rust1", since = "1.0.0")]
577 pub unsafe fn read<T>(src: *const T) -> T {
578 let mut tmp: T = mem::uninitialized();
579 copy_nonoverlapping(src, &mut tmp, 1);
583 /// Reads the value from `src` without moving it. This leaves the
584 /// memory in `src` unchanged.
586 /// Unlike [`read`], `read_unaligned` works with unaligned pointers.
590 /// Behavior is undefined if any of the following conditions are violated:
592 /// * `src` must be [valid] for reads.
594 /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
595 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
596 /// value and the value at `*src` can [violate memory safety][read-ownership].
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
600 /// [`Copy`]: ../marker/trait.Copy.html
601 /// [`read`]: ./fn.read.html
602 /// [`write_unaligned`]: ./fn.write_unaligned.html
603 /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
604 /// [valid]: ../ptr/index.html#safety
608 /// Access members of a packed struct by reference:
613 /// #[repr(packed, C)]
621 /// unaligned: 0x01020304,
625 /// // Take the address of a 32-bit integer which is not aligned.
626 /// // This must be done as a raw pointer; unaligned references are invalid.
627 /// let unaligned = &x.unaligned as *const u32;
629 /// // Dereferencing normally will emit an aligned load instruction,
630 /// // causing undefined behavior.
631 /// // let v = *unaligned; // ERROR
633 /// // Instead, use `read_unaligned` to read improperly aligned values.
634 /// let v = ptr::read_unaligned(unaligned);
639 /// // Accessing unaligned values directly is safe.
640 /// assert!(x.unaligned == v);
643 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
644 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
645 let mut tmp: T = mem::uninitialized();
646 copy_nonoverlapping(src as *const u8,
647 &mut tmp as *mut T as *mut u8,
648 mem::size_of::<T>());
652 /// Overwrites a memory location with the given value without reading or
653 /// dropping the old value.
655 /// `write` does not drop the contents of `dst`. This is safe, but it could leak
656 /// allocations or resources, so care should be taken not to overwrite an object
657 /// that should be dropped.
659 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
660 /// location pointed to by `dst`.
662 /// This is appropriate for initializing uninitialized memory, or overwriting
663 /// memory that has previously been [`read`] from.
665 /// [`read`]: ./fn.read.html
669 /// Behavior is undefined if any of the following conditions are violated:
671 /// * `dst` must be [valid] for writes.
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the case.
676 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
678 /// [valid]: ../ptr/index.html#safety
679 /// [`write_unaligned`]: ./fn.write_unaligned.html
687 /// let y = &mut x as *mut i32;
691 /// std::ptr::write(y, z);
692 /// assert_eq!(std::ptr::read(y), 12);
696 /// Manually implement [`mem::swap`]:
701 /// fn swap<T>(a: &mut T, b: &mut T) {
703 /// // Create a bitwise copy of the value at `a` in `tmp`.
704 /// let tmp = ptr::read(a);
706 /// // Exiting at this point (either by explicitly returning or by
707 /// // calling a function which panics) would cause the value in `tmp` to
708 /// // be dropped while the same value is still referenced by `a`. This
709 /// // could trigger undefined behavior if `T` is not `Copy`.
711 /// // Create a bitwise copy of the value at `b` in `a`.
712 /// // This is safe because mutable references cannot alias.
713 /// ptr::copy_nonoverlapping(b, a, 1);
715 /// // As above, exiting here could trigger undefined behavior because
716 /// // the same value is referenced by `a` and `b`.
718 /// // Move `tmp` into `b`.
719 /// ptr::write(b, tmp);
721 /// // `tmp` has been moved (`write` takes ownership of its second argument),
722 /// // so nothing is dropped implicitly here.
726 /// let mut foo = "foo".to_owned();
727 /// let mut bar = "bar".to_owned();
729 /// swap(&mut foo, &mut bar);
731 /// assert_eq!(foo, "bar");
732 /// assert_eq!(bar, "foo");
735 /// [`mem::swap`]: ../mem/fn.swap.html
737 #[stable(feature = "rust1", since = "1.0.0")]
738 pub unsafe fn write<T>(dst: *mut T, src: T) {
739 intrinsics::move_val_init(&mut *dst, src)
742 /// Overwrites a memory location with the given value without reading or
743 /// dropping the old value.
745 /// Unlike [`write`], the pointer may be unaligned.
747 /// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
748 /// could leak allocations or resources, so care should be taken not to overwrite
749 /// an object that should be dropped.
751 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
752 /// location pointed to by `dst`.
754 /// This is appropriate for initializing uninitialized memory, or overwriting
755 /// memory that has previously been read with [`read_unaligned`].
757 /// [`write`]: ./fn.write.html
758 /// [`read_unaligned`]: ./fn.read_unaligned.html
762 /// Behavior is undefined if any of the following conditions are violated:
764 /// * `dst` must be [valid] for writes.
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
768 /// [valid]: ../ptr/index.html#safety
772 /// Access fields in a packed struct:
775 /// use std::{mem, ptr};
777 /// #[repr(packed, C)]
778 /// #[derive(Default)]
784 /// let v = 0x01020304;
785 /// let mut x: Packed = unsafe { mem::zeroed() };
788 /// // Take a reference to a 32-bit integer which is not aligned.
789 /// let unaligned = &mut x.unaligned as *mut u32;
791 /// // Dereferencing normally will emit an aligned store instruction,
792 /// // causing undefined behavior because the pointer is not aligned.
793 /// // *unaligned = v; // ERROR
795 /// // Instead, use `write_unaligned` to write improperly aligned values.
796 /// ptr::write_unaligned(unaligned, v);
799 /// // Accessing unaligned values directly is safe.
800 /// assert!(x.unaligned == v);
803 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
804 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
copy_nonoverlapping(&src as *const T as *const u8,
                    dst as *mut u8,
                    mem::size_of::<T>());
811 /// Performs a volatile read of the value from `src` without moving it. This
812 /// leaves the memory in `src` unchanged.
814 /// Volatile operations are intended to act on I/O memory, and are guaranteed
815 /// to not be elided or reordered by the compiler across other volatile
818 /// Memory accessed with `read_volatile` or [`write_volatile`] should not be
819 /// accessed with non-volatile operations.
821 /// [`write_volatile`]: ./fn.write_volatile.html
825 /// Rust does not currently have a rigorously and formally defined memory model,
826 /// so the precise semantics of what "volatile" means here is subject to change
827 /// over time. That being said, the semantics will almost always end up pretty
828 /// similar to [C11's definition of volatile][c11].
830 /// The compiler shouldn't change the relative order or number of volatile
831 /// memory operations. However, volatile memory operations on zero-sized types
832 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
833 /// and may be ignored.
835 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
839 /// Behavior is undefined if any of the following conditions are violated:
841 /// * `src` must be [valid] for reads.
843 /// * `src` must be properly aligned.
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
846 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
847 /// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly incorrect.
851 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
853 /// [valid]: ../ptr/index.html#safety
854 /// [`Copy`]: ../marker/trait.Copy.html
855 /// [`read`]: ./fn.read.html
857 /// Just like in C, whether an operation is volatile has no bearing whatsoever
858 /// on questions involving concurrent access from multiple threads. Volatile
859 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
860 /// a race between a `read_volatile` and any write operation to the same location
861 /// is undefined behavior.
869 /// let y = &x as *const i32;
872 /// assert_eq!(std::ptr::read_volatile(y), 12);
876 #[stable(feature = "volatile", since = "1.9.0")]
877 pub unsafe fn read_volatile<T>(src: *const T) -> T {
878 intrinsics::volatile_load(src)
881 /// Performs a volatile write of a memory location with the given value without
882 /// reading or dropping the old value.
884 /// Volatile operations are intended to act on I/O memory, and are guaranteed
885 /// to not be elided or reordered by the compiler across other volatile
888 /// Memory accessed with [`read_volatile`] or `write_volatile` should not be
889 /// accessed with non-volatile operations.
891 /// `write_volatile` does not drop the contents of `dst`. This is safe, but it
892 /// could leak allocations or resources, so care should be taken not to overwrite
893 /// an object that should be dropped.
895 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
896 /// location pointed to by `dst`.
898 /// [`read_volatile`]: ./fn.read_volatile.html
902 /// Rust does not currently have a rigorously and formally defined memory model,
903 /// so the precise semantics of what "volatile" means here is subject to change
904 /// over time. That being said, the semantics will almost always end up pretty
905 /// similar to [C11's definition of volatile][c11].
907 /// The compiler shouldn't change the relative order or number of volatile
908 /// memory operations. However, volatile memory operations on zero-sized types
909 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
910 /// and may be ignored.
912 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
916 /// Behavior is undefined if any of the following conditions are violated:
918 /// * `dst` must be [valid] for writes.
920 /// * `dst` must be properly aligned.
922 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
924 /// [valid]: ../ptr/index.html#safety
926 /// Just like in C, whether an operation is volatile has no bearing whatsoever
927 /// on questions involving concurrent access from multiple threads. Volatile
928 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
929 /// a race between a `write_volatile` and any other operation (reading or writing)
930 /// on the same location is undefined behavior.
938 /// let y = &mut x as *mut i32;
942 /// std::ptr::write_volatile(y, z);
943 /// assert_eq!(std::ptr::read_volatile(y), 12);
947 #[stable(feature = "volatile", since = "1.9.0")]
948 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
949 intrinsics::volatile_store(dst, src);
952 #[lang = "const_ptr"]
953 impl<T: ?Sized> *const T {
954 /// Returns `true` if the pointer is null.
956 /// Note that unsized types have many possible null pointers, as only the
957 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to each other.
966 /// let s: &str = "Follow the rabbit";
967 /// let ptr: *const u8 = s.as_ptr();
968 /// assert!(!ptr.is_null());
970 #[stable(feature = "rust1", since = "1.0.0")]
972 pub fn is_null(self) -> bool {
973 // Compare via a cast to a thin pointer, so fat pointers are only
974 // considering their "data" part for null-ness.
975 (self as *const u8) == null()
978 /// Returns `None` if the pointer is null, or else returns a reference to
979 /// the value wrapped in `Some`.
983 /// While this method and its mutable counterpart are useful for
984 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid memory.
988 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
989 /// not necessarily reflect the actual lifetime of the data.
996 /// let ptr: *const u8 = &10u8 as *const u8;
999 /// if let Some(val_back) = ptr.as_ref() {
1000 /// println!("We got back the value: {}!", val_back);
1005 /// # Null-unchecked version
1007 /// If you are sure the pointer can never be null and are looking for some kind of
/// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1009 /// dereference the pointer directly.
1012 /// let ptr: *const u8 = &10u8 as *const u8;
1015 /// let val_back = &*ptr;
1016 /// println!("We got back the value: {}!", val_back);
1019 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1021 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1029 /// Calculates the offset from a pointer.
1031 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1032 /// offset of `3 * size_of::<T>()` bytes.
1036 /// If any of the following conditions are violated, the result is Undefined
1039 /// * Both the starting and resulting pointer must be either in bounds or one
1040 /// byte past the end of the same allocated object.
1042 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1044 /// * The offset being in bounds cannot rely on "wrapping around" the address
1045 /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1048 /// never reach a size where an offset is a concern. For instance, `Vec`
1049 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1050 /// `vec.as_ptr().add(vec.len())` is always safe.
1052 /// Most platforms fundamentally can't even construct such an allocation.
1053 /// For instance, no known 64-bit platform can ever serve a request
1054 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1055 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1056 /// more than `isize::MAX` bytes with things like Physical Address
1057 /// Extension. As such, memory acquired directly from allocators or memory
1058 /// mapped files *may* be too large to handle with this function.
1060 /// Consider using `wrapping_offset` instead if these constraints are
1061 /// difficult to satisfy. The only advantage of this method is that it
1062 /// enables more aggressive compiler optimizations.
1069 /// let s: &str = "123";
1070 /// let ptr: *const u8 = s.as_ptr();
1073 /// println!("{}", *ptr.offset(1) as char);
1074 /// println!("{}", *ptr.offset(2) as char);
1077 #[stable(feature = "rust1", since = "1.0.0")]
1079 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
1080 intrinsics::offset(self, count)
1083 /// Calculates the offset from a pointer using wrapping arithmetic.
1085 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1086 /// offset of `3 * size_of::<T>()` bytes.
1090 /// The resulting pointer does not need to be in bounds, but it is
1091 /// potentially hazardous to dereference (which requires `unsafe`).
1092 /// In particular, the resulting pointer may *not* be used to access a
1093 /// different allocated object than the one `self` points to. In other
1094 /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1095 /// *not* the same as `y`, and dereferencing it is undefined behavior
1096 /// unless `x` and `y` point into the same allocated object.
1098 /// Always use `.offset(count)` instead when possible, because `offset`
1099 /// allows the compiler to optimize better. If you need to cross object
1100 /// boundaries, cast the pointer to an integer and do the arithmetic there.
1107 /// // Iterate using a raw pointer in increments of two elements
1108 /// let data = [1u8, 2, 3, 4, 5];
1109 /// let mut ptr: *const u8 = data.as_ptr();
1111 /// let end_rounded_up = ptr.wrapping_offset(6);
1113 /// // This loop prints "1, 3, 5, "
1114 /// while ptr != end_rounded_up {
1116 /// print!("{}, ", *ptr);
1118 /// ptr = ptr.wrapping_offset(step);
1121 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1123 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
1125 intrinsics::arith_offset(self, count)
1129 /// Calculates the distance between two pointers. The returned value is in
1130 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1132 /// This function is the inverse of [`offset`].
1134 /// [`offset`]: #method.offset
1135 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
1139 /// If any of the following conditions are violated, the result is Undefined
1142 /// * Both the starting and other pointer must be either in bounds or one
1143 /// byte past the end of the same allocated object.
1145 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1147 /// * The distance between the pointers, in bytes, must be an exact multiple
1148 /// of the size of `T`.
1150 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1152 /// The compiler and standard library generally try to ensure allocations
1153 /// never reach a size where an offset is a concern. For instance, `Vec`
1154 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1155 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1157 /// Most platforms fundamentally can't even construct such an allocation.
1158 /// For instance, no known 64-bit platform can ever serve a request
1159 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1160 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1161 /// more than `isize::MAX` bytes with things like Physical Address
1162 /// Extension. As such, memory acquired directly from allocators or memory
1163 /// mapped files *may* be too large to handle with this function.
1165 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1166 /// difficult to satisfy. The only advantage of this method is that it
1167 /// enables more aggressive compiler optimizations.
1171 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1178 /// #![feature(ptr_offset_from)]
1181 /// let ptr1: *const i32 = &a[1];
1182 /// let ptr2: *const i32 = &a[3];
1184 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1185 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1186 /// assert_eq!(ptr1.offset(2), ptr2);
1187 /// assert_eq!(ptr2.offset(-2), ptr1);
1190 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1192 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1193 let pointee_size = mem::size_of::<T>();
1194 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
1196 // This is the same sequence that Clang emits for pointer subtraction.
1197 // It can be neither `nsw` nor `nuw` because the input is treated as
1198 // unsigned but then the output is treated as signed, so neither works.
1199 let d = isize::wrapping_sub(self as _, origin as _);
1200 intrinsics::exact_div(d, pointee_size as _)
1203 /// Calculates the distance between two pointers. The returned value is in
1204 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
1210 /// Though this method is safe for any two pointers, note that its result
1211 /// will be mostly useless if the two pointers aren't into the same allocated
1212 /// object, for example if they point to two different local variables.
1216 /// This function panics if `T` is a zero-sized type.
1223 /// #![feature(ptr_wrapping_offset_from)]
1226 /// let ptr1: *const i32 = &a[1];
1227 /// let ptr2: *const i32 = &a[3];
1228 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1229 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1230 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1231 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1233 /// let ptr1: *const i32 = 3 as _;
1234 /// let ptr2: *const i32 = 13 as _;
1235 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1237 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1239 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1240 let pointee_size = mem::size_of::<T>();
1241 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
1243 let d = isize::wrapping_sub(self as _, origin as _);
1244 d.wrapping_div(pointee_size as _)
1247 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1249 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1250 /// offset of `3 * size_of::<T>()` bytes.
1254 /// If any of the following conditions are violated, the result is Undefined
1257 /// * Both the starting and resulting pointer must be either in bounds or one
1258 /// byte past the end of the same allocated object.
1260 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1262 /// * The offset being in bounds cannot rely on "wrapping around" the address
1263 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1266 /// never reach a size where an offset is a concern. For instance, `Vec`
1267 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1268 /// `vec.as_ptr().add(vec.len())` is always safe.
1270 /// Most platforms fundamentally can't even construct such an allocation.
1271 /// For instance, no known 64-bit platform can ever serve a request
1272 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1273 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1274 /// more than `isize::MAX` bytes with things like Physical Address
1275 /// Extension. As such, memory acquired directly from allocators or memory
1276 /// mapped files *may* be too large to handle with this function.
1278 /// Consider using `wrapping_offset` instead if these constraints are
1279 /// difficult to satisfy. The only advantage of this method is that it
1280 /// enables more aggressive compiler optimizations.
1287 /// let s: &str = "123";
1288 /// let ptr: *const u8 = s.as_ptr();
1291 /// println!("{}", *ptr.add(1) as char);
1292 /// println!("{}", *ptr.add(2) as char);
1295 #[stable(feature = "pointer_methods", since = "1.26.0")]
1297 pub unsafe fn add(self, count: usize) -> Self
1300 self.offset(count as isize)
1303 /// Calculates the offset from a pointer (convenience for
1304 /// `.offset((count as isize).wrapping_neg())`).
1306 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1307 /// offset of `3 * size_of::<T>()` bytes.
1311 /// If any of the following conditions are violated, the result is Undefined
1314 /// * Both the starting and resulting pointer must be either in bounds or one
1315 /// byte past the end of the same allocated object.
1317 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1319 /// * The offset being in bounds cannot rely on "wrapping around" the address
1320 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1323 /// never reach a size where an offset is a concern. For instance, `Vec`
1324 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1325 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1327 /// Most platforms fundamentally can't even construct such an allocation.
1328 /// For instance, no known 64-bit platform can ever serve a request
1329 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1330 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1331 /// more than `isize::MAX` bytes with things like Physical Address
1332 /// Extension. As such, memory acquired directly from allocators or memory
1333 /// mapped files *may* be too large to handle with this function.
1335 /// Consider using `wrapping_offset` instead if these constraints are
1336 /// difficult to satisfy. The only advantage of this method is that it
1337 /// enables more aggressive compiler optimizations.
1344 /// let s: &str = "123";
1347 /// let end: *const u8 = s.as_ptr().add(3);
1348 /// println!("{}", *end.sub(1) as char);
1349 /// println!("{}", *end.sub(2) as char);
1352 #[stable(feature = "pointer_methods", since = "1.26.0")]
1354 pub unsafe fn sub(self, count: usize) -> Self
1357 self.offset((count as isize).wrapping_neg())
1360 /// Calculates the offset from a pointer using wrapping arithmetic.
1361 /// (convenience for `.wrapping_offset(count as isize)`)
1363 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1364 /// offset of `3 * size_of::<T>()` bytes.
1368 /// The resulting pointer does not need to be in bounds, but it is
1369 /// potentially hazardous to dereference (which requires `unsafe`).
1371 /// Always use `.add(count)` instead when possible, because `add`
1372 /// allows the compiler to optimize better.
1379 /// // Iterate using a raw pointer in increments of two elements
1380 /// let data = [1u8, 2, 3, 4, 5];
1381 /// let mut ptr: *const u8 = data.as_ptr();
1383 /// let end_rounded_up = ptr.wrapping_add(6);
1385 /// // This loop prints "1, 3, 5, "
1386 /// while ptr != end_rounded_up {
1388 /// print!("{}, ", *ptr);
1390 /// ptr = ptr.wrapping_add(step);
1393 #[stable(feature = "pointer_methods", since = "1.26.0")]
1395 pub fn wrapping_add(self, count: usize) -> Self
1398 self.wrapping_offset(count as isize)
1401 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1404 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1405 /// offset of `3 * size_of::<T>()` bytes.
1409 /// The resulting pointer does not need to be in bounds, but it is
1410 /// potentially hazardous to dereference (which requires `unsafe`).
1412 /// Always use `.sub(count)` instead when possible, because `sub`
1413 /// allows the compiler to optimize better.
1420 /// // Iterate using a raw pointer in increments of two elements (backwards)
1421 /// let data = [1u8, 2, 3, 4, 5];
1422 /// let mut ptr: *const u8 = data.as_ptr();
1423 /// let start_rounded_down = ptr.wrapping_sub(2);
1424 /// ptr = ptr.wrapping_add(4);
1426 /// // This loop prints "5, 3, 1, "
1427 /// while ptr != start_rounded_down {
1429 /// print!("{}, ", *ptr);
1431 /// ptr = ptr.wrapping_sub(step);
1434 #[stable(feature = "pointer_methods", since = "1.26.0")]
1436 pub fn wrapping_sub(self, count: usize) -> Self
1439 self.wrapping_offset((count as isize).wrapping_neg())
1442 /// Reads the value from `self` without moving it. This leaves the
1443 /// memory in `self` unchanged.
1445 /// See [`ptr::read`] for safety concerns and examples.
1447 /// [`ptr::read`]: ./ptr/fn.read.html
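///
/// # Examples
///
/// A brief illustrative sketch (the value here is arbitrary):
///
/// ```
/// let x = 7u32;
/// let p = &x as *const u32;
/// // Bitwise-copies the `u32` out of `*p` without moving it.
/// assert_eq!(unsafe { p.read() }, 7);
/// ```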
1448 #[stable(feature = "pointer_methods", since = "1.26.0")]
1450 pub unsafe fn read(self) -> T
1456 /// Performs a volatile read of the value from `self` without moving it. This
1457 /// leaves the memory in `self` unchanged.
1459 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1460 /// to not be elided or reordered by the compiler across other volatile
1463 /// See [`ptr::read_volatile`] for safety concerns and examples.
1465 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
1466 #[stable(feature = "pointer_methods", since = "1.26.0")]
1468 pub unsafe fn read_volatile(self) -> T
1474 /// Reads the value from `self` without moving it. This leaves the
1475 /// memory in `self` unchanged.
1477 /// Unlike `read`, the pointer may be unaligned.
1479 /// See [`ptr::read_unaligned`] for safety concerns and examples.
1481 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
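///
/// # Examples
///
/// A brief illustrative sketch (the buffer contents are arbitrary):
///
/// ```
/// let bytes = [1u8; 5];
/// // Starting one byte into the buffer is generally not 4-byte aligned,
/// // but `read_unaligned` does not require alignment.
/// let p = bytes[1..].as_ptr() as *const u32;
/// assert_eq!(unsafe { p.read_unaligned() }, 0x0101_0101);
/// ```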
1482 #[stable(feature = "pointer_methods", since = "1.26.0")]
1484 pub unsafe fn read_unaligned(self) -> T
1487 read_unaligned(self)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1491 /// and destination may overlap.
1493 /// NOTE: this has the *same* argument order as [`ptr::copy`].
1495 /// See [`ptr::copy`] for safety concerns and examples.
1497 /// [`ptr::copy`]: ./ptr/fn.copy.html
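///
/// # Examples
///
/// A brief illustrative sketch (the buffers are arbitrary):
///
/// ```
/// let src = [1u8, 2, 3, 4];
/// let mut dst = [0u8; 4];
/// // Copies four `u8` values; the regions here do not overlap, but `copy_to`
/// // would also permit overlap.
/// unsafe { src.as_ptr().copy_to(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, src);
/// ```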
1498 #[stable(feature = "pointer_methods", since = "1.26.0")]
1500 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1503 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1507 /// and destination may *not* overlap.
1509 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1511 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1513 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
1514 #[stable(feature = "pointer_methods", since = "1.26.0")]
1516 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1519 copy_nonoverlapping(self, dest, count)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to `align`.
1525 /// If it is not possible to align the pointer, the implementation returns
1526 /// `usize::max_value()`.
1528 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1529 /// used with the `offset` or `offset_to` methods.
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1532 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1533 /// the returned offset is correct in all terms other than alignment.
1537 /// The function panics if `align` is not a power-of-two.
1541 /// Accessing adjacent `u8` as `u16`
1544 /// # #![feature(align_offset)]
1545 /// # fn foo(n: usize) {
1546 /// # use std::mem::align_of;
1548 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1549 /// let ptr = &x[n] as *const u8;
1550 /// let offset = ptr.align_offset(align_of::<u16>());
1551 /// if offset < x.len() - n - 1 {
1552 /// let u16_ptr = ptr.add(offset) as *const u16;
1553 /// assert_ne!(*u16_ptr, 500);
1555 /// // while the pointer can be aligned via `offset`, it would point
1556 /// // outside the allocation
1560 #[unstable(feature = "align_offset", issue = "44488")]
1561 pub fn align_offset(self, align: usize) -> usize where T: Sized {
1562 if !align.is_power_of_two() {
1563 panic!("align_offset: align is not a power-of-two");
1566 align_offset(self, align)
1573 impl<T: ?Sized> *mut T {
1574 /// Returns `true` if the pointer is null.
1576 /// Note that unsized types have many possible null pointers, as only the
1577 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to each other.
1586 /// let mut s = [1, 2, 3];
1587 /// let ptr: *mut u32 = s.as_mut_ptr();
1588 /// assert!(!ptr.is_null());
1590 #[stable(feature = "rust1", since = "1.0.0")]
1592 pub fn is_null(self) -> bool {
1593 // Compare via a cast to a thin pointer, so fat pointers are only
1594 // considering their "data" part for null-ness.
1595 (self as *mut u8) == null_mut()
1598 /// Returns `None` if the pointer is null, or else returns a reference to
1599 /// the value wrapped in `Some`.
1603 /// While this method and its mutable counterpart are useful for
1604 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid memory.
1608 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1609 /// not necessarily reflect the actual lifetime of the data.
1616 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1619 /// if let Some(val_back) = ptr.as_ref() {
1620 /// println!("We got back the value: {}!", val_back);
1625 /// # Null-unchecked version
1627 /// If you are sure the pointer can never be null and are looking for some kind of
/// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1629 /// dereference the pointer directly.
1632 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1635 /// let val_back = &*ptr;
1636 /// println!("We got back the value: {}!", val_back);
1639 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1641 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1649 /// Calculates the offset from a pointer.
1651 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1652 /// offset of `3 * size_of::<T>()` bytes.
1656 /// If any of the following conditions are violated, the result is Undefined
1659 /// * Both the starting and resulting pointer must be either in bounds or one
1660 /// byte past the end of the same allocated object.
1662 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1664 /// * The offset being in bounds cannot rely on "wrapping around" the address
1665 /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1668 /// never reach a size where an offset is a concern. For instance, `Vec`
1669 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1670 /// `vec.as_ptr().add(vec.len())` is always safe.
1672 /// Most platforms fundamentally can't even construct such an allocation.
1673 /// For instance, no known 64-bit platform can ever serve a request
1674 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1675 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1676 /// more than `isize::MAX` bytes with things like Physical Address
1677 /// Extension. As such, memory acquired directly from allocators or memory
1678 /// mapped files *may* be too large to handle with this function.
1680 /// Consider using `wrapping_offset` instead if these constraints are
1681 /// difficult to satisfy. The only advantage of this method is that it
1682 /// enables more aggressive compiler optimizations.
1689 /// let mut s = [1, 2, 3];
1690 /// let ptr: *mut u32 = s.as_mut_ptr();
1693 /// println!("{}", *ptr.offset(1));
1694 /// println!("{}", *ptr.offset(2));
1697 #[stable(feature = "rust1", since = "1.0.0")]
1699 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1700 intrinsics::offset(self, count) as *mut T
1703 /// Calculates the offset from a pointer using wrapping arithmetic.
1704 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1705 /// offset of `3 * size_of::<T>()` bytes.
1709 /// The resulting pointer does not need to be in bounds, but it is
1710 /// potentially hazardous to dereference (which requires `unsafe`).
1711 /// In particular, the resulting pointer may *not* be used to access a
1712 /// different allocated object than the one `self` points to. In other
1713 /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1714 /// *not* the same as `y`, and dereferencing it is undefined behavior
1715 /// unless `x` and `y` point into the same allocated object.
1717 /// Always use `.offset(count)` instead when possible, because `offset`
1718 /// allows the compiler to optimize better. If you need to cross object
1719 /// boundaries, cast the pointer to an integer and do the arithmetic there.
1726 /// // Iterate using a raw pointer in increments of two elements
1727 /// let mut data = [1u8, 2, 3, 4, 5];
1728 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1730 /// let end_rounded_up = ptr.wrapping_offset(6);
1732 /// while ptr != end_rounded_up {
1736 /// ptr = ptr.wrapping_offset(step);
1738 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1740 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1742 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1744 intrinsics::arith_offset(self, count) as *mut T
1748 /// Returns `None` if the pointer is null, or else returns a mutable
1749 /// reference to the value wrapped in `Some`.
1753 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1754 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1755 /// returned is indeed a valid lifetime for the contained data.
1762 /// let mut s = [1, 2, 3];
1763 /// let ptr: *mut u32 = s.as_mut_ptr();
1764 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1765 /// *first_value = 4;
1766 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1768 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1770 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1778 /// Calculates the distance between two pointers. The returned value is in
1779 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1781 /// This function is the inverse of [`offset`].
1783 /// [`offset`]: #method.offset-1
1784 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
1788 /// If any of the following conditions are violated, the result is Undefined
1791 /// * Both the starting and other pointer must be either in bounds or one
1792 /// byte past the end of the same allocated object.
1794 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1796 /// * The distance between the pointers, in bytes, must be an exact multiple
1797 /// of the size of `T`.
1799 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1801 /// The compiler and standard library generally try to ensure allocations
1802 /// never reach a size where an offset is a concern. For instance, `Vec`
1803 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1804 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1806 /// Most platforms fundamentally can't even construct such an allocation.
1807 /// For instance, no known 64-bit platform can ever serve a request
1808 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1809 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1810 /// more than `isize::MAX` bytes with things like Physical Address
1811 /// Extension. As such, memory acquired directly from allocators or memory
1812 /// mapped files *may* be too large to handle with this function.
1814 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1815 /// difficult to satisfy. The only advantage of this method is that it
1816 /// enables more aggressive compiler optimizations.
1820 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1827 /// #![feature(ptr_offset_from)]
1829 /// let mut a = [0; 5];
1830 /// let ptr1: *mut i32 = &mut a[1];
1831 /// let ptr2: *mut i32 = &mut a[3];
1833 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1834 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1835 /// assert_eq!(ptr1.offset(2), ptr2);
1836 /// assert_eq!(ptr2.offset(-2), ptr1);
1839 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1841 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1842 (self as *const T).offset_from(origin)
1845 /// Calculates the distance between two pointers. The returned value is in
1846 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1848 /// If the address difference between the two pointers is not a multiple of
1849 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
1852 /// Though this method is safe for any two pointers, note that its result
1853 /// will be mostly useless if the two pointers aren't into the same allocated
1854 /// object, for example if they point to two different local variables.
1858 /// This function panics if `T` is a zero-sized type.
1865 /// #![feature(ptr_wrapping_offset_from)]
1867 /// let mut a = [0; 5];
1868 /// let ptr1: *mut i32 = &mut a[1];
1869 /// let ptr2: *mut i32 = &mut a[3];
1870 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1871 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1872 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1873 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1875 /// let ptr1: *mut i32 = 3 as _;
1876 /// let ptr2: *mut i32 = 13 as _;
1877 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1879 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1881 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1882 (self as *const T).wrapping_offset_from(origin)
1885 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1887 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1888 /// offset of `3 * size_of::<T>()` bytes.
1892 /// If any of the following conditions are violated, the result is Undefined Behavior:
1895 /// * Both the starting and resulting pointer must be either in bounds or one
1896 /// byte past the end of the same allocated object.
1898 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1900 /// * The offset being in bounds cannot rely on "wrapping around" the address
1901 /// space. That is, the infinite-precision sum must fit in a `usize`.
1903 /// The compiler and standard library generally try to ensure allocations
1904 /// never reach a size where an offset is a concern. For instance, `Vec`
1905 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1906 /// `vec.as_ptr().add(vec.len())` is always safe.
1908 /// Most platforms fundamentally can't even construct such an allocation.
1909 /// For instance, no known 64-bit platform can ever serve a request
1910 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1911 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1912 /// more than `isize::MAX` bytes with things like Physical Address
1913 /// Extension. As such, memory acquired directly from allocators or memory
1914 /// mapped files *may* be too large to handle with this function.
1916 /// Consider using `wrapping_offset` instead if these constraints are
1917 /// difficult to satisfy. The only advantage of this method is that it
1918 /// enables more aggressive compiler optimizations.
1925 /// let s: &str = "123";
1926 /// let ptr: *const u8 = s.as_ptr();
1929 /// println!("{}", *ptr.add(1) as char);
1930 /// println!("{}", *ptr.add(2) as char);
1933 #[stable(feature = "pointer_methods", since = "1.26.0")]
1935 pub unsafe fn add(self, count: usize) -> Self
1938 self.offset(count as isize)
1941 /// Calculates the offset from a pointer (convenience for
1942 /// `.offset((count as isize).wrapping_neg())`).
1944 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1945 /// offset of `3 * size_of::<T>()` bytes.
1949 /// If any of the following conditions are violated, the result is Undefined Behavior:
1952 /// * Both the starting and resulting pointer must be either in bounds or one
1953 /// byte past the end of the same allocated object.
1955 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1957 /// * The offset being in bounds cannot rely on "wrapping around" the address
1958 /// space. That is, the infinite-precision sum must fit in a `usize`.
1960 /// The compiler and standard library generally try to ensure allocations
1961 /// never reach a size where an offset is a concern. For instance, `Vec`
1962 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1963 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1965 /// Most platforms fundamentally can't even construct such an allocation.
1966 /// For instance, no known 64-bit platform can ever serve a request
1967 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1968 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1969 /// more than `isize::MAX` bytes with things like Physical Address
1970 /// Extension. As such, memory acquired directly from allocators or memory
1971 /// mapped files *may* be too large to handle with this function.
1973 /// Consider using `wrapping_offset` instead if these constraints are
1974 /// difficult to satisfy. The only advantage of this method is that it
1975 /// enables more aggressive compiler optimizations.
1982 /// let s: &str = "123";
1985 /// let end: *const u8 = s.as_ptr().add(3);
1986 /// println!("{}", *end.sub(1) as char);
1987 /// println!("{}", *end.sub(2) as char);
1990 #[stable(feature = "pointer_methods", since = "1.26.0")]
1992 pub unsafe fn sub(self, count: usize) -> Self
1995 self.offset((count as isize).wrapping_neg())
1998 /// Calculates the offset from a pointer using wrapping arithmetic.
1999 /// (convenience for `.wrapping_offset(count as isize)`)
2001 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
2002 /// offset of `3 * size_of::<T>()` bytes.
2006 /// The resulting pointer does not need to be in bounds, but it is
2007 /// potentially hazardous to dereference (which requires `unsafe`).
2009 /// Always use `.add(count)` instead when possible, because `add`
2010 /// allows the compiler to optimize better.
2017 /// // Iterate using a raw pointer in increments of two elements
2018 /// let data = [1u8, 2, 3, 4, 5];
2019 /// let mut ptr: *const u8 = data.as_ptr();
2021 /// let end_rounded_up = ptr.wrapping_add(6);
2023 /// // This loop prints "1, 3, 5, "
2024 /// while ptr != end_rounded_up {
2026 /// print!("{}, ", *ptr);
2028 /// ptr = ptr.wrapping_add(step);
2031 #[stable(feature = "pointer_methods", since = "1.26.0")]
2033 pub fn wrapping_add(self, count: usize) -> Self
2036 self.wrapping_offset(count as isize)
2039 /// Calculates the offset from a pointer using wrapping arithmetic.
2040 /// (convenience for `.wrapping_offset((count as isize).wrapping_sub())`)
2042 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
2043 /// offset of `3 * size_of::<T>()` bytes.
2047 /// The resulting pointer does not need to be in bounds, but it is
2048 /// potentially hazardous to dereference (which requires `unsafe`).
2050 /// Always use `.sub(count)` instead when possible, because `sub`
2051 /// allows the compiler to optimize better.
2058 /// // Iterate using a raw pointer in increments of two elements (backwards)
2059 /// let data = [1u8, 2, 3, 4, 5];
2060 /// let mut ptr: *const u8 = data.as_ptr();
2061 /// let start_rounded_down = ptr.wrapping_sub(2);
2062 /// ptr = ptr.wrapping_add(4);
2064 /// // This loop prints "5, 3, 1, "
2065 /// while ptr != start_rounded_down {
2067 /// print!("{}, ", *ptr);
2069 /// ptr = ptr.wrapping_sub(step);
2072 #[stable(feature = "pointer_methods", since = "1.26.0")]
2074 pub fn wrapping_sub(self, count: usize) -> Self
2077 self.wrapping_offset((count as isize).wrapping_neg())
2080 /// Reads the value from `self` without moving it. This leaves the
2081 /// memory in `self` unchanged.
2083 /// See [`ptr::read`] for safety concerns and examples.
2085 /// [`ptr::read`]: ./ptr/fn.read.html
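///
/// # Examples
///
/// A minimal usage sketch (the variable names here are illustrative only):
///
/// ```
/// let mut x = 12u32;
/// let ptr = &mut x as *mut u32;
///
/// assert_eq!(unsafe { ptr.read() }, 12);
/// ```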
2086 #[stable(feature = "pointer_methods", since = "1.26.0")]
2088 pub unsafe fn read(self) -> T
2094 /// Performs a volatile read of the value from `self` without moving it. This
2095 /// leaves the memory in `self` unchanged.
2097 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2098 /// to not be elided or reordered by the compiler across other volatile operations.
2101 /// See [`ptr::read_volatile`] for safety concerns and examples.
2103 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
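///
/// # Examples
///
/// A small sketch of a volatile read through a raw pointer (illustrative only;
/// real uses would typically target memory-mapped I/O):
///
/// ```
/// let mut x = 7u32;
/// let ptr = &mut x as *mut u32;
///
/// let value = unsafe { ptr.read_volatile() };
/// assert_eq!(value, 7);
/// ```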
2104 #[stable(feature = "pointer_methods", since = "1.26.0")]
2106 pub unsafe fn read_volatile(self) -> T
2112 /// Reads the value from `self` without moving it. This leaves the
2113 /// memory in `self` unchanged.
2115 /// Unlike `read`, the pointer may be unaligned.
2117 /// See [`ptr::read_unaligned`] for safety concerns and examples.
2119 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
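///
/// # Examples
///
/// A brief sketch of reading a `u32` from an unaligned location inside a byte
/// buffer (the buffer and offset are illustrative only):
///
/// ```
/// let mut data = [0u8; 8];
/// // Byte offset 1 is in bounds for a 4-byte read but not necessarily aligned for `u32`.
/// let unaligned = data[1..].as_mut_ptr() as *mut u32;
///
/// let value = unsafe { unaligned.read_unaligned() };
/// assert_eq!(value, 0);
/// ```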
2120 #[stable(feature = "pointer_methods", since = "1.26.0")]
2122 pub unsafe fn read_unaligned(self) -> T
2125 read_unaligned(self)
2128 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2129 /// and destination may overlap.
2131 /// NOTE: this has the *same* argument order as [`ptr::copy`].
2133 /// See [`ptr::copy`] for safety concerns and examples.
2135 /// [`ptr::copy`]: ./ptr/fn.copy.html
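///
/// # Examples
///
/// A minimal sketch copying four elements between two buffers (names are
/// illustrative only):
///
/// ```
/// let mut src = [1u8, 2, 3, 4];
/// let mut dst = [0u8; 4];
///
/// unsafe { src.as_mut_ptr().copy_to(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, [1, 2, 3, 4]);
/// ```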
2136 #[stable(feature = "pointer_methods", since = "1.26.0")]
2138 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
2141 copy(self, dest, count)
2144 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2145 /// and destination may *not* overlap.
2147 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
2149 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2151 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
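///
/// # Examples
///
/// A minimal sketch with two separate (and therefore non-overlapping) buffers
/// (names are illustrative only):
///
/// ```
/// let mut src = [5u8, 6, 7, 8];
/// let mut dst = [0u8; 4];
///
/// unsafe { src.as_mut_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, [5, 6, 7, 8]);
/// ```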
2152 #[stable(feature = "pointer_methods", since = "1.26.0")]
2154 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
2157 copy_nonoverlapping(self, dest, count)
2160 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2161 /// and destination may overlap.
2163 /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
2165 /// See [`ptr::copy`] for safety concerns and examples.
2167 /// [`ptr::copy`]: ./ptr/fn.copy.html
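///
/// # Examples
///
/// A minimal sketch copying into `self` from another buffer (names are
/// illustrative only):
///
/// ```
/// let src = [1u8, 2, 3, 4];
/// let mut dst = [0u8; 4];
///
/// unsafe { dst.as_mut_ptr().copy_from(src.as_ptr(), 4) };
/// assert_eq!(dst, [1, 2, 3, 4]);
/// ```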
2168 #[stable(feature = "pointer_methods", since = "1.26.0")]
2170 pub unsafe fn copy_from(self, src: *const T, count: usize)
2173 copy(src, self, count)
2176 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2177 /// and destination may *not* overlap.
2179 /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
2181 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2183 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
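///
/// # Examples
///
/// A minimal sketch with two separate buffers, so the regions cannot overlap
/// (names are illustrative only):
///
/// ```
/// let src = [9u8, 8, 7, 6];
/// let mut dst = [0u8; 4];
///
/// unsafe { dst.as_mut_ptr().copy_from_nonoverlapping(src.as_ptr(), 4) };
/// assert_eq!(dst, [9, 8, 7, 6]);
/// ```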
2184 #[stable(feature = "pointer_methods", since = "1.26.0")]
2186 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2189 copy_nonoverlapping(src, self, count)
2192 /// Executes the destructor (if any) of the pointed-to value.
2194 /// See [`ptr::drop_in_place`] for safety concerns and examples.
2196 /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html
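///
/// # Examples
///
/// A small sketch: dropping a value in place and then overwriting the slot
/// without dropping it a second time (the types here are illustrative only):
///
/// ```
/// let mut value = Some(Box::new(42));
/// let ptr: *mut Option<Box<i32>> = &mut value;
///
/// unsafe {
///     // Runs the destructor of the old value (freeing the `Box`).
///     ptr.drop_in_place();
///     // Overwrite the logically-dropped slot without reading or dropping it again.
///     ptr.write(None);
/// }
/// assert!(value.is_none());
/// ```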
2197 #[stable(feature = "pointer_methods", since = "1.26.0")]
2199 pub unsafe fn drop_in_place(self) {
2203 /// Overwrites a memory location with the given value without reading or
2204 /// dropping the old value.
2206 /// See [`ptr::write`] for safety concerns and examples.
2208 /// [`ptr::write`]: ./ptr/fn.write.html
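///
/// # Examples
///
/// A minimal usage sketch (names are illustrative only):
///
/// ```
/// let mut x = 0u32;
/// let ptr = &mut x as *mut u32;
///
/// unsafe { ptr.write(5) };
/// assert_eq!(x, 5);
/// ```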
2209 #[stable(feature = "pointer_methods", since = "1.26.0")]
2211 pub unsafe fn write(self, val: T)
2217 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2218 /// bytes of memory starting at `self` to `val`.
2220 /// See [`ptr::write_bytes`] for safety concerns and examples.
2222 /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html
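///
/// # Examples
///
/// A small sketch setting the first two `u32`s of a buffer to all-ones bytes
/// (the buffer is illustrative only):
///
/// ```
/// let mut buf = [0u32; 4];
///
/// // 2 elements * 4 bytes each = 8 bytes set to 0xff.
/// unsafe { buf.as_mut_ptr().write_bytes(0xff, 2) };
/// assert_eq!(buf, [u32::max_value(), u32::max_value(), 0, 0]);
/// ```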
2223 #[stable(feature = "pointer_methods", since = "1.26.0")]
2225 pub unsafe fn write_bytes(self, val: u8, count: usize)
2228 write_bytes(self, val, count)
2231 /// Performs a volatile write of a memory location with the given value without
2232 /// reading or dropping the old value.
2234 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2235 /// to not be elided or reordered by the compiler across other volatile operations.
2238 /// See [`ptr::write_volatile`] for safety concerns and examples.
2240 /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html
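///
/// # Examples
///
/// A small sketch of a volatile write followed by a volatile read (illustrative
/// only; real uses would typically target memory-mapped I/O):
///
/// ```
/// let mut x = 0u32;
/// let ptr = &mut x as *mut u32;
///
/// unsafe { ptr.write_volatile(42) };
/// assert_eq!(unsafe { ptr.read_volatile() }, 42);
/// ```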
2241 #[stable(feature = "pointer_methods", since = "1.26.0")]
2243 pub unsafe fn write_volatile(self, val: T)
2246 write_volatile(self, val)
2249 /// Overwrites a memory location with the given value without reading or
2250 /// dropping the old value.
2252 /// Unlike `write`, the pointer may be unaligned.
2254 /// See [`ptr::write_unaligned`] for safety concerns and examples.
2256 /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html
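///
/// # Examples
///
/// A brief sketch of writing a `u32` to an unaligned location inside a byte
/// buffer (the buffer and offset are illustrative only):
///
/// ```
/// let mut data = [0u8; 8];
/// let unaligned = data[1..].as_mut_ptr() as *mut u32;
///
/// unsafe { unaligned.write_unaligned(u32::max_value()) };
/// assert_eq!(&data[1..5], &[0xff, 0xff, 0xff, 0xff]);
/// ```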
2257 #[stable(feature = "pointer_methods", since = "1.26.0")]
2259 pub unsafe fn write_unaligned(self, val: T)
2262 write_unaligned(self, val)
2265 /// Replaces the value at `self` with `src`, returning the old
2266 /// value, without dropping either.
2268 /// See [`ptr::replace`] for safety concerns and examples.
2270 /// [`ptr::replace`]: ./ptr/fn.replace.html
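///
/// # Examples
///
/// A minimal usage sketch (names are illustrative only):
///
/// ```
/// let mut x = 1u32;
/// let ptr = &mut x as *mut u32;
///
/// let old = unsafe { ptr.replace(2) };
/// assert_eq!(old, 1);
/// assert_eq!(x, 2);
/// ```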
2271 #[stable(feature = "pointer_methods", since = "1.26.0")]
2273 pub unsafe fn replace(self, src: T) -> T
2279 /// Swaps the values at two mutable locations of the same type, without
2280 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2281 /// otherwise equivalent.
2283 /// See [`ptr::swap`] for safety concerns and examples.
2285 /// [`ptr::swap`]: ./ptr/fn.swap.html
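///
/// # Examples
///
/// A minimal usage sketch with two distinct locations (names are illustrative
/// only):
///
/// ```
/// let mut a = 1u32;
/// let mut b = 2u32;
///
/// unsafe { (&mut a as *mut u32).swap(&mut b as *mut u32) };
/// assert_eq!((a, b), (2, 1));
/// ```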
2286 #[stable(feature = "pointer_methods", since = "1.26.0")]
2288 pub unsafe fn swap(self, with: *mut T)
2294 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to `align`.
2297 /// If it is not possible to align the pointer, the implementation returns
2298 /// `usize::max_value()`.
2300 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
2301 /// used with the `offset` or `offset_to` methods.
2303 /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
2304 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
2305 /// the returned offset is correct in all terms other than alignment.
2309 /// The function panics if `align` is not a power-of-two.
2313 /// Accessing adjacent `u8` as `u16`
2316 /// # #![feature(align_offset)]
2317 /// # fn foo(n: usize) {
2318 /// # use std::mem::align_of;
2320 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
2321 /// let ptr = &x[n] as *const u8;
2322 /// let offset = ptr.align_offset(align_of::<u16>());
2323 /// if offset < x.len() - n - 1 {
2324 /// let u16_ptr = ptr.add(offset) as *const u16;
2325 /// assert_ne!(*u16_ptr, 500);
2327 /// // while the pointer can be aligned via `offset`, it would point
2328 /// // outside the allocation
2332 #[unstable(feature = "align_offset", issue = "44488")]
2333 pub fn align_offset(self, align: usize) -> usize where T: Sized {
2334 if !align.is_power_of_two() {
2335 panic!("align_offset: align is not a power-of-two");
2338 align_offset(self, align)
2343 /// Align pointer `p`.
2345 /// Calculates the offset (in terms of elements of size `stride`) that has to be applied
2346 /// to pointer `p` so that pointer `p` would get aligned to `a`.
2348 /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
2349 /// The only real change that can be made here is a change of `INV_TABLE_MOD_16` and associated constants.
2352 /// If we ever decide to make it possible to call the intrinsic with `a` that is not a
2353 /// power-of-two, it will probably be more prudent to just change to a naive implementation rather
2354 /// than trying to adapt this to accommodate that change.
2356 /// Any questions go to @nagisa.
2357 #[lang="align_offset"]
2358 pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
2359 /// Calculate multiplicative modular inverse of `x` modulo `m`.
2361 /// This implementation is tailored for `align_offset` and has the following preconditions:
2363 /// * `m` is a power-of-two;
2364 /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
2366 /// Implementation of this function shall not panic. Ever.
2368 fn mod_inv(x: usize, m: usize) -> usize {
2369 /// Multiplicative modular inverse table modulo 2⁴ = 16.
2371 /// Note that this table does not contain values where the inverse does not exist (i.e., for
2372 /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
2373 const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
2374 /// Modulo for which the `INV_TABLE_MOD_16` is intended.
2375 const INV_TABLE_MOD: usize = 16;
2377 const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
2379 let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
2380 if m <= INV_TABLE_MOD {
2381 table_inverse & (m - 1)
2383 // We iterate "up" using the following formula:
2385 // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
2387 // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
2388 let mut inverse = table_inverse;
2389 let mut going_mod = INV_TABLE_MOD_SQUARED;
2391 // y = y * (2 - xy) mod n
2393 // Note that we use wrapping operations here intentionally – the original formula
2394 // uses e.g. subtraction `mod n`. It is entirely fine to do them `mod
2395 // usize::max_value()` instead, because we take the result `mod n` at the end anyway.
2397 inverse = inverse.wrapping_mul(
2398 2usize.wrapping_sub(x.wrapping_mul(inverse))
2399 ) & (going_mod - 1);
2401 return inverse & (m - 1);
2403 going_mod = going_mod.wrapping_mul(going_mod);
2408 let stride = ::mem::size_of::<T>();
2409 let a_minus_one = a.wrapping_sub(1);
2410 let pmoda = p as usize & a_minus_one;
2413 // Already aligned. Yay!
2418 return if stride == 0 {
2419 // If the pointer is not aligned, and the element is zero-sized, then no amount of
2420 // elements will ever align the pointer.
2423 a.wrapping_sub(pmoda)
2427 let smoda = stride & a_minus_one;
2428 // a is power-of-two so cannot be 0. stride = 0 is handled above.
2429 let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
2430 let gcd = 1usize << gcdpow;
2432 if p as usize & (gcd - 1) == 0 {
2433 // This branch solves for the following linear congruence equation:
2435 // $$ p + so ≡ 0 mod a $$
2437 // $p$ here is the pointer value, $s$ – stride of `T`, $o$ offset in `T`s, and $a$ – the
2438 // requested alignment.
2441 // o = (a - (p mod a))/g * ((s/g)⁻¹ mod a)
2443 // The first term is “the relative alignment of p to a”, the second term is “how does
2444 // incrementing p by s bytes change the relative alignment of p”. Division by `g` is
2445 // necessary to make this equation well formed if $a$ and $s$ are not co-prime.
2447 // Furthermore, the result produced by this solution is not “minimal”, so it is necessary
2448 // to take the result $o mod lcm(s, a)$. We can replace $lcm(s, a)$ with just $a / g$.
2449 let j = a.wrapping_sub(pmoda) >> gcdpow;
2450 let k = smoda >> gcdpow;
2451 return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow);
2454 // Cannot be aligned at all.
2460 // Equality for pointers
2461 #[stable(feature = "rust1", since = "1.0.0")]
2462 impl<T: ?Sized> PartialEq for *const T {
2464 fn eq(&self, other: &*const T) -> bool { *self == *other }
2467 #[stable(feature = "rust1", since = "1.0.0")]
2468 impl<T: ?Sized> Eq for *const T {}
2470 #[stable(feature = "rust1", since = "1.0.0")]
2471 impl<T: ?Sized> PartialEq for *mut T {
2473 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2476 #[stable(feature = "rust1", since = "1.0.0")]
2477 impl<T: ?Sized> Eq for *mut T {}
2479 /// Compare raw pointers for equality.
2481 /// This is the same as using the `==` operator, but less generic:
2482 /// the arguments have to be `*const T` raw pointers,
2483 /// not anything that implements `PartialEq`.
2485 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2486 /// by their address rather than comparing the values they point to
2487 /// (which is what the `PartialEq for &T` implementation does).
2495 /// let other_five = 5;
2496 /// let five_ref = &five;
2497 /// let same_five_ref = &five;
2498 /// let other_five_ref = &other_five;
2500 /// assert!(five_ref == same_five_ref);
2501 /// assert!(five_ref == other_five_ref);
2503 /// assert!(ptr::eq(five_ref, same_five_ref));
2504 /// assert!(!ptr::eq(five_ref, other_five_ref));
2506 #[stable(feature = "ptr_eq", since = "1.17.0")]
2508 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2512 // Impls for function pointers
2513 macro_rules! fnptr_impls_safety_abi {
2514 ($FnTy: ty, $($Arg: ident),*) => {
2515 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2516 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2518 fn eq(&self, other: &Self) -> bool {
2519 *self as usize == *other as usize
2523 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2524 impl<Ret, $($Arg),*> Eq for $FnTy {}
2526 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2527 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2529 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2530 (*self as usize).partial_cmp(&(*other as usize))
2534 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2535 impl<Ret, $($Arg),*> Ord for $FnTy {
2537 fn cmp(&self, other: &Self) -> Ordering {
2538 (*self as usize).cmp(&(*other as usize))
2542 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2543 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2544 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2545 state.write_usize(*self as usize)
2549 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2550 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2551 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2552 fmt::Pointer::fmt(&(*self as *const ()), f)
2556 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2557 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2558 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2559 fmt::Pointer::fmt(&(*self as *const ()), f)
2565 macro_rules! fnptr_impls_args {
2566 ($($Arg: ident),+) => {
2567 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2568 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2569 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2570 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2571 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2572 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2575 // No variadic functions with 0 parameters
2576 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2577 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2578 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2579 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2583 fnptr_impls_args! { }
2584 fnptr_impls_args! { A }
2585 fnptr_impls_args! { A, B }
2586 fnptr_impls_args! { A, B, C }
2587 fnptr_impls_args! { A, B, C, D }
2588 fnptr_impls_args! { A, B, C, D, E }
2589 fnptr_impls_args! { A, B, C, D, E, F }
2590 fnptr_impls_args! { A, B, C, D, E, F, G }
2591 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2592 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2593 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2594 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2595 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2597 // Comparison for pointers
2598 #[stable(feature = "rust1", since = "1.0.0")]
2599 impl<T: ?Sized> Ord for *const T {
2601 fn cmp(&self, other: &*const T) -> Ordering {
2604 } else if self == other {
2612 #[stable(feature = "rust1", since = "1.0.0")]
2613 impl<T: ?Sized> PartialOrd for *const T {
2615 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2616 Some(self.cmp(other))
2620 fn lt(&self, other: &*const T) -> bool { *self < *other }
2623 fn le(&self, other: &*const T) -> bool { *self <= *other }
2626 fn gt(&self, other: &*const T) -> bool { *self > *other }
2629 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2632 #[stable(feature = "rust1", since = "1.0.0")]
2633 impl<T: ?Sized> Ord for *mut T {
2635 fn cmp(&self, other: &*mut T) -> Ordering {
2638 } else if self == other {
2646 #[stable(feature = "rust1", since = "1.0.0")]
2647 impl<T: ?Sized> PartialOrd for *mut T {
2649 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2650 Some(self.cmp(other))
2654 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2657 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2660 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2663 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2666 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2667 /// of this wrapper owns the referent. Useful for building abstractions like
2668 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2670 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2671 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2672 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2673 /// the referent of the pointer should not be modified without a unique path to
2674 /// its owning Unique.
2676 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2677 /// consider using `NonNull`, which has weaker semantics.
2679 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2680 /// is never dereferenced. This is so that enums may use this forbidden value
2681 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2682 /// However the pointer may still dangle if it isn't dereferenced.
2684 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2685 /// for any type which upholds Unique's aliasing requirements.
2686 #[unstable(feature = "ptr_internals", issue = "0",
2687 reason = "use NonNull instead and consider PhantomData<T> \
2688 (if you also use #[may_dangle]), Send, and/or Sync")]
2690 #[repr(transparent)]
2691 pub struct Unique<T: ?Sized> {
2692 pointer: NonZero<*const T>,
2693 // NOTE: this marker has no consequences for variance, but is necessary
2694 // for dropck to understand that we logically own a `T`.
2696 // For details, see:
2697 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2698 _marker: PhantomData<T>,
2701 #[unstable(feature = "ptr_internals", issue = "0")]
2702 impl<T: ?Sized> fmt::Debug for Unique<T> {
2703 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2704 fmt::Pointer::fmt(&self.as_ptr(), f)
2708 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2709 /// reference is unaliased. Note that this aliasing invariant is
2710 /// unenforced by the type system; the abstraction using the
2711 /// `Unique` must enforce it.
2712 #[unstable(feature = "ptr_internals", issue = "0")]
2713 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2715 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2716 /// reference is unaliased. Note that this aliasing invariant is
2717 /// unenforced by the type system; the abstraction using the
2718 /// `Unique` must enforce it.
2719 #[unstable(feature = "ptr_internals", issue = "0")]
2720 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2722 #[unstable(feature = "ptr_internals", issue = "0")]
2723 impl<T: Sized> Unique<T> {
2724 /// Creates a new `Unique` that is dangling, but well-aligned.
2726 /// This is useful for initializing types which lazily allocate, like
2727 /// `Vec::new` does.
2729 /// Note that the pointer value may potentially represent a valid pointer to
2730 /// a `T`, which means this must not be used as a "not yet initialized"
2731 /// sentinel value. Types that lazily allocate must track initialization by
2732 /// some other means.
2733 // FIXME: rename to dangling() to match NonNull?
2734 pub const fn empty() -> Self {
2736 Unique::new_unchecked(mem::align_of::<T>() as *mut T)
2741 #[unstable(feature = "ptr_internals", issue = "0")]
2742 impl<T: ?Sized> Unique<T> {
2743 /// Creates a new `Unique`.
2747 /// `ptr` must be non-null.
2748 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2749 Unique { pointer: NonZero(ptr as _), _marker: PhantomData }
2752 /// Creates a new `Unique` if `ptr` is non-null.
2753 pub fn new(ptr: *mut T) -> Option<Self> {
2755 Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData })
2761 /// Acquires the underlying `*mut` pointer.
2762 pub fn as_ptr(self) -> *mut T {
2763 self.pointer.0 as *mut T
2766 /// Dereferences the content.
2768 /// The resulting lifetime is bound to self so this behaves "as if"
2769 /// it were actually an instance of T that is getting borrowed. If a longer
2770 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2771 pub unsafe fn as_ref(&self) -> &T {
2775 /// Mutably dereferences the content.
2777 /// The resulting lifetime is bound to self so this behaves "as if"
2778 /// it were actually an instance of T that is getting borrowed. If a longer
2779 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2780 pub unsafe fn as_mut(&mut self) -> &mut T {
2785 #[unstable(feature = "ptr_internals", issue = "0")]
2786 impl<T: ?Sized> Clone for Unique<T> {
2787 fn clone(&self) -> Self {
2792 #[unstable(feature = "ptr_internals", issue = "0")]
2793 impl<T: ?Sized> Copy for Unique<T> { }
2795 #[unstable(feature = "ptr_internals", issue = "0")]
2796 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2798 #[unstable(feature = "ptr_internals", issue = "0")]
2799 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2800 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2801 fmt::Pointer::fmt(&self.as_ptr(), f)
2805 #[unstable(feature = "ptr_internals", issue = "0")]
2806 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2807 fn from(reference: &'a mut T) -> Self {
2808 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2812 #[unstable(feature = "ptr_internals", issue = "0")]
2813 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2814 fn from(reference: &'a T) -> Self {
2815 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2819 #[unstable(feature = "ptr_internals", issue = "0")]
2820 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2821 fn from(p: NonNull<T>) -> Self {
2822 Unique { pointer: p.pointer, _marker: PhantomData }
2826 /// `*mut T` but non-zero and covariant.
2828 /// This is often the correct thing to use when building data structures using
2829 /// raw pointers, but is ultimately more dangerous to use because of its additional
2830 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2832 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2833 /// is never dereferenced. This is so that enums may use this forbidden value
2834 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
2835 /// However the pointer may still dangle if it isn't dereferenced.
2837 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2838 /// for your use case, you should include some PhantomData in your type to
2839 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2840 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2841 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2842 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
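///
/// # Examples
///
/// A small sketch (names illustrative only) of wrapping a raw pointer and
/// mutating through it:
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut value = 0u32;
/// let mut ptr = NonNull::new(&mut value as *mut u32).expect("pointer must not be null");
///
/// unsafe { *ptr.as_mut() = 10 };
/// assert_eq!(value, 10);
/// ```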
2843 #[stable(feature = "nonnull", since = "1.25.0")]
2844 #[repr(transparent)]
2845 pub struct NonNull<T: ?Sized> {
2846 pointer: NonZero<*const T>,
2849 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2850 // NB: This impl is unnecessary, but should provide better error messages.
2851 #[stable(feature = "nonnull", since = "1.25.0")]
2852 impl<T: ?Sized> !Send for NonNull<T> { }
2854 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2855 // NB: This impl is unnecessary, but should provide better error messages.
2856 #[stable(feature = "nonnull", since = "1.25.0")]
2857 impl<T: ?Sized> !Sync for NonNull<T> { }
2859 impl<T: Sized> NonNull<T> {
2860 /// Creates a new `NonNull` that is dangling, but well-aligned.
2862 /// This is useful for initializing types which lazily allocate, like
2863 /// `Vec::new` does.
2865 /// Note that the pointer value may potentially represent a valid pointer to
2866 /// a `T`, which means this must not be used as a "not yet initialized"
2867 /// sentinel value. Types that lazily allocate must track initialization by
2868 /// some other means.
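///
/// # Examples
///
/// A small sketch (illustrative only): the pointer is non-null and well-aligned,
/// but must not be dereferenced.
///
/// ```
/// use std::mem;
/// use std::ptr::NonNull;
///
/// let ptr = NonNull::<u32>::dangling();
/// assert_eq!(ptr.as_ptr() as usize % mem::align_of::<u32>(), 0);
/// ```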
2869 #[stable(feature = "nonnull", since = "1.25.0")]
2871 pub fn dangling() -> Self {
2873 let ptr = mem::align_of::<T>() as *mut T;
2874 NonNull::new_unchecked(ptr)
2879 impl<T: ?Sized> NonNull<T> {
2880 /// Creates a new `NonNull`.
2884 /// `ptr` must be non-null.
2885 #[stable(feature = "nonnull", since = "1.25.0")]
2887 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2888 NonNull { pointer: NonZero(ptr as _) }
2891 /// Creates a new `NonNull` if `ptr` is non-null.
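///
/// # Examples
///
/// A minimal sketch (names illustrative only): a non-null pointer produces
/// `Some`, the null pointer produces `None`.
///
/// ```
/// use std::ptr::{self, NonNull};
///
/// let mut x = 0u32;
/// assert!(NonNull::new(&mut x as *mut u32).is_some());
/// assert!(NonNull::new(ptr::null_mut::<u32>()).is_none());
/// ```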
2892 #[stable(feature = "nonnull", since = "1.25.0")]
2894 pub fn new(ptr: *mut T) -> Option<Self> {
2896 Some(NonNull { pointer: NonZero(ptr as _) })
2902 /// Acquires the underlying `*mut` pointer.
2903 #[stable(feature = "nonnull", since = "1.25.0")]
2905 pub fn as_ptr(self) -> *mut T {
2906 self.pointer.0 as *mut T
2909 /// Dereferences the content.
2911 /// The resulting lifetime is bound to self so this behaves "as if"
2912 /// it were actually an instance of T that is getting borrowed. If a longer
2913 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2914 #[stable(feature = "nonnull", since = "1.25.0")]
2916 pub unsafe fn as_ref(&self) -> &T {
2920 /// Mutably dereferences the content.
2922 /// The resulting lifetime is bound to self so this behaves "as if"
2923 /// it were actually an instance of T that is getting borrowed. If a longer
2924 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2925 #[stable(feature = "nonnull", since = "1.25.0")]
2927 pub unsafe fn as_mut(&mut self) -> &mut T {
2931 /// Cast to a pointer of another type
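///
/// # Examples
///
/// A brief sketch (names illustrative only): viewing a `NonNull<u32>` as a
/// `NonNull<u8>` with the same address.
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut x = 0u32;
/// let ptr = NonNull::new(&mut x as *mut u32).unwrap();
/// let byte_ptr: NonNull<u8> = ptr.cast();
///
/// assert_eq!(byte_ptr.as_ptr() as usize, ptr.as_ptr() as usize);
/// ```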
2932 #[stable(feature = "nonnull_cast", since = "1.27.0")]
2934 pub fn cast<U>(self) -> NonNull<U> {
2936 NonNull::new_unchecked(self.as_ptr() as *mut U)
2941 #[stable(feature = "nonnull", since = "1.25.0")]
2942 impl<T: ?Sized> Clone for NonNull<T> {
2943 fn clone(&self) -> Self {
2948 #[stable(feature = "nonnull", since = "1.25.0")]
2949 impl<T: ?Sized> Copy for NonNull<T> { }
2951 #[unstable(feature = "coerce_unsized", issue = "27732")]
2952 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2954 #[stable(feature = "nonnull", since = "1.25.0")]
2955 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2956 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2957 fmt::Pointer::fmt(&self.as_ptr(), f)
2961 #[stable(feature = "nonnull", since = "1.25.0")]
2962 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2963 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2964 fmt::Pointer::fmt(&self.as_ptr(), f)
2968 #[stable(feature = "nonnull", since = "1.25.0")]
2969 impl<T: ?Sized> Eq for NonNull<T> {}
2971 #[stable(feature = "nonnull", since = "1.25.0")]
2972 impl<T: ?Sized> PartialEq for NonNull<T> {
2974 fn eq(&self, other: &Self) -> bool {
2975 self.as_ptr() == other.as_ptr()
2979 #[stable(feature = "nonnull", since = "1.25.0")]
2980 impl<T: ?Sized> Ord for NonNull<T> {
2982 fn cmp(&self, other: &Self) -> Ordering {
2983 self.as_ptr().cmp(&other.as_ptr())
2987 #[stable(feature = "nonnull", since = "1.25.0")]
2988 impl<T: ?Sized> PartialOrd for NonNull<T> {
2990 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2991 self.as_ptr().partial_cmp(&other.as_ptr())
2995 #[stable(feature = "nonnull", since = "1.25.0")]
2996 impl<T: ?Sized> hash::Hash for NonNull<T> {
2998 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2999 self.as_ptr().hash(state)
3003 #[unstable(feature = "ptr_internals", issue = "0")]
3004 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
3006 fn from(unique: Unique<T>) -> Self {
3007 NonNull { pointer: unique.pointer }
3011 #[stable(feature = "nonnull", since = "1.25.0")]
3012 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
3014 fn from(reference: &'a mut T) -> Self {
3015 NonNull { pointer: NonZero(reference as _) }
3019 #[stable(feature = "nonnull", since = "1.25.0")]
3020 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
3022 fn from(reference: &'a T) -> Self {
3023 NonNull { pointer: NonZero(reference as _) }