1 //! Manually manage memory through raw pointers.
3 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
7 //! Many functions in this module take raw pointers as arguments and read from
8 //! or write to them. For this to be safe, these pointers must be *valid*.
9 //! Whether a pointer is valid depends on the operation it is used for
10 //! (read or write), and the extent of the memory that is accessed (i.e.,
11 //! how many bytes are read/written). Most functions use `*mut T` and `*const T`
12 //! to access only a single value, in which case the documentation omits the size
13 //! and implicitly assumes it to be `size_of::<T>()` bytes.
15 //! The precise rules for validity are not determined yet. The guarantees that are
16 //! provided at this point are very minimal:
18 //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
//! * All pointers (except for the null pointer) are valid for all operations of
//!   [size zero][zst].
21 //! * All accesses performed by functions in this module are *non-atomic* in the sense
22 //! of [atomic operations] used to synchronize between threads. This means it is
23 //! undefined behavior to perform two concurrent accesses to the same location from different
24 //! threads unless both accesses only read from memory. Notice that this explicitly
25 //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
26 //! be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//!   underlying object is live and no reference (only raw pointers) is used to
//!   access the same memory.
31 //! These axioms, along with careful use of [`offset`] for pointer arithmetic,
32 //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
33 //! will be provided eventually, as the [aliasing] rules are being determined. For more
34 //! information, see the [book] as well as the section in the reference devoted
35 //! to [undefined behavior][ub].
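//!
//! For illustration, a minimal sketch that stays within these guarantees: the pointer
//! is derived from a live reference, the access is single-threaded, and no reference
//! is used to touch the memory while the raw pointer is in use.
//!
//! ```
//! use std::ptr;
//!
//! let value = 42u32;
//! let p = &value as *const u32; // valid for reads of `size_of::<u32>()` bytes
//! let copy = unsafe { ptr::read(p) };
//! assert_eq!(copy, 42);
//! ```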
39 //! Valid raw pointers as defined above are not necessarily properly aligned (where
40 //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
41 //! aligned to `mem::align_of::<T>()`). However, most functions require their
42 //! arguments to be properly aligned, and will explicitly state
43 //! this requirement in their documentation. Notable exceptions to this are
44 //! [`read_unaligned`] and [`write_unaligned`].
46 //! When a function requires proper alignment, it does so even if the access
47 //! has size 0, i.e., even if memory is not actually touched. Consider using
48 //! [`NonNull::dangling`] in such cases.
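//!
//! For example, even a zero-sized access needs a non-null, well-aligned pointer; a small
//! sketch using [`NonNull::dangling`] (no memory is actually touched here):
//!
//! ```
//! use std::ptr::{self, NonNull};
//!
//! let p: *mut u8 = NonNull::<u8>::dangling().as_ptr();
//! // A zero-length write is allowed because the pointer is non-null and aligned.
//! unsafe { ptr::write_bytes(p, 0, 0) };
//! ```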
50 //! [aliasing]: ../../nomicon/aliasing.html
51 //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
52 //! [ub]: ../../reference/behavior-considered-undefined.html
53 //! [null]: ./fn.null.html
54 //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
55 //! [atomic operations]: ../../std/sync/atomic/index.html
56 //! [`copy`]: ../../std/ptr/fn.copy.html
57 //! [`offset`]: ../../std/primitive.pointer.html#method.offset
58 //! [`read_unaligned`]: ./fn.read_unaligned.html
59 //! [`write_unaligned`]: ./fn.write_unaligned.html
60 //! [`read_volatile`]: ./fn.read_volatile.html
61 //! [`write_volatile`]: ./fn.write_volatile.html
62 //! [`NonNull::dangling`]: ./struct.NonNull.html#method.dangling
64 #![stable(feature = "rust1", since = "1.0.0")]
68 use ops::{CoerceUnsized, DispatchFromDyn};
71 use marker::{PhantomData, Unsize};
72 use mem::{self, MaybeUninit};
74 use cmp::Ordering::{self, Less, Equal, Greater};
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub use intrinsics::copy_nonoverlapping;
79 #[stable(feature = "rust1", since = "1.0.0")]
80 pub use intrinsics::copy;
82 #[stable(feature = "rust1", since = "1.0.0")]
83 pub use intrinsics::write_bytes;
85 /// Executes the destructor (if any) of the pointed-to value.
87 /// This is semantically equivalent to calling [`ptr::read`] and discarding
88 /// the result, but has the following advantages:
90 /// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
94 /// * It is friendlier to the optimizer to do this over [`ptr::read`] when
95 /// dropping manually allocated memory (e.g., when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
99 /// [`ptr::read`]: ../ptr/fn.read.html
103 /// Behavior is undefined if any of the following conditions are violated:
105 /// * `to_drop` must be [valid] for reads.
107 /// * `to_drop` must be properly aligned. See the example below for how to drop
108 /// an unaligned pointer.
110 /// Additionally, if `T` is not [`Copy`], using the pointed-to value after
111 /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
112 /// foo` counts as a use because it will cause the value to be dropped
/// again. [`write`] can be used to overwrite data without causing it to be
/// dropped.
116 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
118 /// [valid]: ../ptr/index.html#safety
119 /// [`Copy`]: ../marker/trait.Copy.html
120 /// [`write`]: ../ptr/fn.write.html
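///
/// For instance, a minimal sketch of dropping a value in place and then re-initializing
/// it with [`write`] (assuming the pointer is valid and properly aligned):
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("hello");
/// let p: *mut String = &mut s;
/// unsafe {
///     ptr::drop_in_place(p);
///     // The old value has been dropped; write a new one without reading the old bytes.
///     ptr::write(p, String::from("world"));
/// }
/// assert_eq!(s, "world");
/// ```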
124 /// Manually remove the last item from a vector:
130 /// let last = Rc::new(1);
131 /// let weak = Rc::downgrade(&last);
133 /// let mut v = vec![Rc::new(0), last];
136 /// // Get a raw pointer to the last element in `v`.
137 /// let ptr = &mut v[1] as *mut _;
138 /// // Shorten `v` to prevent the last item from being dropped. We do that first,
/// // to prevent issues if the `drop_in_place` below panics.
/// v.set_len(1);
/// // Without a call to `drop_in_place`, the last item would never be dropped,
142 /// // and the memory it manages would be leaked.
143 /// ptr::drop_in_place(ptr);
146 /// assert_eq!(v, &[0.into()]);
148 /// // Ensure that the last item was dropped.
149 /// assert!(weak.upgrade().is_none());
/// Unaligned values cannot be dropped in place; they must first be copied to an aligned
/// location:
158 /// unsafe fn drop_after_copy<T>(to_drop: *mut T) {
159 /// let mut copy: T = mem::uninitialized();
160 /// ptr::copy(to_drop, &mut copy, 1);
164 /// #[repr(packed, C)]
167 /// unaligned: Vec<i32>,
170 /// let mut p = Packed { _padding: 0, unaligned: vec![42] };
172 /// drop_after_copy(&mut p.unaligned as *mut _);
177 /// Notice that the compiler performs this copy automatically when dropping packed structs,
/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
/// manually.
180 #[stable(feature = "drop_in_place", since = "1.8.0")]
182 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
183 real_drop_in_place(&mut *to_drop)
186 // The real `drop_in_place` -- the one that gets called implicitly when variables go
187 // out of scope -- should have a safe reference and not a raw pointer as argument
188 // type. When we drop a local variable, we access it with a pointer that behaves
189 // like a safe reference; transmuting that to a raw pointer does not mean we can
190 // actually access it with raw pointers.
191 #[lang = "drop_in_place"]
192 #[allow(unconditional_recursion)]
193 unsafe fn real_drop_in_place<T: ?Sized>(to_drop: &mut T) {
194 // Code here does not matter - this is replaced by the
195 // real drop glue by the compiler.
196 real_drop_in_place(to_drop)
199 /// Creates a null raw pointer.
206 /// let p: *const i32 = ptr::null();
207 /// assert!(p.is_null());
210 #[stable(feature = "rust1", since = "1.0.0")]
212 pub const fn null<T>() -> *const T { 0 as *const T }
214 /// Creates a null mutable raw pointer.
221 /// let p: *mut i32 = ptr::null_mut();
222 /// assert!(p.is_null());
225 #[stable(feature = "rust1", since = "1.0.0")]
227 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
229 /// Swaps the values at two mutable locations of the same type, without
230 /// deinitializing either.
/// Aside from the following two exceptions, this function is semantically
/// equivalent to [`mem::swap`]:
235 /// * It operates on raw pointers instead of references. When references are
236 /// available, [`mem::swap`] should be preferred.
238 /// * The two pointed-to values may overlap. If the values do overlap, then the
239 /// overlapping region of memory from `x` will be used. This is demonstrated
240 /// in the second example below.
242 /// [`mem::swap`]: ../mem/fn.swap.html
246 /// Behavior is undefined if any of the following conditions are violated:
248 /// * Both `x` and `y` must be [valid] for reads and writes.
250 /// * Both `x` and `y` must be properly aligned.
252 /// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
254 /// [valid]: ../ptr/index.html#safety
258 /// Swapping two non-overlapping regions:
263 /// let mut array = [0, 1, 2, 3];
265 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
266 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
274 /// Swapping two overlapping regions:
279 /// let mut array = [0, 1, 2, 3];
281 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
282 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
/// unsafe {
///     ptr::swap(x, y);
///     // The indices `1..3` of the slice overlap between `x` and `y`.
///     // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
///     // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
///     // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
///     // This implementation is defined to make the latter choice.
///     assert_eq!([1, 0, 1, 2], array);
/// }
295 #[stable(feature = "rust1", since = "1.0.0")]
296 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
297 // Give ourselves some scratch space to work with.
298 // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
299 let mut tmp = MaybeUninit::<T>::uninitialized();
302 copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
303 copy(y, x, 1); // `x` and `y` may overlap
304 copy_nonoverlapping(tmp.as_ptr(), y, 1);
307 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
308 /// beginning at `x` and `y`. The two regions must *not* overlap.
312 /// Behavior is undefined if any of the following conditions are violated:
314 /// * Both `x` and `y` must be [valid] for reads and writes of `count *
315 /// size_of::<T>()` bytes.
317 /// * Both `x` and `y` must be properly aligned.
319 /// * The region of memory beginning at `x` with a size of `count *
320 /// size_of::<T>()` bytes must *not* overlap with the region of memory
321 /// beginning at `y` with the same size.
323 /// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
324 /// the pointers must be non-NULL and properly aligned.
326 /// [valid]: ../ptr/index.html#safety
335 /// let mut x = [1, 2, 3, 4];
336 /// let mut y = [7, 8, 9];
339 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
342 /// assert_eq!(x, [7, 8, 3, 4]);
343 /// assert_eq!(y, [1, 2, 9]);
346 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
347 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
348 let x = x as *mut u8;
349 let y = y as *mut u8;
350 let len = mem::size_of::<T>() * count;
351 swap_nonoverlapping_bytes(x, y, len)
355 pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
356 // For types smaller than the block optimization below,
357 // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        let z = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, z);
    } else {
        swap_nonoverlapping(x, y, 1);
    }
}
368 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
369 // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
370 // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
371 // Haswell E processors. LLVM is more able to optimize if we give a struct a
372 // #[repr(simd)], even if we don't actually use this struct directly.
374 // FIXME repr(simd) broken on emscripten and redox
375 // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
380 struct UnalignedBlock(u64, u64, u64, u64);
382 let block_size = mem::size_of::<Block>();
384 // Loop through x & y, copying them `Block` at a time
385 // The optimizer should unroll the loop fully for most types
386 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
389 // Create some uninitialized memory as scratch space
390 // Declaring `t` here avoids aligning the stack when this loop is unused
391 let mut t = mem::MaybeUninit::<Block>::uninitialized();
        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }
    if i < len {
        // Swap any remaining bytes
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninitialized();
        let rem = len - i;
        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}
/// Moves `src` into the location pointed to by `dst`, returning the previous `dst` value.
421 /// Neither value is dropped.
423 /// This function is semantically equivalent to [`mem::replace`] except that it
424 /// operates on raw pointers instead of references. When references are
425 /// available, [`mem::replace`] should be preferred.
427 /// [`mem::replace`]: ../mem/fn.replace.html
431 /// Behavior is undefined if any of the following conditions are violated:
433 /// * `dst` must be [valid] for writes.
435 /// * `dst` must be properly aligned.
437 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
439 /// [valid]: ../ptr/index.html#safety
446 /// let mut rust = vec!['b', 'u', 's', 't'];
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
454 /// assert_eq!(b, 'b');
455 /// assert_eq!(rust, &['r', 'u', 's', 't']);
458 #[stable(feature = "rust1", since = "1.0.0")]
459 pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    mem::swap(&mut *dst, &mut src); // cannot overlap
    src
}
464 /// Reads the value from `src` without moving it. This leaves the
465 /// memory in `src` unchanged.
469 /// Behavior is undefined if any of the following conditions are violated:
471 /// * `src` must be [valid] for reads.
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
476 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
/// let x = 12;
/// let y = &x as *const i32;
487 /// assert_eq!(std::ptr::read(y), 12);
491 /// Manually implement [`mem::swap`]:
496 /// fn swap<T>(a: &mut T, b: &mut T) {
498 /// // Create a bitwise copy of the value at `a` in `tmp`.
499 /// let tmp = ptr::read(a);
501 /// // Exiting at this point (either by explicitly returning or by
502 /// // calling a function which panics) would cause the value in `tmp` to
503 /// // be dropped while the same value is still referenced by `a`. This
504 /// // could trigger undefined behavior if `T` is not `Copy`.
506 /// // Create a bitwise copy of the value at `b` in `a`.
507 /// // This is safe because mutable references cannot alias.
508 /// ptr::copy_nonoverlapping(b, a, 1);
510 /// // As above, exiting here could trigger undefined behavior because
511 /// // the same value is referenced by `a` and `b`.
513 /// // Move `tmp` into `b`.
514 /// ptr::write(b, tmp);
516 /// // `tmp` has been moved (`write` takes ownership of its second argument),
517 /// // so nothing is dropped implicitly here.
521 /// let mut foo = "foo".to_owned();
522 /// let mut bar = "bar".to_owned();
524 /// swap(&mut foo, &mut bar);
526 /// assert_eq!(foo, "bar");
527 /// assert_eq!(bar, "foo");
530 /// ## Ownership of the Returned Value
532 /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
533 /// If `T` is not [`Copy`], using both the returned value and the value at
534 /// `*src` can violate memory safety. Note that assigning to `*src` counts as a
535 /// use because it will attempt to drop the value at `*src`.
537 /// [`write`] can be used to overwrite data without causing it to be dropped.
542 /// let mut s = String::from("foo");
544 /// // `s2` now points to the same underlying memory as `s`.
545 /// let mut s2: String = ptr::read(&s);
547 /// assert_eq!(s2, "foo");
549 /// // Assigning to `s2` causes its original value to be dropped. Beyond
/// // this point, `s` must no longer be used, as the underlying memory has
/// // been freed.
552 /// s2 = String::default();
553 /// assert_eq!(s2, "");
555 /// // Assigning to `s` would cause the old value to be dropped again,
556 /// // resulting in undefined behavior.
557 /// // s = String::from("bar"); // ERROR
559 /// // `ptr::write` can be used to overwrite a value without dropping it.
560 /// ptr::write(&mut s, String::from("bar"));
563 /// assert_eq!(s, "bar");
566 /// [`mem::swap`]: ../mem/fn.swap.html
567 /// [valid]: ../ptr/index.html#safety
568 /// [`Copy`]: ../marker/trait.Copy.html
569 /// [`read_unaligned`]: ./fn.read_unaligned.html
570 /// [`write`]: ./fn.write.html
572 #[stable(feature = "rust1", since = "1.0.0")]
573 pub unsafe fn read<T>(src: *const T) -> T {
574 let mut tmp = MaybeUninit::<T>::uninitialized();
575 copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
576 tmp.into_initialized()
579 /// Reads the value from `src` without moving it. This leaves the
580 /// memory in `src` unchanged.
582 /// Unlike [`read`], `read_unaligned` works with unaligned pointers.
586 /// Behavior is undefined if any of the following conditions are violated:
588 /// * `src` must be [valid] for reads.
590 /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
591 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
592 /// value and the value at `*src` can [violate memory safety][read-ownership].
594 /// Note that even if `T` has size `0`, the pointer must be non-NULL.
596 /// [`Copy`]: ../marker/trait.Copy.html
597 /// [`read`]: ./fn.read.html
598 /// [`write_unaligned`]: ./fn.write_unaligned.html
599 /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
600 /// [valid]: ../ptr/index.html#safety
604 /// Access members of a packed struct by reference:
609 /// #[repr(packed, C)]
617 /// unaligned: 0x01020304,
621 /// // Take the address of a 32-bit integer which is not aligned.
622 /// // This must be done as a raw pointer; unaligned references are invalid.
623 /// let unaligned = &x.unaligned as *const u32;
625 /// // Dereferencing normally will emit an aligned load instruction,
626 /// // causing undefined behavior.
627 /// // let v = *unaligned; // ERROR
629 /// // Instead, use `read_unaligned` to read improperly aligned values.
630 /// let v = ptr::read_unaligned(unaligned);
635 /// // Accessing unaligned values directly is safe.
636 /// assert!(x.unaligned == v);
639 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
640 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
641 let mut tmp = MaybeUninit::<T>::uninitialized();
642 copy_nonoverlapping(src as *const u8,
643 tmp.as_mut_ptr() as *mut u8,
644 mem::size_of::<T>());
645 tmp.into_initialized()
648 /// Overwrites a memory location with the given value without reading or
649 /// dropping the old value.
651 /// `write` does not drop the contents of `dst`. This is safe, but it could leak
652 /// allocations or resources, so care should be taken not to overwrite an object
653 /// that should be dropped.
655 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
656 /// location pointed to by `dst`.
658 /// This is appropriate for initializing uninitialized memory, or overwriting
659 /// memory that has previously been [`read`] from.
661 /// [`read`]: ./fn.read.html
665 /// Behavior is undefined if any of the following conditions are violated:
667 /// * `dst` must be [valid] for writes.
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
672 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
674 /// [valid]: ../ptr/index.html#safety
675 /// [`write_unaligned`]: ./fn.write_unaligned.html
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// std::ptr::write(y, z);
/// assert_eq!(std::ptr::read(y), 12);
692 /// Manually implement [`mem::swap`]:
697 /// fn swap<T>(a: &mut T, b: &mut T) {
699 /// // Create a bitwise copy of the value at `a` in `tmp`.
700 /// let tmp = ptr::read(a);
702 /// // Exiting at this point (either by explicitly returning or by
703 /// // calling a function which panics) would cause the value in `tmp` to
704 /// // be dropped while the same value is still referenced by `a`. This
705 /// // could trigger undefined behavior if `T` is not `Copy`.
707 /// // Create a bitwise copy of the value at `b` in `a`.
708 /// // This is safe because mutable references cannot alias.
709 /// ptr::copy_nonoverlapping(b, a, 1);
711 /// // As above, exiting here could trigger undefined behavior because
712 /// // the same value is referenced by `a` and `b`.
714 /// // Move `tmp` into `b`.
715 /// ptr::write(b, tmp);
717 /// // `tmp` has been moved (`write` takes ownership of its second argument),
718 /// // so nothing is dropped implicitly here.
722 /// let mut foo = "foo".to_owned();
723 /// let mut bar = "bar".to_owned();
725 /// swap(&mut foo, &mut bar);
727 /// assert_eq!(foo, "bar");
728 /// assert_eq!(bar, "foo");
731 /// [`mem::swap`]: ../mem/fn.swap.html
733 #[stable(feature = "rust1", since = "1.0.0")]
734 pub unsafe fn write<T>(dst: *mut T, src: T) {
735 intrinsics::move_val_init(&mut *dst, src)
738 /// Overwrites a memory location with the given value without reading or
739 /// dropping the old value.
741 /// Unlike [`write`], the pointer may be unaligned.
743 /// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
744 /// could leak allocations or resources, so care should be taken not to overwrite
745 /// an object that should be dropped.
747 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
748 /// location pointed to by `dst`.
750 /// This is appropriate for initializing uninitialized memory, or overwriting
751 /// memory that has previously been read with [`read_unaligned`].
753 /// [`write`]: ./fn.write.html
754 /// [`read_unaligned`]: ./fn.read_unaligned.html
758 /// Behavior is undefined if any of the following conditions are violated:
760 /// * `dst` must be [valid] for writes.
762 /// Note that even if `T` has size `0`, the pointer must be non-NULL.
764 /// [valid]: ../ptr/index.html#safety
768 /// Access fields in a packed struct:
771 /// use std::{mem, ptr};
773 /// #[repr(packed, C)]
774 /// #[derive(Default)]
780 /// let v = 0x01020304;
781 /// let mut x: Packed = unsafe { mem::zeroed() };
/// // Take the address of a 32-bit integer which is not aligned.
/// // This must be done as a raw pointer; unaligned references are invalid.
785 /// let unaligned = &mut x.unaligned as *mut u32;
787 /// // Dereferencing normally will emit an aligned store instruction,
788 /// // causing undefined behavior because the pointer is not aligned.
789 /// // *unaligned = v; // ERROR
791 /// // Instead, use `write_unaligned` to write improperly aligned values.
792 /// ptr::write_unaligned(unaligned, v);
795 /// // Accessing unaligned values directly is safe.
796 /// assert!(x.unaligned == v);
799 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
800 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    mem::forget(src);
}
807 /// Performs a volatile read of the value from `src` without moving it. This
808 /// leaves the memory in `src` unchanged.
810 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
814 /// Memory accessed with `read_volatile` or [`write_volatile`] should not be
815 /// accessed with non-volatile operations.
817 /// [`write_volatile`]: ./fn.write_volatile.html
821 /// Rust does not currently have a rigorously and formally defined memory model,
822 /// so the precise semantics of what "volatile" means here is subject to change
823 /// over time. That being said, the semantics will almost always end up pretty
824 /// similar to [C11's definition of volatile][c11].
826 /// The compiler shouldn't change the relative order or number of volatile
827 /// memory operations. However, volatile memory operations on zero-sized types
828 /// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
829 /// and may be ignored.
831 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
835 /// Behavior is undefined if any of the following conditions are violated:
837 /// * `src` must be [valid] for reads.
839 /// * `src` must be properly aligned.
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
842 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
843 /// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
847 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
849 /// [valid]: ../ptr/index.html#safety
850 /// [`Copy`]: ../marker/trait.Copy.html
851 /// [`read`]: ./fn.read.html
852 /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
854 /// Just like in C, whether an operation is volatile has no bearing whatsoever
855 /// on questions involving concurrent access from multiple threads. Volatile
856 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
857 /// a race between a `read_volatile` and any write operation to the same location
858 /// is undefined behavior.
/// let x = 12;
/// let y = &x as *const i32;
869 /// assert_eq!(std::ptr::read_volatile(y), 12);
873 #[stable(feature = "volatile", since = "1.9.0")]
874 pub unsafe fn read_volatile<T>(src: *const T) -> T {
875 intrinsics::volatile_load(src)
878 /// Performs a volatile write of a memory location with the given value without
879 /// reading or dropping the old value.
881 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
885 /// Memory accessed with [`read_volatile`] or `write_volatile` should not be
886 /// accessed with non-volatile operations.
888 /// `write_volatile` does not drop the contents of `dst`. This is safe, but it
889 /// could leak allocations or resources, so care should be taken not to overwrite
890 /// an object that should be dropped.
892 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
893 /// location pointed to by `dst`.
895 /// [`read_volatile`]: ./fn.read_volatile.html
899 /// Rust does not currently have a rigorously and formally defined memory model,
900 /// so the precise semantics of what "volatile" means here is subject to change
901 /// over time. That being said, the semantics will almost always end up pretty
902 /// similar to [C11's definition of volatile][c11].
904 /// The compiler shouldn't change the relative order or number of volatile
905 /// memory operations. However, volatile memory operations on zero-sized types
906 /// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
907 /// and may be ignored.
909 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
913 /// Behavior is undefined if any of the following conditions are violated:
915 /// * `dst` must be [valid] for writes.
917 /// * `dst` must be properly aligned.
919 /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
921 /// [valid]: ../ptr/index.html#safety
923 /// Just like in C, whether an operation is volatile has no bearing whatsoever
924 /// on questions involving concurrent access from multiple threads. Volatile
925 /// accesses behave exactly like non-atomic accesses in that regard. In particular,
926 /// a race between a `write_volatile` and any other operation (reading or writing)
927 /// on the same location is undefined behavior.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// std::ptr::write_volatile(y, z);
/// assert_eq!(std::ptr::read_volatile(y), 12);
944 #[stable(feature = "volatile", since = "1.9.0")]
945 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
946 intrinsics::volatile_store(dst, src);
949 #[lang = "const_ptr"]
950 impl<T: ?Sized> *const T {
951 /// Returns `true` if the pointer is null.
953 /// Note that unsized types have many possible null pointers, as only the
954 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
963 /// let s: &str = "Follow the rabbit";
964 /// let ptr: *const u8 = s.as_ptr();
965 /// assert!(!ptr.is_null());
967 #[stable(feature = "rust1", since = "1.0.0")]
969 pub fn is_null(self) -> bool {
970 // Compare via a cast to a thin pointer, so fat pointers are only
971 // considering their "data" part for null-ness.
972 (self as *const u8) == null()
975 /// Returns `None` if the pointer is null, or else returns a reference to
976 /// the value wrapped in `Some`.
980 /// While this method and its mutable counterpart are useful for
981 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
985 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
986 /// not necessarily reflect the actual lifetime of the data.
993 /// let ptr: *const u8 = &10u8 as *const u8;
996 /// if let Some(val_back) = ptr.as_ref() {
997 /// println!("We got back the value: {}!", val_back);
1002 /// # Null-unchecked version
1004 /// If you are sure the pointer can never be null and are looking for some kind of
1005 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1006 /// dereference the pointer directly.
1009 /// let ptr: *const u8 = &10u8 as *const u8;
1012 /// let val_back = &*ptr;
1013 /// println!("We got back the value: {}!", val_back);
1016 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1018 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1026 /// Calculates the offset from a pointer.
1028 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1029 /// offset of `3 * size_of::<T>()` bytes.
1033 /// If any of the following conditions are violated, the result is Undefined
1036 /// * Both the starting and resulting pointer must be either in bounds or one
1037 /// byte past the end of the same allocated object.
1039 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1041 /// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1045 /// never reach a size where an offset is a concern. For instance, `Vec`
1046 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1047 /// `vec.as_ptr().add(vec.len())` is always safe.
1049 /// Most platforms fundamentally can't even construct such an allocation.
1050 /// For instance, no known 64-bit platform can ever serve a request
1051 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1052 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1053 /// more than `isize::MAX` bytes with things like Physical Address
1054 /// Extension. As such, memory acquired directly from allocators or memory
1055 /// mapped files *may* be too large to handle with this function.
1057 /// Consider using `wrapping_offset` instead if these constraints are
1058 /// difficult to satisfy. The only advantage of this method is that it
1059 /// enables more aggressive compiler optimizations.
1066 /// let s: &str = "123";
1067 /// let ptr: *const u8 = s.as_ptr();
1070 /// println!("{}", *ptr.offset(1) as char);
1071 /// println!("{}", *ptr.offset(2) as char);
1074 #[stable(feature = "rust1", since = "1.0.0")]
1076 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
1077 intrinsics::offset(self, count)
1080 /// Calculates the offset from a pointer using wrapping arithmetic.
1082 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1083 /// offset of `3 * size_of::<T>()` bytes.
1087 /// The resulting pointer does not need to be in bounds, but it is
1088 /// potentially hazardous to dereference (which requires `unsafe`).
1089 /// In particular, the resulting pointer may *not* be used to access a
1090 /// different allocated object than the one `self` points to. In other
1091 /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1092 /// *not* the same as `y`, and dereferencing it is undefined behavior
1093 /// unless `x` and `y` point into the same allocated object.
1095 /// Always use `.offset(count)` instead when possible, because `offset`
1096 /// allows the compiler to optimize better. If you need to cross object
1097 /// boundaries, cast the pointer to an integer and do the arithmetic there.
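///
/// For instance, a short sketch of that integer-based approach (the variable names here
/// are purely illustrative):
///
/// ```
/// let a = 1u8;
/// let b = 2u8;
/// let addr_a = &a as *const u8 as usize;
/// let addr_b = &b as *const u8 as usize;
/// // Plain integer arithmetic; no pointer into another object is ever formed.
/// let _distance = addr_b.wrapping_sub(addr_a);
/// ```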
1104 /// // Iterate using a raw pointer in increments of two elements
1105 /// let data = [1u8, 2, 3, 4, 5];
1106 /// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
1110 /// // This loop prints "1, 3, 5, "
1111 /// while ptr != end_rounded_up {
1113 /// print!("{}, ", *ptr);
1115 /// ptr = ptr.wrapping_offset(step);
1118 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1120 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
1122 intrinsics::arith_offset(self, count)
1126 /// Calculates the distance between two pointers. The returned value is in
1127 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1129 /// This function is the inverse of [`offset`].
1131 /// [`offset`]: #method.offset
1132 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
1136 /// If any of the following conditions are violated, the result is Undefined
1139 /// * Both the starting and other pointer must be either in bounds or one
1140 /// byte past the end of the same allocated object.
1142 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1144 /// * The distance between the pointers, in bytes, must be an exact multiple
1145 /// of the size of `T`.
1147 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1149 /// The compiler and standard library generally try to ensure allocations
1150 /// never reach a size where an offset is a concern. For instance, `Vec`
1151 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1152 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1154 /// Most platforms fundamentally can't even construct such an allocation.
1155 /// For instance, no known 64-bit platform can ever serve a request
1156 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1157 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1158 /// more than `isize::MAX` bytes with things like Physical Address
1159 /// Extension. As such, memory acquired directly from allocators or memory
1160 /// mapped files *may* be too large to handle with this function.
1162 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1163 /// difficult to satisfy. The only advantage of this method is that it
1164 /// enables more aggressive compiler optimizations.
1168 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1175 /// #![feature(ptr_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
1179 /// let ptr2: *const i32 = &a[3];
1181 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1182 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1183 /// assert_eq!(ptr1.offset(2), ptr2);
1184 /// assert_eq!(ptr2.offset(-2), ptr1);
1187 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1189 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1190 let pointee_size = mem::size_of::<T>();
1191 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
1193 // This is the same sequence that Clang emits for pointer subtraction.
1194 // It can be neither `nsw` nor `nuw` because the input is treated as
1195 // unsigned but then the output is treated as signed, so neither works.
1196 let d = isize::wrapping_sub(self as _, origin as _);
1197 intrinsics::exact_div(d, pointee_size as _)
1200 /// Calculates the distance between two pointers. The returned value is in
1201 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
1207 /// Though this method is safe for any two pointers, note that its result
1208 /// will be mostly useless if the two pointers aren't into the same allocated
1209 /// object, for example if they point to two different local variables.
1213 /// This function panics if `T` is a zero-sized type.
1220 /// #![feature(ptr_wrapping_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
1224 /// let ptr2: *const i32 = &a[3];
1225 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1226 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1227 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1228 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1230 /// let ptr1: *const i32 = 3 as _;
1231 /// let ptr2: *const i32 = 13 as _;
1232 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1234 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1236 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1237 let pointee_size = mem::size_of::<T>();
1238 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
1240 let d = isize::wrapping_sub(self as _, origin as _);
1241 d.wrapping_div(pointee_size as _)
1244 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1246 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1247 /// offset of `3 * size_of::<T>()` bytes.
1251 /// If any of the following conditions are violated, the result is Undefined
1254 /// * Both the starting and resulting pointer must be either in bounds or one
1255 /// byte past the end of the same allocated object.
1257 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1259 /// * The offset being in bounds cannot rely on "wrapping around" the address
1260 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1263 /// never reach a size where an offset is a concern. For instance, `Vec`
1264 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1265 /// `vec.as_ptr().add(vec.len())` is always safe.
1267 /// Most platforms fundamentally can't even construct such an allocation.
1268 /// For instance, no known 64-bit platform can ever serve a request
1269 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1270 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1271 /// more than `isize::MAX` bytes with things like Physical Address
1272 /// Extension. As such, memory acquired directly from allocators or memory
1273 /// mapped files *may* be too large to handle with this function.
1275 /// Consider using `wrapping_offset` instead if these constraints are
1276 /// difficult to satisfy. The only advantage of this method is that it
1277 /// enables more aggressive compiler optimizations.
1284 /// let s: &str = "123";
1285 /// let ptr: *const u8 = s.as_ptr();
1288 /// println!("{}", *ptr.add(1) as char);
1289 /// println!("{}", *ptr.add(2) as char);
1292 #[stable(feature = "pointer_methods", since = "1.26.0")]
1294 pub unsafe fn add(self, count: usize) -> Self
1297 self.offset(count as isize)
1300 /// Calculates the offset from a pointer (convenience for
1301 /// `.offset((count as isize).wrapping_neg())`).
1303 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1304 /// offset of `3 * size_of::<T>()` bytes.
1308 /// If any of the following conditions are violated, the result is Undefined
1311 /// * Both the starting and resulting pointer must be either in bounds or one
1312 /// byte past the end of the same allocated object.
1314 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1316 /// * The offset being in bounds cannot rely on "wrapping around" the address
1317 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1320 /// never reach a size where an offset is a concern. For instance, `Vec`
1321 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1322 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1324 /// Most platforms fundamentally can't even construct such an allocation.
1325 /// For instance, no known 64-bit platform can ever serve a request
1326 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1327 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1328 /// more than `isize::MAX` bytes with things like Physical Address
1329 /// Extension. As such, memory acquired directly from allocators or memory
1330 /// mapped files *may* be too large to handle with this function.
1332 /// Consider using `wrapping_offset` instead if these constraints are
1333 /// difficult to satisfy. The only advantage of this method is that it
1334 /// enables more aggressive compiler optimizations.
1341 /// let s: &str = "123";
1344 /// let end: *const u8 = s.as_ptr().add(3);
1345 /// println!("{}", *end.sub(1) as char);
1346 /// println!("{}", *end.sub(2) as char);
1349 #[stable(feature = "pointer_methods", since = "1.26.0")]
1351 pub unsafe fn sub(self, count: usize) -> Self
1354 self.offset((count as isize).wrapping_neg())
1357 /// Calculates the offset from a pointer using wrapping arithmetic.
1358 /// (convenience for `.wrapping_offset(count as isize)`)
1360 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1361 /// offset of `3 * size_of::<T>()` bytes.
1365 /// The resulting pointer does not need to be in bounds, but it is
1366 /// potentially hazardous to dereference (which requires `unsafe`).
1368 /// Always use `.add(count)` instead when possible, because `add`
1369 /// allows the compiler to optimize better.
1376 /// // Iterate using a raw pointer in increments of two elements
1377 /// let data = [1u8, 2, 3, 4, 5];
1378 /// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
1382 /// // This loop prints "1, 3, 5, "
1383 /// while ptr != end_rounded_up {
1385 /// print!("{}, ", *ptr);
1387 /// ptr = ptr.wrapping_add(step);
1390 #[stable(feature = "pointer_methods", since = "1.26.0")]
1392 pub fn wrapping_add(self, count: usize) -> Self
1395 self.wrapping_offset(count as isize)
1398 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1401 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1402 /// offset of `3 * size_of::<T>()` bytes.
1406 /// The resulting pointer does not need to be in bounds, but it is
1407 /// potentially hazardous to dereference (which requires `unsafe`).
1409 /// Always use `.sub(count)` instead when possible, because `sub`
1410 /// allows the compiler to optimize better.
1417 /// // Iterate using a raw pointer in increments of two elements (backwards)
1418 /// let data = [1u8, 2, 3, 4, 5];
1419 /// let mut ptr: *const u8 = data.as_ptr();
1420 /// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
1423 /// // This loop prints "5, 3, 1, "
1424 /// while ptr != start_rounded_down {
1426 /// print!("{}, ", *ptr);
1428 /// ptr = ptr.wrapping_sub(step);
1431 #[stable(feature = "pointer_methods", since = "1.26.0")]
1433 pub fn wrapping_sub(self, count: usize) -> Self
1436 self.wrapping_offset((count as isize).wrapping_neg())
1439 /// Reads the value from `self` without moving it. This leaves the
1440 /// memory in `self` unchanged.
1442 /// See [`ptr::read`] for safety concerns and examples.
1444 /// [`ptr::read`]: ./ptr/fn.read.html
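///
/// A minimal sketch of reading through a pointer derived from a reference; the same
/// caveats as [`ptr::read`] apply:
///
/// ```
/// let x = 7u32;
/// let p: *const u32 = &x;
/// let value = unsafe { p.read() };
/// assert_eq!(value, 7);
/// ```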
1445 #[stable(feature = "pointer_methods", since = "1.26.0")]
1447 pub unsafe fn read(self) -> T
1453 /// Performs a volatile read of the value from `self` without moving it. This
1454 /// leaves the memory in `self` unchanged.
1456 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
1460 /// See [`ptr::read_volatile`] for safety concerns and examples.
1462 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
1463 #[stable(feature = "pointer_methods", since = "1.26.0")]
1465 pub unsafe fn read_volatile(self) -> T
1471 /// Reads the value from `self` without moving it. This leaves the
1472 /// memory in `self` unchanged.
1474 /// Unlike `read`, the pointer may be unaligned.
1476 /// See [`ptr::read_unaligned`] for safety concerns and examples.
1478 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
1479 #[stable(feature = "pointer_methods", since = "1.26.0")]
1481 pub unsafe fn read_unaligned(self) -> T
1484 read_unaligned(self)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1488 /// and destination may overlap.
1490 /// NOTE: this has the *same* argument order as [`ptr::copy`].
1492 /// See [`ptr::copy`] for safety concerns and examples.
1494 /// [`ptr::copy`]: ./ptr/fn.copy.html
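///
/// A small sketch showing the direction of the copy (`self` is the source, `dest` the
/// destination), under the same requirements as [`ptr::copy`]:
///
/// ```
/// let src = [1u8, 2, 3, 4];
/// let mut dst = [0u8; 4];
/// unsafe { src.as_ptr().copy_to(dst.as_mut_ptr(), 4) };
/// assert_eq!(dst, [1, 2, 3, 4]);
/// ```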
1495 #[stable(feature = "pointer_methods", since = "1.26.0")]
1497 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1500 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1504 /// and destination may *not* overlap.
1506 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
1508 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
1510 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
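///
/// A brief sketch copying into a separate, hence non-overlapping, buffer; the safety
/// requirements of [`ptr::copy_nonoverlapping`] apply:
///
/// ```
/// let src = [10u16, 20, 30];
/// let mut dst = [0u16; 3];
/// unsafe { src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 3) };
/// assert_eq!(dst, [10, 20, 30]);
/// ```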
1511 #[stable(feature = "pointer_methods", since = "1.26.0")]
1513 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1516 copy_nonoverlapping(self, dest, count)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
1522 /// If it is not possible to align the pointer, the implementation returns
1523 /// `usize::max_value()`.
1525 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1526 /// used with the `offset` or `offset_to` methods.
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1529 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1530 /// the returned offset is correct in all terms other than alignment.
1534 /// The function panics if `align` is not a power-of-two.
1538 /// Accessing adjacent `u8` as `u16`
1541 /// # #![feature(align_offset)]
1542 /// # fn foo(n: usize) {
1543 /// # use std::mem::align_of;
1545 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1546 /// let ptr = &x[n] as *const u8;
1547 /// let offset = ptr.align_offset(align_of::<u16>());
1548 /// if offset < x.len() - n - 1 {
1549 /// let u16_ptr = ptr.add(offset) as *const u16;
1550 /// assert_ne!(*u16_ptr, 500);
1552 /// // while the pointer can be aligned via `offset`, it would point
1553 /// // outside the allocation
1557 #[unstable(feature = "align_offset", issue = "44488")]
1558 pub fn align_offset(self, align: usize) -> usize where T: Sized {
1559 if !align.is_power_of_two() {
1560 panic!("align_offset: align is not a power-of-two");
1563 align_offset(self, align)
1570 impl<T: ?Sized> *mut T {
1571 /// Returns `true` if the pointer is null.
1573 /// Note that unsized types have many possible null pointers, as only the
1574 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
1583 /// let mut s = [1, 2, 3];
1584 /// let ptr: *mut u32 = s.as_mut_ptr();
1585 /// assert!(!ptr.is_null());
1587 #[stable(feature = "rust1", since = "1.0.0")]
1589 pub fn is_null(self) -> bool {
1590 // Compare via a cast to a thin pointer, so fat pointers are only
1591 // considering their "data" part for null-ness.
1592 (self as *mut u8) == null_mut()
1595 /// Returns `None` if the pointer is null, or else returns a reference to
1596 /// the value wrapped in `Some`.
1600 /// While this method and its mutable counterpart are useful for
1601 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
1605 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1606 /// not necessarily reflect the actual lifetime of the data.
1613 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1616 /// if let Some(val_back) = ptr.as_ref() {
1617 /// println!("We got back the value: {}!", val_back);
1622 /// # Null-unchecked version
1624 /// If you are sure the pointer can never be null and are looking for some kind of
1625 /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
1626 /// dereference the pointer directly.
1629 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1632 /// let val_back = &*ptr;
1633 /// println!("We got back the value: {}!", val_back);
1636 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1638 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1646 /// Calculates the offset from a pointer.
1648 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1649 /// offset of `3 * size_of::<T>()` bytes.
1653 /// If any of the following conditions are violated, the result is Undefined
1656 /// * Both the starting and resulting pointer must be either in bounds or one
1657 /// byte past the end of the same allocated object.
1659 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1661 /// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1665 /// never reach a size where an offset is a concern. For instance, `Vec`
1666 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1667 /// `vec.as_ptr().add(vec.len())` is always safe.
1669 /// Most platforms fundamentally can't even construct such an allocation.
1670 /// For instance, no known 64-bit platform can ever serve a request
1671 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1672 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1673 /// more than `isize::MAX` bytes with things like Physical Address
1674 /// Extension. As such, memory acquired directly from allocators or memory
1675 /// mapped files *may* be too large to handle with this function.
1677 /// Consider using `wrapping_offset` instead if these constraints are
1678 /// difficult to satisfy. The only advantage of this method is that it
1679 /// enables more aggressive compiler optimizations.
1686 /// let mut s = [1, 2, 3];
1687 /// let ptr: *mut u32 = s.as_mut_ptr();
1690 /// println!("{}", *ptr.offset(1));
1691 /// println!("{}", *ptr.offset(2));
1694 #[stable(feature = "rust1", since = "1.0.0")]
1696 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1697 intrinsics::offset(self, count) as *mut T
1700 /// Calculates the offset from a pointer using wrapping arithmetic.
1701 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1702 /// offset of `3 * size_of::<T>()` bytes.
1706 /// The resulting pointer does not need to be in bounds, but it is
1707 /// potentially hazardous to dereference (which requires `unsafe`).
1708 /// In particular, the resulting pointer may *not* be used to access a
1709 /// different allocated object than the one `self` points to. In other
1710 /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
1711 /// *not* the same as `y`, and dereferencing it is undefined behavior
1712 /// unless `x` and `y` point into the same allocated object.
1714 /// Always use `.offset(count)` instead when possible, because `offset`
1715 /// allows the compiler to optimize better. If you need to cross object
1716 /// boundaries, cast the pointer to an integer and do the arithmetic there.
1723 /// // Iterate using a raw pointer in increments of two elements
1724 /// let mut data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *mut u8 = data.as_mut_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// while ptr != end_rounded_up {
///     unsafe { *ptr = 0; }
///     ptr = ptr.wrapping_offset(step);
/// }
1735 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1737 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1739 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1741 intrinsics::arith_offset(self, count) as *mut T
1745 /// Returns `None` if the pointer is null, or else returns a mutable
1746 /// reference to the value wrapped in `Some`.
1750 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1751 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1752 /// returned is indeed a valid lifetime for the contained data.
1759 /// let mut s = [1, 2, 3];
1760 /// let ptr: *mut u32 = s.as_mut_ptr();
1761 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1762 /// *first_value = 4;
1763 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1765 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1767 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1775 /// Calculates the distance between two pointers. The returned value is in
1776 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1778 /// This function is the inverse of [`offset`].
1780 /// [`offset`]: #method.offset-1
1781 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
1785 /// If any of the following conditions are violated, the result is Undefined
1788 /// * Both the starting and other pointer must be either in bounds or one
1789 /// byte past the end of the same allocated object.
1791 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1793 /// * The distance between the pointers, in bytes, must be an exact multiple
1794 /// of the size of `T`.
1796 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1798 /// The compiler and standard library generally try to ensure allocations
1799 /// never reach a size where an offset is a concern. For instance, `Vec`
1800 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1801 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1803 /// Most platforms fundamentally can't even construct such an allocation.
1804 /// For instance, no known 64-bit platform can ever serve a request
1805 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1806 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1807 /// more than `isize::MAX` bytes with things like Physical Address
1808 /// Extension. As such, memory acquired directly from allocators or memory
1809 /// mapped files *may* be too large to handle with this function.
1811 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1812 /// difficult to satisfy. The only advantage of this method is that it
1813 /// enables more aggressive compiler optimizations.
1817 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1824 /// #![feature(ptr_offset_from)]
1826 /// let mut a = [0; 5];
1827 /// let ptr1: *mut i32 = &mut a[1];
1828 /// let ptr2: *mut i32 = &mut a[3];
1830 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1831 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1832 /// assert_eq!(ptr1.offset(2), ptr2);
1833 /// assert_eq!(ptr2.offset(-2), ptr1);
1836 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1838 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1839 (self as *const T).offset_from(origin)
1842 /// Calculates the distance between two pointers. The returned value is in
1843 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
1849 /// Though this method is safe for any two pointers, note that its result
/// will be mostly useless if the two pointers don't point into the same allocated
1851 /// object, for example if they point to two different local variables.
1855 /// This function panics if `T` is a zero-sized type.
1862 /// #![feature(ptr_wrapping_offset_from)]
1864 /// let mut a = [0; 5];
1865 /// let ptr1: *mut i32 = &mut a[1];
1866 /// let ptr2: *mut i32 = &mut a[3];
1867 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1868 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1869 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1870 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1872 /// let ptr1: *mut i32 = 3 as _;
1873 /// let ptr2: *mut i32 = 13 as _;
1874 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1876 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1878 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1879 (self as *const T).wrapping_offset_from(origin)
1882 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1884 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1885 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1892 /// * Both the starting and resulting pointer must be either in bounds or one
1893 /// byte past the end of the same allocated object.
1895 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1897 /// * The offset being in bounds cannot rely on "wrapping around" the address
1898 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1901 /// never reach a size where an offset is a concern. For instance, `Vec`
1902 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1903 /// `vec.as_ptr().add(vec.len())` is always safe.
1905 /// Most platforms fundamentally can't even construct such an allocation.
1906 /// For instance, no known 64-bit platform can ever serve a request
1907 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1908 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1909 /// more than `isize::MAX` bytes with things like Physical Address
1910 /// Extension. As such, memory acquired directly from allocators or memory
1911 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_add` instead if these constraints are
1914 /// difficult to satisfy. The only advantage of this method is that it
1915 /// enables more aggressive compiler optimizations.
1922 /// let s: &str = "123";
1923 /// let ptr: *const u8 = s.as_ptr();
1926 /// println!("{}", *ptr.add(1) as char);
1927 /// println!("{}", *ptr.add(2) as char);
1930 #[stable(feature = "pointer_methods", since = "1.26.0")]
1932 pub unsafe fn add(self, count: usize) -> Self
1935 self.offset(count as isize)
1938 /// Calculates the offset from a pointer (convenience for
1939 /// `.offset((count as isize).wrapping_neg())`).
1941 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1942 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1949 /// * Both the starting and resulting pointer must be either in bounds or one
1950 /// byte past the end of the same allocated object.
1952 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1954 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1958 /// never reach a size where an offset is a concern. For instance, `Vec`
1959 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1960 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1962 /// Most platforms fundamentally can't even construct such an allocation.
1963 /// For instance, no known 64-bit platform can ever serve a request
1964 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1965 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1966 /// more than `isize::MAX` bytes with things like Physical Address
1967 /// Extension. As such, memory acquired directly from allocators or memory
1968 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_sub` instead if these constraints are
1971 /// difficult to satisfy. The only advantage of this method is that it
1972 /// enables more aggressive compiler optimizations.
1979 /// let s: &str = "123";
1982 /// let end: *const u8 = s.as_ptr().add(3);
1983 /// println!("{}", *end.sub(1) as char);
1984 /// println!("{}", *end.sub(2) as char);
1987 #[stable(feature = "pointer_methods", since = "1.26.0")]
1989 pub unsafe fn sub(self, count: usize) -> Self
1992 self.offset((count as isize).wrapping_neg())
1995 /// Calculates the offset from a pointer using wrapping arithmetic.
1996 /// (convenience for `.wrapping_offset(count as isize)`)
1998 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
1999 /// offset of `3 * size_of::<T>()` bytes.
2003 /// The resulting pointer does not need to be in bounds, but it is
2004 /// potentially hazardous to dereference (which requires `unsafe`).
2006 /// Always use `.add(count)` instead when possible, because `add`
2007 /// allows the compiler to optimize better.
2014 /// // Iterate using a raw pointer in increments of two elements
2015 /// let data = [1u8, 2, 3, 4, 5];
2016 /// let mut ptr: *const u8 = data.as_ptr();
2018 /// let end_rounded_up = ptr.wrapping_add(6);
2020 /// // This loop prints "1, 3, 5, "
2021 /// while ptr != end_rounded_up {
2023 /// print!("{}, ", *ptr);
2025 /// ptr = ptr.wrapping_add(step);
2028 #[stable(feature = "pointer_methods", since = "1.26.0")]
2030 pub fn wrapping_add(self, count: usize) -> Self
2033 self.wrapping_offset(count as isize)
2036 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
2039 /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
2040 /// offset of `3 * size_of::<T>()` bytes.
2044 /// The resulting pointer does not need to be in bounds, but it is
2045 /// potentially hazardous to dereference (which requires `unsafe`).
2047 /// Always use `.sub(count)` instead when possible, because `sub`
2048 /// allows the compiler to optimize better.
2055 /// // Iterate using a raw pointer in increments of two elements (backwards)
2056 /// let data = [1u8, 2, 3, 4, 5];
2057 /// let mut ptr: *const u8 = data.as_ptr();
2058 /// let start_rounded_down = ptr.wrapping_sub(2);
2059 /// ptr = ptr.wrapping_add(4);
2061 /// // This loop prints "5, 3, 1, "
2062 /// while ptr != start_rounded_down {
2064 /// print!("{}, ", *ptr);
2066 /// ptr = ptr.wrapping_sub(step);
2069 #[stable(feature = "pointer_methods", since = "1.26.0")]
2071 pub fn wrapping_sub(self, count: usize) -> Self
2074 self.wrapping_offset((count as isize).wrapping_neg())
2077 /// Reads the value from `self` without moving it. This leaves the
2078 /// memory in `self` unchanged.
2080 /// See [`ptr::read`] for safety concerns and examples.
2082 /// [`ptr::read`]: ./ptr/fn.read.html
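///
/// A minimal sketch of reading through a raw pointer obtained from a local value:
///
/// ```
/// let mut x = 12u32;
/// let ptr: *mut u32 = &mut x;
/// // Reads a copy of the value; the memory behind `ptr` is left unchanged.
/// let copy = unsafe { ptr.read() };
/// assert_eq!(copy, 12);
/// ```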
2083 #[stable(feature = "pointer_methods", since = "1.26.0")]
2085 pub unsafe fn read(self) -> T
2091 /// Performs a volatile read of the value from `self` without moving it. This
2092 /// leaves the memory in `self` unchanged.
2094 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2095 /// to not be elided or reordered by the compiler across other volatile
2098 /// See [`ptr::read_volatile`] for safety concerns and examples.
2100 /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
2101 #[stable(feature = "pointer_methods", since = "1.26.0")]
2103 pub unsafe fn read_volatile(self) -> T
2109 /// Reads the value from `self` without moving it. This leaves the
2110 /// memory in `self` unchanged.
2112 /// Unlike `read`, the pointer may be unaligned.
2114 /// See [`ptr::read_unaligned`] for safety concerns and examples.
2116 /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
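///
/// A minimal sketch of reading a `u32` at an odd offset into a byte buffer:
///
/// ```
/// let mut bytes = [0x12u8; 5];
/// // Offset 1 is, in general, not suitably aligned for `u32`, so a plain `read`
/// // here would be undefined behavior; `read_unaligned` has no alignment requirement.
/// let ptr = bytes.as_mut_ptr().wrapping_add(1) as *mut u32;
/// let value = unsafe { ptr.read_unaligned() };
/// assert_eq!(value, 0x12121212);
/// ```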
2117 #[stable(feature = "pointer_methods", since = "1.26.0")]
2119 pub unsafe fn read_unaligned(self) -> T
2122 read_unaligned(self)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2126 /// and destination may overlap.
2128 /// NOTE: this has the *same* argument order as [`ptr::copy`].
2130 /// See [`ptr::copy`] for safety concerns and examples.
2132 /// [`ptr::copy`]: ./ptr/fn.copy.html
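///
/// A brief sketch showing the argument order:
///
/// ```
/// let mut src = [1u32, 2, 3];
/// let mut dst = [0u32; 3];
/// let src_ptr: *mut u32 = src.as_mut_ptr();
/// // `self` is the source; the explicit argument is the destination.
/// unsafe { src_ptr.copy_to(dst.as_mut_ptr(), 3) };
/// assert_eq!(dst, [1, 2, 3]);
/// ```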
2133 #[stable(feature = "pointer_methods", since = "1.26.0")]
2135 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
2138 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
2142 /// and destination may *not* overlap.
2144 /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
2146 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2148 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
2149 #[stable(feature = "pointer_methods", since = "1.26.0")]
2151 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
2154 copy_nonoverlapping(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2158 /// and destination may overlap.
2160 /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
2162 /// See [`ptr::copy`] for safety concerns and examples.
2164 /// [`ptr::copy`]: ./ptr/fn.copy.html
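///
/// A brief sketch showing the reversed argument order:
///
/// ```
/// let src = [7u32, 8, 9];
/// let mut dst = [0u32; 3];
/// let dst_ptr: *mut u32 = dst.as_mut_ptr();
/// // Here `self` is the destination; the explicit argument is the source.
/// unsafe { dst_ptr.copy_from(src.as_ptr(), 3) };
/// assert_eq!(dst, [7, 8, 9]);
/// ```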
2165 #[stable(feature = "pointer_methods", since = "1.26.0")]
2167 pub unsafe fn copy_from(self, src: *const T, count: usize)
2170 copy(src, self, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2174 /// and destination may *not* overlap.
2176 /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
2178 /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
2180 /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
2181 #[stable(feature = "pointer_methods", since = "1.26.0")]
2183 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2186 copy_nonoverlapping(src, self, count)
2189 /// Executes the destructor (if any) of the pointed-to value.
2191 /// See [`ptr::drop_in_place`] for safety concerns and examples.
2193 /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html
2194 #[stable(feature = "pointer_methods", since = "1.26.0")]
2196 pub unsafe fn drop_in_place(self) {
2200 /// Overwrites a memory location with the given value without reading or
2201 /// dropping the old value.
2203 /// See [`ptr::write`] for safety concerns and examples.
2205 /// [`ptr::write`]: ./ptr/fn.write.html
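///
/// A minimal sketch of overwriting a value in place:
///
/// ```
/// let mut x = 0i32;
/// let ptr: *mut i32 = &mut x;
/// // Overwrites the location without reading or dropping the old value.
/// unsafe { ptr.write(42) };
/// assert_eq!(x, 42);
/// ```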
2206 #[stable(feature = "pointer_methods", since = "1.26.0")]
2208 pub unsafe fn write(self, val: T)
2214 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2215 /// bytes of memory starting at `self` to `val`.
2217 /// See [`ptr::write_bytes`] for safety concerns and examples.
2219 /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html
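///
/// A short illustrative sketch:
///
/// ```
/// let mut buf = [1u8, 2, 3, 4];
/// let ptr: *mut u8 = buf.as_mut_ptr();
/// // Sets the first three bytes to zero, like `memset(ptr, 0, 3)`.
/// unsafe { ptr.write_bytes(0, 3) };
/// assert_eq!(buf, [0, 0, 0, 4]);
/// ```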
2220 #[stable(feature = "pointer_methods", since = "1.26.0")]
2222 pub unsafe fn write_bytes(self, val: u8, count: usize)
2225 write_bytes(self, val, count)
2228 /// Performs a volatile write of a memory location with the given value without
2229 /// reading or dropping the old value.
2231 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2232 /// to not be elided or reordered by the compiler across other volatile
2235 /// See [`ptr::write_volatile`] for safety concerns and examples.
2237 /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html
2238 #[stable(feature = "pointer_methods", since = "1.26.0")]
2240 pub unsafe fn write_volatile(self, val: T)
2243 write_volatile(self, val)
2246 /// Overwrites a memory location with the given value without reading or
2247 /// dropping the old value.
2249 /// Unlike `write`, the pointer may be unaligned.
2251 /// See [`ptr::write_unaligned`] for safety concerns and examples.
2253 /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html
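///
/// A minimal sketch of writing a `u32` at an odd offset into a byte buffer:
///
/// ```
/// let mut bytes = [0u8; 5];
/// // Offset 1 is, in general, not suitably aligned for `u32`; `write_unaligned` has no
/// // alignment requirement, whereas a plain `write` here would be undefined behavior.
/// let ptr = bytes.as_mut_ptr().wrapping_add(1) as *mut u32;
/// unsafe { ptr.write_unaligned(0x05050505) };
/// assert_eq!(bytes, [0, 5, 5, 5, 5]);
/// ```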
2254 #[stable(feature = "pointer_methods", since = "1.26.0")]
2256 pub unsafe fn write_unaligned(self, val: T)
2259 write_unaligned(self, val)
2262 /// Replaces the value at `self` with `src`, returning the old
2263 /// value, without dropping either.
2265 /// See [`ptr::replace`] for safety concerns and examples.
2267 /// [`ptr::replace`]: ./ptr/fn.replace.html
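///
/// A minimal sketch of moving a new value in and the old value out:
///
/// ```
/// let mut x = 5u32;
/// let ptr: *mut u32 = &mut x;
/// // Moves the old value out and moves `10` in, dropping neither.
/// let old = unsafe { ptr.replace(10) };
/// assert_eq!(old, 5);
/// assert_eq!(x, 10);
/// ```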
2268 #[stable(feature = "pointer_methods", since = "1.26.0")]
2270 pub unsafe fn replace(self, src: T) -> T
2276 /// Swaps the values at two mutable locations of the same type, without
2277 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2278 /// otherwise equivalent.
2280 /// See [`ptr::swap`] for safety concerns and examples.
2282 /// [`ptr::swap`]: ./ptr/fn.swap.html
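///
/// A brief illustrative sketch:
///
/// ```
/// let mut x = 1u8;
/// let mut y = 2u8;
/// let x_ptr: *mut u8 = &mut x;
/// // Swaps the two values through raw pointers; unlike `mem::swap`, this is also
/// // allowed when the two locations overlap.
/// unsafe { x_ptr.swap(&mut y) };
/// assert_eq!((x, y), (2, 1));
/// ```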
2283 #[stable(feature = "pointer_methods", since = "1.26.0")]
2285 pub unsafe fn swap(self, with: *mut T)
2291 /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
2294 /// If it is not possible to align the pointer, the implementation returns
2295 /// `usize::max_value()`.
2297 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
2298 /// used with the `offset` or `offset_to` methods.
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
2301 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
2302 /// the returned offset is correct in all terms other than alignment.
2306 /// The function panics if `align` is not a power-of-two.
2310 /// Accessing adjacent `u8` as `u16`
2313 /// # #![feature(align_offset)]
2314 /// # fn foo(n: usize) {
2315 /// # use std::mem::align_of;
2317 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
2318 /// let ptr = &x[n] as *const u8;
2319 /// let offset = ptr.align_offset(align_of::<u16>());
2320 /// if offset < x.len() - n - 1 {
2321 /// let u16_ptr = ptr.add(offset) as *const u16;
2322 /// assert_ne!(*u16_ptr, 500);
2324 /// // while the pointer can be aligned via `offset`, it would point
2325 /// // outside the allocation
2329 #[unstable(feature = "align_offset", issue = "44488")]
2330 pub fn align_offset(self, align: usize) -> usize where T: Sized {
2331 if !align.is_power_of_two() {
2332 panic!("align_offset: align is not a power-of-two");
2335 align_offset(self, align)
2340 /// Align pointer `p`.
2342 /// Calculate offset (in terms of elements of `stride` stride) that has to be applied
2343 /// to pointer `p` so that pointer `p` would get aligned to `a`.
2345 /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is a change of `INV_TABLE_MOD_16` and its
/// associated constants.
2349 /// If we ever decide to make it possible to call the intrinsic with `a` that is not a
2350 /// power-of-two, it will probably be more prudent to just change to a naive implementation rather
2351 /// than trying to adapt this to accommodate that change.
2353 /// Any questions go to @nagisa.
2354 #[lang="align_offset"]
2355 pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
2356 /// Calculate multiplicative modular inverse of `x` modulo `m`.
/// This implementation is tailored for `align_offset` and has the following preconditions:
2360 /// * `m` is a power-of-two;
2361 /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
2363 /// Implementation of this function shall not panic. Ever.
2365 fn mod_inv(x: usize, m: usize) -> usize {
2366 /// Multiplicative modular inverse table modulo 2⁴ = 16.
/// Note that this table does not contain values where the inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
2370 const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
2371 /// Modulo for which the `INV_TABLE_MOD_16` is intended.
2372 const INV_TABLE_MOD: usize = 16;
2374 const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
2376 let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
2377 if m <= INV_TABLE_MOD {
2378 table_inverse & (m - 1)
2380 // We iterate "up" using the following formula:
2382 // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
2384 // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
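//
// As a concrete instance of one lifting step: 3⁻¹ ≡ 11 (mod 2⁴), and
// 11·(2 - 3·11) ≡ -341 ≡ 171 (mod 2⁸); indeed 3·171 = 513 ≡ 1 (mod 2⁸).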
2385 let mut inverse = table_inverse;
2386 let mut going_mod = INV_TABLE_MOD_SQUARED;
2388 // y = y * (2 - xy) mod n
// Note that we use wrapping operations here intentionally – the original formula
// uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
// usize::max_value()` instead, because we take the result `mod n` at the end
// of the algorithm anyway.
2394 inverse = inverse.wrapping_mul(
2395 2usize.wrapping_sub(x.wrapping_mul(inverse))
2396 ) & (going_mod - 1);
2398 return inverse & (m - 1);
2400 going_mod = going_mod.wrapping_mul(going_mod);
2405 let stride = ::mem::size_of::<T>();
2406 let a_minus_one = a.wrapping_sub(1);
2407 let pmoda = p as usize & a_minus_one;
2410 // Already aligned. Yay!
2415 return if stride == 0 {
2416 // If the pointer is not aligned, and the element is zero-sized, then no amount of
2417 // elements will ever align the pointer.
2420 a.wrapping_sub(pmoda)
2424 let smoda = stride & a_minus_one;
// `a` is a power-of-two, so it cannot be 0. `stride == 0` is handled above.
2426 let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
2427 let gcd = 1usize << gcdpow;
2429 if p as usize & (gcd - 1) == 0 {
2430 // This branch solves for the following linear congruence equation:
2432 // $$ p + so ≡ 0 mod a $$
2434 // $p$ here is the pointer value, $s$ – stride of `T`, $o$ offset in `T`s, and $a$ – the
2435 // requested alignment.
2438 // o = (a - (p mod a))/g * ((s/g)⁻¹ mod a)
2440 // The first term is “the relative alignment of p to a”, the second term is “how does
2441 // incrementing p by s bytes change the relative alignment of p”. Division by `g` is
2442 // necessary to make this equation well formed if $a$ and $s$ are not co-prime.
2444 // Furthermore, the result produced by this solution is not “minimal”, so it is necessary
// to take the result $o mod lcm(s, a)$. We can replace $lcm(s, a)$ with just $a / g$.
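//
// As a concrete instance of the formula above: with p = 6, s = 2, a = 8 we have g = 2,
// so o = (8 - 6)/2 · ((2/2)⁻¹ mod 4) = 1·1 = 1, and indeed p + s·1 = 8 is aligned to 8.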
2446 let j = a.wrapping_sub(pmoda) >> gcdpow;
2447 let k = smoda >> gcdpow;
2448 return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow);
2451 // Cannot be aligned at all.
2457 // Equality for pointers
2458 #[stable(feature = "rust1", since = "1.0.0")]
2459 impl<T: ?Sized> PartialEq for *const T {
2461 fn eq(&self, other: &*const T) -> bool { *self == *other }
2464 #[stable(feature = "rust1", since = "1.0.0")]
2465 impl<T: ?Sized> Eq for *const T {}
2467 #[stable(feature = "rust1", since = "1.0.0")]
2468 impl<T: ?Sized> PartialEq for *mut T {
2470 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2473 #[stable(feature = "rust1", since = "1.0.0")]
2474 impl<T: ?Sized> Eq for *mut T {}
2476 /// Compares raw pointers for equality.
2478 /// This is the same as using the `==` operator, but less generic:
2479 /// the arguments have to be `*const T` raw pointers,
2480 /// not anything that implements `PartialEq`.
2482 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2483 /// by their address rather than comparing the values they point to
2484 /// (which is what the `PartialEq for &T` implementation does).
/// Note that the `PartialEq` implementations of smart pointer types such as `Box`,
/// `Rc`, and `Arc` do not use this function: they compare the pointed-to values
/// rather than the addresses at which those values live.
2496 /// let other_five = 5;
2497 /// let five_ref = &five;
2498 /// let same_five_ref = &five;
2499 /// let other_five_ref = &other_five;
2501 /// assert!(five_ref == same_five_ref);
2502 /// assert!(five_ref == other_five_ref);
2504 /// assert!(ptr::eq(five_ref, same_five_ref));
2505 /// assert!(!ptr::eq(five_ref, other_five_ref));
2507 #[stable(feature = "ptr_eq", since = "1.17.0")]
2509 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2513 /// Hash a raw pointer.
2515 /// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
2516 /// by its address rather than the value it points to
2517 /// (which is what the `Hash for &T` implementation does).
2522 /// #![feature(ptr_hash)]
2523 /// use std::collections::hash_map::DefaultHasher;
2524 /// use std::hash::{Hash, Hasher};
2528 /// let five_ref = &five;
2530 /// let mut hasher = DefaultHasher::new();
2531 /// ptr::hash(five_ref, &mut hasher);
2532 /// let actual = hasher.finish();
2534 /// let mut hasher = DefaultHasher::new();
2535 /// (five_ref as *const i32).hash(&mut hasher);
2536 /// let expected = hasher.finish();
2538 /// assert_eq!(actual, expected);
2540 #[unstable(feature = "ptr_hash", reason = "newly added", issue = "56286")]
2541 pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
2546 // Impls for function pointers
2547 macro_rules! fnptr_impls_safety_abi {
2548 ($FnTy: ty, $($Arg: ident),*) => {
2549 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2550 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2552 fn eq(&self, other: &Self) -> bool {
2553 *self as usize == *other as usize
2557 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2558 impl<Ret, $($Arg),*> Eq for $FnTy {}
2560 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2561 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2563 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2564 (*self as usize).partial_cmp(&(*other as usize))
2568 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2569 impl<Ret, $($Arg),*> Ord for $FnTy {
2571 fn cmp(&self, other: &Self) -> Ordering {
2572 (*self as usize).cmp(&(*other as usize))
2576 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2577 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2578 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2579 state.write_usize(*self as usize)
2583 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2584 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2585 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2586 fmt::Pointer::fmt(&(*self as *const ()), f)
2590 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2591 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2592 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2593 fmt::Pointer::fmt(&(*self as *const ()), f)
2599 macro_rules! fnptr_impls_args {
2600 ($($Arg: ident),+) => {
2601 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2602 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2603 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2604 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2605 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2606 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2609 // No variadic functions with 0 parameters
2610 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2611 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2612 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2613 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2617 fnptr_impls_args! { }
2618 fnptr_impls_args! { A }
2619 fnptr_impls_args! { A, B }
2620 fnptr_impls_args! { A, B, C }
2621 fnptr_impls_args! { A, B, C, D }
2622 fnptr_impls_args! { A, B, C, D, E }
2623 fnptr_impls_args! { A, B, C, D, E, F }
2624 fnptr_impls_args! { A, B, C, D, E, F, G }
2625 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2626 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2627 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2628 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2629 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2631 // Comparison for pointers
2632 #[stable(feature = "rust1", since = "1.0.0")]
2633 impl<T: ?Sized> Ord for *const T {
2635 fn cmp(&self, other: &*const T) -> Ordering {
2638 } else if self == other {
2646 #[stable(feature = "rust1", since = "1.0.0")]
2647 impl<T: ?Sized> PartialOrd for *const T {
2649 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2650 Some(self.cmp(other))
2654 fn lt(&self, other: &*const T) -> bool { *self < *other }
2657 fn le(&self, other: &*const T) -> bool { *self <= *other }
2660 fn gt(&self, other: &*const T) -> bool { *self > *other }
2663 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2666 #[stable(feature = "rust1", since = "1.0.0")]
2667 impl<T: ?Sized> Ord for *mut T {
2669 fn cmp(&self, other: &*mut T) -> Ordering {
2672 } else if self == other {
2680 #[stable(feature = "rust1", since = "1.0.0")]
2681 impl<T: ?Sized> PartialOrd for *mut T {
2683 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2684 Some(self.cmp(other))
2688 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2691 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2694 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2697 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2700 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2701 /// of this wrapper owns the referent. Useful for building abstractions like
2702 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2704 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2705 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2706 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2707 /// the referent of the pointer should not be modified without a unique path to
2708 /// its owning Unique.
2710 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2711 /// consider using `NonNull`, which has weaker semantics.
2713 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2714 /// is never dereferenced. This is so that enums may use this forbidden value
2715 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2716 /// However the pointer may still dangle if it isn't dereferenced.
2718 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2719 /// for any type which upholds Unique's aliasing requirements.
2720 #[unstable(feature = "ptr_internals", issue = "0",
2721 reason = "use NonNull instead and consider PhantomData<T> \
2722 (if you also use #[may_dangle]), Send, and/or Sync")]
2724 #[repr(transparent)]
2725 #[rustc_layout_scalar_valid_range_start(1)]
2726 pub struct Unique<T: ?Sized> {
2728 // NOTE: this marker has no consequences for variance, but is necessary
2729 // for dropck to understand that we logically own a `T`.
2731 // For details, see:
2732 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2733 _marker: PhantomData<T>,
2736 #[unstable(feature = "ptr_internals", issue = "0")]
2737 impl<T: ?Sized> fmt::Debug for Unique<T> {
2738 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2739 fmt::Pointer::fmt(&self.as_ptr(), f)
2743 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2744 /// reference is unaliased. Note that this aliasing invariant is
2745 /// unenforced by the type system; the abstraction using the
2746 /// `Unique` must enforce it.
2747 #[unstable(feature = "ptr_internals", issue = "0")]
2748 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2750 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2751 /// reference is unaliased. Note that this aliasing invariant is
2752 /// unenforced by the type system; the abstraction using the
2753 /// `Unique` must enforce it.
2754 #[unstable(feature = "ptr_internals", issue = "0")]
2755 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2757 #[unstable(feature = "ptr_internals", issue = "0")]
2758 impl<T: Sized> Unique<T> {
2759 /// Creates a new `Unique` that is dangling, but well-aligned.
2761 /// This is useful for initializing types which lazily allocate, like
2762 /// `Vec::new` does.
2764 /// Note that the pointer value may potentially represent a valid pointer to
2765 /// a `T`, which means this must not be used as a "not yet initialized"
2766 /// sentinel value. Types that lazily allocate must track initialization by
2767 /// some other means.
2768 // FIXME: rename to dangling() to match NonNull?
2769 pub const fn empty() -> Self {
2771 Unique::new_unchecked(mem::align_of::<T>() as *mut T)
2776 #[unstable(feature = "ptr_internals", issue = "0")]
2777 impl<T: ?Sized> Unique<T> {
2778 /// Creates a new `Unique`.
2782 /// `ptr` must be non-null.
2783 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2784 Unique { pointer: ptr as _, _marker: PhantomData }
2787 /// Creates a new `Unique` if `ptr` is non-null.
2788 pub fn new(ptr: *mut T) -> Option<Self> {
2790 Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } })
2796 /// Acquires the underlying `*mut` pointer.
2797 pub const fn as_ptr(self) -> *mut T {
2798 self.pointer as *mut T
2801 /// Dereferences the content.
/// The resulting lifetime is bound to `self` so this behaves "as if"
/// it were actually an instance of `T` that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2806 pub unsafe fn as_ref(&self) -> &T {
2810 /// Mutably dereferences the content.
/// The resulting lifetime is bound to `self` so this behaves "as if"
/// it were actually an instance of `T` that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2815 pub unsafe fn as_mut(&mut self) -> &mut T {
2820 #[unstable(feature = "ptr_internals", issue = "0")]
2821 impl<T: ?Sized> Clone for Unique<T> {
2822 fn clone(&self) -> Self {
2827 #[unstable(feature = "ptr_internals", issue = "0")]
2828 impl<T: ?Sized> Copy for Unique<T> { }
2830 #[unstable(feature = "ptr_internals", issue = "0")]
2831 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2833 #[unstable(feature = "ptr_internals", issue = "0")]
2834 impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> { }
2836 #[unstable(feature = "ptr_internals", issue = "0")]
2837 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2838 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2839 fmt::Pointer::fmt(&self.as_ptr(), f)
2843 #[unstable(feature = "ptr_internals", issue = "0")]
2844 impl<T: ?Sized> From<&mut T> for Unique<T> {
2845 fn from(reference: &mut T) -> Self {
2846 unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } }
2850 #[unstable(feature = "ptr_internals", issue = "0")]
2851 impl<T: ?Sized> From<&T> for Unique<T> {
2852 fn from(reference: &T) -> Self {
2853 unsafe { Unique { pointer: reference as *const T, _marker: PhantomData } }
2857 #[unstable(feature = "ptr_internals", issue = "0")]
2858 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2859 fn from(p: NonNull<T>) -> Self {
2860 unsafe { Unique { pointer: p.pointer, _marker: PhantomData } }
2864 /// `*mut T` but non-zero and covariant.
2866 /// This is often the correct thing to use when building data structures using
2867 /// raw pointers, but is ultimately more dangerous to use because of its additional
2868 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2870 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2871 /// is never dereferenced. This is so that enums may use this forbidden value
2872 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
2873 /// However the pointer may still dangle if it isn't dereferenced.
2875 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2876 /// for your use case, you should include some PhantomData in your type to
2877 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2878 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2879 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2880 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
2882 /// Notice that `NonNull<T>` has a `From` instance for `&T`. However, this does
2883 /// not change the fact that mutating through a (pointer derived from a) shared
2884 /// reference is undefined behavior unless the mutation happens inside an
2885 /// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared
2886 /// reference. When using this `From` instance without an `UnsafeCell<T>`,
2887 /// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr`
2888 /// is never used for mutation.
2890 /// [`UnsafeCell<T>`]: ../cell/struct.UnsafeCell.html
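///
/// A minimal sketch of the niche guarantee and of construction from a reference:
///
/// ```
/// use std::ptr::NonNull;
/// use std::mem::size_of;
///
/// // `Option<NonNull<T>>` can use the null value as its `None` representation.
/// assert_eq!(size_of::<Option<NonNull<u32>>>(), size_of::<*mut u32>());
///
/// let x = 7u32;
/// // The `From<&T>` impl never produces a null pointer.
/// let ptr = NonNull::from(&x);
/// assert_eq!(unsafe { *ptr.as_ptr() }, 7);
/// ```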
2891 #[stable(feature = "nonnull", since = "1.25.0")]
2892 #[repr(transparent)]
2893 #[rustc_layout_scalar_valid_range_start(1)]
2894 pub struct NonNull<T: ?Sized> {
2898 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2899 // N.B., this impl is unnecessary, but should provide better error messages.
2900 #[stable(feature = "nonnull", since = "1.25.0")]
2901 impl<T: ?Sized> !Send for NonNull<T> { }
2903 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2904 // N.B., this impl is unnecessary, but should provide better error messages.
2905 #[stable(feature = "nonnull", since = "1.25.0")]
2906 impl<T: ?Sized> !Sync for NonNull<T> { }
2908 impl<T: Sized> NonNull<T> {
2909 /// Creates a new `NonNull` that is dangling, but well-aligned.
2911 /// This is useful for initializing types which lazily allocate, like
2912 /// `Vec::new` does.
2914 /// Note that the pointer value may potentially represent a valid pointer to
2915 /// a `T`, which means this must not be used as a "not yet initialized"
2916 /// sentinel value. Types that lazily allocate must track initialization by
2917 /// some other means.
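///
/// A small illustrative sketch:
///
/// ```
/// use std::ptr::NonNull;
///
/// // Dangling but non-null and well-aligned; usable as a placeholder,
/// // never to be dereferenced.
/// let ptr = NonNull::<u64>::dangling();
/// assert_eq!(ptr.as_ptr() as usize % std::mem::align_of::<u64>(), 0);
/// ```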
2918 #[stable(feature = "nonnull", since = "1.25.0")]
2920 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))]
2921 pub const fn dangling() -> Self {
2923 let ptr = mem::align_of::<T>() as *mut T;
2924 NonNull::new_unchecked(ptr)
2929 impl<T: ?Sized> NonNull<T> {
2930 /// Creates a new `NonNull`.
2934 /// `ptr` must be non-null.
2935 #[stable(feature = "nonnull", since = "1.25.0")]
2937 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2938 NonNull { pointer: ptr as _ }
2941 /// Creates a new `NonNull` if `ptr` is non-null.
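///
/// A brief sketch of the null check:
///
/// ```
/// use std::ptr::{self, NonNull};
///
/// let mut x = 0u32;
/// assert!(NonNull::new(&mut x as *mut u32).is_some());
/// assert!(NonNull::new(ptr::null_mut::<u32>()).is_none());
/// ```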
2942 #[stable(feature = "nonnull", since = "1.25.0")]
2944 pub fn new(ptr: *mut T) -> Option<Self> {
2946 Some(unsafe { Self::new_unchecked(ptr) })
2952 /// Acquires the underlying `*mut` pointer.
2953 #[stable(feature = "nonnull", since = "1.25.0")]
2955 pub const fn as_ptr(self) -> *mut T {
2956 self.pointer as *mut T
2959 /// Dereferences the content.
/// The resulting lifetime is bound to `self` so this behaves "as if"
/// it were actually an instance of `T` that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2964 #[stable(feature = "nonnull", since = "1.25.0")]
2966 pub unsafe fn as_ref(&self) -> &T {
2970 /// Mutably dereferences the content.
/// The resulting lifetime is bound to `self` so this behaves "as if"
/// it were actually an instance of `T` that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2975 #[stable(feature = "nonnull", since = "1.25.0")]
2977 pub unsafe fn as_mut(&mut self) -> &mut T {
/// Casts to a pointer of another type.
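///
/// A minimal illustrative sketch:
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut bytes = [1u8, 2, 3, 4];
/// let ptr = NonNull::from(&mut bytes);
/// // Reinterprets the same non-null address as a pointer to the first byte.
/// let first: NonNull<u8> = ptr.cast();
/// assert_eq!(unsafe { first.as_ptr().read() }, 1);
/// ```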
2982 #[stable(feature = "nonnull_cast", since = "1.27.0")]
2984 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))]
2985 pub const fn cast<U>(self) -> NonNull<U> {
2987 NonNull::new_unchecked(self.as_ptr() as *mut U)
2992 #[stable(feature = "nonnull", since = "1.25.0")]
2993 impl<T: ?Sized> Clone for NonNull<T> {
2994 fn clone(&self) -> Self {
2999 #[stable(feature = "nonnull", since = "1.25.0")]
3000 impl<T: ?Sized> Copy for NonNull<T> { }
3002 #[unstable(feature = "coerce_unsized", issue = "27732")]
3003 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
3005 #[unstable(feature = "dispatch_from_dyn", issue = "0")]
3006 impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
3008 #[stable(feature = "nonnull", since = "1.25.0")]
3009 impl<T: ?Sized> fmt::Debug for NonNull<T> {
3010 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3011 fmt::Pointer::fmt(&self.as_ptr(), f)
3015 #[stable(feature = "nonnull", since = "1.25.0")]
3016 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
3017 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3018 fmt::Pointer::fmt(&self.as_ptr(), f)
3022 #[stable(feature = "nonnull", since = "1.25.0")]
3023 impl<T: ?Sized> Eq for NonNull<T> {}
3025 #[stable(feature = "nonnull", since = "1.25.0")]
3026 impl<T: ?Sized> PartialEq for NonNull<T> {
3028 fn eq(&self, other: &Self) -> bool {
3029 self.as_ptr() == other.as_ptr()
3033 #[stable(feature = "nonnull", since = "1.25.0")]
3034 impl<T: ?Sized> Ord for NonNull<T> {
3036 fn cmp(&self, other: &Self) -> Ordering {
3037 self.as_ptr().cmp(&other.as_ptr())
3041 #[stable(feature = "nonnull", since = "1.25.0")]
3042 impl<T: ?Sized> PartialOrd for NonNull<T> {
3044 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
3045 self.as_ptr().partial_cmp(&other.as_ptr())
3049 #[stable(feature = "nonnull", since = "1.25.0")]
3050 impl<T: ?Sized> hash::Hash for NonNull<T> {
3052 fn hash<H: hash::Hasher>(&self, state: &mut H) {
3053 self.as_ptr().hash(state)
3057 #[unstable(feature = "ptr_internals", issue = "0")]
3058 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
3060 fn from(unique: Unique<T>) -> Self {
3061 unsafe { NonNull { pointer: unique.pointer } }
3065 #[stable(feature = "nonnull", since = "1.25.0")]
3066 impl<T: ?Sized> From<&mut T> for NonNull<T> {
3068 fn from(reference: &mut T) -> Self {
3069 unsafe { NonNull { pointer: reference as *mut T } }
3073 #[stable(feature = "nonnull", since = "1.25.0")]
3074 impl<T: ?Sized> From<&T> for NonNull<T> {
3076 fn from(reference: &T) -> Self {
3077 unsafe { NonNull { pointer: reference as *const T } }