1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
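//!
//! Raw pointers are most commonly obtained by coercing references; a minimal
//! illustration (dereferencing always requires `unsafe`):
//!
//! ```
//! let value = 42u32;
//! let const_ptr: *const u32 = &value;   // shared raw pointer from `&T`
//!
//! let mut slot = 0u32;
//! let mut_ptr: *mut u32 = &mut slot;    // mutable raw pointer from `&mut T`
//!
//! unsafe {
//!     *mut_ptr = *const_ptr;            // raw reads and writes need `unsafe`
//! }
//! assert_eq!(slot, 42);
//! ```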
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
26 #[allow(deprecated)] use nonzero::NonZero;
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
44 /// trait objects, because they can't be read out onto the stack and dropped normally.
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
49 /// as the compiler doesn't need to prove that it's sound to elide the copy.
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
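///
/// # Examples
///
/// A minimal sketch of manually running a destructor through a raw pointer;
/// the `Rc` is only there so the effect of the drop is observable:
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
///
/// let mut v = vec![Rc::new(0), last];
/// unsafe {
///     // Drop the last element in place, then shrink the vector so the
///     // dropped slot is never touched (or dropped) again.
///     ptr::drop_in_place(&mut v[1]);
///     v.set_len(1);
/// }
/// assert!(weak.upgrade().is_none());
/// ```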
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
62 drop_in_place(to_drop);
65 /// Creates a null raw pointer.
72 /// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
86 /// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
103 /// This function copies the memory through the raw pointers passed to it.
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
115 /// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
122 /// assert_eq!([2, 3, 0, 1], array);
126 /// Swapping two overlapping regions:
131 /// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
138 /// assert_eq!([1, 0, 1, 2], array);
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
152 // y and tmp now point to the same thing, but we need to completely forget `tmp`
153 // because it's no longer relevant.
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
169 /// #![feature(swap_nonoverlapping)]
173 /// let mut x = [1, 2, 3, 4];
174 /// let mut y = [7, 8, 9];
177 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
180 /// assert_eq!(x, [7, 8, 3, 4]);
181 /// assert_eq!(y, [1, 2, 9]);
184 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
185 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
186 let x = x as *mut u8;
187 let y = y as *mut u8;
188 let len = mem::size_of::<T>() * count;
189 swap_nonoverlapping_bytes(x, y, len)
193 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
194 // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
195 // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
196 // Haswell E processors. LLVM is more able to optimize if we give a struct a
197 // #[repr(simd)], even if we don't actually use this struct directly.
199 // FIXME repr(simd) broken on emscripten and redox
200 // It's also broken on big-endian powerpc64 and s390x. #42778
201 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
202 target_endian = "big")), repr(simd))]
204 struct Block(u64, u64, u64, u64);
205 struct UnalignedBlock(u64, u64, u64, u64);
207 let block_size = mem::size_of::<Block>();
209 // Loop through x & y, copying them `Block` at a time
210 // The optimizer should unroll the loop fully for most types
211 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
213 while i + block_size <= len {
214 // Create some uninitialized memory as scratch space
215 // Declaring `t` here avoids aligning the stack when this loop is unused
216 let mut t: Block = mem::uninitialized();
217 let t = &mut t as *mut _ as *mut u8;
218 let x = x.offset(i as isize);
219 let y = y.offset(i as isize);
221 // Swap a block of bytes of x & y, using t as a temporary buffer
222 // This should be optimized into efficient SIMD operations where available
223 copy_nonoverlapping(x, t, block_size);
224 copy_nonoverlapping(y, x, block_size);
225 copy_nonoverlapping(t, y, block_size);
230 // Swap any remaining bytes
231 let mut t: UnalignedBlock = mem::uninitialized();
234 let t = &mut t as *mut _ as *mut u8;
235 let x = x.offset(i as isize);
236 let y = y.offset(i as isize);
238 copy_nonoverlapping(x, t, rem);
239 copy_nonoverlapping(y, x, rem);
240 copy_nonoverlapping(t, y, rem);
244 /// Replaces the value at `dest` with `src`, returning the old
245 /// value, without dropping either.
249 /// This is only unsafe because it accepts a raw pointer.
250 /// Otherwise, this operation is identical to `mem::replace`.
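///
/// # Examples
///
/// A small sketch of swapping in a new value while recovering the old one:
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `replace` returns the old value without dropping it.
/// let b = unsafe { ptr::replace(&mut rust[0], 'r') };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, ['r', 'u', 's', 't']);
/// ```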
252 #[stable(feature = "rust1", since = "1.0.0")]
253 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
254 mem::swap(&mut *dest, &mut src); // cannot overlap
258 /// Reads the value from `src` without moving it. This leaves the
259 /// memory in `src` unchanged.
263 /// Beyond accepting a raw pointer, this is unsafe because it semantically
264 /// moves the value out of `src` without preventing further usage of `src`.
265 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
266 /// `src` is not used before the data is overwritten again (e.g. with `write`,
267 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
268 /// because it will attempt to drop the value previously at `*src`.
270 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
278 /// let y = &x as *const i32;
281 /// assert_eq!(std::ptr::read(y), 12);
285 #[stable(feature = "rust1", since = "1.0.0")]
286 pub unsafe fn read<T>(src: *const T) -> T {
287 let mut tmp: T = mem::uninitialized();
288 copy_nonoverlapping(src, &mut tmp, 1);
292 /// Reads the value from `src` without moving it. This leaves the
293 /// memory in `src` unchanged.
295 /// Unlike `read`, the pointer may be unaligned.
299 /// Beyond accepting a raw pointer, this is unsafe because it semantically
300 /// moves the value out of `src` without preventing further usage of `src`.
301 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
302 /// `src` is not used before the data is overwritten again (e.g. with `write`,
303 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
304 /// because it will attempt to drop the value previously at `*src`.
312 /// let y = &x as *const i32;
315 /// assert_eq!(std::ptr::read_unaligned(y), 12);
319 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
320 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
321 let mut tmp: T = mem::uninitialized();
322 copy_nonoverlapping(src as *const u8,
323 &mut tmp as *mut T as *mut u8,
324 mem::size_of::<T>());
328 /// Overwrites a memory location with the given value without reading or
329 /// dropping the old value.
333 /// This operation is marked unsafe because it accepts a raw pointer.
335 /// It does not drop the contents of `dst`. This is safe, but it could leak
336 /// allocations or resources, so care must be taken not to overwrite an object
337 /// that should be dropped.
339 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
340 /// location pointed to by `dst`.
342 /// This is appropriate for initializing uninitialized memory, or overwriting
343 /// memory that has previously been `read` from.
345 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
353 /// let y = &mut x as *mut i32;
357 /// std::ptr::write(y, z);
358 /// assert_eq!(std::ptr::read(y), 12);
362 #[stable(feature = "rust1", since = "1.0.0")]
363 pub unsafe fn write<T>(dst: *mut T, src: T) {
364 intrinsics::move_val_init(&mut *dst, src)
367 /// Overwrites a memory location with the given value without reading or
368 /// dropping the old value.
370 /// Unlike `write`, the pointer may be unaligned.
374 /// This operation is marked unsafe because it accepts a raw pointer.
376 /// It does not drop the contents of `dst`. This is safe, but it could leak
377 /// allocations or resources, so care must be taken not to overwrite an object
378 /// that should be dropped.
380 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
381 /// location pointed to by `dst`.
383 /// This is appropriate for initializing uninitialized memory, or overwriting
384 /// memory that has previously been `read` from.
392 /// let y = &mut x as *mut i32;
396 /// std::ptr::write_unaligned(y, z);
397 /// assert_eq!(std::ptr::read_unaligned(y), 12);
401 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
402 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
403 copy_nonoverlapping(&src as *const T as *const u8,
405 mem::size_of::<T>());
409 /// Performs a volatile read of the value from `src` without moving it. This
410 /// leaves the memory in `src` unchanged.
412 /// Volatile operations are intended to act on I/O memory, and are guaranteed
413 /// to not be elided or reordered by the compiler across other volatile operations.
418 /// Rust does not currently have a rigorously and formally defined memory model,
419 /// so the precise semantics of what "volatile" means here is subject to change
420 /// over time. That being said, the semantics will almost always end up pretty
421 /// similar to [C11's definition of volatile][c11].
423 /// The compiler shouldn't change the relative order or number of volatile
424 /// memory operations. However, volatile memory operations on zero-sized types
425 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
426 /// and may be ignored.
428 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
432 /// Beyond accepting a raw pointer, this is unsafe because it semantically
433 /// moves the value out of `src` without preventing further usage of `src`.
434 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
435 /// `src` is not used before the data is overwritten again (e.g. with `write`,
436 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
437 /// because it will attempt to drop the value previously at `*src`.
445 /// let y = &x as *const i32;
448 /// assert_eq!(std::ptr::read_volatile(y), 12);
452 #[stable(feature = "volatile", since = "1.9.0")]
453 pub unsafe fn read_volatile<T>(src: *const T) -> T {
454 intrinsics::volatile_load(src)
457 /// Performs a volatile write of a memory location with the given value without
458 /// reading or dropping the old value.
460 /// Volatile operations are intended to act on I/O memory, and are guaranteed
461 /// to not be elided or reordered by the compiler across other volatile operations.
466 /// Rust does not currently have a rigorously and formally defined memory model,
467 /// so the precise semantics of what "volatile" means here is subject to change
468 /// over time. That being said, the semantics will almost always end up pretty
469 /// similar to [C11's definition of volatile][c11].
471 /// The compiler shouldn't change the relative order or number of volatile
472 /// memory operations. However, volatile memory operations on zero-sized types
473 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
474 /// and may be ignored.
476 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
480 /// This operation is marked unsafe because it accepts a raw pointer.
482 /// It does not drop the contents of `dst`. This is safe, but it could leak
483 /// allocations or resources, so care must be taken not to overwrite an object
484 /// that should be dropped.
486 /// This is appropriate for initializing uninitialized memory, or overwriting
487 /// memory that has previously been `read` from.
495 /// let y = &mut x as *mut i32;
499 /// std::ptr::write_volatile(y, z);
500 /// assert_eq!(std::ptr::read_volatile(y), 12);
504 #[stable(feature = "volatile", since = "1.9.0")]
505 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
506 intrinsics::volatile_store(dst, src);
509 #[lang = "const_ptr"]
510 impl<T: ?Sized> *const T {
511 /// Returns `true` if the pointer is null.
513 /// Note that unsized types have many possible null pointers, as only the
514 /// raw data pointer is considered, not their length, vtable, etc.
515 /// Therefore, two pointers that are null may still not compare equal to each other.
523 /// let s: &str = "Follow the rabbit";
524 /// let ptr: *const u8 = s.as_ptr();
525 /// assert!(!ptr.is_null());
527 #[stable(feature = "rust1", since = "1.0.0")]
529 pub fn is_null(self) -> bool {
530 // Compare via a cast to a thin pointer, so fat pointers are only
531 // considering their "data" part for null-ness.
532 (self as *const u8) == null()
535 /// Returns `None` if the pointer is null, or else returns a reference to
536 /// the value wrapped in `Some`.
540 /// While this method and its mutable counterpart are useful for
541 /// null-safety, it is important to note that this is still an unsafe
542 /// operation because the returned value could be pointing to invalid memory.
545 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
546 /// not necessarily reflect the actual lifetime of the data.
553 /// let ptr: *const u8 = &10u8 as *const u8;
556 /// if let Some(val_back) = ptr.as_ref() {
557 /// println!("We got back the value: {}!", val_back);
561 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
563 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
571 /// Calculates the offset from a pointer.
573 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
574 /// offset of `3 * size_of::<T>()` bytes.
578 /// If any of the following conditions are violated, the result is Undefined
581 /// * Both the starting and resulting pointer must be either in bounds or one
582 /// byte past the end of an allocated object.
584 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
586 /// * The offset being in bounds cannot rely on "wrapping around" the address
587 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
589 /// The compiler and standard library generally try to ensure allocations
590 /// never reach a size where an offset is a concern. For instance, `Vec`
591 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
592 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
594 /// Most platforms fundamentally can't even construct such an allocation.
595 /// For instance, no known 64-bit platform can ever serve a request
596 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
597 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
598 /// more than `isize::MAX` bytes with things like Physical Address
599 /// Extension. As such, memory acquired directly from allocators or memory
600 /// mapped files *may* be too large to handle with this function.
602 /// Consider using `wrapping_offset` instead if these constraints are
603 /// difficult to satisfy. The only advantage of this method is that it
604 /// enables more aggressive compiler optimizations.
611 /// let s: &str = "123";
612 /// let ptr: *const u8 = s.as_ptr();
615 /// println!("{}", *ptr.offset(1) as char);
616 /// println!("{}", *ptr.offset(2) as char);
619 #[stable(feature = "rust1", since = "1.0.0")]
621 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
622 intrinsics::offset(self, count)
625 /// Calculates the offset from a pointer using wrapping arithmetic.
627 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
628 /// offset of `3 * size_of::<T>()` bytes.
632 /// The resulting pointer does not need to be in bounds, but it is
633 /// potentially hazardous to dereference (which requires `unsafe`).
635 /// Always use `.offset(count)` instead when possible, because `offset`
636 /// allows the compiler to optimize better.
643 /// // Iterate using a raw pointer in increments of two elements
644 /// let data = [1u8, 2, 3, 4, 5];
645 /// let mut ptr: *const u8 = data.as_ptr();
647 /// let end_rounded_up = ptr.wrapping_offset(6);
649 /// // This loop prints "1, 3, 5, "
650 /// while ptr != end_rounded_up {
652 /// print!("{}, ", *ptr);
654 /// ptr = ptr.wrapping_offset(step);
657 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
659 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
661 intrinsics::arith_offset(self, count)
665 /// Calculates the distance between two pointers. The returned value is in
666 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
668 /// If the address difference between the two pointers is not a multiple of
669 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
672 /// This function returns `None` if `T` is a zero-sized type.
679 /// #![feature(offset_to)]
683 /// let ptr1: *const i32 = &a[1];
684 /// let ptr2: *const i32 = &a[3];
685 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
686 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
687 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
688 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
691 #[unstable(feature = "offset_to", issue = "41079")]
693 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
694 let size = mem::size_of::<T>();
698 let diff = (other as isize).wrapping_sub(self as isize);
699 Some(diff / size as isize)
703 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
705 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
706 /// offset of `3 * size_of::<T>()` bytes.
710 /// If any of the following conditions are violated, the result is Undefined
713 /// * Both the starting and resulting pointer must be either in bounds or one
714 /// byte past the end of an allocated object.
716 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
718 /// * The offset being in bounds cannot rely on "wrapping around" the address
719 /// space. That is, the infinite-precision sum must fit in a `usize`.
721 /// The compiler and standard library generally try to ensure allocations
722 /// never reach a size where an offset is a concern. For instance, `Vec`
723 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
724 /// `vec.as_ptr().add(vec.len())` is always safe.
726 /// Most platforms fundamentally can't even construct such an allocation.
727 /// For instance, no known 64-bit platform can ever serve a request
728 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
729 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
730 /// more than `isize::MAX` bytes with things like Physical Address
731 /// Extension. As such, memory acquired directly from allocators or memory
732 /// mapped files *may* be too large to handle with this function.
734 /// Consider using `wrapping_offset` instead if these constraints are
735 /// difficult to satisfy. The only advantage of this method is that it
736 /// enables more aggressive compiler optimizations.
743 /// let s: &str = "123";
744 /// let ptr: *const u8 = s.as_ptr();
747 /// println!("{}", *ptr.add(1) as char);
748 /// println!("{}", *ptr.add(2) as char);
751 #[stable(feature = "pointer_methods", since = "1.26.0")]
753 pub unsafe fn add(self, count: usize) -> Self
756 self.offset(count as isize)
759 /// Calculates the offset from a pointer (convenience for
760 /// `.offset((count as isize).wrapping_neg())`).
762 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
763 /// offset of `3 * size_of::<T>()` bytes.
767 /// If any of the following conditions are violated, the result is Undefined
770 /// * Both the starting and resulting pointer must be either in bounds or one
771 /// byte past the end of an allocated object.
773 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
775 /// * The offset being in bounds cannot rely on "wrapping around" the address
776 /// space. That is, the infinite-precision sum must fit in a usize.
778 /// The compiler and standard library generally try to ensure allocations
779 /// never reach a size where an offset is a concern. For instance, `Vec`
780 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
781 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
783 /// Most platforms fundamentally can't even construct such an allocation.
784 /// For instance, no known 64-bit platform can ever serve a request
785 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
786 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
787 /// more than `isize::MAX` bytes with things like Physical Address
788 /// Extension. As such, memory acquired directly from allocators or memory
789 /// mapped files *may* be too large to handle with this function.
791 /// Consider using `wrapping_offset` instead if these constraints are
792 /// difficult to satisfy. The only advantage of this method is that it
793 /// enables more aggressive compiler optimizations.
800 /// let s: &str = "123";
803 /// let end: *const u8 = s.as_ptr().add(3);
804 /// println!("{}", *end.sub(1) as char);
805 /// println!("{}", *end.sub(2) as char);
808 #[stable(feature = "pointer_methods", since = "1.26.0")]
810 pub unsafe fn sub(self, count: usize) -> Self
813 self.offset((count as isize).wrapping_neg())
816 /// Calculates the offset from a pointer using wrapping arithmetic.
817 /// (convenience for `.wrapping_offset(count as isize)`)
819 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
820 /// offset of `3 * size_of::<T>()` bytes.
824 /// The resulting pointer does not need to be in bounds, but it is
825 /// potentially hazardous to dereference (which requires `unsafe`).
827 /// Always use `.add(count)` instead when possible, because `add`
828 /// allows the compiler to optimize better.
835 /// // Iterate using a raw pointer in increments of two elements
836 /// let data = [1u8, 2, 3, 4, 5];
837 /// let mut ptr: *const u8 = data.as_ptr();
839 /// let end_rounded_up = ptr.wrapping_add(6);
841 /// // This loop prints "1, 3, 5, "
842 /// while ptr != end_rounded_up {
844 /// print!("{}, ", *ptr);
846 /// ptr = ptr.wrapping_add(step);
849 #[stable(feature = "pointer_methods", since = "1.26.0")]
851 pub fn wrapping_add(self, count: usize) -> Self
854 self.wrapping_offset(count as isize)
857 /// Calculates the offset from a pointer using wrapping arithmetic.
858 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
860 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
861 /// offset of `3 * size_of::<T>()` bytes.
865 /// The resulting pointer does not need to be in bounds, but it is
866 /// potentially hazardous to dereference (which requires `unsafe`).
868 /// Always use `.sub(count)` instead when possible, because `sub`
869 /// allows the compiler to optimize better.
876 /// // Iterate using a raw pointer in increments of two elements (backwards)
877 /// let data = [1u8, 2, 3, 4, 5];
878 /// let mut ptr: *const u8 = data.as_ptr();
879 /// let start_rounded_down = ptr.wrapping_sub(2);
880 /// ptr = ptr.wrapping_add(4);
882 /// // This loop prints "5, 3, 1, "
883 /// while ptr != start_rounded_down {
885 /// print!("{}, ", *ptr);
887 /// ptr = ptr.wrapping_sub(step);
890 #[stable(feature = "pointer_methods", since = "1.26.0")]
892 pub fn wrapping_sub(self, count: usize) -> Self
895 self.wrapping_offset((count as isize).wrapping_neg())
898 /// Reads the value from `self` without moving it. This leaves the
899 /// memory in `self` unchanged.
903 /// Beyond accepting a raw pointer, this is unsafe because it semantically
904 /// moves the value out of `self` without preventing further usage of `self`.
905 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
906 /// `self` is not used before the data is overwritten again (e.g. with `write`,
907 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
908 /// because it will attempt to drop the value previously at `*self`.
910 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
918 /// let y = &x as *const i32;
921 /// assert_eq!(y.read(), 12);
924 #[stable(feature = "pointer_methods", since = "1.26.0")]
926 pub unsafe fn read(self) -> T
932 /// Performs a volatile read of the value from `self` without moving it. This
933 /// leaves the memory in `self` unchanged.
935 /// Volatile operations are intended to act on I/O memory, and are guaranteed
936 /// to not be elided or reordered by the compiler across other volatile operations.
941 /// Rust does not currently have a rigorously and formally defined memory model,
942 /// so the precise semantics of what "volatile" means here is subject to change
943 /// over time. That being said, the semantics will almost always end up pretty
944 /// similar to [C11's definition of volatile][c11].
946 /// The compiler shouldn't change the relative order or number of volatile
947 /// memory operations. However, volatile memory operations on zero-sized types
948 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
949 /// and may be ignored.
951 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
955 /// Beyond accepting a raw pointer, this is unsafe because it semantically
956 /// moves the value out of `self` without preventing further usage of `self`.
957 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
958 /// `self` is not used before the data is overwritten again (e.g. with `write`,
959 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
960 /// because it will attempt to drop the value previously at `*self`.
968 /// let y = &x as *const i32;
971 /// assert_eq!(y.read_volatile(), 12);
974 #[stable(feature = "pointer_methods", since = "1.26.0")]
976 pub unsafe fn read_volatile(self) -> T
982 /// Reads the value from `self` without moving it. This leaves the
983 /// memory in `self` unchanged.
985 /// Unlike `read`, the pointer may be unaligned.
989 /// Beyond accepting a raw pointer, this is unsafe because it semantically
990 /// moves the value out of `self` without preventing further usage of `self`.
991 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
992 /// `self` is not used before the data is overwritten again (e.g. with `write`,
993 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
994 /// because it will attempt to drop the value previously at `*self`.
1002 /// let y = &x as *const i32;
1005 /// assert_eq!(y.read_unaligned(), 12);
1008 #[stable(feature = "pointer_methods", since = "1.26.0")]
1010 pub unsafe fn read_unaligned(self) -> T
1013 read_unaligned(self)
1016 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1017 /// and destination may overlap.
1019 /// NOTE: this has the *same* argument order as `ptr::copy`.
1021 /// This is semantically equivalent to C's `memmove`.
1025 /// Care must be taken with the ownership of `self` and `dest`.
1026 /// This method semantically moves the values of `self` into `dest`.
1027 /// However it does not drop the contents of `dest`, or prevent the contents
1028 /// of `self` from being dropped or used.
1032 /// Efficiently create a Rust vector from an unsafe buffer:
1035 /// # #[allow(dead_code)]
1036 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1037 /// let mut dst = Vec::with_capacity(elts);
1038 /// dst.set_len(elts);
1039 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1043 #[stable(feature = "pointer_methods", since = "1.26.0")]
1045 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1048 copy(self, dest, count)
1051 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1052 /// and destination may *not* overlap.
1054 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1056 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1060 /// Beyond requiring that the program must be allowed to access both regions
1061 /// of memory, it is Undefined Behavior for source and destination to
1062 /// overlap. Care must also be taken with the ownership of `self` and
1063 /// `dest`. This method semantically moves the values of `self` into `dest`.
1064 /// However it does not drop the contents of `dest`, or prevent the contents
1065 /// of `self` from being dropped or used.
1069 /// Efficiently create a Rust vector from an unsafe buffer:
1072 /// # #[allow(dead_code)]
1073 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1074 /// let mut dst = Vec::with_capacity(elts);
1075 /// dst.set_len(elts);
1076 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1080 #[stable(feature = "pointer_methods", since = "1.26.0")]
1082 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1085 copy_nonoverlapping(self, dest, count)
1088 /// Computes the byte offset that needs to be applied in order to
1089 /// make the pointer aligned to `align`.
1090 /// If it is not possible to align the pointer, the implementation returns
1091 /// `usize::max_value()`.
1093 /// There are no guarantees whatsoever that offsetting the pointer will not
1094 /// overflow or go beyond the allocation that the pointer points into.
1095 /// It is up to the caller to ensure that the returned offset is correct
1096 /// in all terms other than alignment.
1100 /// Accessing adjacent `u8` as `u16`
1103 /// # #![feature(align_offset)]
1104 /// # fn foo(n: usize) {
1105 /// # use std::mem::align_of;
1107 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1108 /// let ptr = &x[n] as *const u8;
1109 /// let offset = ptr.align_offset(align_of::<u16>());
1110 /// if offset < x.len() - n - 1 {
1111 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1112 /// assert_ne!(*u16_ptr, 500);
1114 /// // while the pointer can be aligned via `offset`, it would point
1115 /// // outside the allocation
1119 #[unstable(feature = "align_offset", issue = "44488")]
1120 pub fn align_offset(self, align: usize) -> usize {
1122 intrinsics::align_offset(self as *const _, align)
1128 impl<T: ?Sized> *mut T {
1129 /// Returns `true` if the pointer is null.
1131 /// Note that unsized types have many possible null pointers, as only the
1132 /// raw data pointer is considered, not their length, vtable, etc.
1133 /// Therefore, two pointers that are null may still not compare equal to each other.
1141 /// let mut s = [1, 2, 3];
1142 /// let ptr: *mut u32 = s.as_mut_ptr();
1143 /// assert!(!ptr.is_null());
1145 #[stable(feature = "rust1", since = "1.0.0")]
1147 pub fn is_null(self) -> bool {
1148 // Compare via a cast to a thin pointer, so fat pointers are only
1149 // considering their "data" part for null-ness.
1150 (self as *mut u8) == null_mut()
1153 /// Returns `None` if the pointer is null, or else returns a reference to
1154 /// the value wrapped in `Some`.
1158 /// While this method and its mutable counterpart are useful for
1159 /// null-safety, it is important to note that this is still an unsafe
1160 /// operation because the returned value could be pointing to invalid memory.
1163 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1164 /// not necessarily reflect the actual lifetime of the data.
1171 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1174 /// if let Some(val_back) = ptr.as_ref() {
1175 /// println!("We got back the value: {}!", val_back);
1179 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1181 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1189 /// Calculates the offset from a pointer.
1191 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1192 /// offset of `3 * size_of::<T>()` bytes.
1196 /// If any of the following conditions are violated, the result is Undefined
1199 /// * Both the starting and resulting pointer must be either in bounds or one
1200 /// byte past the end of an allocated object.
1202 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1204 /// * The offset being in bounds cannot rely on "wrapping around" the address
1205 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
1207 /// The compiler and standard library generally try to ensure allocations
1208 /// never reach a size where an offset is a concern. For instance, `Vec`
1209 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1210 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1212 /// Most platforms fundamentally can't even construct such an allocation.
1213 /// For instance, no known 64-bit platform can ever serve a request
1214 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1215 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1216 /// more than `isize::MAX` bytes with things like Physical Address
1217 /// Extension. As such, memory acquired directly from allocators or memory
1218 /// mapped files *may* be too large to handle with this function.
1220 /// Consider using `wrapping_offset` instead if these constraints are
1221 /// difficult to satisfy. The only advantage of this method is that it
1222 /// enables more aggressive compiler optimizations.
1229 /// let mut s = [1, 2, 3];
1230 /// let ptr: *mut u32 = s.as_mut_ptr();
1233 /// println!("{}", *ptr.offset(1));
1234 /// println!("{}", *ptr.offset(2));
1237 #[stable(feature = "rust1", since = "1.0.0")]
1239 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1240 intrinsics::offset(self, count) as *mut T
1243 /// Calculates the offset from a pointer using wrapping arithmetic.
1244 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1245 /// offset of `3 * size_of::<T>()` bytes.
1249 /// The resulting pointer does not need to be in bounds, but it is
1250 /// potentially hazardous to dereference (which requires `unsafe`).
1252 /// Always use `.offset(count)` instead when possible, because `offset`
1253 /// allows the compiler to optimize better.
1260 /// // Iterate using a raw pointer in increments of two elements
1261 /// let mut data = [1u8, 2, 3, 4, 5];
1262 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1264 /// let end_rounded_up = ptr.wrapping_offset(6);
1266 /// while ptr != end_rounded_up {
1270 /// ptr = ptr.wrapping_offset(step);
1272 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1274 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1276 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1278 intrinsics::arith_offset(self, count) as *mut T
1282 /// Returns `None` if the pointer is null, or else returns a mutable
1283 /// reference to the value wrapped in `Some`.
1287 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1288 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1289 /// returned is indeed a valid lifetime for the contained data.
1296 /// let mut s = [1, 2, 3];
1297 /// let ptr: *mut u32 = s.as_mut_ptr();
1298 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1299 /// *first_value = 4;
1300 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1302 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1304 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1312 /// Calculates the distance between two pointers. The returned value is in
1313 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1315 /// If the address difference between the two pointers is not a multiple of
1316 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
1319 /// This function returns `None` if `T` is a zero-sized type.
1326 /// #![feature(offset_to)]
1329 /// let mut a = [0; 5];
1330 /// let ptr1: *mut i32 = &mut a[1];
1331 /// let ptr2: *mut i32 = &mut a[3];
1332 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1333 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1334 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1335 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1338 #[unstable(feature = "offset_to", issue = "41079")]
1340 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1341 let size = mem::size_of::<T>();
1345 let diff = (other as isize).wrapping_sub(self as isize);
1346 Some(diff / size as isize)
1350 /// Computes the byte offset that needs to be applied in order to
1351 /// make the pointer aligned to `align`.
1352 /// If it is not possible to align the pointer, the implementation returns
1353 /// `usize::max_value()`.
1355 /// There are no guarantees whatsoever that offsetting the pointer will not
1356 /// overflow or go beyond the allocation that the pointer points into.
1357 /// It is up to the caller to ensure that the returned offset is correct
1358 /// in all terms other than alignment.
1362 /// Accessing adjacent `u8` as `u16`
1365 /// # #![feature(align_offset)]
1366 /// # fn foo(n: usize) {
1367 /// # use std::mem::align_of;
1369 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1370 /// let ptr = &x[n] as *const u8;
1371 /// let offset = ptr.align_offset(align_of::<u16>());
1372 /// if offset < x.len() - n - 1 {
1373 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1374 /// assert_ne!(*u16_ptr, 500);
1376 /// // while the pointer can be aligned via `offset`, it would point
1377 /// // outside the allocation
1381 #[unstable(feature = "align_offset", issue = "44488")]
1382 pub fn align_offset(self, align: usize) -> usize {
1384 intrinsics::align_offset(self as *const _, align)
1388 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1390 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1391 /// offset of `3 * size_of::<T>()` bytes.
1395 /// If any of the following conditions are violated, the result is Undefined
1398 /// * Both the starting and resulting pointer must be either in bounds or one
1399 /// byte past the end of an allocated object.
1401 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1403 /// * The offset being in bounds cannot rely on "wrapping around" the address
1404 /// space. That is, the infinite-precision sum must fit in a `usize`.
1406 /// The compiler and standard library generally try to ensure allocations
1407 /// never reach a size where an offset is a concern. For instance, `Vec`
1408 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1409 /// `vec.as_ptr().add(vec.len())` is always safe.
1411 /// Most platforms fundamentally can't even construct such an allocation.
1412 /// For instance, no known 64-bit platform can ever serve a request
1413 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1414 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1415 /// more than `isize::MAX` bytes with things like Physical Address
1416 /// Extension. As such, memory acquired directly from allocators or memory
1417 /// mapped files *may* be too large to handle with this function.
1419 /// Consider using `wrapping_offset` instead if these constraints are
1420 /// difficult to satisfy. The only advantage of this method is that it
1421 /// enables more aggressive compiler optimizations.
1428 /// let s: &str = "123";
1429 /// let ptr: *const u8 = s.as_ptr();
1432 /// println!("{}", *ptr.add(1) as char);
1433 /// println!("{}", *ptr.add(2) as char);
1436 #[stable(feature = "pointer_methods", since = "1.26.0")]
1438 pub unsafe fn add(self, count: usize) -> Self
1441 self.offset(count as isize)
1444 /// Calculates the offset from a pointer (convenience for
1445 /// `.offset((count as isize).wrapping_neg())`).
1447 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1448 /// offset of `3 * size_of::<T>()` bytes.
1452 /// If any of the following conditions are violated, the result is Undefined
1455 /// * Both the starting and resulting pointer must be either in bounds or one
1456 /// byte past the end of an allocated object.
1458 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1460 /// * The offset being in bounds cannot rely on "wrapping around" the address
1461 /// space. That is, the infinite-precision sum must fit in a usize.
1463 /// The compiler and standard library generally try to ensure allocations
1464 /// never reach a size where an offset is a concern. For instance, `Vec`
1465 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1466 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1468 /// Most platforms fundamentally can't even construct such an allocation.
1469 /// For instance, no known 64-bit platform can ever serve a request
1470 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1471 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1472 /// more than `isize::MAX` bytes with things like Physical Address
1473 /// Extension. As such, memory acquired directly from allocators or memory
1474 /// mapped files *may* be too large to handle with this function.
1476 /// Consider using `wrapping_offset` instead if these constraints are
1477 /// difficult to satisfy. The only advantage of this method is that it
1478 /// enables more aggressive compiler optimizations.
1485 /// let s: &str = "123";
1488 /// let end: *const u8 = s.as_ptr().add(3);
1489 /// println!("{}", *end.sub(1) as char);
1490 /// println!("{}", *end.sub(2) as char);
1493 #[stable(feature = "pointer_methods", since = "1.26.0")]
1495 pub unsafe fn sub(self, count: usize) -> Self
1498 self.offset((count as isize).wrapping_neg())
1501 /// Calculates the offset from a pointer using wrapping arithmetic.
1502 /// (convenience for `.wrapping_offset(count as isize)`)
1504 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1505 /// offset of `3 * size_of::<T>()` bytes.
1509 /// The resulting pointer does not need to be in bounds, but it is
1510 /// potentially hazardous to dereference (which requires `unsafe`).
1512 /// Always use `.add(count)` instead when possible, because `add`
1513 /// allows the compiler to optimize better.
1520 /// // Iterate using a raw pointer in increments of two elements
1521 /// let data = [1u8, 2, 3, 4, 5];
1522 /// let mut ptr: *const u8 = data.as_ptr();
1524 /// let end_rounded_up = ptr.wrapping_add(6);
1526 /// // This loop prints "1, 3, 5, "
1527 /// while ptr != end_rounded_up {
1529 /// print!("{}, ", *ptr);
1531 /// ptr = ptr.wrapping_add(step);
1534 #[stable(feature = "pointer_methods", since = "1.26.0")]
1536 pub fn wrapping_add(self, count: usize) -> Self
1539 self.wrapping_offset(count as isize)
1542 /// Calculates the offset from a pointer using wrapping arithmetic.
1543 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1545 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1546 /// offset of `3 * size_of::<T>()` bytes.
1550 /// The resulting pointer does not need to be in bounds, but it is
1551 /// potentially hazardous to dereference (which requires `unsafe`).
1553 /// Always use `.sub(count)` instead when possible, because `sub`
1554 /// allows the compiler to optimize better.
1561 /// // Iterate using a raw pointer in increments of two elements (backwards)
1562 /// let data = [1u8, 2, 3, 4, 5];
1563 /// let mut ptr: *const u8 = data.as_ptr();
1564 /// let start_rounded_down = ptr.wrapping_sub(2);
1565 /// ptr = ptr.wrapping_add(4);
1567 /// // This loop prints "5, 3, 1, "
1568 /// while ptr != start_rounded_down {
1570 /// print!("{}, ", *ptr);
1572 /// ptr = ptr.wrapping_sub(step);
1575 #[stable(feature = "pointer_methods", since = "1.26.0")]
1577 pub fn wrapping_sub(self, count: usize) -> Self
1580 self.wrapping_offset((count as isize).wrapping_neg())
1583 /// Reads the value from `self` without moving it. This leaves the
1584 /// memory in `self` unchanged.
1588 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1589 /// moves the value out of `self` without preventing further usage of `self`.
1590 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1591 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1592 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1593 /// because it will attempt to drop the value previously at `*self`.
1595 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1603 /// let y = &x as *const i32;
1606 /// assert_eq!(y.read(), 12);
1609 #[stable(feature = "pointer_methods", since = "1.26.0")]
1611 pub unsafe fn read(self) -> T
1617 /// Performs a volatile read of the value from `self` without moving it. This
1618 /// leaves the memory in `self` unchanged.
1620 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1621 /// to not be elided or reordered by the compiler across other volatile operations.
1626 /// Rust does not currently have a rigorously and formally defined memory model,
1627 /// so the precise semantics of what "volatile" means here is subject to change
1628 /// over time. That being said, the semantics will almost always end up pretty
1629 /// similar to [C11's definition of volatile][c11].
1631 /// The compiler shouldn't change the relative order or number of volatile
1632 /// memory operations. However, volatile memory operations on zero-sized types
1633 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1634 /// and may be ignored.
1636 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1640 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1641 /// moves the value out of `self` without preventing further usage of `self`.
1642 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1643 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1644 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1645 /// because it will attempt to drop the value previously at `*self`.
1653 /// let y = &x as *const i32;
1656 /// assert_eq!(y.read_volatile(), 12);
1659 #[stable(feature = "pointer_methods", since = "1.26.0")]
1661 pub unsafe fn read_volatile(self) -> T
1667 /// Reads the value from `self` without moving it. This leaves the
1668 /// memory in `self` unchanged.
1670 /// Unlike `read`, the pointer may be unaligned.
1674 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1675 /// moves the value out of `self` without preventing further usage of `self`.
1676 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1677 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1678 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1679 /// because it will attempt to drop the value previously at `*self`.
1687 /// let y = &x as *const i32;
1690 /// assert_eq!(y.read_unaligned(), 12);
1693 #[stable(feature = "pointer_methods", since = "1.26.0")]
1695 pub unsafe fn read_unaligned(self) -> T
1698 read_unaligned(self)
1701 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1702 /// and destination may overlap.
1704 /// NOTE: this has the *same* argument order as `ptr::copy`.
1706 /// This is semantically equivalent to C's `memmove`.
1710 /// Care must be taken with the ownership of `self` and `dest`.
1711 /// This method semantically moves the values of `self` into `dest`.
1712 /// However it does not drop the contents of `dest`, or prevent the contents
1713 /// of `self` from being dropped or used.
1717 /// Efficiently create a Rust vector from an unsafe buffer:
1720 /// # #[allow(dead_code)]
1721 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1722 /// let mut dst = Vec::with_capacity(elts);
1723 /// dst.set_len(elts);
1724 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1728 #[stable(feature = "pointer_methods", since = "1.26.0")]
1730 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1733 copy(self, dest, count)
1736 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1737 /// and destination may *not* overlap.
1739 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1741 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1745 /// Beyond requiring that the program must be allowed to access both regions
1746 /// of memory, it is Undefined Behavior for source and destination to
1747 /// overlap. Care must also be taken with the ownership of `self` and
1748 /// `dest`. This method semantically moves the values of `self` into `dest`.
1749 /// However it does not drop the contents of `dest`, or prevent the contents
1750 /// of `self` from being dropped or used.
1754 /// Efficiently create a Rust vector from an unsafe buffer:
1757 /// # #[allow(dead_code)]
1758 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1759 /// let mut dst = Vec::with_capacity(elts);
1760 /// dst.set_len(elts);
1761 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1765 #[stable(feature = "pointer_methods", since = "1.26.0")]
1767 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1770 copy_nonoverlapping(self, dest, count)
1773 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1774 /// and destination may overlap.
1776 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1778 /// This is semantically equivalent to C's `memmove`.
1782 /// Care must be taken with the ownership of `src` and `self`.
1783 /// This method semantically moves the values of `src` into `self`.
1784 /// However it does not drop the contents of `self`, or prevent the contents
1785 /// of `src` from being dropped or used.
1789 /// Efficiently create a Rust vector from an unsafe buffer:
1792 /// # #[allow(dead_code)]
1793 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1794 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
1795 /// dst.set_len(elts);
1796 /// dst.as_mut_ptr().copy_from(ptr, elts);
1800 #[stable(feature = "pointer_methods", since = "1.26.0")]
1802 pub unsafe fn copy_from(self, src: *const T, count: usize)
1805 copy(src, self, count)
1808 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1809 /// and destination may *not* overlap.
1811 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
1813 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1817 /// Beyond requiring that the program must be allowed to access both regions
1818 /// of memory, it is Undefined Behavior for source and destination to
1819 /// overlap. Care must also be taken with the ownership of `src` and
1820 /// `self`. This method semantically moves the values of `src` into `self`.
1821 /// However it does not drop the contents of `self`, or prevent the contents
1822 /// of `src` from being dropped or used.
1826 /// Efficiently create a Rust vector from an unsafe buffer:
1829 /// # #[allow(dead_code)]
1830 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1831 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
1832 /// dst.set_len(elts);
1833 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
1837 #[stable(feature = "pointer_methods", since = "1.26.0")]
1839 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
1842 copy_nonoverlapping(src, self, count)
1845 /// Executes the destructor (if any) of the pointed-to value.
1847 /// This has two use cases:
1849 /// * It is *required* to use `drop_in_place` to drop unsized types like
1850 /// trait objects, because they can't be read out onto the stack and
1851 /// dropped normally.
1853 /// * It is friendlier to the optimizer to do this over `ptr::read` when
1854 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
1855 /// as the compiler doesn't need to prove that it's sound to elide the copy.
1860 /// This has all the same safety problems as `ptr::read` with respect to
1861 /// invalid pointers, types, and double drops.
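///
/// # Examples
///
/// A minimal sketch, using `ManuallyDrop` so the value is not dropped a
/// second time and `Rc` so the drop is observable:
///
/// ```
/// use std::mem::ManuallyDrop;
/// use std::rc::Rc;
///
/// let rc = Rc::new(5);
/// let weak = Rc::downgrade(&rc);
///
/// // Prevent the automatic drop, then run the destructor explicitly
/// // through a raw pointer.
/// let mut wrapped = ManuallyDrop::new(rc);
/// let ptr: *mut Rc<i32> = &mut *wrapped;
/// unsafe { ptr.drop_in_place(); }
///
/// assert!(weak.upgrade().is_none());
/// ```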
1862 #[stable(feature = "pointer_methods", since = "1.26.0")]
1864 pub unsafe fn drop_in_place(self) {
1868 /// Overwrites a memory location with the given value without reading or
1869 /// dropping the old value.
1873 /// This operation is marked unsafe because it writes through a raw pointer.
1875 /// It does not drop the contents of `self`. This is safe, but it could leak
1876 /// allocations or resources, so care must be taken not to overwrite an object
1877 /// that should be dropped.
1879 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
1880 /// location pointed to by `self`.
1882 /// This is appropriate for initializing uninitialized memory, or overwriting
1883 /// memory that has previously been `read` from.
1885 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
1893 /// let y = &mut x as *mut i32;
1898 /// assert_eq!(y.read(), 12);
1901 #[stable(feature = "pointer_methods", since = "1.26.0")]
1903 pub unsafe fn write(self, val: T)
1909 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
1910 /// bytes of memory starting at `self` to `val`.
1915 /// let mut vec = vec![0; 4];
1917 /// let vec_ptr = vec.as_mut_ptr();
1918 /// vec_ptr.write_bytes(b'a', 2);
1920 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
1922 #[stable(feature = "pointer_methods", since = "1.26.0")]
1924 pub unsafe fn write_bytes(self, val: u8, count: usize)
1927 write_bytes(self, val, count)
1930 /// Performs a volatile write of a memory location with the given value without
1931 /// reading or dropping the old value.
1933 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1934 /// to not be elided or reordered by the compiler across other volatile
1939 /// Rust does not currently have a rigorously and formally defined memory model,
1940 /// so the precise semantics of what "volatile" means here is subject to change
1941 /// over time. That being said, the semantics will almost always end up pretty
1942 /// similar to [C11's definition of volatile][c11].
1944 /// The compiler shouldn't change the relative order or number of volatile
1945 /// memory operations. However, volatile memory operations on zero-sized types
1946 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
1947 /// and may be ignored.
1949 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1953 /// This operation is marked unsafe because it writes through a raw pointer.
1955 /// It does not drop the contents of `self`. This is safe, but it could leak
1956 /// allocations or resources, so care must be taken not to overwrite an object
1957 /// that should be dropped.
1959 /// This is appropriate for initializing uninitialized memory, or overwriting
1960 /// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_volatile(z);
///     assert_eq!(y.read_volatile(), 12);
/// }
/// ```
1976 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write_volatile(self, val: T)
    where T: Sized
{
    write_volatile(self, val)
}
1984 /// Overwrites a memory location with the given value without reading or
1985 /// dropping the old value.
1987 /// Unlike `write`, the pointer may be unaligned.
1991 /// This operation is marked unsafe because it writes through a raw pointer.
1993 /// It does not drop the contents of `self`. This is safe, but it could leak
1994 /// allocations or resources, so care must be taken not to overwrite an object
1995 /// that should be dropped.
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
2000 /// This is appropriate for initializing uninitialized memory, or overwriting
2001 /// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_unaligned(z);
///     assert_eq!(y.read_unaligned(), 12);
/// }
/// ```
2017 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write_unaligned(self, val: T)
    where T: Sized
{
    write_unaligned(self, val)
}
2025 /// Replaces the value at `self` with `src`, returning the old
2026 /// value, without dropping either.
2030 /// This is only unsafe because it accepts a raw pointer.
2031 /// Otherwise, this operation is identical to `mem::replace`.
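///
/// # Examples
///
/// A small illustrative sketch (not from the original docs):
///
/// ```
/// let mut x = 5;
/// let ptr: *mut i32 = &mut x;
///
/// let old = unsafe { ptr.replace(10) };
/// assert_eq!(old, 5);
/// assert_eq!(x, 10);
/// ```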
2032 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn replace(self, src: T) -> T
    where T: Sized
{
    replace(self, src)
}
2040 /// Swaps the values at two mutable locations of the same type, without
2041 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2042 /// otherwise equivalent.
///
/// # Safety
///
/// This function copies the memory through the raw pointers passed to it
/// as both `self` and `with`.
///
/// Ensure that these pointers are valid before calling `swap`.
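///
/// # Examples
///
/// An illustrative sketch (not from the original docs): swapping two locals
/// through raw pointers.
///
/// ```
/// let mut a = 1;
/// let mut b = 2;
/// let pa: *mut i32 = &mut a;
/// let pb: *mut i32 = &mut b;
///
/// unsafe { pa.swap(pb); }
/// assert_eq!((a, b), (2, 1));
/// ```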
2050 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn swap(self, with: *mut T)
    where T: Sized
{
    swap(self, with)
}
}
2059 // Equality for pointers
2060 #[stable(feature = "rust1", since = "1.0.0")]
2061 impl<T: ?Sized> PartialEq for *const T {
2063 fn eq(&self, other: &*const T) -> bool { *self == *other }
2066 #[stable(feature = "rust1", since = "1.0.0")]
2067 impl<T: ?Sized> Eq for *const T {}
2069 #[stable(feature = "rust1", since = "1.0.0")]
2070 impl<T: ?Sized> PartialEq for *mut T {
2072 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2075 #[stable(feature = "rust1", since = "1.0.0")]
2076 impl<T: ?Sized> Eq for *mut T {}
2078 /// Compare raw pointers for equality.
2080 /// This is the same as using the `==` operator, but less generic:
2081 /// the arguments have to be `*const T` raw pointers,
2082 /// not anything that implements `PartialEq`.
2084 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2085 /// by their address rather than comparing the values they point to
2086 /// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
2105 #[stable(feature = "ptr_eq", since = "1.17.0")]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}
2111 // Impls for function pointers
2112 macro_rules! fnptr_impls_safety_abi {
2113 ($FnTy: ty, $($Arg: ident),*) => {
2114 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2115 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2117 fn eq(&self, other: &Self) -> bool {
2118 *self as usize == *other as usize
2122 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2123 impl<Ret, $($Arg),*> Eq for $FnTy {}
2125 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2126 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2128 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2129 (*self as usize).partial_cmp(&(*other as usize))
2133 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2134 impl<Ret, $($Arg),*> Ord for $FnTy {
2136 fn cmp(&self, other: &Self) -> Ordering {
2137 (*self as usize).cmp(&(*other as usize))
2141 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2142 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2143 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2144 state.write_usize(*self as usize)
2148 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2149 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2150 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2151 fmt::Pointer::fmt(&(*self as *const ()), f)
2155 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2156 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2157 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2158 fmt::Pointer::fmt(&(*self as *const ()), f)
2164 macro_rules! fnptr_impls_args {
2165 ($($Arg: ident),+) => {
2166 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2167 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2168 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2169 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2170 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2171 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2174 // No variadic functions with 0 parameters
2175 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2176 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2177 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2178 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2182 fnptr_impls_args! { }
2183 fnptr_impls_args! { A }
2184 fnptr_impls_args! { A, B }
2185 fnptr_impls_args! { A, B, C }
2186 fnptr_impls_args! { A, B, C, D }
2187 fnptr_impls_args! { A, B, C, D, E }
2188 fnptr_impls_args! { A, B, C, D, E, F }
2189 fnptr_impls_args! { A, B, C, D, E, F, G }
2190 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2191 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2192 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2193 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2194 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2196 // Comparison for pointers
2197 #[stable(feature = "rust1", since = "1.0.0")]
2198 impl<T: ?Sized> Ord for *const T {
fn cmp(&self, other: &*const T) -> Ordering {
    if self < other {
        Less
    } else if self == other {
        Equal
    } else {
        Greater
    }
}
}
2211 #[stable(feature = "rust1", since = "1.0.0")]
2212 impl<T: ?Sized> PartialOrd for *const T {
2214 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2215 Some(self.cmp(other))
2219 fn lt(&self, other: &*const T) -> bool { *self < *other }
2222 fn le(&self, other: &*const T) -> bool { *self <= *other }
2225 fn gt(&self, other: &*const T) -> bool { *self > *other }
2228 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2231 #[stable(feature = "rust1", since = "1.0.0")]
2232 impl<T: ?Sized> Ord for *mut T {
fn cmp(&self, other: &*mut T) -> Ordering {
    if self < other {
        Less
    } else if self == other {
        Equal
    } else {
        Greater
    }
}
}
2245 #[stable(feature = "rust1", since = "1.0.0")]
2246 impl<T: ?Sized> PartialOrd for *mut T {
2248 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2249 Some(self.cmp(other))
2253 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2256 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2259 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2262 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2265 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2266 /// of this wrapper owns the referent. Useful for building abstractions like
2267 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2269 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2270 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2271 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2272 /// the referent of the pointer should not be modified without a unique path to
2273 /// its owning Unique.
2275 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2276 /// consider using `NonNull`, which has weaker semantics.
2278 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2279 /// is never dereferenced. This is so that enums may use this forbidden value
2280 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2281 /// However the pointer may still dangle if it isn't dereferenced.
2283 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2284 /// for any type which upholds Unique's aliasing requirements.
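///
/// # Examples
///
/// A minimal illustrative sketch (not from the original docs; `Unique` is
/// unstable and requires the `ptr_internals` feature):
///
/// ```ignore
/// #![feature(ptr_internals)]
/// use core::ptr::Unique;
///
/// let mut x = 5i32;
/// let ptr = Unique::new(&mut x as *mut i32).expect("pointer is non-null");
/// unsafe { assert_eq!(*ptr.as_ref(), 5); }
/// ```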
2285 #[unstable(feature = "ptr_internals", issue = "0",
2286 reason = "use NonNull instead and consider PhantomData<T> \
2287 (if you also use #[may_dangle]), Send, and/or Sync")]
2288 #[allow(deprecated)]
2289 pub struct Unique<T: ?Sized> {
2290 pointer: NonZero<*const T>,
2291 // NOTE: this marker has no consequences for variance, but is necessary
2292 // for dropck to understand that we logically own a `T`.
2294 // For details, see:
2295 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2296 _marker: PhantomData<T>,
2299 #[unstable(feature = "ptr_internals", issue = "0")]
2300 impl<T: ?Sized> fmt::Debug for Unique<T> {
2301 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2302 fmt::Pointer::fmt(&self.as_ptr(), f)
2306 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2307 /// reference is unaliased. Note that this aliasing invariant is
2308 /// unenforced by the type system; the abstraction using the
2309 /// `Unique` must enforce it.
2310 #[unstable(feature = "ptr_internals", issue = "0")]
2311 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2313 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2314 /// reference is unaliased. Note that this aliasing invariant is
2315 /// unenforced by the type system; the abstraction using the
2316 /// `Unique` must enforce it.
2317 #[unstable(feature = "ptr_internals", issue = "0")]
2318 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2320 #[unstable(feature = "ptr_internals", issue = "0")]
2321 impl<T: Sized> Unique<T> {
2322 /// Creates a new `Unique` that is dangling, but well-aligned.
2324 /// This is useful for initializing types which lazily allocate, like
2325 /// `Vec::new` does.
2326 // FIXME: rename to dangling() to match NonNull?
pub fn empty() -> Self {
    unsafe {
        let ptr = mem::align_of::<T>() as *mut T;
        Unique::new_unchecked(ptr)
    }
}
}
2335 #[unstable(feature = "ptr_internals", issue = "0")]
2336 #[allow(deprecated)]
2337 impl<T: ?Sized> Unique<T> {
2338 /// Creates a new `Unique`.
2342 /// `ptr` must be non-null.
2343 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2344 Unique { pointer: NonZero(ptr as _), _marker: PhantomData }
2347 /// Creates a new `Unique` if `ptr` is non-null.
pub fn new(ptr: *mut T) -> Option<Self> {
    if !ptr.is_null() {
        Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData })
    } else {
        None
    }
}
2356 /// Acquires the underlying `*mut` pointer.
2357 pub fn as_ptr(self) -> *mut T {
2358 self.pointer.0 as *mut T
2361 /// Dereferences the content.
2363 /// The resulting lifetime is bound to self so this behaves "as if"
2364 /// it were actually an instance of T that is getting borrowed. If a longer
2365 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
pub unsafe fn as_ref(&self) -> &T {
    &*self.as_ptr()
}
2370 /// Mutably dereferences the content.
2372 /// The resulting lifetime is bound to self so this behaves "as if"
2373 /// it were actually an instance of T that is getting borrowed. If a longer
2374 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
pub unsafe fn as_mut(&mut self) -> &mut T {
    &mut *self.as_ptr()
}
}
2380 #[unstable(feature = "ptr_internals", issue = "0")]
2381 impl<T: ?Sized> Clone for Unique<T> {
fn clone(&self) -> Self {
    *self
}
}
2387 #[unstable(feature = "ptr_internals", issue = "0")]
2388 impl<T: ?Sized> Copy for Unique<T> { }
2390 #[unstable(feature = "ptr_internals", issue = "0")]
2391 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2393 #[unstable(feature = "ptr_internals", issue = "0")]
2394 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2395 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2396 fmt::Pointer::fmt(&self.as_ptr(), f)
2400 #[unstable(feature = "ptr_internals", issue = "0")]
2401 #[allow(deprecated)]
2402 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2403 fn from(reference: &'a mut T) -> Self {
2404 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2408 #[unstable(feature = "ptr_internals", issue = "0")]
2409 #[allow(deprecated)]
2410 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2411 fn from(reference: &'a T) -> Self {
2412 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2416 #[unstable(feature = "ptr_internals", issue = "0")]
2417 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2418 fn from(p: NonNull<T>) -> Self {
2419 Unique { pointer: p.pointer, _marker: PhantomData }
2423 /// Previous name of `NonNull`.
2424 #[rustc_deprecated(since = "1.25.0", reason = "renamed to `NonNull`")]
2425 #[unstable(feature = "shared", issue = "27730")]
2426 pub type Shared<T> = NonNull<T>;
2428 /// `*mut T` but non-zero and covariant.
2430 /// This is often the correct thing to use when building data structures using
2431 /// raw pointers, but is ultimately more dangerous to use because of its additional
2432 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2434 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2435 /// is never dereferenced. This is so that enums may use this forbidden value
2436 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `NonNull<T>`.
2437 /// However the pointer may still dangle if it isn't dereferenced.
2439 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2440 /// for your use case, you should include some PhantomData in your type to
2441 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2442 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2443 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2444 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
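///
/// # Examples
///
/// An illustrative sketch (not part of the original docs):
///
/// ```
/// use std::ptr::{self, NonNull};
///
/// let mut x = 0u32;
/// let ptr = NonNull::new(&mut x as *mut u32).expect("pointer is non-null");
/// assert_eq!(unsafe { *ptr.as_ref() }, 0);
///
/// // `new` returns `None` for a null pointer.
/// assert!(NonNull::<u32>::new(ptr::null_mut()).is_none());
/// ```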
2445 #[stable(feature = "nonnull", since = "1.25.0")]
2446 pub struct NonNull<T: ?Sized> {
2447 #[allow(deprecated)] pointer: NonZero<*const T>,
2450 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2451 // NB: This impl is unnecessary, but should provide better error messages.
2452 #[stable(feature = "nonnull", since = "1.25.0")]
2453 impl<T: ?Sized> !Send for NonNull<T> { }
2455 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2456 // NB: This impl is unnecessary, but should provide better error messages.
2457 #[stable(feature = "nonnull", since = "1.25.0")]
2458 impl<T: ?Sized> !Sync for NonNull<T> { }
2460 impl<T: Sized> NonNull<T> {
2461 /// Creates a new `NonNull` that is dangling, but well-aligned.
2463 /// This is useful for initializing types which lazily allocate, like
2464 /// `Vec::new` does.
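///
/// # Examples
///
/// An illustrative sketch (not from the original docs): the pointer is
/// well-aligned, but it must not be dereferenced.
///
/// ```
/// use std::mem;
/// use std::ptr::NonNull;
///
/// let p = NonNull::<u64>::dangling();
/// assert_eq!(p.as_ptr() as usize % mem::align_of::<u64>(), 0);
/// ```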
2465 #[stable(feature = "nonnull", since = "1.25.0")]
pub fn dangling() -> Self {
    unsafe {
        let ptr = mem::align_of::<T>() as *mut T;
        NonNull::new_unchecked(ptr)
    }
}
}
2474 #[allow(deprecated)]
2475 impl<T: ?Sized> NonNull<T> {
2476 /// Creates a new `NonNull`.
2480 /// `ptr` must be non-null.
2481 #[stable(feature = "nonnull", since = "1.25.0")]
2482 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2483 NonNull { pointer: NonZero(ptr as _) }
2486 /// Creates a new `NonNull` if `ptr` is non-null.
2487 #[stable(feature = "nonnull", since = "1.25.0")]
pub fn new(ptr: *mut T) -> Option<Self> {
    if !ptr.is_null() {
        Some(NonNull { pointer: NonZero(ptr as _) })
    } else {
        None
    }
}
2496 /// Acquires the underlying `*mut` pointer.
2497 #[stable(feature = "nonnull", since = "1.25.0")]
2498 pub fn as_ptr(self) -> *mut T {
2499 self.pointer.0 as *mut T
2502 /// Dereferences the content.
2504 /// The resulting lifetime is bound to self so this behaves "as if"
2505 /// it were actually an instance of T that is getting borrowed. If a longer
2506 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
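///
/// # Examples
///
/// An illustrative sketch (not from the original docs):
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut x = 42u32;
/// let ptr = NonNull::from(&mut x);
/// let r: &u32 = unsafe { ptr.as_ref() };
/// assert_eq!(*r, 42);
/// ```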
2507 #[stable(feature = "nonnull", since = "1.25.0")]
pub unsafe fn as_ref(&self) -> &T {
    &*self.as_ptr()
}
2512 /// Mutably dereferences the content.
2514 /// The resulting lifetime is bound to self so this behaves "as if"
2515 /// it were actually an instance of T that is getting borrowed. If a longer
2516 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2517 #[stable(feature = "nonnull", since = "1.25.0")]
pub unsafe fn as_mut(&mut self) -> &mut T {
    &mut *self.as_ptr()
}
/// Casts to a pointer of another type.
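///
/// # Examples
///
/// An illustrative sketch (not from the original docs; `cast` is still
/// unstable behind the `nonnull_cast` feature here):
///
/// ```ignore
/// #![feature(nonnull_cast)]
/// use std::ptr::NonNull;
///
/// let mut x = 0u32;
/// let p: NonNull<u32> = NonNull::from(&mut x);
/// let q: NonNull<u8> = p.cast::<u8>();
/// assert_eq!(p.as_ptr() as usize, q.as_ptr() as usize);
/// ```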
2523 #[unstable(feature = "nonnull_cast", issue = "47653")]
pub fn cast<U>(self) -> NonNull<U> {
    unsafe {
        NonNull::new_unchecked(self.as_ptr() as *mut U)
    }
}
}
2531 #[stable(feature = "nonnull", since = "1.25.0")]
2532 impl<T: ?Sized> Clone for NonNull<T> {
fn clone(&self) -> Self {
    *self
}
}
2538 #[stable(feature = "nonnull", since = "1.25.0")]
2539 impl<T: ?Sized> Copy for NonNull<T> { }
2541 #[unstable(feature = "coerce_unsized", issue = "27732")]
2542 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2544 #[stable(feature = "nonnull", since = "1.25.0")]
2545 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2546 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2547 fmt::Pointer::fmt(&self.as_ptr(), f)
2551 #[stable(feature = "nonnull", since = "1.25.0")]
2552 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2553 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2554 fmt::Pointer::fmt(&self.as_ptr(), f)
2558 #[stable(feature = "nonnull", since = "1.25.0")]
2559 impl<T: ?Sized> Eq for NonNull<T> {}
2561 #[stable(feature = "nonnull", since = "1.25.0")]
2562 impl<T: ?Sized> PartialEq for NonNull<T> {
2563 fn eq(&self, other: &Self) -> bool {
2564 self.as_ptr() == other.as_ptr()
2568 #[stable(feature = "nonnull", since = "1.25.0")]
2569 impl<T: ?Sized> Ord for NonNull<T> {
2570 fn cmp(&self, other: &Self) -> Ordering {
2571 self.as_ptr().cmp(&other.as_ptr())
2575 #[stable(feature = "nonnull", since = "1.25.0")]
2576 impl<T: ?Sized> PartialOrd for NonNull<T> {
2577 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2578 self.as_ptr().partial_cmp(&other.as_ptr())
2582 #[stable(feature = "nonnull", since = "1.25.0")]
2583 impl<T: ?Sized> hash::Hash for NonNull<T> {
2584 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2585 self.as_ptr().hash(state)
2589 #[unstable(feature = "ptr_internals", issue = "0")]
2590 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2591 fn from(unique: Unique<T>) -> Self {
2592 NonNull { pointer: unique.pointer }
2596 #[stable(feature = "nonnull", since = "1.25.0")]
2597 #[allow(deprecated)]
2598 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
2599 fn from(reference: &'a mut T) -> Self {
2600 NonNull { pointer: NonZero(reference as _) }
2604 #[stable(feature = "nonnull", since = "1.25.0")]
2605 #[allow(deprecated)]
2606 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
2607 fn from(reference: &'a T) -> Self {
2608 NonNull { pointer: NonZero(reference as _) }