1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
44 /// trait objects, because they can't be read out onto the stack and
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
49 /// as the compiler doesn't need to prove that it's sound to elide the
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
// Being a lang item, this function's body is replaced by type-specific drop
// glue during codegen, so the apparent infinite recursion is never executed.
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
62 drop_in_place(to_drop);
65 /// Creates a null raw pointer.
72 /// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
// The all-zero address is the canonical null-pointer representation.
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
86 /// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
// Mutable counterpart of `null`; same all-zero representation.
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
103 /// This function copies the memory through the raw pointers passed to it
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
115 /// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
122 /// assert_eq!([2, 3, 0, 1], array);
126 /// Swapping two overlapping regions:
131 /// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
138 /// assert_eq!([1, 0, 1, 2], array);
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
// `tmp` starts uninitialized but is fully written before it is read.
145 let mut tmp: T = mem::uninitialized();
// Three-copy rotation: x -> tmp, y -> x, tmp -> y. Only the middle copy can
// observe aliasing between source and destination, hence plain `copy`
// (memmove semantics) for that step only.
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
152 // y and t now point to the same thing, but we need to completely forget `tmp`
153 // because it's no longer relevant.
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
169 /// #![feature(swap_nonoverlapping)]
173 /// let mut x = [1, 2, 3, 4];
174 /// let mut y = [7, 8, 9];
177 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
180 /// assert_eq!(x, [7, 8, 3, 4]);
181 /// assert_eq!(y, [1, 2, 9]);
184 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
185 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
// Erase the element type and delegate to a byte-wise helper, so one
// non-generic implementation serves every `T`.
186 let x = x as *mut u8;
187 let y = y as *mut u8;
188 let len = mem::size_of::<T>() * count;
189 swap_nonoverlapping_bytes(x, y, len)
193 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
194 // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
195 // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
196 // Haswell E processors. LLVM is more able to optimize if we give a struct a
197 // #[repr(simd)], even if we don't actually use this struct directly.
199 // FIXME repr(simd) broken on emscripten and redox
200 // It's also broken on big-endian powerpc64 and s390x. #42778
201 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
202 target_endian = "big")),
// 32-byte scratch blocks: large enough to vectorize well, small enough to
// keep the stack buffer cheap.
204 struct Block(u64, u64, u64, u64);
205 struct UnalignedBlock(u64, u64, u64, u64);
207 let block_size = mem::size_of::<Block>();
209 // Loop through x & y, copying them `Block` at a time
210 // The optimizer should unroll the loop fully for most types
211 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
213 while i + block_size <= len {
214 // Create some uninitialized memory as scratch space
215 // Declaring `t` here avoids aligning the stack when this loop is unused
216 let mut t: Block = mem::uninitialized();
217 let t = &mut t as *mut _ as *mut u8;
218 let x = x.offset(i as isize);
219 let y = y.offset(i as isize);
221 // Swap a block of bytes of x & y, using t as a temporary buffer
222 // This should be optimized into efficient SIMD operations where available
223 copy_nonoverlapping(x, t, block_size);
224 copy_nonoverlapping(y, x, block_size);
225 copy_nonoverlapping(t, y, block_size);
230 // Swap any remaining bytes
// The tail (fewer than `block_size` bytes) is swapped with the same
// three-copy scheme, through a scratch block with no alignment demands.
231 let mut t: UnalignedBlock = mem::uninitialized();
234 let t = &mut t as *mut _ as *mut u8;
235 let x = x.offset(i as isize);
236 let y = y.offset(i as isize);
238 copy_nonoverlapping(x, t, rem);
239 copy_nonoverlapping(y, x, rem);
240 copy_nonoverlapping(t, y, rem);
244 /// Replaces the value at `dest` with `src`, returning the old
245 /// value, without dropping either.
249 /// This is only unsafe because it accepts a raw pointer.
250 /// Otherwise, this operation is identical to `mem::replace`.
252 #[stable(feature = "rust1", since = "1.0.0")]
253 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
// After the swap, `src` holds the old `*dest` value, which is returned to
// the caller; neither value is dropped here.
254 mem::swap(&mut *dest, &mut src); // cannot overlap
258 /// Reads the value from `src` without moving it. This leaves the
259 /// memory in `src` unchanged.
263 /// Beyond accepting a raw pointer, this is unsafe because it semantically
264 /// moves the value out of `src` without preventing further usage of `src`.
265 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
266 /// `src` is not used before the data is overwritten again (e.g. with `write`,
267 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
268 /// because it will attempt to drop the value previously at `*src`.
270 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
278 /// let y = &x as *const i32;
281 /// assert_eq!(std::ptr::read(y), 12);
285 #[stable(feature = "rust1", since = "1.0.0")]
286 pub unsafe fn read<T>(src: *const T) -> T {
// Bitwise-copy one `T` out of `src` into fresh local storage; the source
// bytes are left untouched.
287 let mut tmp: T = mem::uninitialized();
288 copy_nonoverlapping(src, &mut tmp, 1);
292 /// Reads the value from `src` without moving it. This leaves the
293 /// memory in `src` unchanged.
295 /// Unlike `read`, the pointer may be unaligned.
299 /// Beyond accepting a raw pointer, this is unsafe because it semantically
300 /// moves the value out of `src` without preventing further usage of `src`.
301 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
302 /// `src` is not used before the data is overwritten again (e.g. with `write`,
303 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
304 /// because it will attempt to drop the value previously at `*src`.
312 /// let y = &x as *const i32;
315 /// assert_eq!(std::ptr::read_unaligned(y), 12);
319 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
320 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
321 let mut tmp: T = mem::uninitialized();
// Copying at `u8` granularity places no alignment requirement on `src`.
322 copy_nonoverlapping(src as *const u8,
323 &mut tmp as *mut T as *mut u8,
324 mem::size_of::<T>());
328 /// Overwrites a memory location with the given value without reading or
329 /// dropping the old value.
333 /// This operation is marked unsafe because it accepts a raw pointer.
335 /// It does not drop the contents of `dst`. This is safe, but it could leak
336 /// allocations or resources, so care must be taken not to overwrite an object
337 /// that should be dropped.
339 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
340 /// location pointed to by `dst`.
342 /// This is appropriate for initializing uninitialized memory, or overwriting
343 /// memory that has previously been `read` from.
345 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
353 /// let y = &mut x as *mut i32;
357 /// std::ptr::write(y, z);
358 /// assert_eq!(std::ptr::read(y), 12);
362 #[stable(feature = "rust1", since = "1.0.0")]
363 pub unsafe fn write<T>(dst: *mut T, src: T) {
// The intrinsic moves `src` into `*dst` without reading or dropping the
// previous contents of `dst`.
364 intrinsics::move_val_init(&mut *dst, src)
367 /// Overwrites a memory location with the given value without reading or
368 /// dropping the old value.
370 /// Unlike `write`, the pointer may be unaligned.
374 /// This operation is marked unsafe because it accepts a raw pointer.
376 /// It does not drop the contents of `dst`. This is safe, but it could leak
377 /// allocations or resources, so care must be taken not to overwrite an object
378 /// that should be dropped.
380 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
381 /// location pointed to by `dst`.
383 /// This is appropriate for initializing uninitialized memory, or overwriting
384 /// memory that has previously been `read` from.
392 /// let y = &mut x as *mut i32;
396 /// std::ptr::write_unaligned(y, z);
397 /// assert_eq!(std::ptr::read_unaligned(y), 12);
401 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
402 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
// Byte-granularity copy: no alignment requirement on `dst`. `src` is moved
// in (not dropped), matching the contract documented above.
403 copy_nonoverlapping(&src as *const T as *const u8,
405 mem::size_of::<T>());
409 /// Performs a volatile read of the value from `src` without moving it. This
410 /// leaves the memory in `src` unchanged.
412 /// Volatile operations are intended to act on I/O memory, and are guaranteed
413 /// to not be elided or reordered by the compiler across other volatile
418 /// Rust does not currently have a rigorously and formally defined memory model,
419 /// so the precise semantics of what "volatile" means here is subject to change
420 /// over time. That being said, the semantics will almost always end up pretty
421 /// similar to [C11's definition of volatile][c11].
423 /// The compiler shouldn't change the relative order or number of volatile
424 /// memory operations. However, volatile memory operations on zero-sized types
425 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
426 /// and may be ignored.
428 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
432 /// Beyond accepting a raw pointer, this is unsafe because it semantically
433 /// moves the value out of `src` without preventing further usage of `src`.
434 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
435 /// `src` is not used before the data is overwritten again (e.g. with `write`,
436 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
437 /// because it will attempt to drop the value previously at `*src`.
445 /// let y = &x as *const i32;
448 /// assert_eq!(std::ptr::read_volatile(y), 12);
452 #[stable(feature = "volatile", since = "1.9.0")]
453 pub unsafe fn read_volatile<T>(src: *const T) -> T {
// Lowers to a volatile load, which the compiler will not elide or reorder
// across other volatile operations.
454 intrinsics::volatile_load(src)
457 /// Performs a volatile write of a memory location with the given value without
458 /// reading or dropping the old value.
460 /// Volatile operations are intended to act on I/O memory, and are guaranteed
461 /// to not be elided or reordered by the compiler across other volatile
466 /// Rust does not currently have a rigorously and formally defined memory model,
467 /// so the precise semantics of what "volatile" means here is subject to change
468 /// over time. That being said, the semantics will almost always end up pretty
469 /// similar to [C11's definition of volatile][c11].
471 /// The compiler shouldn't change the relative order or number of volatile
472 /// memory operations. However, volatile memory operations on zero-sized types
473 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
474 /// and may be ignored.
476 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
480 /// This operation is marked unsafe because it accepts a raw pointer.
482 /// It does not drop the contents of `dst`. This is safe, but it could leak
483 /// allocations or resources, so care must be taken not to overwrite an object
484 /// that should be dropped.
486 /// This is appropriate for initializing uninitialized memory, or overwriting
487 /// memory that has previously been `read` from.
495 /// let y = &mut x as *mut i32;
499 /// std::ptr::write_volatile(y, z);
500 /// assert_eq!(std::ptr::read_volatile(y), 12);
504 #[stable(feature = "volatile", since = "1.9.0")]
505 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// Lowers to a volatile store, which the compiler will not elide or reorder
// across other volatile operations; the old `*dst` is not dropped.
506 intrinsics::volatile_store(dst, src);
509 #[lang = "const_ptr"]
510 impl<T: ?Sized> *const T {
511 /// Returns `true` if the pointer is null.
513 /// Note that unsized types have many possible null pointers, as only the
514 /// raw data pointer is considered, not their length, vtable, etc.
515 /// Therefore, two pointers that are null may still not compare equal to
523 /// let s: &str = "Follow the rabbit";
524 /// let ptr: *const u8 = s.as_ptr();
525 /// assert!(!ptr.is_null());
527 #[stable(feature = "rust1", since = "1.0.0")]
529 pub fn is_null(self) -> bool {
530 // Compare via a cast to a thin pointer, so fat pointers are only
531 // considering their "data" part for null-ness.
// (Any length/vtable metadata is deliberately ignored by the cast.)
532 (self as *const u8) == null()
535 /// Returns `None` if the pointer is null, or else returns a reference to
536 /// the value wrapped in `Some`.
540 /// While this method and its mutable counterpart are useful for
541 /// null-safety, it is important to note that this is still an unsafe
542 /// operation because the returned value could be pointing to invalid
545 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
546 /// not necessarily reflect the actual lifetime of the data.
553 /// let ptr: *const u8 = &10u8 as *const u8;
556 /// if let Some(val_back) = ptr.as_ref() {
557 /// println!("We got back the value: {}!", val_back);
561 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
// Per the contract above: yields `None` for null, otherwise a reference with
// a caller-chosen lifetime `'a` that may outlive the data — hence `unsafe`.
563 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
571 /// Calculates the offset from a pointer.
573 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
574 /// offset of `3 * size_of::<T>()` bytes.
578 /// If any of the following conditions are violated, the result is Undefined
581 /// * Both the starting and resulting pointer must be either in bounds or one
582 /// byte past the end of an allocated object.
584 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
586 /// * The offset being in bounds cannot rely on "wrapping around" the address
587 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
589 /// The compiler and standard library generally tries to ensure allocations
590 /// never reach a size where an offset is a concern. For instance, `Vec`
591 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
592 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
594 /// Most platforms fundamentally can't even construct such an allocation.
595 /// For instance, no known 64-bit platform can ever serve a request
596 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
597 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
598 /// more than `isize::MAX` bytes with things like Physical Address
599 /// Extension. As such, memory acquired directly from allocators or memory
600 /// mapped files *may* be too large to handle with this function.
602 /// Consider using `wrapping_offset` instead if these constraints are
603 /// difficult to satisfy. The only advantage of this method is that it
604 /// enables more aggressive compiler optimizations.
611 /// let s: &str = "123";
612 /// let ptr: *const u8 = s.as_ptr();
615 /// println!("{}", *ptr.offset(1) as char);
616 /// println!("{}", *ptr.offset(2) as char);
619 #[stable(feature = "rust1", since = "1.0.0")]
621 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
// The `offset` intrinsic makes out-of-bounds results Undefined Behavior,
// which is what enables the aggressive optimizations noted above
// (contrast with `wrapping_offset`).
622 intrinsics::offset(self, count)
625 /// Calculates the offset from a pointer using wrapping arithmetic.
627 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
628 /// offset of `3 * size_of::<T>()` bytes.
632 /// The resulting pointer does not need to be in bounds, but it is
633 /// potentially hazardous to dereference (which requires `unsafe`).
635 /// Always use `.offset(count)` instead when possible, because `offset`
636 /// allows the compiler to optimize better.
643 /// // Iterate using a raw pointer in increments of two elements
644 /// let data = [1u8, 2, 3, 4, 5];
645 /// let mut ptr: *const u8 = data.as_ptr();
647 /// let end_rounded_up = ptr.wrapping_offset(6);
649 /// // This loop prints "1, 3, 5, "
650 /// while ptr != end_rounded_up {
652 /// print!("{}, ", *ptr);
654 /// ptr = ptr.wrapping_offset(step);
657 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
659 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
// `arith_offset` wraps around the address space, so computing an
// out-of-bounds pointer is not UB (dereferencing it still requires care).
661 intrinsics::arith_offset(self, count)
665 /// Calculates the distance between two pointers. The returned value is in
666 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
668 /// If the address difference between the two pointers is not a multiple of
669 /// `mem::size_of::<T>()` then the result of the division is rounded towards
672 /// This function returns `None` if `T` is a zero-sized type.
679 /// #![feature(offset_to)]
683 /// let ptr1: *const i32 = &a[1];
684 /// let ptr2: *const i32 = &a[3];
685 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
686 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
687 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
688 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
691 #[unstable(feature = "offset_to", issue = "41079")]
693 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
694 let size = mem::size_of::<T>();
// Wrapping subtraction of the raw addresses, then division by the element
// size; per the docs above, rounds toward zero when the difference is not
// an exact multiple of `size`.
698 let diff = (other as isize).wrapping_sub(self as isize);
699 Some(diff / size as isize)
703 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
705 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
706 /// offset of `3 * size_of::<T>()` bytes.
710 /// If any of the following conditions are violated, the result is Undefined
713 /// * Both the starting and resulting pointer must be either in bounds or one
714 /// byte past the end of an allocated object.
716 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
718 /// * The offset being in bounds cannot rely on "wrapping around" the address
719 /// space. That is, the infinite-precision sum must fit in a `usize`.
721 /// The compiler and standard library generally tries to ensure allocations
722 /// never reach a size where an offset is a concern. For instance, `Vec`
723 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
724 /// `vec.as_ptr().add(vec.len())` is always safe.
726 /// Most platforms fundamentally can't even construct such an allocation.
727 /// For instance, no known 64-bit platform can ever serve a request
728 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
729 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
730 /// more than `isize::MAX` bytes with things like Physical Address
731 /// Extension. As such, memory acquired directly from allocators or memory
732 /// mapped files *may* be too large to handle with this function.
734 /// Consider using `wrapping_offset` instead if these constraints are
735 /// difficult to satisfy. The only advantage of this method is that it
736 /// enables more aggressive compiler optimizations.
743 /// #![feature(pointer_methods)]
745 /// let s: &str = "123";
746 /// let ptr: *const u8 = s.as_ptr();
749 /// println!("{}", *ptr.add(1) as char);
750 /// println!("{}", *ptr.add(2) as char);
753 #[unstable(feature = "pointer_methods", issue = "43941")]
755 pub unsafe fn add(self, count: usize) -> Self
// Same UB rules as `offset`; `count` is in units of `T`.
758 self.offset(count as isize)
761 /// Calculates the offset from a pointer (convenience for
762 /// `.offset((count as isize).wrapping_neg())`).
764 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
765 /// offset of `3 * size_of::<T>()` bytes.
769 /// If any of the following conditions are violated, the result is Undefined
772 /// * Both the starting and resulting pointer must be either in bounds or one
773 /// byte past the end of an allocated object.
775 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
777 /// * The offset being in bounds cannot rely on "wrapping around" the address
778 /// space. That is, the infinite-precision sum must fit in a usize.
780 /// The compiler and standard library generally tries to ensure allocations
781 /// never reach a size where an offset is a concern. For instance, `Vec`
782 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
783 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
785 /// Most platforms fundamentally can't even construct such an allocation.
786 /// For instance, no known 64-bit platform can ever serve a request
787 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
788 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
789 /// more than `isize::MAX` bytes with things like Physical Address
790 /// Extension. As such, memory acquired directly from allocators or memory
791 /// mapped files *may* be too large to handle with this function.
793 /// Consider using `wrapping_offset` instead if these constraints are
794 /// difficult to satisfy. The only advantage of this method is that it
795 /// enables more aggressive compiler optimizations.
802 /// #![feature(pointer_methods)]
804 /// let s: &str = "123";
807 /// let end: *const u8 = s.as_ptr().add(3);
808 /// println!("{}", *end.sub(1) as char);
809 /// println!("{}", *end.sub(2) as char);
812 #[unstable(feature = "pointer_methods", issue = "43941")]
814 pub unsafe fn sub(self, count: usize) -> Self
// Negate with wrapping semantics so `count as isize == isize::MIN` cannot
// overflow before delegating to `offset`.
817 self.offset((count as isize).wrapping_neg())
820 /// Calculates the offset from a pointer using wrapping arithmetic.
821 /// (convenience for `.wrapping_offset(count as isize)`)
823 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
824 /// offset of `3 * size_of::<T>()` bytes.
828 /// The resulting pointer does not need to be in bounds, but it is
829 /// potentially hazardous to dereference (which requires `unsafe`).
831 /// Always use `.add(count)` instead when possible, because `add`
832 /// allows the compiler to optimize better.
839 /// #![feature(pointer_methods)]
841 /// // Iterate using a raw pointer in increments of two elements
842 /// let data = [1u8, 2, 3, 4, 5];
843 /// let mut ptr: *const u8 = data.as_ptr();
845 /// let end_rounded_up = ptr.wrapping_add(6);
847 /// // This loop prints "1, 3, 5, "
848 /// while ptr != end_rounded_up {
850 /// print!("{}, ", *ptr);
852 /// ptr = ptr.wrapping_add(step);
855 #[unstable(feature = "pointer_methods", issue = "43941")]
857 pub fn wrapping_add(self, count: usize) -> Self
// Thin convenience wrapper: `wrapping_offset` with a non-negative count.
860 self.wrapping_offset(count as isize)
863 /// Calculates the offset from a pointer using wrapping arithmetic.
864 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
866 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
867 /// offset of `3 * size_of::<T>()` bytes.
871 /// The resulting pointer does not need to be in bounds, but it is
872 /// potentially hazardous to dereference (which requires `unsafe`).
874 /// Always use `.sub(count)` instead when possible, because `sub`
875 /// allows the compiler to optimize better.
882 /// #![feature(pointer_methods)]
884 /// // Iterate using a raw pointer in increments of two elements (backwards)
885 /// let data = [1u8, 2, 3, 4, 5];
886 /// let mut ptr: *const u8 = data.as_ptr();
887 /// let start_rounded_down = ptr.wrapping_sub(2);
888 /// ptr = ptr.wrapping_add(4);
890 /// // This loop prints "5, 3, 1, "
891 /// while ptr != start_rounded_down {
893 /// print!("{}, ", *ptr);
895 /// ptr = ptr.wrapping_sub(step);
898 #[unstable(feature = "pointer_methods", issue = "43941")]
900 pub fn wrapping_sub(self, count: usize) -> Self
// Wrapping negation followed by wrapping pointer arithmetic; per the docs
// above, the result may be out of bounds without UB.
903 self.wrapping_offset((count as isize).wrapping_neg())
906 /// Reads the value from `self` without moving it. This leaves the
907 /// memory in `self` unchanged.
911 /// Beyond accepting a raw pointer, this is unsafe because it semantically
912 /// moves the value out of `self` without preventing further usage of `self`.
913 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
914 /// `self` is not used before the data is overwritten again (e.g. with `write`,
915 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
916 /// because it will attempt to drop the value previously at `*self`.
918 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
925 /// #![feature(pointer_methods)]
928 /// let y = &x as *const i32;
931 /// assert_eq!(y.read(), 12);
934 #[unstable(feature = "pointer_methods", issue = "43941")]
// Method form of `ptr::read`; presumably delegates to the free function —
// see the safety notes in the docs above.
936 pub unsafe fn read(self) -> T
942 /// Performs a volatile read of the value from `self` without moving it. This
943 /// leaves the memory in `self` unchanged.
945 /// Volatile operations are intended to act on I/O memory, and are guaranteed
946 /// to not be elided or reordered by the compiler across other volatile
951 /// Rust does not currently have a rigorously and formally defined memory model,
952 /// so the precise semantics of what "volatile" means here is subject to change
953 /// over time. That being said, the semantics will almost always end up pretty
954 /// similar to [C11's definition of volatile][c11].
956 /// The compiler shouldn't change the relative order or number of volatile
957 /// memory operations. However, volatile memory operations on zero-sized types
958 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
959 /// and may be ignored.
961 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
965 /// Beyond accepting a raw pointer, this is unsafe because it semantically
966 /// moves the value out of `self` without preventing further usage of `self`.
967 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
968 /// `self` is not used before the data is overwritten again (e.g. with `write`,
969 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
970 /// because it will attempt to drop the value previously at `*self`.
977 /// #![feature(pointer_methods)]
980 /// let y = &x as *const i32;
983 /// assert_eq!(y.read_volatile(), 12);
986 #[unstable(feature = "pointer_methods", issue = "43941")]
// Method form of `ptr::read_volatile`; presumably delegates to the free
// function — see the volatile semantics documented above.
988 pub unsafe fn read_volatile(self) -> T
994 /// Reads the value from `self` without moving it. This leaves the
995 /// memory in `self` unchanged.
997 /// Unlike `read`, the pointer may be unaligned.
1001 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1002 /// moves the value out of `self` without preventing further usage of `self`.
1003 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1004 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1005 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1006 /// because it will attempt to drop the value previously at `*self`.
1013 /// #![feature(pointer_methods)]
1016 /// let y = &x as *const i32;
1019 /// assert_eq!(y.read_unaligned(), 12);
1022 #[unstable(feature = "pointer_methods", issue = "43941")]
// Method form that simply delegates to the free function `ptr::read_unaligned`.
1024 pub unsafe fn read_unaligned(self) -> T
1027 read_unaligned(self)
1030 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1031 /// and destination may overlap.
1033 /// NOTE: this has the *same* argument order as `ptr::copy`.
1035 /// This is semantically equivalent to C's `memmove`.
1039 /// Care must be taken with the ownership of `self` and `dest`.
1040 /// This method semantically moves the values of `self` into `dest`.
1041 /// However it does not drop the contents of `self`, or prevent the contents
1042 /// of `dest` from being dropped or used.
1046 /// Efficiently create a Rust vector from an unsafe buffer:
1049 /// #![feature(pointer_methods)]
1051 /// # #[allow(dead_code)]
1052 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1053 /// let mut dst = Vec::with_capacity(elts);
1054 /// dst.set_len(elts);
1055 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1059 #[unstable(feature = "pointer_methods", issue = "43941")]
1061 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
// Delegates to `ptr::copy` (memmove semantics: overlap is allowed).
1064 copy(self, dest, count)
1067 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1068 /// and destination may *not* overlap.
1070 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1072 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1076 /// Beyond requiring that the program must be allowed to access both regions
1077 /// of memory, it is Undefined Behavior for source and destination to
1078 /// overlap. Care must also be taken with the ownership of `self` and
1079 /// `dest`. This method semantically moves the values of `self` into `dest`.
1080 /// However it does not drop the contents of `dest`, or prevent the contents
1081 /// of `self` from being dropped or used.
1085 /// Efficiently create a Rust vector from an unsafe buffer:
1088 /// #![feature(pointer_methods)]
1090 /// # #[allow(dead_code)]
1091 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1092 /// let mut dst = Vec::with_capacity(elts);
1093 /// dst.set_len(elts);
1094 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1098 #[unstable(feature = "pointer_methods", issue = "43941")]
1100 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
// Delegates to `ptr::copy_nonoverlapping` (memcpy semantics: overlap is UB).
1103 copy_nonoverlapping(self, dest, count)
1106 /// Computes the byte offset that needs to be applied in order to
1107 /// make the pointer aligned to `align`.
1108 /// If it is not possible to align the pointer, the implementation returns
1109 /// `usize::max_value()`.
1111 /// There are no guarantees whatsoever that offsetting the pointer will not
1112 /// overflow or go beyond the allocation that the pointer points into.
1113 /// It is up to the caller to ensure that the returned offset is correct
1114 /// in all terms other than alignment.
1118 /// Accessing adjacent `u8` as `u16`
1121 /// # #![feature(align_offset)]
1122 /// # fn foo(n: usize) {
1123 /// # use std::mem::align_of;
1125 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1126 /// let ptr = &x[n] as *const u8;
1127 /// let offset = ptr.align_offset(align_of::<u16>());
1128 /// if offset < x.len() - n - 1 {
1129 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1130 /// assert_ne!(*u16_ptr, 500);
1132 /// // while the pointer can be aligned via `offset`, it would point
1133 /// // outside the allocation
1137 #[unstable(feature = "align_offset", issue = "44488")]
1138 pub fn align_offset(self, align: usize) -> usize {
1140 intrinsics::align_offset(self as *const _, align)
1146 impl<T: ?Sized> *mut T {
1147 /// Returns `true` if the pointer is null.
1149 /// Note that unsized types have many possible null pointers, as only the
1150 /// raw data pointer is considered, not their length, vtable, etc.
1151 /// Therefore, two pointers that are null may still not compare equal to
1159 /// let mut s = [1, 2, 3];
1160 /// let ptr: *mut u32 = s.as_mut_ptr();
1161 /// assert!(!ptr.is_null());
1163 #[stable(feature = "rust1", since = "1.0.0")]
1165 pub fn is_null(self) -> bool {
1166 // Compare via a cast to a thin pointer, so fat pointers are only
1167 // considering their "data" part for null-ness.
1168 (self as *mut u8) == null_mut()
1171 /// Returns `None` if the pointer is null, or else returns a reference to
1172 /// the value wrapped in `Some`.
1176 /// While this method and its mutable counterpart are useful for
1177 /// null-safety, it is important to note that this is still an unsafe
1178 /// operation because the returned value could be pointing to invalid
1181 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1182 /// not necessarily reflect the actual lifetime of the data.
1189 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1192 /// if let Some(val_back) = ptr.as_ref() {
1193 /// println!("We got back the value: {}!", val_back);
1197 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1199 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1207 /// Calculates the offset from a pointer.
1209 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1210 /// offset of `3 * size_of::<T>()` bytes.
1214 /// If any of the following conditions are violated, the result is Undefined
1217 /// * Both the starting and resulting pointer must be either in bounds or one
1218 /// byte past the end of an allocated object.
1220 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1222 /// * The offset being in bounds cannot rely on "wrapping around" the address
1223 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
1225 /// The compiler and standard library generally tries to ensure allocations
1226 /// never reach a size where an offset is a concern. For instance, `Vec`
1227 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1228 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1230 /// Most platforms fundamentally can't even construct such an allocation.
1231 /// For instance, no known 64-bit platform can ever serve a request
1232 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1233 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1234 /// more than `isize::MAX` bytes with things like Physical Address
1235 /// Extension. As such, memory acquired directly from allocators or memory
1236 /// mapped files *may* be too large to handle with this function.
1238 /// Consider using `wrapping_offset` instead if these constraints are
1239 /// difficult to satisfy. The only advantage of this method is that it
1240 /// enables more aggressive compiler optimizations.
1247 /// let mut s = [1, 2, 3];
1248 /// let ptr: *mut u32 = s.as_mut_ptr();
1251 /// println!("{}", *ptr.offset(1));
1252 /// println!("{}", *ptr.offset(2));
1255 #[stable(feature = "rust1", since = "1.0.0")]
1257 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1258 intrinsics::offset(self, count) as *mut T
1261 /// Calculates the offset from a pointer using wrapping arithmetic.
1262 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1263 /// offset of `3 * size_of::<T>()` bytes.
1267 /// The resulting pointer does not need to be in bounds, but it is
1268 /// potentially hazardous to dereference (which requires `unsafe`).
1270 /// Always use `.offset(count)` instead when possible, because `offset`
1271 /// allows the compiler to optimize better.
1278 /// // Iterate using a raw pointer in increments of two elements
1279 /// let mut data = [1u8, 2, 3, 4, 5];
1280 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1282 /// let end_rounded_up = ptr.wrapping_offset(6);
1284 /// while ptr != end_rounded_up {
1288 /// ptr = ptr.wrapping_offset(step);
1290 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1292 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1294 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1296 intrinsics::arith_offset(self, count) as *mut T
1300 /// Returns `None` if the pointer is null, or else returns a mutable
1301 /// reference to the value wrapped in `Some`.
1305 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1306 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1307 /// returned is indeed a valid lifetime for the contained data.
1314 /// let mut s = [1, 2, 3];
1315 /// let ptr: *mut u32 = s.as_mut_ptr();
1316 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1317 /// *first_value = 4;
1318 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1320 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1322 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1330 /// Calculates the distance between two pointers. The returned value is in
1331 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1333 /// If the address difference between the two pointers is not a multiple of
1334 /// `mem::size_of::<T>()` then the result of the division is rounded towards
1337 /// This function returns `None` if `T` is a zero-sized type.
1344 /// #![feature(offset_to)]
1347 /// let mut a = [0; 5];
1348 /// let ptr1: *mut i32 = &mut a[1];
1349 /// let ptr2: *mut i32 = &mut a[3];
1350 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1351 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1352 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1353 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1356 #[unstable(feature = "offset_to", issue = "41079")]
1358 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1359 let size = mem::size_of::<T>();
1363 let diff = (other as isize).wrapping_sub(self as isize);
1364 Some(diff / size as isize)
1368 /// Computes the byte offset that needs to be applied in order to
1369 /// make the pointer aligned to `align`.
1370 /// If it is not possible to align the pointer, the implementation returns
1371 /// `usize::max_value()`.
1373 /// There are no guarantees whatsoever that offsetting the pointer will not
1374 /// overflow or go beyond the allocation that the pointer points into.
1375 /// It is up to the caller to ensure that the returned offset is correct
1376 /// in all terms other than alignment.
1380 /// Accessing adjacent `u8` as `u16`
1383 /// # #![feature(align_offset)]
1384 /// # fn foo(n: usize) {
1385 /// # use std::mem::align_of;
1387 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1388 /// let ptr = &x[n] as *const u8;
1389 /// let offset = ptr.align_offset(align_of::<u16>());
1390 /// if offset < x.len() - n - 1 {
1391 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1392 /// assert_ne!(*u16_ptr, 500);
1394 /// // while the pointer can be aligned via `offset`, it would point
1395 /// // outside the allocation
1399 #[unstable(feature = "align_offset", issue = "44488")]
1400 pub fn align_offset(self, align: usize) -> usize {
1402 intrinsics::align_offset(self as *const _, align)
1406 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1408 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1409 /// offset of `3 * size_of::<T>()` bytes.
1413 /// If any of the following conditions are violated, the result is Undefined
1416 /// * Both the starting and resulting pointer must be either in bounds or one
1417 /// byte past the end of an allocated object.
1419 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1421 /// * The offset being in bounds cannot rely on "wrapping around" the address
1422 /// space. That is, the infinite-precision sum must fit in a `usize`.
1424 /// The compiler and standard library generally tries to ensure allocations
1425 /// never reach a size where an offset is a concern. For instance, `Vec`
1426 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1427 /// `vec.as_ptr().add(vec.len())` is always safe.
1429 /// Most platforms fundamentally can't even construct such an allocation.
1430 /// For instance, no known 64-bit platform can ever serve a request
1431 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1432 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1433 /// more than `isize::MAX` bytes with things like Physical Address
1434 /// Extension. As such, memory acquired directly from allocators or memory
1435 /// mapped files *may* be too large to handle with this function.
1437 /// Consider using `wrapping_offset` instead if these constraints are
1438 /// difficult to satisfy. The only advantage of this method is that it
1439 /// enables more aggressive compiler optimizations.
1446 /// #![feature(pointer_methods)]
1448 /// let s: &str = "123";
1449 /// let ptr: *const u8 = s.as_ptr();
1452 /// println!("{}", *ptr.add(1) as char);
1453 /// println!("{}", *ptr.add(2) as char);
1456 #[unstable(feature = "pointer_methods", issue = "43941")]
1458 pub unsafe fn add(self, count: usize) -> Self
1461 self.offset(count as isize)
1464 /// Calculates the offset from a pointer (convenience for
1465 /// `.offset((count as isize).wrapping_neg())`).
1467 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1468 /// offset of `3 * size_of::<T>()` bytes.
1472 /// If any of the following conditions are violated, the result is Undefined
1475 /// * Both the starting and resulting pointer must be either in bounds or one
1476 /// byte past the end of an allocated object.
1478 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1480 /// * The offset being in bounds cannot rely on "wrapping around" the address
1481 /// space. That is, the infinite-precision sum must fit in a usize.
1483 /// The compiler and standard library generally tries to ensure allocations
1484 /// never reach a size where an offset is a concern. For instance, `Vec`
1485 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1486 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1488 /// Most platforms fundamentally can't even construct such an allocation.
1489 /// For instance, no known 64-bit platform can ever serve a request
1490 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1491 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1492 /// more than `isize::MAX` bytes with things like Physical Address
1493 /// Extension. As such, memory acquired directly from allocators or memory
1494 /// mapped files *may* be too large to handle with this function.
1496 /// Consider using `wrapping_offset` instead if these constraints are
1497 /// difficult to satisfy. The only advantage of this method is that it
1498 /// enables more aggressive compiler optimizations.
1505 /// #![feature(pointer_methods)]
1507 /// let s: &str = "123";
1510 /// let end: *const u8 = s.as_ptr().add(3);
1511 /// println!("{}", *end.sub(1) as char);
1512 /// println!("{}", *end.sub(2) as char);
1515 #[unstable(feature = "pointer_methods", issue = "43941")]
1517 pub unsafe fn sub(self, count: usize) -> Self
1520 self.offset((count as isize).wrapping_neg())
1523 /// Calculates the offset from a pointer using wrapping arithmetic.
1524 /// (convenience for `.wrapping_offset(count as isize)`)
1526 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1527 /// offset of `3 * size_of::<T>()` bytes.
1531 /// The resulting pointer does not need to be in bounds, but it is
1532 /// potentially hazardous to dereference (which requires `unsafe`).
1534 /// Always use `.add(count)` instead when possible, because `add`
1535 /// allows the compiler to optimize better.
1542 /// #![feature(pointer_methods)]
1544 /// // Iterate using a raw pointer in increments of two elements
1545 /// let data = [1u8, 2, 3, 4, 5];
1546 /// let mut ptr: *const u8 = data.as_ptr();
1548 /// let end_rounded_up = ptr.wrapping_add(6);
1550 /// // This loop prints "1, 3, 5, "
1551 /// while ptr != end_rounded_up {
1553 /// print!("{}, ", *ptr);
1555 /// ptr = ptr.wrapping_add(step);
1558 #[unstable(feature = "pointer_methods", issue = "43941")]
1560 pub fn wrapping_add(self, count: usize) -> Self
1563 self.wrapping_offset(count as isize)
1566 /// Calculates the offset from a pointer using wrapping arithmetic.
1567 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1569 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1570 /// offset of `3 * size_of::<T>()` bytes.
1574 /// The resulting pointer does not need to be in bounds, but it is
1575 /// potentially hazardous to dereference (which requires `unsafe`).
1577 /// Always use `.sub(count)` instead when possible, because `sub`
1578 /// allows the compiler to optimize better.
1585 /// #![feature(pointer_methods)]
1587 /// // Iterate using a raw pointer in increments of two elements (backwards)
1588 /// let data = [1u8, 2, 3, 4, 5];
1589 /// let mut ptr: *const u8 = data.as_ptr();
1590 /// let start_rounded_down = ptr.wrapping_sub(2);
1591 /// ptr = ptr.wrapping_add(4);
1593 /// // This loop prints "5, 3, 1, "
1594 /// while ptr != start_rounded_down {
1596 /// print!("{}, ", *ptr);
1598 /// ptr = ptr.wrapping_sub(step);
1601 #[unstable(feature = "pointer_methods", issue = "43941")]
1603 pub fn wrapping_sub(self, count: usize) -> Self
1606 self.wrapping_offset((count as isize).wrapping_neg())
1609 /// Reads the value from `self` without moving it. This leaves the
1610 /// memory in `self` unchanged.
1614 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1615 /// moves the value out of `self` without preventing further usage of `self`.
1616 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1617 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1618 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1619 /// because it will attempt to drop the value previously at `*self`.
1621 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1628 /// #![feature(pointer_methods)]
1631 /// let y = &x as *const i32;
1634 /// assert_eq!(y.read(), 12);
1637 #[unstable(feature = "pointer_methods", issue = "43941")]
1639 pub unsafe fn read(self) -> T
1645 /// Performs a volatile read of the value from `self` without moving it. This
1646 /// leaves the memory in `self` unchanged.
1648 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1649 /// to not be elided or reordered by the compiler across other volatile
1654 /// Rust does not currently have a rigorously and formally defined memory model,
1655 /// so the precise semantics of what "volatile" means here is subject to change
1656 /// over time. That being said, the semantics will almost always end up pretty
1657 /// similar to [C11's definition of volatile][c11].
1659 /// The compiler shouldn't change the relative order or number of volatile
1660 /// memory operations. However, volatile memory operations on zero-sized types
1661 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1662 /// and may be ignored.
1664 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1668 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1669 /// moves the value out of `self` without preventing further usage of `self`.
1670 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1671 /// `src` is not used before the data is overwritten again (e.g. with `write`,
1672 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1673 /// because it will attempt to drop the value previously at `*self`.
1680 /// #![feature(pointer_methods)]
1683 /// let y = &x as *const i32;
1686 /// assert_eq!(y.read_volatile(), 12);
1689 #[unstable(feature = "pointer_methods", issue = "43941")]
1691 pub unsafe fn read_volatile(self) -> T
1697 /// Reads the value from `self` without moving it. This leaves the
1698 /// memory in `self` unchanged.
1700 /// Unlike `read`, the pointer may be unaligned.
1704 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1705 /// moves the value out of `self` without preventing further usage of `self`.
1706 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1707 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1708 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1709 /// because it will attempt to drop the value previously at `*self`.
1716 /// #![feature(pointer_methods)]
1719 /// let y = &x as *const i32;
1722 /// assert_eq!(y.read_unaligned(), 12);
1725 #[unstable(feature = "pointer_methods", issue = "43941")]
1727 pub unsafe fn read_unaligned(self) -> T
1730 read_unaligned(self)
1733 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1734 /// and destination may overlap.
1736 /// NOTE: this has the *same* argument order as `ptr::copy`.
1738 /// This is semantically equivalent to C's `memmove`.
1742 /// Care must be taken with the ownership of `self` and `dest`.
1743 /// This method semantically moves the values of `self` into `dest`.
1744 /// However it does not drop the contents of `dest`, or prevent the contents
1745 /// of `self` from being dropped or used.
1749 /// Efficiently create a Rust vector from an unsafe buffer:
1752 /// #![feature(pointer_methods)]
1754 /// # #[allow(dead_code)]
1755 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1756 /// let mut dst = Vec::with_capacity(elts);
1757 /// dst.set_len(elts);
1758 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1762 #[unstable(feature = "pointer_methods", issue = "43941")]
1764 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1767 copy(self, dest, count)
1770 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1771 /// and destination may *not* overlap.
1773 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1775 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1779 /// Beyond requiring that the program must be allowed to access both regions
1780 /// of memory, it is Undefined Behavior for source and destination to
1781 /// overlap. Care must also be taken with the ownership of `self` and
1782 /// `dest`. This method semantically moves the values of `self` into `dest`.
1783 /// However it does not drop the contents of `dest`, or prevent the contents
1784 /// of `self` from being dropped or used.
1788 /// Efficiently create a Rust vector from an unsafe buffer:
1791 /// #![feature(pointer_methods)]
1793 /// # #[allow(dead_code)]
1794 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1795 /// let mut dst = Vec::with_capacity(elts);
1796 /// dst.set_len(elts);
1797 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1801 #[unstable(feature = "pointer_methods", issue = "43941")]
1803 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1806 copy_nonoverlapping(self, dest, count)
1809 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1810 /// and destination may overlap.
1812 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1814 /// This is semantically equivalent to C's `memmove`.
1818 /// Care must be taken with the ownership of `src` and `self`.
1819 /// This method semantically moves the values of `src` into `self`.
1820 /// However it does not drop the contents of `self`, or prevent the contents
1821 /// of `src` from being dropped or used.
1825 /// Efficiently create a Rust vector from an unsafe buffer:
1828 /// #![feature(pointer_methods)]
1830 /// # #[allow(dead_code)]
1831 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1832 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
1833 /// dst.set_len(elts);
1834 /// dst.as_mut_ptr().copy_from(ptr, elts);
1838 #[unstable(feature = "pointer_methods", issue = "43941")]
1840 pub unsafe fn copy_from(self, src: *const T, count: usize)
1843 copy(src, self, count)
1846 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1847 /// and destination may *not* overlap.
1849 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
1851 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1855 /// Beyond requiring that the program must be allowed to access both regions
1856 /// of memory, it is Undefined Behavior for source and destination to
1857 /// overlap. Care must also be taken with the ownership of `src` and
1858 /// `self`. This method semantically moves the values of `src` into `self`.
1859 /// However it does not drop the contents of `self`, or prevent the contents
1860 /// of `src` from being dropped or used.
1864 /// Efficiently create a Rust vector from an unsafe buffer:
1867 /// #![feature(pointer_methods)]
1869 /// # #[allow(dead_code)]
1870 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1871 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
1872 /// dst.set_len(elts);
1873 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
1877 #[unstable(feature = "pointer_methods", issue = "43941")]
1879 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
1882 copy_nonoverlapping(src, self, count)
1885 /// Executes the destructor (if any) of the pointed-to value.
1887 /// This has two use cases:
1889 /// * It is *required* to use `drop_in_place` to drop unsized types like
1890 /// trait objects, because they can't be read out onto the stack and
1891 /// dropped normally.
1893 /// * It is friendlier to the optimizer to do this over `ptr::read` when
1894 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
1895 /// as the compiler doesn't need to prove that it's sound to elide the
1900 /// This has all the same safety problems as `ptr::read` with respect to
1901 /// invalid pointers, types, and double drops.
1902 #[unstable(feature = "pointer_methods", issue = "43941")]
1904 pub unsafe fn drop_in_place(self) {
1908 /// Overwrites a memory location with the given value without reading or
1909 /// dropping the old value.
1913 /// This operation is marked unsafe because it writes through a raw pointer.
1915 /// It does not drop the contents of `self`. This is safe, but it could leak
1916 /// allocations or resources, so care must be taken not to overwrite an object
1917 /// that should be dropped.
1919 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
1920 /// location pointed to by `self`.
1922 /// This is appropriate for initializing uninitialized memory, or overwriting
1923 /// memory that has previously been `read` from.
1925 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
1932 /// #![feature(pointer_methods)]
1935 /// let y = &mut x as *mut i32;
1940 /// assert_eq!(y.read(), 12);
1943 #[unstable(feature = "pointer_methods", issue = "43941")]
1945 pub unsafe fn write(self, val: T)
1951 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
1952 /// bytes of memory starting at `self` to `val`.
1957 /// #![feature(pointer_methods)]
1959 /// let mut vec = vec![0; 4];
1961 /// let vec_ptr = vec.as_mut_ptr();
1962 /// vec_ptr.write_bytes(b'a', 2);
1964 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
1966 #[unstable(feature = "pointer_methods", issue = "43941")]
1968 pub unsafe fn write_bytes(self, val: u8, count: usize)
1971 write_bytes(self, val, count)
1974 /// Performs a volatile write of a memory location with the given value without
1975 /// reading or dropping the old value.
1977 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1978 /// to not be elided or reordered by the compiler across other volatile
1983 /// Rust does not currently have a rigorously and formally defined memory model,
1984 /// so the precise semantics of what "volatile" means here is subject to change
1985 /// over time. That being said, the semantics will almost always end up pretty
1986 /// similar to [C11's definition of volatile][c11].
1988 /// The compiler shouldn't change the relative order or number of volatile
1989 /// memory operations. However, volatile memory operations on zero-sized types
1990 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
1991 /// and may be ignored.
1993 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1997 /// This operation is marked unsafe because it accepts a raw pointer.
1999 /// It does not drop the contents of `self`. This is safe, but it could leak
2000 /// allocations or resources, so care must be taken not to overwrite an object
2001 /// that should be dropped.
2003 /// This is appropriate for initializing uninitialized memory, or overwriting
2004 /// memory that has previously been `read` from.
2011 /// #![feature(pointer_methods)]
2014 /// let y = &mut x as *mut i32;
2018 /// y.write_volatile(z);
2019 /// assert_eq!(y.read_volatile(), 12);
2022 #[unstable(feature = "pointer_methods", issue = "43941")]
2024 pub unsafe fn write_volatile(self, val: T)
2027 write_volatile(self, val)
2030 /// Overwrites a memory location with the given value without reading or
2031 /// dropping the old value.
2033 /// Unlike `write`, the pointer may be unaligned.
2037 /// This operation is marked unsafe because it writes through a raw pointer.
2039 /// It does not drop the contents of `self`. This is safe, but it could leak
2040 /// allocations or resources, so care must be taken not to overwrite an object
2041 /// that should be dropped.
2043 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2044 /// location pointed to by `self`.
2046 /// This is appropriate for initializing uninitialized memory, or overwriting
2047 /// memory that has previously been `read` from.
2054 /// #![feature(pointer_methods)]
2057 /// let y = &mut x as *mut i32;
2061 /// y.write_unaligned(z);
2062 /// assert_eq!(y.read_unaligned(), 12);
2065 #[unstable(feature = "pointer_methods", issue = "43941")]
2067 pub unsafe fn write_unaligned(self, val: T)
2070 write_unaligned(self, val)
2073 /// Replaces the value at `self` with `src`, returning the old
2074 /// value, without dropping either.
2078 /// This is only unsafe because it accepts a raw pointer.
2079 /// Otherwise, this operation is identical to `mem::replace`.
2080 #[unstable(feature = "pointer_methods", issue = "43941")]
2082 pub unsafe fn replace(self, src: T) -> T
2088 /// Swaps the values at two mutable locations of the same type, without
2089 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2090 /// otherwise equivalent.
2094 /// This function copies the memory through the raw pointers passed to it
2097 /// Ensure that these pointers are valid before calling `swap`.
2098 #[unstable(feature = "pointer_methods", issue = "43941")]
2100 pub unsafe fn swap(self, with: *mut T)
2107 // Equality for pointers
2108 #[stable(feature = "rust1", since = "1.0.0")]
2109 impl<T: ?Sized> PartialEq for *const T {
2111 fn eq(&self, other: &*const T) -> bool { *self == *other }
2114 #[stable(feature = "rust1", since = "1.0.0")]
2115 impl<T: ?Sized> Eq for *const T {}
2117 #[stable(feature = "rust1", since = "1.0.0")]
2118 impl<T: ?Sized> PartialEq for *mut T {
2120 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2123 #[stable(feature = "rust1", since = "1.0.0")]
2124 impl<T: ?Sized> Eq for *mut T {}
2126 /// Compare raw pointers for equality.
2128 /// This is the same as using the `==` operator, but less generic:
2129 /// the arguments have to be `*const T` raw pointers,
2130 /// not anything that implements `PartialEq`.
2132 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2133 /// by their address rather than comparing the values they point to
2134 /// (which is what the `PartialEq for &T` implementation does).
2142 /// let other_five = 5;
2143 /// let five_ref = &five;
2144 /// let same_five_ref = &five;
2145 /// let other_five_ref = &other_five;
2147 /// assert!(five_ref == same_five_ref);
2148 /// assert!(five_ref == other_five_ref);
2150 /// assert!(ptr::eq(five_ref, same_five_ref));
2151 /// assert!(!ptr::eq(five_ref, other_five_ref));
2153 #[stable(feature = "ptr_eq", since = "1.17.0")]
2155 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2159 // Impls for function pointers
2160 macro_rules! fnptr_impls_safety_abi {
2161 ($FnTy: ty, $($Arg: ident),*) => {
2162 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2163 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2165 fn eq(&self, other: &Self) -> bool {
2166 *self as usize == *other as usize
2170 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2171 impl<Ret, $($Arg),*> Eq for $FnTy {}
2173 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2174 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2176 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2177 (*self as usize).partial_cmp(&(*other as usize))
2181 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2182 impl<Ret, $($Arg),*> Ord for $FnTy {
2184 fn cmp(&self, other: &Self) -> Ordering {
2185 (*self as usize).cmp(&(*other as usize))
2189 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2190 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2191 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2192 state.write_usize(*self as usize)
2196 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2197 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2198 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2199 fmt::Pointer::fmt(&(*self as *const ()), f)
2203 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2204 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2205 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2206 fmt::Pointer::fmt(&(*self as *const ()), f)
// Invokes `fnptr_impls_safety_abi!` once per (safety, ABI) combination for a
// given argument list: safe/unsafe x extern "Rust"/"C", plus the C-variadic
// (`...`) forms. Each invocation generates the comparison/hash/format trait
// impls for that fn-pointer type.
2212 macro_rules! fnptr_impls_args {
2213 ($($Arg: ident),+) => {
2214 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2215 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2216 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2217 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2218 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2219 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
// NOTE(review): the closing of the first macro arm and the header of the
// zero-argument arm (original lines 2220-2221) are elided in this view.
// C requires at least one named parameter before `...`, hence the separate
// zero-argument arm below with no variadic variants.
2222 // No variadic functions with 0 parameters
2223 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2224 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2225 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2226 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
// Generate the fn-pointer trait impls for every arity from 0 through 12.
2230 fnptr_impls_args! { }
2231 fnptr_impls_args! { A }
2232 fnptr_impls_args! { A, B }
2233 fnptr_impls_args! { A, B, C }
2234 fnptr_impls_args! { A, B, C, D }
2235 fnptr_impls_args! { A, B, C, D, E }
2236 fnptr_impls_args! { A, B, C, D, E, F }
2237 fnptr_impls_args! { A, B, C, D, E, F, G }
2238 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2239 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2240 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2241 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2242 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2244 // Comparison for pointers
// Raw pointers are compared by address. A total order over addresses always
// exists, so `Ord` is implementable even though dereferencing is unsafe.
2245 #[stable(feature = "rust1", since = "1.0.0")]
2246 impl<T: ?Sized> Ord for *const T {
2248 fn cmp(&self, other: &*const T) -> Ordering {
// NOTE(review): most of the comparison body (original lines 2249-2258) is
// elided in this view; only the equality branch is visible below.
2251 } else if self == other {
// `partial_cmp` delegates to `cmp`, so it always returns `Some` and is
// guaranteed consistent with the `Ord` impl.
2259 #[stable(feature = "rust1", since = "1.0.0")]
2260 impl<T: ?Sized> PartialOrd for *const T {
2262 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2263 Some(self.cmp(other))
// The defaulted comparison operators are overridden to use the primitive
// pointer comparisons directly, avoiding the `Ordering` round-trip.
2267 fn lt(&self, other: &*const T) -> bool { *self < *other }
2270 fn le(&self, other: &*const T) -> bool { *self <= *other }
2273 fn gt(&self, other: &*const T) -> bool { *self > *other }
2276 fn ge(&self, other: &*const T) -> bool { *self >= *other }
// Address-based ordering for `*mut T`, mirroring the `*const T` impls.
2279 #[stable(feature = "rust1", since = "1.0.0")]
2280 impl<T: ?Sized> Ord for *mut T {
2282 fn cmp(&self, other: &*mut T) -> Ordering {
// NOTE(review): most of the comparison body (original lines 2283-2292) is
// elided in this view; only the equality branch is visible below.
2285 } else if self == other {
// `partial_cmp` delegates to `cmp`, so it always returns `Some` and is
// guaranteed consistent with the `Ord` impl.
2293 #[stable(feature = "rust1", since = "1.0.0")]
2294 impl<T: ?Sized> PartialOrd for *mut T {
2296 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2297 Some(self.cmp(other))
// Overridden to use the primitive pointer comparisons directly.
2301 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2304 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2307 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2310 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2313 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2314 /// of this wrapper owns the referent. Useful for building abstractions like
2315 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2317 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2318 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2319 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2320 /// the referent of the pointer should not be modified without a unique path to
2321 /// its owning Unique.
2323 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2324 /// consider using `NonNull`, which has weaker semantics.
2326 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2327 /// is never dereferenced. This is so that enums may use this forbidden value
2328 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2329 /// However the pointer may still dangle if it isn't dereferenced.
2331 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2332 /// for any type which upholds Unique's aliasing requirements.
2333 #[unstable(feature = "ptr_internals", issue = "0",
2334 reason = "use NonNull instead and consider PhantomData<T> \
2335 (if you also use #[may_dangle]), Send, and/or Sync")]
2336 pub struct Unique<T: ?Sized> {
// Stored as `NonZero<*const T>`: the `*const` keeps `Unique` covariant over
// `T` (per the docs above), and `NonZero` provides the null niche that makes
// `Option<Unique<T>>` the same size as `Unique<T>`.
2337 pointer: NonZero<*const T>,
2338 // NOTE: this marker has no consequences for variance, but is necessary
2339 // for dropck to understand that we logically own a `T`.
2341 // For details, see:
2342 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2343 _marker: PhantomData<T>,
2346 #[unstable(feature = "ptr_internals", issue = "0")]
// Debug output is the raw address: it forwards to `fmt::Pointer` rather than
// dereferencing, so formatting never touches the (possibly dangling) referent.
2347 impl<T: ?Sized> fmt::Debug for Unique<T> {
2348 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2349 fmt::Pointer::fmt(&self.as_ptr(), f)
2353 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2354 /// reference is unaliased. Note that this aliasing invariant is
2355 /// unenforced by the type system; the abstraction using the
2356 /// `Unique` must enforce it.
2357 #[unstable(feature = "ptr_internals", issue = "0")]
// `unsafe impl`: soundness rests entirely on the unaliased-ownership
// invariant documented above, which the wrapping abstraction must uphold.
2358 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2360 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2361 /// reference is unaliased. Note that this aliasing invariant is
2362 /// unenforced by the type system; the abstraction using the
2363 /// `Unique` must enforce it.
2364 #[unstable(feature = "ptr_internals", issue = "0")]
2365 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2367 #[unstable(feature = "ptr_internals", issue = "0")]
2368 impl<T: Sized> Unique<T> {
2369 /// Creates a new `Unique` that is dangling, but well-aligned.
2371 /// This is useful for initializing types which lazily allocate, like
2372 /// `Vec::new` does.
2373 // FIXME: rename to dangling() to match NonNull?
2374 pub fn empty() -> Self {
// NOTE(review): original line 2375 is elided here — `new_unchecked` is
// `unsafe`, so the original presumably opens an `unsafe` block at this point.
// `align_of::<T>()` is always >= 1, so this address is never null (satisfying
// `new_unchecked`'s contract) and is trivially aligned for `T`.
2376 let ptr = mem::align_of::<T>() as *mut T;
2377 Unique::new_unchecked(ptr)
2382 #[unstable(feature = "ptr_internals", issue = "0")]
2383 impl<T: ?Sized> Unique<T> {
2384 /// Creates a new `Unique`.
// NOTE(review): a `# Safety` heading (original lines 2385-2387) is elided in
// this view; the requirement it introduces is the line below.
2388 /// `ptr` must be non-null.
2389 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// The caller guarantees `ptr` is non-null, making `NonZero::new_unchecked`
// sound here.
2390 Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
2393 /// Creates a new `Unique` if `ptr` is non-null.
// Safe counterpart of `new_unchecked`: the null check lives in
// `NonZero::new`, which returns `None` for a null pointer.
2394 pub fn new(ptr: *mut T) -> Option<Self> {
2395 NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
2398 /// Acquires the underlying `*mut` pointer.
2399 pub fn as_ptr(self) -> *mut T {
// The pointer is stored as `*const T` (for covariance); cast back to `*mut`
// for callers.
2400 self.pointer.get() as *mut T
2403 /// Dereferences the content.
2405 /// The resulting lifetime is bound to self so this behaves "as if"
2406 /// it were actually an instance of T that is getting borrowed. If a longer
2407 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2408 pub unsafe fn as_ref(&self) -> &T {
// NOTE(review): the body (original lines 2409-2411) is elided in this view.
2412 /// Mutably dereferences the content.
2414 /// The resulting lifetime is bound to self so this behaves "as if"
2415 /// it were actually an instance of T that is getting borrowed. If a longer
2416 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2417 pub unsafe fn as_mut(&mut self) -> &mut T {
2422 #[unstable(feature = "ptr_internals", issue = "0")]
2423 impl<T: ?Sized> Clone for Unique<T> {
2424 fn clone(&self) -> Self {
// NOTE(review): the clone body (original lines 2425-2428) is elided in this
// view; since `Unique` is also `Copy` (below), it presumably returns `*self`.
// `Clone`/`Copy` are manual (not derived) because deriving would require
// `T: Clone`/`T: Copy`, while only the pointer itself is being copied.
2429 #[unstable(feature = "ptr_internals", issue = "0")]
2430 impl<T: ?Sized> Copy for Unique<T> { }
2432 #[unstable(feature = "ptr_internals", issue = "0")]
// Enables unsizing coercions such as `Unique<[T; N]>` -> `Unique<[T]>` and
// `Unique<T>` -> `Unique<dyn Trait>`.
2433 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2435 #[unstable(feature = "ptr_internals", issue = "0")]
// Formats the raw address held by the `Unique`.
2436 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2437 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2438 fmt::Pointer::fmt(&self.as_ptr(), f)
2442 #[unstable(feature = "ptr_internals", issue = "0")]
// Infallible conversion: a Rust reference is never null, so `NonZero::from`
// needs no runtime check.
2443 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2444 fn from(reference: &'a mut T) -> Self {
2445 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2449 #[unstable(feature = "ptr_internals", issue = "0")]
// As above, but from a shared reference; the non-null guarantee is identical.
2450 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2451 fn from(reference: &'a T) -> Self {
2452 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2456 #[unstable(feature = "ptr_internals", issue = "0")]
// `NonNull` already stores a `NonZero` pointer, so the inner field can be
// moved over directly with no check.
2457 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2458 fn from(p: NonNull<T>) -> Self {
2459 Unique { pointer: p.pointer, _marker: PhantomData }
2463 /// Previous name of `NonNull`.
// Kept only for backward compatibility; new code should name `NonNull`
// directly (see the deprecation attribute below).
2464 #[rustc_deprecated(since = "1.25.0", reason = "renamed to `NonNull`")]
2465 #[unstable(feature = "shared", issue = "27730")]
2466 pub type Shared<T> = NonNull<T>;
2468 /// `*mut T` but non-zero and covariant.
2470 /// This is often the correct thing to use when building data structures using
2471 /// raw pointers, but is ultimately more dangerous to use because of its additional
2472 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2474 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2475 /// is never dereferenced. This is so that enums may use this forbidden value
2476 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `NonNull<T>`.
2477 /// However the pointer may still dangle if it isn't dereferenced.
2479 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2480 /// for your use case, you should include some PhantomData in your type to
2481 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2482 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2483 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2484 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
2485 #[stable(feature = "nonnull", since = "1.25.0")]
2486 pub struct NonNull<T: ?Sized> {
// Stored as `NonZero<*const T>`: `*const` gives the covariance documented
// above, and `NonZero` provides the null niche that makes `Option<NonNull<T>>`
// the same size as `NonNull<T>`.
2487 pointer: NonZero<*const T>,
2490 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2491 // NB: This impl is unnecessary, but should provide better error messages.
// Explicit negative impls: the contained raw pointer already opts out of the
// auto traits, so these only exist to improve diagnostics (per the NB above).
2492 #[stable(feature = "nonnull", since = "1.25.0")]
2493 impl<T: ?Sized> !Send for NonNull<T> { }
2495 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2496 // NB: This impl is unnecessary, but should provide better error messages.
2497 #[stable(feature = "nonnull", since = "1.25.0")]
2498 impl<T: ?Sized> !Sync for NonNull<T> { }
2500 impl<T: Sized> NonNull<T> {
2501 /// Creates a new `NonNull` that is dangling, but well-aligned.
2503 /// This is useful for initializing types which lazily allocate, like
2504 /// `Vec::new` does.
2505 #[stable(feature = "nonnull", since = "1.25.0")]
2506 pub fn dangling() -> Self {
// NOTE(review): original line 2507 is elided here — `new_unchecked` is
// `unsafe`, so the original presumably opens an `unsafe` block at this point.
// `align_of::<T>()` is always >= 1, so this address is never null (satisfying
// `new_unchecked`'s contract) and is trivially aligned for `T`.
2508 let ptr = mem::align_of::<T>() as *mut T;
2509 NonNull::new_unchecked(ptr)
2514 impl<T: ?Sized> NonNull<T> {
2515 /// Creates a new `NonNull`.
// NOTE(review): a `# Safety` heading (original lines 2516-2518) is elided in
// this view; the requirement it introduces is the line below.
2519 /// `ptr` must be non-null.
2520 #[stable(feature = "nonnull", since = "1.25.0")]
2521 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// The caller guarantees `ptr` is non-null, making `NonZero::new_unchecked`
// sound here.
2522 NonNull { pointer: NonZero::new_unchecked(ptr) }
2525 /// Creates a new `NonNull` if `ptr` is non-null.
// Safe counterpart of `new_unchecked`: the null check lives in
// `NonZero::new`, which returns `None` for a null pointer.
2526 #[stable(feature = "nonnull", since = "1.25.0")]
2527 pub fn new(ptr: *mut T) -> Option<Self> {
2528 NonZero::new(ptr as *const T).map(|nz| NonNull { pointer: nz })
2531 /// Acquires the underlying `*mut` pointer.
2532 #[stable(feature = "nonnull", since = "1.25.0")]
2533 pub fn as_ptr(self) -> *mut T {
// The pointer is stored as `*const T` (for covariance); cast back to `*mut`
// for callers.
2534 self.pointer.get() as *mut T
2537 /// Dereferences the content.
2539 /// The resulting lifetime is bound to self so this behaves "as if"
2540 /// it were actually an instance of T that is getting borrowed. If a longer
2541 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2542 #[stable(feature = "nonnull", since = "1.25.0")]
2543 pub unsafe fn as_ref(&self) -> &T {
// NOTE(review): the body (original lines 2544-2546) is elided in this view.
2547 /// Mutably dereferences the content.
2549 /// The resulting lifetime is bound to self so this behaves "as if"
2550 /// it were actually an instance of T that is getting borrowed. If a longer
2551 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2552 #[stable(feature = "nonnull", since = "1.25.0")]
2553 pub unsafe fn as_mut(&mut self) -> &mut T {
2558 #[stable(feature = "nonnull", since = "1.25.0")]
2559 impl<T: ?Sized> Clone for NonNull<T> {
2560 fn clone(&self) -> Self {
// NOTE(review): the clone body (original lines 2561-2564) is elided in this
// view; since `NonNull` is also `Copy` (below), it presumably returns `*self`.
// `Clone`/`Copy` are manual (not derived) because deriving would require
// `T: Clone`/`T: Copy`, while only the pointer itself is being copied.
2565 #[stable(feature = "nonnull", since = "1.25.0")]
2566 impl<T: ?Sized> Copy for NonNull<T> { }
2568 #[stable(feature = "nonnull", since = "1.25.0")]
// Enables unsizing coercions such as `NonNull<[T; N]>` -> `NonNull<[T]>` and
// `NonNull<T>` -> `NonNull<dyn Trait>`.
2569 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2571 #[stable(feature = "nonnull", since = "1.25.0")]
// Debug output is the raw address: it forwards to `fmt::Pointer` rather than
// dereferencing, so formatting never touches the (possibly dangling) referent.
2572 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2573 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2574 fmt::Pointer::fmt(&self.as_ptr(), f)
2578 #[stable(feature = "nonnull", since = "1.25.0")]
2579 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2580 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2581 fmt::Pointer::fmt(&self.as_ptr(), f)
2585 #[stable(feature = "nonnull", since = "1.25.0")]
// `Unique` already stores a `NonZero` pointer, so the inner field can be
// moved over directly with no runtime check.
2586 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2587 fn from(unique: Unique<T>) -> Self {
2588 NonNull { pointer: unique.pointer }
2592 #[stable(feature = "nonnull", since = "1.25.0")]
// Infallible conversion: a Rust reference is never null, so `NonZero::from`
// needs no runtime check.
2593 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
2594 fn from(reference: &'a mut T) -> Self {
2595 NonNull { pointer: NonZero::from(reference) }
2599 #[stable(feature = "nonnull", since = "1.25.0")]
2600 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
2601 fn from(reference: &'a T) -> Self {
2602 NonNull { pointer: NonZero::from(reference) }