1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
44 /// trait objects, because they can't be read out onto the stack and dropped normally.
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
49 /// as the compiler doesn't need to prove that it's sound to elide the copy.
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
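/// As an extra, purely illustrative sketch (not part of the original docs), one
/// way to run a destructor exactly once through a raw pointer is to wrap the
/// value in `ManuallyDrop` so the normal drop at end of scope is suppressed:
///
/// ```
/// use std::mem::ManuallyDrop;
/// use std::ptr;
///
/// let mut s = ManuallyDrop::new(String::from("hello"));
/// unsafe {
///     // Runs the String's destructor; `s` must not be used or dropped again.
///     ptr::drop_in_place(&mut *s as *mut String);
/// }
/// ```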
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
62 drop_in_place(to_drop);
65 /// Creates a null raw pointer.
72 /// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
86 /// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
103 /// This function copies the memory through the raw pointers passed to it as arguments.
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
115 /// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
122 /// assert_eq!([2, 3, 0, 1], array);
126 /// Swapping two overlapping regions:
131 /// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
138 /// assert_eq!([1, 0, 1, 2], array);
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
152 // y and t now point to the same thing, but we need to completely forget `tmp`
153 // because it's no longer relevant.
mem::forget(tmp);
}
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
169 /// #![feature(swap_nonoverlapping)]
173 /// let mut x = [1, 2, 3, 4];
174 /// let mut y = [7, 8, 9];
177 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
180 /// assert_eq!(x, [7, 8, 3, 4]);
181 /// assert_eq!(y, [1, 2, 9]);
184 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
185 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
186 let x = x as *mut u8;
187 let y = y as *mut u8;
188 let len = mem::size_of::<T>() * count;
189 swap_nonoverlapping_bytes(x, y, len)
193 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
194 // The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
195 // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
196 // Haswell E processors. LLVM is more able to optimize if we give a struct a
197 // #[repr(simd)], even if we don't actually use this struct directly.
199 // FIXME repr(simd) broken on emscripten and redox
200 // It's also broken on big-endian powerpc64 and s390x. #42778
201 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
202 target_endian = "big")), repr(simd))]
204 struct Block(u64, u64, u64, u64);
205 struct UnalignedBlock(u64, u64, u64, u64);
207 let block_size = mem::size_of::<Block>();
209 // Loop through x & y, copying them `Block` at a time
210 // The optimizer should unroll the loop fully for most types
211 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
let mut i = 0;
213 while i + block_size <= len {
214 // Create some uninitialized memory as scratch space
215 // Declaring `t` here avoids aligning the stack when this loop is unused
216 let mut t: Block = mem::uninitialized();
217 let t = &mut t as *mut _ as *mut u8;
218 let x = x.offset(i as isize);
219 let y = y.offset(i as isize);
221 // Swap a block of bytes of x & y, using t as a temporary buffer
222 // This should be optimized into efficient SIMD operations where available
223 copy_nonoverlapping(x, t, block_size);
224 copy_nonoverlapping(y, x, block_size);
225 copy_nonoverlapping(t, y, block_size);
i += block_size;
}
230 // Swap any remaining bytes
231 let mut t: UnalignedBlock = mem::uninitialized();
let rem = len - i;
234 let t = &mut t as *mut _ as *mut u8;
235 let x = x.offset(i as isize);
236 let y = y.offset(i as isize);
238 copy_nonoverlapping(x, t, rem);
239 copy_nonoverlapping(y, x, rem);
240 copy_nonoverlapping(t, y, rem);
244 /// Replaces the value at `dest` with `src`, returning the old
245 /// value, without dropping either.
249 /// This is only unsafe because it accepts a raw pointer.
250 /// Otherwise, this operation is identical to `mem::replace`.
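/// A brief usage sketch (added for illustration, not from the original docs):
/// replacing the value behind a raw pointer while keeping the old one.
///
/// ```
/// use std::ptr;
///
/// let mut v = 5;
/// let old = unsafe { ptr::replace(&mut v as *mut i32, 10) };
/// assert_eq!(old, 5);
/// assert_eq!(v, 10);
/// ```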
252 #[stable(feature = "rust1", since = "1.0.0")]
253 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
254 mem::swap(&mut *dest, &mut src); // cannot overlap
258 /// Reads the value from `src` without moving it. This leaves the
259 /// memory in `src` unchanged.
263 /// Beyond accepting a raw pointer, this is unsafe because it semantically
264 /// moves the value out of `src` without preventing further usage of `src`.
265 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
266 /// `src` is not used before the data is overwritten again (e.g. with `write`,
267 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
268 /// because it will attempt to drop the value previously at `*src`.
270 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
278 /// let y = &x as *const i32;
281 /// assert_eq!(std::ptr::read(y), 12);
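/// For a non-`Copy` type, the following sketch (illustrative, not from the
/// original docs) shows the pattern the safety notes describe: the source is
/// overwritten with `write` after the `read`, so no value is dropped twice.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // Semantically move the String out through a raw pointer...
///     let moved = ptr::read(&s);
///     // ...then overwrite the source so the old value is not dropped twice.
///     ptr::write(&mut s, String::from("bar"));
///     assert_eq!(moved, "foo");
///     assert_eq!(s, "bar");
/// }
/// ```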
285 #[stable(feature = "rust1", since = "1.0.0")]
286 pub unsafe fn read<T>(src: *const T) -> T {
287 let mut tmp: T = mem::uninitialized();
288 copy_nonoverlapping(src, &mut tmp, 1);
292 /// Reads the value from `src` without moving it. This leaves the
293 /// memory in `src` unchanged.
295 /// Unlike `read`, the pointer may be unaligned.
299 /// Beyond accepting a raw pointer, this is unsafe because it semantically
300 /// moves the value out of `src` without preventing further usage of `src`.
301 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
302 /// `src` is not used before the data is overwritten again (e.g. with `write`,
303 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
304 /// because it will attempt to drop the value previously at `*src`.
312 /// let y = &x as *const i32;
315 /// assert_eq!(std::ptr::read_unaligned(y), 12);
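/// The example above uses an aligned pointer; as an additional sketch (not from
/// the original docs), an unaligned read out of a byte buffer looks like this:
///
/// ```
/// let bytes = [0u8, 1, 2, 3, 4];
/// // Read a u32 starting at byte offset 1, which is not 4-byte aligned.
/// // The resulting value depends on the platform's endianness.
/// let v = unsafe { std::ptr::read_unaligned(bytes.as_ptr().offset(1) as *const u32) };
/// ```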
319 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
320 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
321 let mut tmp: T = mem::uninitialized();
322 copy_nonoverlapping(src as *const u8,
323 &mut tmp as *mut T as *mut u8,
324 mem::size_of::<T>());
328 /// Overwrites a memory location with the given value without reading or
329 /// dropping the old value.
333 /// This operation is marked unsafe because it accepts a raw pointer.
335 /// It does not drop the contents of `dst`. This is safe, but it could leak
336 /// allocations or resources, so care must be taken not to overwrite an object
337 /// that should be dropped.
339 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
340 /// location pointed to by `dst`.
342 /// This is appropriate for initializing uninitialized memory, or overwriting
343 /// memory that has previously been `read` from.
345 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
353 /// let y = &mut x as *mut i32;
357 /// std::ptr::write(y, z);
358 /// assert_eq!(std::ptr::read(y), 12);
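/// As a further sketch (not from the original docs), `write` is the usual way to
/// initialize memory that was obtained uninitialized, since it does not attempt
/// to drop whatever garbage currently sits there:
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// unsafe {
///     let mut x: i32 = mem::uninitialized();
///     // No old value is read or dropped; the slot is simply initialized.
///     ptr::write(&mut x, 42);
///     assert_eq!(x, 42);
/// }
/// ```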
362 #[stable(feature = "rust1", since = "1.0.0")]
363 pub unsafe fn write<T>(dst: *mut T, src: T) {
364 intrinsics::move_val_init(&mut *dst, src)
367 /// Overwrites a memory location with the given value without reading or
368 /// dropping the old value.
370 /// Unlike `write`, the pointer may be unaligned.
374 /// This operation is marked unsafe because it accepts a raw pointer.
376 /// It does not drop the contents of `dst`. This is safe, but it could leak
377 /// allocations or resources, so care must be taken not to overwrite an object
378 /// that should be dropped.
380 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
381 /// location pointed to by `dst`.
383 /// This is appropriate for initializing uninitialized memory, or overwriting
384 /// memory that has previously been `read` from.
392 /// let y = &mut x as *mut i32;
396 /// std::ptr::write_unaligned(y, z);
397 /// assert_eq!(std::ptr::read_unaligned(y), 12);
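/// And a sketch of a genuinely unaligned store (illustrative, not from the
/// original docs), writing a `u32` into a byte buffer at an odd offset:
///
/// ```
/// let mut bytes = [0u8; 5];
/// unsafe {
///     // The destination starts at byte offset 1, so it need not be 4-byte aligned.
///     std::ptr::write_unaligned(bytes.as_mut_ptr().offset(1) as *mut u32, 0xdead_beef);
/// }
/// // The first byte was never part of the destination and is untouched.
/// assert_eq!(bytes[0], 0);
/// ```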
401 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
402 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
403 copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8,
405 mem::size_of::<T>());
409 /// Performs a volatile read of the value from `src` without moving it. This
410 /// leaves the memory in `src` unchanged.
412 /// Volatile operations are intended to act on I/O memory, and are guaranteed
413 /// to not be elided or reordered by the compiler across other volatile
418 /// Rust does not currently have a rigorously and formally defined memory model,
419 /// so the precise semantics of what "volatile" means here is subject to change
420 /// over time. That being said, the semantics will almost always end up pretty
421 /// similar to [C11's definition of volatile][c11].
423 /// The compiler shouldn't change the relative order or number of volatile
424 /// memory operations. However, volatile memory operations on zero-sized types
425 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
426 /// and may be ignored.
428 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
432 /// Beyond accepting a raw pointer, this is unsafe because it semantically
433 /// moves the value out of `src` without preventing further usage of `src`.
434 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
435 /// `src` is not used before the data is overwritten again (e.g. with `write`,
436 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
437 /// because it will attempt to drop the value previously at `*src`.
445 /// let y = &x as *const i32;
448 /// assert_eq!(std::ptr::read_volatile(y), 12);
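/// Since the example above does not involve I/O memory, here is a hedged sketch
/// of the intended use case; the register address is purely hypothetical:
///
/// ```no_run
/// unsafe fn poll_status() -> u32 {
///     // Hypothetical address of a memory-mapped status register.
///     let status_reg = 0x4000_0000 as *const u32;
///     // Every call performs an actual load; the compiler will not merge,
///     // reorder, or elide it relative to other volatile operations.
///     std::ptr::read_volatile(status_reg)
/// }
/// ```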
452 #[stable(feature = "volatile", since = "1.9.0")]
453 pub unsafe fn read_volatile<T>(src: *const T) -> T {
454 intrinsics::volatile_load(src)
457 /// Performs a volatile write of a memory location with the given value without
458 /// reading or dropping the old value.
460 /// Volatile operations are intended to act on I/O memory, and are guaranteed
461 /// to not be elided or reordered by the compiler across other volatile
466 /// Rust does not currently have a rigorously and formally defined memory model,
467 /// so the precise semantics of what "volatile" means here is subject to change
468 /// over time. That being said, the semantics will almost always end up pretty
469 /// similar to [C11's definition of volatile][c11].
471 /// The compiler shouldn't change the relative order or number of volatile
472 /// memory operations. However, volatile memory operations on zero-sized types
473 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
474 /// and may be ignored.
476 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
480 /// This operation is marked unsafe because it accepts a raw pointer.
482 /// It does not drop the contents of `dst`. This is safe, but it could leak
483 /// allocations or resources, so care must be taken not to overwrite an object
484 /// that should be dropped.
486 /// This is appropriate for initializing uninitialized memory, or overwriting
487 /// memory that has previously been `read` from.
495 /// let y = &mut x as *mut i32;
499 /// std::ptr::write_volatile(y, z);
500 /// assert_eq!(std::ptr::read_volatile(y), 12);
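/// As with `read_volatile`, a hedged sketch of the intended I/O use; the
/// register address here is purely hypothetical:
///
/// ```no_run
/// unsafe fn acknowledge_interrupt() {
///     // Hypothetical address of a memory-mapped control register.
///     let ctrl_reg = 0x4000_0004 as *mut u32;
///     // The store is performed exactly as written; it is not elided or merged.
///     std::ptr::write_volatile(ctrl_reg, 1);
/// }
/// ```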
504 #[stable(feature = "volatile", since = "1.9.0")]
505 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
506 intrinsics::volatile_store(dst, src);
509 #[lang = "const_ptr"]
510 impl<T: ?Sized> *const T {
511 /// Returns `true` if the pointer is null.
513 /// Note that unsized types have many possible null pointers, as only the
514 /// raw data pointer is considered, not their length, vtable, etc.
515 /// Therefore, two pointers that are null may still not compare equal to each other.
523 /// let s: &str = "Follow the rabbit";
524 /// let ptr: *const u8 = s.as_ptr();
525 /// assert!(!ptr.is_null());
527 #[stable(feature = "rust1", since = "1.0.0")]
529 pub fn is_null(self) -> bool {
530 // Compare via a cast to a thin pointer, so fat pointers are only
531 // considering their "data" part for null-ness.
532 (self as *const u8) == null()
535 /// Returns `None` if the pointer is null, or else returns a reference to
536 /// the value wrapped in `Some`.
540 /// While this method and its mutable counterpart are useful for
541 /// null-safety, it is important to note that this is still an unsafe
542 /// operation because the returned value could be pointing to invalid memory.
545 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
546 /// not necessarily reflect the actual lifetime of the data.
553 /// let ptr: *const u8 = &10u8 as *const u8;
556 /// if let Some(val_back) = ptr.as_ref() {
557 /// println!("We got back the value: {}!", val_back);
561 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
563 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
571 /// Calculates the offset from a pointer.
573 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
574 /// offset of `3 * size_of::<T>()` bytes.
578 /// If any of the following conditions are violated, the result is Undefined Behavior:
581 /// * Both the starting and resulting pointer must be either in bounds or one
582 /// byte past the end of an allocated object.
584 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
586 /// * The offset being in bounds cannot rely on "wrapping around" the address
587 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a usize.
589 /// The compiler and standard library generally try to ensure allocations
590 /// never reach a size where an offset is a concern. For instance, `Vec`
591 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
592 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
594 /// Most platforms fundamentally can't even construct such an allocation.
595 /// For instance, no known 64-bit platform can ever serve a request
596 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
597 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
598 /// more than `isize::MAX` bytes with things like Physical Address
599 /// Extension. As such, memory acquired directly from allocators or memory
600 /// mapped files *may* be too large to handle with this function.
602 /// Consider using `wrapping_offset` instead if these constraints are
603 /// difficult to satisfy. The only advantage of this method is that it
604 /// enables more aggressive compiler optimizations.
611 /// let s: &str = "123";
612 /// let ptr: *const u8 = s.as_ptr();
615 /// println!("{}", *ptr.offset(1) as char);
616 /// println!("{}", *ptr.offset(2) as char);
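/// As a sketch of the note above about `Vec` (illustrative, not from the
/// original docs), computing a one-past-the-end pointer with `offset` stays in
/// bounds because `Vec` never allocates more than `isize::MAX` bytes:
///
/// ```
/// let v = vec![1, 2, 3];
/// let start = v.as_ptr();
/// // Points one element past the last element; it may be compared but not
/// // dereferenced.
/// let end = unsafe { start.offset(v.len() as isize) };
/// assert!(end != start);
/// ```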
619 #[stable(feature = "rust1", since = "1.0.0")]
621 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
622 intrinsics::offset(self, count)
625 /// Calculates the offset from a pointer using wrapping arithmetic.
627 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
628 /// offset of `3 * size_of::<T>()` bytes.
632 /// The resulting pointer does not need to be in bounds, but it is
633 /// potentially hazardous to dereference (which requires `unsafe`).
635 /// Always use `.offset(count)` instead when possible, because `offset`
636 /// allows the compiler to optimize better.
643 /// // Iterate using a raw pointer in increments of two elements
644 /// let data = [1u8, 2, 3, 4, 5];
645 /// let mut ptr: *const u8 = data.as_ptr();
647 /// let end_rounded_up = ptr.wrapping_offset(6);
649 /// // This loop prints "1, 3, 5, "
650 /// while ptr != end_rounded_up {
652 /// print!("{}, ", *ptr);
654 /// ptr = ptr.wrapping_offset(step);
657 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
659 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
661 intrinsics::arith_offset(self, count)
665 /// Calculates the distance between two pointers. The returned value is in
666 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
668 /// If the address difference between the two pointers is not a multiple of
669 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
672 /// This function returns `None` if `T` is a zero-sized type.
679 /// #![feature(offset_to)]
683 /// let ptr1: *const i32 = &a[1];
684 /// let ptr2: *const i32 = &a[3];
685 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
686 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
687 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
688 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
691 #[unstable(feature = "offset_to", issue = "41079")]
693 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
694 let size = mem::size_of::<T>();
698 let diff = (other as isize).wrapping_sub(self as isize);
699 Some(diff / size as isize)
703 /// Calculates the distance between two pointers. The returned value is in
704 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
706 /// This function is the inverse of [`offset`].
708 /// [`offset`]: #method.offset
709 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
713 /// If any of the following conditions are violated, the result is Undefined Behavior:
716 /// * Both the starting and other pointer must be either in bounds or one
717 /// byte past the end of the same allocated object.
719 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
721 /// * The distance between the pointers, in bytes, must be an exact multiple
722 /// of the size of `T` and `T` must not be a Zero-Sized Type ("ZST").
724 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
726 /// The compiler and standard library generally try to ensure allocations
727 /// never reach a size where an offset is a concern. For instance, `Vec`
728 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
729 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
731 /// Most platforms fundamentally can't even construct such an allocation.
732 /// For instance, no known 64-bit platform can ever serve a request
733 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
734 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
735 /// more than `isize::MAX` bytes with things like Physical Address
736 /// Extension. As such, memory acquired directly from allocators or memory
737 /// mapped files *may* be too large to handle with this function.
739 /// Consider using [`wrapping_offset_from`] instead if these constraints are
740 /// difficult to satisfy. The only advantage of this method is that it
741 /// enables more aggressive compiler optimizations.
748 /// #![feature(ptr_offset_from)]
751 /// let ptr1: *const i32 = &a[1];
752 /// let ptr2: *const i32 = &a[3];
754 /// assert_eq!(ptr2.offset_from(ptr1), 2);
755 /// assert_eq!(ptr1.offset_from(ptr2), -2);
756 /// assert_eq!(ptr1.offset(2), ptr2);
757 /// assert_eq!(ptr2.offset(-2), ptr1);
760 #[unstable(feature = "ptr_offset_from", issue = "41079")]
762 pub unsafe fn offset_from(self, other: *const T) -> isize where T: Sized {
763 let pointee_size = mem::size_of::<T>();
764 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
766 // FIXME: can this be nuw/nsw?
767 let d = isize::wrapping_sub(self as _, other as _);
768 intrinsics::exact_div(d, pointee_size as _)
771 /// Calculates the distance between two pointers. The returned value is in
772 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
774 /// If the address difference between the two pointers is not a multiple of
775 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
780 /// This function panics if `T` is a zero-sized type.
787 /// #![feature(ptr_wrapping_offset_from)]
790 /// let ptr1: *const i32 = &a[1];
791 /// let ptr2: *const i32 = &a[3];
792 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
793 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
794 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
795 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
797 /// let ptr1: *const i32 = 3 as _;
798 /// let ptr2: *const i32 = 13 as _;
799 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
801 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
803 pub fn wrapping_offset_from(self, other: *const T) -> isize where T: Sized {
804 let pointee_size = mem::size_of::<T>();
805 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
807 let d = isize::wrapping_sub(self as _, other as _);
808 d.wrapping_div(pointee_size as _)
811 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
813 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
814 /// offset of `3 * size_of::<T>()` bytes.
818 /// If any of the following conditions are violated, the result is Undefined Behavior:
821 /// * Both the starting and resulting pointer must be either in bounds or one
822 /// byte past the end of an allocated object.
824 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
826 /// * The offset being in bounds cannot rely on "wrapping around" the address
827 /// space. That is, the infinite-precision sum must fit in a `usize`.
829 /// The compiler and standard library generally try to ensure allocations
830 /// never reach a size where an offset is a concern. For instance, `Vec`
831 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
832 /// `vec.as_ptr().add(vec.len())` is always safe.
834 /// Most platforms fundamentally can't even construct such an allocation.
835 /// For instance, no known 64-bit platform can ever serve a request
836 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
837 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
838 /// more than `isize::MAX` bytes with things like Physical Address
839 /// Extension. As such, memory acquired directly from allocators or memory
840 /// mapped files *may* be too large to handle with this function.
842 /// Consider using `wrapping_offset` instead if these constraints are
843 /// difficult to satisfy. The only advantage of this method is that it
844 /// enables more aggressive compiler optimizations.
851 /// let s: &str = "123";
852 /// let ptr: *const u8 = s.as_ptr();
855 /// println!("{}", *ptr.add(1) as char);
856 /// println!("{}", *ptr.add(2) as char);
859 #[stable(feature = "pointer_methods", since = "1.26.0")]
861 pub unsafe fn add(self, count: usize) -> Self
864 self.offset(count as isize)
867 /// Calculates the offset from a pointer (convenience for
868 /// `.offset((count as isize).wrapping_neg())`).
870 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
871 /// offset of `3 * size_of::<T>()` bytes.
875 /// If any of the following conditions are violated, the result is Undefined Behavior:
878 /// * Both the starting and resulting pointer must be either in bounds or one
879 /// byte past the end of an allocated object.
881 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
883 /// * The offset being in bounds cannot rely on "wrapping around" the address
884 /// space. That is, the infinite-precision sum must fit in a usize.
886 /// The compiler and standard library generally try to ensure allocations
887 /// never reach a size where an offset is a concern. For instance, `Vec`
888 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
889 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
891 /// Most platforms fundamentally can't even construct such an allocation.
892 /// For instance, no known 64-bit platform can ever serve a request
893 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
894 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
895 /// more than `isize::MAX` bytes with things like Physical Address
896 /// Extension. As such, memory acquired directly from allocators or memory
897 /// mapped files *may* be too large to handle with this function.
899 /// Consider using `wrapping_offset` instead if these constraints are
900 /// difficult to satisfy. The only advantage of this method is that it
901 /// enables more aggressive compiler optimizations.
908 /// let s: &str = "123";
911 /// let end: *const u8 = s.as_ptr().add(3);
912 /// println!("{}", *end.sub(1) as char);
913 /// println!("{}", *end.sub(2) as char);
916 #[stable(feature = "pointer_methods", since = "1.26.0")]
918 pub unsafe fn sub(self, count: usize) -> Self
921 self.offset((count as isize).wrapping_neg())
924 /// Calculates the offset from a pointer using wrapping arithmetic.
925 /// (convenience for `.wrapping_offset(count as isize)`)
927 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
928 /// offset of `3 * size_of::<T>()` bytes.
932 /// The resulting pointer does not need to be in bounds, but it is
933 /// potentially hazardous to dereference (which requires `unsafe`).
935 /// Always use `.add(count)` instead when possible, because `add`
936 /// allows the compiler to optimize better.
943 /// // Iterate using a raw pointer in increments of two elements
944 /// let data = [1u8, 2, 3, 4, 5];
945 /// let mut ptr: *const u8 = data.as_ptr();
947 /// let end_rounded_up = ptr.wrapping_add(6);
949 /// // This loop prints "1, 3, 5, "
950 /// while ptr != end_rounded_up {
952 /// print!("{}, ", *ptr);
954 /// ptr = ptr.wrapping_add(step);
957 #[stable(feature = "pointer_methods", since = "1.26.0")]
959 pub fn wrapping_add(self, count: usize) -> Self
962 self.wrapping_offset(count as isize)
965 /// Calculates the offset from a pointer using wrapping arithmetic.
966 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
968 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
969 /// offset of `3 * size_of::<T>()` bytes.
973 /// The resulting pointer does not need to be in bounds, but it is
974 /// potentially hazardous to dereference (which requires `unsafe`).
976 /// Always use `.sub(count)` instead when possible, because `sub`
977 /// allows the compiler to optimize better.
984 /// // Iterate using a raw pointer in increments of two elements (backwards)
985 /// let data = [1u8, 2, 3, 4, 5];
986 /// let mut ptr: *const u8 = data.as_ptr();
987 /// let start_rounded_down = ptr.wrapping_sub(2);
988 /// ptr = ptr.wrapping_add(4);
990 /// // This loop prints "5, 3, 1, "
991 /// while ptr != start_rounded_down {
993 /// print!("{}, ", *ptr);
995 /// ptr = ptr.wrapping_sub(step);
998 #[stable(feature = "pointer_methods", since = "1.26.0")]
1000 pub fn wrapping_sub(self, count: usize) -> Self
1003 self.wrapping_offset((count as isize).wrapping_neg())
1006 /// Reads the value from `self` without moving it. This leaves the
1007 /// memory in `self` unchanged.
1011 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1012 /// moves the value out of `self` without preventing further usage of `self`.
1013 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1014 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1015 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1016 /// because it will attempt to drop the value previously at `*self`.
1018 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1026 /// let y = &x as *const i32;
1029 /// assert_eq!(y.read(), 12);
1032 #[stable(feature = "pointer_methods", since = "1.26.0")]
1034 pub unsafe fn read(self) -> T
1040 /// Performs a volatile read of the value from `self` without moving it. This
1041 /// leaves the memory in `self` unchanged.
1043 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1044 /// to not be elided or reordered by the compiler across other volatile
1049 /// Rust does not currently have a rigorously and formally defined memory model,
1050 /// so the precise semantics of what "volatile" means here is subject to change
1051 /// over time. That being said, the semantics will almost always end up pretty
1052 /// similar to [C11's definition of volatile][c11].
1054 /// The compiler shouldn't change the relative order or number of volatile
1055 /// memory operations. However, volatile memory operations on zero-sized types
1056 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1057 /// and may be ignored.
1059 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1063 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1064 /// moves the value out of `self` without preventing further usage of `self`.
1065 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1066 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1067 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1068 /// because it will attempt to drop the value previously at `*self`.
1076 /// let y = &x as *const i32;
1079 /// assert_eq!(y.read_volatile(), 12);
1082 #[stable(feature = "pointer_methods", since = "1.26.0")]
1084 pub unsafe fn read_volatile(self) -> T
1090 /// Reads the value from `self` without moving it. This leaves the
1091 /// memory in `self` unchanged.
1093 /// Unlike `read`, the pointer may be unaligned.
1097 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1098 /// moves the value out of `self` without preventing further usage of `self`.
1099 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1100 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1101 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1102 /// because it will attempt to drop the value previously at `*self`.
1110 /// let y = &x as *const i32;
1113 /// assert_eq!(y.read_unaligned(), 12);
1116 #[stable(feature = "pointer_methods", since = "1.26.0")]
1118 pub unsafe fn read_unaligned(self) -> T
1121 read_unaligned(self)
1124 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1125 /// and destination may overlap.
1127 /// NOTE: this has the *same* argument order as `ptr::copy`.
1129 /// This is semantically equivalent to C's `memmove`.
1133 /// Care must be taken with the ownership of `self` and `dest`.
1134 /// This method semantically moves the values of `self` into `dest`.
1135 /// However it does not drop the contents of `self`, or prevent the contents
1136 /// of `dest` from being dropped or used.
1140 /// Efficiently create a Rust vector from an unsafe buffer:
1143 /// # #[allow(dead_code)]
1144 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1145 /// let mut dst = Vec::with_capacity(elts);
1146 /// dst.set_len(elts);
1147 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1151 #[stable(feature = "pointer_methods", since = "1.26.0")]
1153 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1156 copy(self, dest, count)
1159 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1160 /// and destination may *not* overlap.
1162 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1164 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1168 /// Beyond requiring that the program must be allowed to access both regions
1169 /// of memory, it is Undefined Behavior for source and destination to
1170 /// overlap. Care must also be taken with the ownership of `self` and
1171 `dest`. This method semantically moves the values of `self` into `dest`.
1172 /// However it does not drop the contents of `dest`, or prevent the contents
1173 /// of `self` from being dropped or used.
1177 /// Efficiently create a Rust vector from an unsafe buffer:
1180 /// # #[allow(dead_code)]
1181 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1182 /// let mut dst = Vec::with_capacity(elts);
1183 /// dst.set_len(elts);
1184 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1188 #[stable(feature = "pointer_methods", since = "1.26.0")]
1190 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1193 copy_nonoverlapping(self, dest, count)
1196 /// Computes the byte offset that needs to be applied in order to
1197 /// make the pointer aligned to `align`.
1198 /// If it is not possible to align the pointer, the implementation returns
1199 /// `usize::max_value()`.
1201 /// There are no guarantees whatsoever that offsetting the pointer will not
1202 /// overflow or go beyond the allocation that the pointer points into.
1203 /// It is up to the caller to ensure that the returned offset is correct
1204 /// in all terms other than alignment.
1208 /// Accessing adjacent `u8` as `u16`
1211 /// # #![feature(align_offset)]
1212 /// # fn foo(n: usize) {
1213 /// # use std::mem::align_of;
1215 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1216 /// let ptr = &x[n] as *const u8;
1217 /// let offset = ptr.align_offset(align_of::<u16>());
1218 /// if offset < x.len() - n - 1 {
1219 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1220 /// assert_ne!(*u16_ptr, 500);
1222 /// // while the pointer can be aligned via `offset`, it would point
1223 /// // outside the allocation
1227 #[unstable(feature = "align_offset", issue = "44488")]
1228 pub fn align_offset(self, align: usize) -> usize {
1230 intrinsics::align_offset(self as *const _, align)
1236 impl<T: ?Sized> *mut T {
1237 /// Returns `true` if the pointer is null.
1239 /// Note that unsized types have many possible null pointers, as only the
1240 /// raw data pointer is considered, not their length, vtable, etc.
1241 /// Therefore, two pointers that are null may still not compare equal to each other.
1249 /// let mut s = [1, 2, 3];
1250 /// let ptr: *mut u32 = s.as_mut_ptr();
1251 /// assert!(!ptr.is_null());
1253 #[stable(feature = "rust1", since = "1.0.0")]
1255 pub fn is_null(self) -> bool {
1256 // Compare via a cast to a thin pointer, so fat pointers are only
1257 // considering their "data" part for null-ness.
1258 (self as *mut u8) == null_mut()
1261 /// Returns `None` if the pointer is null, or else returns a reference to
1262 /// the value wrapped in `Some`.
1266 /// While this method and its mutable counterpart are useful for
1267 /// null-safety, it is important to note that this is still an unsafe
1268 /// operation because the returned value could be pointing to invalid memory.
1271 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1272 /// not necessarily reflect the actual lifetime of the data.
1279 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1282 /// if let Some(val_back) = ptr.as_ref() {
1283 /// println!("We got back the value: {}!", val_back);
1287 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1289 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1297 /// Calculates the offset from a pointer.
1299 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1300 /// offset of `3 * size_of::<T>()` bytes.
1304 /// If any of the following conditions are violated, the result is Undefined Behavior:
1307 /// * Both the starting and resulting pointer must be either in bounds or one
1308 /// byte past the end of an allocated object.
1310 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1312 /// * The offset being in bounds cannot rely on "wrapping around" the address
1313 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a usize.
1315 /// The compiler and standard library generally try to ensure allocations
1316 /// never reach a size where an offset is a concern. For instance, `Vec`
1317 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1318 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1320 /// Most platforms fundamentally can't even construct such an allocation.
1321 /// For instance, no known 64-bit platform can ever serve a request
1322 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1323 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1324 /// more than `isize::MAX` bytes with things like Physical Address
1325 /// Extension. As such, memory acquired directly from allocators or memory
1326 /// mapped files *may* be too large to handle with this function.
1328 /// Consider using `wrapping_offset` instead if these constraints are
1329 /// difficult to satisfy. The only advantage of this method is that it
1330 /// enables more aggressive compiler optimizations.
1337 /// let mut s = [1, 2, 3];
1338 /// let ptr: *mut u32 = s.as_mut_ptr();
1341 /// println!("{}", *ptr.offset(1));
1342 /// println!("{}", *ptr.offset(2));
1345 #[stable(feature = "rust1", since = "1.0.0")]
1347 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1348 intrinsics::offset(self, count) as *mut T
1351 /// Calculates the offset from a pointer using wrapping arithmetic.
1352 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1353 /// offset of `3 * size_of::<T>()` bytes.
1357 /// The resulting pointer does not need to be in bounds, but it is
1358 /// potentially hazardous to dereference (which requires `unsafe`).
1360 /// Always use `.offset(count)` instead when possible, because `offset`
1361 /// allows the compiler to optimize better.
1368 /// // Iterate using a raw pointer in increments of two elements
1369 /// let mut data = [1u8, 2, 3, 4, 5];
1370 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1372 /// let end_rounded_up = ptr.wrapping_offset(6);
1374 /// while ptr != end_rounded_up {
1378 /// ptr = ptr.wrapping_offset(step);
1380 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1382 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1384 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1386 intrinsics::arith_offset(self, count) as *mut T
1390 /// Returns `None` if the pointer is null, or else returns a mutable
1391 /// reference to the value wrapped in `Some`.
1395 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1396 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1397 /// returned is indeed a valid lifetime for the contained data.
1404 /// let mut s = [1, 2, 3];
1405 /// let ptr: *mut u32 = s.as_mut_ptr();
1406 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1407 /// *first_value = 4;
1408 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1410 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1412 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1420 /// Calculates the distance between two pointers. The returned value is in
1421 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1423 /// If the address difference between the two pointers is not a multiple of
1424 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
1427 /// This function returns `None` if `T` is a zero-sized type.
1434 /// #![feature(offset_to)]
1437 /// let mut a = [0; 5];
1438 /// let ptr1: *mut i32 = &mut a[1];
1439 /// let ptr2: *mut i32 = &mut a[3];
1440 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1441 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1442 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1443 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1446 #[unstable(feature = "offset_to", issue = "41079")]
1448 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1449 let size = mem::size_of::<T>();
1453 let diff = (other as isize).wrapping_sub(self as isize);
1454 Some(diff / size as isize)
1458 /// Calculates the distance between two pointers. The returned value is in
1459 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1461 /// This function is the inverse of [`offset`].
1463 /// [`offset`]: #method.offset-1
1464 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
1468 /// If any of the following conditions are violated, the result is Undefined Behavior:
1471 /// * Both the starting and other pointer must be either in bounds or one
1472 /// byte past the end of the same allocated object.
1474 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1476 /// * The distance between the pointers, in bytes, must be an exact multiple
1477 /// of the size of `T` and `T` must not be a Zero-Sized Type ("ZST").
1479 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1481 /// The compiler and standard library generally try to ensure allocations
1482 /// never reach a size where an offset is a concern. For instance, `Vec`
1483 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1484 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1486 /// Most platforms fundamentally can't even construct such an allocation.
1487 /// For instance, no known 64-bit platform can ever serve a request
1488 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1489 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1490 /// more than `isize::MAX` bytes with things like Physical Address
1491 /// Extension. As such, memory acquired directly from allocators or memory
1492 /// mapped files *may* be too large to handle with this function.
1494 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1495 /// difficult to satisfy. The only advantage of this method is that it
1496 /// enables more aggressive compiler optimizations.
1503 /// #![feature(ptr_offset_from)]
1506 /// let ptr1: *mut i32 = &mut a[1];
1507 /// let ptr2: *mut i32 = &mut a[3];
1509 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1510 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1511 /// assert_eq!(ptr1.offset(2), ptr2);
1512 /// assert_eq!(ptr2.offset(-2), ptr1);
1515 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1517 pub unsafe fn offset_from(self, other: *const T) -> isize where T: Sized {
1518 (self as *const T).offset_from(other)
1521 /// Calculates the distance between two pointers. The returned value is in
1522 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1524 /// If the address difference between the two pointers is not a multiple of
1525 /// `mem::size_of::<T>()` then the result of the division is rounded towards zero.
1530 /// This function panics if `T` is a zero-sized type.
1537 /// #![feature(ptr_wrapping_offset_from)]
1540 /// let ptr1: *mut i32 = &mut a[1];
1541 /// let ptr2: *mut i32 = &mut a[3];
1542 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1543 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1544 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1545 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1547 /// let ptr1: *mut i32 = 3 as _;
1548 /// let ptr2: *mut i32 = 13 as _;
1549 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1551 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1553 pub fn wrapping_offset_from(self, other: *const T) -> isize where T: Sized {
1554 (self as *const T).wrapping_offset_from(other)
1557 /// Computes the byte offset that needs to be applied in order to
1558 /// make the pointer aligned to `align`.
1559 /// If it is not possible to align the pointer, the implementation returns
1560 /// `usize::max_value()`.
1562 /// There are no guarantees whatsoever that offsetting the pointer will not
1563 /// overflow or go beyond the allocation that the pointer points into.
1564 /// It is up to the caller to ensure that the returned offset is correct
1565 /// in all terms other than alignment.
1569 /// Accessing adjacent `u8` as `u16`
1572 /// # #![feature(align_offset)]
1573 /// # fn foo(n: usize) {
1574 /// # use std::mem::align_of;
1576 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1577 /// let ptr = &x[n] as *const u8;
1578 /// let offset = ptr.align_offset(align_of::<u16>());
1579 /// if offset < x.len() - n - 1 {
1580 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1581 /// assert_ne!(*u16_ptr, 500);
1583 /// // while the pointer can be aligned via `offset`, it would point
1584 /// // outside the allocation
1588 #[unstable(feature = "align_offset", issue = "44488")]
1589 pub fn align_offset(self, align: usize) -> usize {
1591 intrinsics::align_offset(self as *const _, align)
1595 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1597 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1598 /// offset of `3 * size_of::<T>()` bytes.
1602 /// If any of the following conditions are violated, the result is Undefined Behavior:
1605 /// * Both the starting and resulting pointer must be either in bounds or one
1606 /// byte past the end of an allocated object.
1608 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1610 /// * The offset being in bounds cannot rely on "wrapping around" the address
1611 /// space. That is, the infinite-precision sum must fit in a `usize`.
1613 /// The compiler and standard library generally try to ensure allocations
1614 /// never reach a size where an offset is a concern. For instance, `Vec`
1615 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1616 /// `vec.as_ptr().add(vec.len())` is always safe.
1618 /// Most platforms fundamentally can't even construct such an allocation.
1619 /// For instance, no known 64-bit platform can ever serve a request
1620 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1621 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1622 /// more than `isize::MAX` bytes with things like Physical Address
1623 /// Extension. As such, memory acquired directly from allocators or memory
1624 /// mapped files *may* be too large to handle with this function.
1626 /// Consider using `wrapping_offset` instead if these constraints are
1627 /// difficult to satisfy. The only advantage of this method is that it
1628 /// enables more aggressive compiler optimizations.
1635 /// let s: &str = "123";
1636 /// let ptr: *const u8 = s.as_ptr();
1639 /// println!("{}", *ptr.add(1) as char);
1640 /// println!("{}", *ptr.add(2) as char);
1643 #[stable(feature = "pointer_methods", since = "1.26.0")]
1645 pub unsafe fn add(self, count: usize) -> Self
1648 self.offset(count as isize)
1651 /// Calculates the offset from a pointer (convenience for
1652 /// `.offset((count as isize).wrapping_neg())`).
1654 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1655 /// offset of `3 * size_of::<T>()` bytes.
1659 /// If any of the following conditions are violated, the result is Undefined Behavior:
1662 /// * Both the starting and resulting pointer must be either in bounds or one
1663 /// byte past the end of an allocated object.
1665 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1667 /// * The offset being in bounds cannot rely on "wrapping around" the address
1668 /// space. That is, the infinite-precision sum must fit in a usize.
1670 /// The compiler and standard library generally try to ensure allocations
1671 /// never reach a size where an offset is a concern. For instance, `Vec`
1672 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1673 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1675 /// Most platforms fundamentally can't even construct such an allocation.
1676 /// For instance, no known 64-bit platform can ever serve a request
1677 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1678 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1679 /// more than `isize::MAX` bytes with things like Physical Address
1680 /// Extension. As such, memory acquired directly from allocators or memory
1681 /// mapped files *may* be too large to handle with this function.
1683 /// Consider using `wrapping_offset` instead if these constraints are
1684 /// difficult to satisfy. The only advantage of this method is that it
1685 /// enables more aggressive compiler optimizations.
1692 /// let s: &str = "123";
1695 /// let end: *const u8 = s.as_ptr().add(3);
1696 /// println!("{}", *end.sub(1) as char);
1697 /// println!("{}", *end.sub(2) as char);
1700 #[stable(feature = "pointer_methods", since = "1.26.0")]
1702 pub unsafe fn sub(self, count: usize) -> Self
1705 self.offset((count as isize).wrapping_neg())
1708 /// Calculates the offset from a pointer using wrapping arithmetic.
1709 /// (convenience for `.wrapping_offset(count as isize)`)
1711 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1712 /// offset of `3 * size_of::<T>()` bytes.
1716 /// The resulting pointer does not need to be in bounds, but it is
1717 /// potentially hazardous to dereference (which requires `unsafe`).
1719 /// Always use `.add(count)` instead when possible, because `add`
1720 /// allows the compiler to optimize better.
1727 /// // Iterate using a raw pointer in increments of two elements
1728 /// let data = [1u8, 2, 3, 4, 5];
1729 /// let mut ptr: *const u8 = data.as_ptr();
1731 /// let end_rounded_up = ptr.wrapping_add(6);
1733 /// // This loop prints "1, 3, 5, "
1734 /// while ptr != end_rounded_up {
1736 /// print!("{}, ", *ptr);
1738 /// ptr = ptr.wrapping_add(step);
1741 #[stable(feature = "pointer_methods", since = "1.26.0")]
1743 pub fn wrapping_add(self, count: usize) -> Self
1746 self.wrapping_offset(count as isize)
1749 /// Calculates the offset from a pointer using wrapping arithmetic.
1750 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1752 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1753 /// offset of `3 * size_of::<T>()` bytes.
1757 /// The resulting pointer does not need to be in bounds, but it is
1758 /// potentially hazardous to dereference (which requires `unsafe`).
1760 /// Always use `.sub(count)` instead when possible, because `sub`
1761 /// allows the compiler to optimize better.
1768 /// // Iterate using a raw pointer in increments of two elements (backwards)
1769 /// let data = [1u8, 2, 3, 4, 5];
1770 /// let mut ptr: *const u8 = data.as_ptr();
1771 /// let start_rounded_down = ptr.wrapping_sub(2);
1772 /// ptr = ptr.wrapping_add(4);
1774 /// // This loop prints "5, 3, 1, "
1775 /// while ptr != start_rounded_down {
1777 /// print!("{}, ", *ptr);
1779 /// ptr = ptr.wrapping_sub(step);
1782 #[stable(feature = "pointer_methods", since = "1.26.0")]
1784 pub fn wrapping_sub(self, count: usize) -> Self
1787 self.wrapping_offset((count as isize).wrapping_neg())
1790 /// Reads the value from `self` without moving it. This leaves the
1791 /// memory in `self` unchanged.
1795 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1796 /// moves the value out of `self` without preventing further usage of `self`.
1797 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1798 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1799 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1800 /// because it will attempt to drop the value previously at `*self`.
1802 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1810 /// let y = &x as *const i32;
1813 /// assert_eq!(y.read(), 12);
1816 #[stable(feature = "pointer_methods", since = "1.26.0")]
1818 pub unsafe fn read(self) -> T
1824 /// Performs a volatile read of the value from `self` without moving it. This
1825 /// leaves the memory in `self` unchanged.
1827 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1828 /// to not be elided or reordered by the compiler across other volatile
1833 /// Rust does not currently have a rigorously and formally defined memory model,
1834 /// so the precise semantics of what "volatile" means here is subject to change
1835 /// over time. That being said, the semantics will almost always end up pretty
1836 /// similar to [C11's definition of volatile][c11].
1838 /// The compiler shouldn't change the relative order or number of volatile
1839 /// memory operations. However, volatile memory operations on zero-sized types
1840 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1841 /// and may be ignored.
1843 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1847 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1848 /// moves the value out of `self` without preventing further usage of `self`.
1849 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1850 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1851 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1852 /// because it will attempt to drop the value previously at `*self`.
1860 /// let y = &x as *const i32;
1863 /// assert_eq!(y.read_volatile(), 12);
1866 #[stable(feature = "pointer_methods", since = "1.26.0")]
1868 pub unsafe fn read_volatile(self) -> T
1874 /// Reads the value from `self` without moving it. This leaves the
1875 /// memory in `self` unchanged.
1877 /// Unlike `read`, the pointer may be unaligned.
1881 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1882 /// moves the value out of `self` without preventing further usage of `self`.
1883 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1884 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1885 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1886 /// because it will attempt to drop the value previously at `*self`.
1894 /// let y = &x as *const i32;
1897 /// assert_eq!(y.read_unaligned(), 12);
1900 #[stable(feature = "pointer_methods", since = "1.26.0")]
1902 pub unsafe fn read_unaligned(self) -> T
1905 read_unaligned(self)
1908 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1909 /// and destination may overlap.
1911 /// NOTE: this has the *same* argument order as `ptr::copy`.
1913 /// This is semantically equivalent to C's `memmove`.
1917 /// Care must be taken with the ownership of `self` and `dest`.
1918 /// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
1935 #[stable(feature = "pointer_methods", since = "1.26.0")]
1937 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1940 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1944 /// and destination may *not* overlap.
1946 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1948 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1952 /// Beyond requiring that the program must be allowed to access both regions
1953 /// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1956 /// However it does not drop the contents of `dest`, or prevent the contents
1957 /// of `self` from being dropped or used.
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
1972 #[stable(feature = "pointer_methods", since = "1.26.0")]
1974 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1977 copy_nonoverlapping(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1981 /// and destination may overlap.
1983 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1985 /// This is semantically equivalent to C's `memmove`.
1989 /// Care must be taken with the ownership of `src` and `self`.
1990 /// This method semantically moves the values of `src` into `self`.
1991 /// However it does not drop the contents of `self`, or prevent the contents
1992 /// of `src` from being dropped or used.
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst: Vec<T> = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     dst.as_mut_ptr().copy_from(ptr, elts);
///     dst
/// }
/// ```
2007 #[stable(feature = "pointer_methods", since = "1.26.0")]
2009 pub unsafe fn copy_from(self, src: *const T, count: usize)
2012 copy(src, self, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2016 /// and destination may *not* overlap.
2018 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
2020 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
2024 /// Beyond requiring that the program must be allowed to access both regions
2025 /// of memory, it is Undefined Behavior for source and destination to
2026 /// overlap. Care must also be taken with the ownership of `src` and
2027 /// `self`. This method semantically moves the values of `src` into `self`.
2028 /// However it does not drop the contents of `self`, or prevent the contents
2029 /// of `src` from being dropped or used.
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst: Vec<T> = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
///     dst
/// }
/// ```
2044 #[stable(feature = "pointer_methods", since = "1.26.0")]
2046 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2049 copy_nonoverlapping(src, self, count)
2052 /// Executes the destructor (if any) of the pointed-to value.
2054 /// This has two use cases:
2056 /// * It is *required* to use `drop_in_place` to drop unsized types like
2057 /// trait objects, because they can't be read out onto the stack and
2058 /// dropped normally.
2060 /// * It is friendlier to the optimizer to do this over `ptr::read` when
2061 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
2062 /// as the compiler doesn't need to prove that it's sound to elide the
2067 /// This has all the same safety problems as `ptr::read` with respect to
2068 /// invalid pointers, types, and double drops.
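///
/// A small sketch of dropping a value in place and re-initializing the slot
/// before it is touched again; the `String` values are illustrative:
///
/// ```
/// let mut s = String::from("hello");
/// let p: *mut String = &mut s;
///
/// unsafe {
///     // Run the destructor of the old value without reading it out.
///     p.drop_in_place();
///     // The slot is now logically uninitialized; re-initialize it before
///     // `s` is read or dropped again.
///     p.write(String::from("world"));
/// }
/// assert_eq!(s, "world");
/// ```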
2069 #[stable(feature = "pointer_methods", since = "1.26.0")]
2071 pub unsafe fn drop_in_place(self) {
2075 /// Overwrites a memory location with the given value without reading or
2076 /// dropping the old value.
2080 /// This operation is marked unsafe because it writes through a raw pointer.
2082 /// It does not drop the contents of `self`. This is safe, but it could leak
2083 /// allocations or resources, so care must be taken not to overwrite an object
2084 /// that should be dropped.
2086 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2087 /// location pointed to by `self`.
2089 /// This is appropriate for initializing uninitialized memory, or overwriting
2090 /// memory that has previously been `read` from.
2092 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
2100 /// let y = &mut x as *mut i32;
2105 /// assert_eq!(y.read(), 12);
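///
/// A small, self-contained sketch (the local names are illustrative):
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
///
/// unsafe {
///     y.write(12);
///     assert_eq!(y.read(), 12);
/// }
/// ```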
2108 #[stable(feature = "pointer_methods", since = "1.26.0")]
2110 pub unsafe fn write(self, val: T)
2116 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2117 /// bytes of memory starting at `self` to `val`.
/// ```
/// let mut vec = vec![0; 4];
/// unsafe {
///     let vec_ptr = vec.as_mut_ptr();
///     vec_ptr.write_bytes(b'a', 2);
/// }
/// assert_eq!(vec, [b'a', b'a', 0, 0]);
/// ```
2129 #[stable(feature = "pointer_methods", since = "1.26.0")]
2131 pub unsafe fn write_bytes(self, val: u8, count: usize)
2134 write_bytes(self, val, count)
2137 /// Performs a volatile write of a memory location with the given value without
2138 /// reading or dropping the old value.
2140 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2141 /// to not be elided or reordered by the compiler across other volatile
2146 /// Rust does not currently have a rigorously and formally defined memory model,
2147 /// so the precise semantics of what "volatile" means here is subject to change
2148 /// over time. That being said, the semantics will almost always end up pretty
2149 /// similar to [C11's definition of volatile][c11].
2151 /// The compiler shouldn't change the relative order or number of volatile
2152 /// memory operations. However, volatile memory operations on zero-sized types
2153 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
2154 /// and may be ignored.
2156 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
2160 /// This operation is marked unsafe because it accepts a raw pointer.
2162 /// It does not drop the contents of `self`. This is safe, but it could leak
2163 /// allocations or resources, so care must be taken not to overwrite an object
2164 /// that should be dropped.
2166 /// This is appropriate for initializing uninitialized memory, or overwriting
2167 /// memory that has previously been `read` from.
2175 /// let y = &mut x as *mut i32;
2179 /// y.write_volatile(z);
2180 /// assert_eq!(y.read_volatile(), 12);
2183 #[stable(feature = "pointer_methods", since = "1.26.0")]
2185 pub unsafe fn write_volatile(self, val: T)
2188 write_volatile(self, val)
2191 /// Overwrites a memory location with the given value without reading or
2192 /// dropping the old value.
2194 /// Unlike `write`, the pointer may be unaligned.
2198 /// This operation is marked unsafe because it writes through a raw pointer.
2200 /// It does not drop the contents of `self`. This is safe, but it could leak
2201 /// allocations or resources, so care must be taken not to overwrite an object
2202 /// that should be dropped.
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
2207 /// This is appropriate for initializing uninitialized memory, or overwriting
2208 /// memory that has previously been `read` from.
2216 /// let y = &mut x as *mut i32;
2220 /// y.write_unaligned(z);
2221 /// assert_eq!(y.read_unaligned(), 12);
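///
/// A sketch of writing through a pointer that is not suitably aligned for its
/// type; the buffer and value are illustrative:
///
/// ```
/// let mut data = [0u8; 5];
///
/// // One byte into the buffer the pointer is, in general, not aligned for
/// // `u32`, so a plain `write` would be undefined behavior there.
/// let p = data[1..].as_mut_ptr() as *mut u32;
/// unsafe { p.write_unaligned(u32::from_ne_bytes([1, 2, 3, 4])) };
///
/// assert_eq!(data, [0, 1, 2, 3, 4]);
/// ```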
2224 #[stable(feature = "pointer_methods", since = "1.26.0")]
2226 pub unsafe fn write_unaligned(self, val: T)
2229 write_unaligned(self, val)
2232 /// Replaces the value at `self` with `src`, returning the old
2233 /// value, without dropping either.
2237 /// This is only unsafe because it accepts a raw pointer.
2238 /// Otherwise, this operation is identical to `mem::replace`.
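///
/// A small sketch (the values are illustrative):
///
/// ```
/// let mut x = 5;
/// let p = &mut x as *mut i32;
///
/// let old = unsafe { p.replace(7) };
/// assert_eq!(old, 5);
/// assert_eq!(x, 7);
/// ```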
2239 #[stable(feature = "pointer_methods", since = "1.26.0")]
2241 pub unsafe fn replace(self, src: T) -> T
2247 /// Swaps the values at two mutable locations of the same type, without
2248 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2249 /// otherwise equivalent.
2253 /// This function copies the memory through the raw pointers passed to it
2256 /// Ensure that these pointers are valid before calling `swap`.
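///
/// A small sketch swapping two distinct locals (the names are illustrative):
///
/// ```
/// let mut a = 1;
/// let mut b = 2;
///
/// unsafe { (&mut a as *mut i32).swap(&mut b) };
///
/// assert_eq!((a, b), (2, 1));
/// ```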
2257 #[stable(feature = "pointer_methods", since = "1.26.0")]
2259 pub unsafe fn swap(self, with: *mut T)
2266 // Equality for pointers
2267 #[stable(feature = "rust1", since = "1.0.0")]
2268 impl<T: ?Sized> PartialEq for *const T {
2270 fn eq(&self, other: &*const T) -> bool { *self == *other }
2273 #[stable(feature = "rust1", since = "1.0.0")]
2274 impl<T: ?Sized> Eq for *const T {}
2276 #[stable(feature = "rust1", since = "1.0.0")]
2277 impl<T: ?Sized> PartialEq for *mut T {
2279 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2282 #[stable(feature = "rust1", since = "1.0.0")]
2283 impl<T: ?Sized> Eq for *mut T {}
2285 /// Compare raw pointers for equality.
2287 /// This is the same as using the `==` operator, but less generic:
2288 /// the arguments have to be `*const T` raw pointers,
2289 /// not anything that implements `PartialEq`.
2291 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2292 /// by their address rather than comparing the values they point to
2293 /// (which is what the `PartialEq for &T` implementation does).
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
2312 #[stable(feature = "ptr_eq", since = "1.17.0")]
2314 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2318 // Impls for function pointers
2319 macro_rules! fnptr_impls_safety_abi {
2320 ($FnTy: ty, $($Arg: ident),*) => {
2321 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2322 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2324 fn eq(&self, other: &Self) -> bool {
2325 *self as usize == *other as usize
2329 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2330 impl<Ret, $($Arg),*> Eq for $FnTy {}
2332 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2333 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2335 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2336 (*self as usize).partial_cmp(&(*other as usize))
2340 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2341 impl<Ret, $($Arg),*> Ord for $FnTy {
2343 fn cmp(&self, other: &Self) -> Ordering {
2344 (*self as usize).cmp(&(*other as usize))
2348 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2349 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2350 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2351 state.write_usize(*self as usize)
2355 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2356 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2357 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2358 fmt::Pointer::fmt(&(*self as *const ()), f)
2362 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2363 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2364 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2365 fmt::Pointer::fmt(&(*self as *const ()), f)
2371 macro_rules! fnptr_impls_args {
2372 ($($Arg: ident),+) => {
2373 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2374 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2375 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2376 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2377 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2378 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2381 // No variadic functions with 0 parameters
2382 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2383 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2384 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2385 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2389 fnptr_impls_args! { }
2390 fnptr_impls_args! { A }
2391 fnptr_impls_args! { A, B }
2392 fnptr_impls_args! { A, B, C }
2393 fnptr_impls_args! { A, B, C, D }
2394 fnptr_impls_args! { A, B, C, D, E }
2395 fnptr_impls_args! { A, B, C, D, E, F }
2396 fnptr_impls_args! { A, B, C, D, E, F, G }
2397 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2398 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2399 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2400 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2401 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2403 // Comparison for pointers
2404 #[stable(feature = "rust1", since = "1.0.0")]
2405 impl<T: ?Sized> Ord for *const T {
2407 fn cmp(&self, other: &*const T) -> Ordering {
2410 } else if self == other {
2418 #[stable(feature = "rust1", since = "1.0.0")]
2419 impl<T: ?Sized> PartialOrd for *const T {
2421 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2422 Some(self.cmp(other))
2426 fn lt(&self, other: &*const T) -> bool { *self < *other }
2429 fn le(&self, other: &*const T) -> bool { *self <= *other }
2432 fn gt(&self, other: &*const T) -> bool { *self > *other }
2435 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2438 #[stable(feature = "rust1", since = "1.0.0")]
2439 impl<T: ?Sized> Ord for *mut T {
2441 fn cmp(&self, other: &*mut T) -> Ordering {
2444 } else if self == other {
2452 #[stable(feature = "rust1", since = "1.0.0")]
2453 impl<T: ?Sized> PartialOrd for *mut T {
2455 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2456 Some(self.cmp(other))
2460 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2463 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2466 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2469 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2472 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2473 /// of this wrapper owns the referent. Useful for building abstractions like
2474 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2476 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2477 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2478 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2479 /// the referent of the pointer should not be modified without a unique path to
2480 /// its owning Unique.
2482 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2483 /// consider using `NonNull`, which has weaker semantics.
2485 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2486 /// is never dereferenced. This is so that enums may use this forbidden value
2487 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2488 /// However the pointer may still dangle if it isn't dereferenced.
2490 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2491 /// for any type which upholds Unique's aliasing requirements.
2492 #[unstable(feature = "ptr_internals", issue = "0",
2493 reason = "use NonNull instead and consider PhantomData<T> \
2494 (if you also use #[may_dangle]), Send, and/or Sync")]
2495 pub struct Unique<T: ?Sized> {
2496 pointer: NonZero<*const T>,
2497 // NOTE: this marker has no consequences for variance, but is necessary
2498 // for dropck to understand that we logically own a `T`.
2500 // For details, see:
2501 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2502 _marker: PhantomData<T>,
2505 #[unstable(feature = "ptr_internals", issue = "0")]
2506 impl<T: ?Sized> fmt::Debug for Unique<T> {
2507 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2508 fmt::Pointer::fmt(&self.as_ptr(), f)
2512 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2513 /// reference is unaliased. Note that this aliasing invariant is
2514 /// unenforced by the type system; the abstraction using the
2515 /// `Unique` must enforce it.
2516 #[unstable(feature = "ptr_internals", issue = "0")]
2517 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2519 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2520 /// reference is unaliased. Note that this aliasing invariant is
2521 /// unenforced by the type system; the abstraction using the
2522 /// `Unique` must enforce it.
2523 #[unstable(feature = "ptr_internals", issue = "0")]
2524 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2526 #[unstable(feature = "ptr_internals", issue = "0")]
2527 impl<T: Sized> Unique<T> {
2528 /// Creates a new `Unique` that is dangling, but well-aligned.
2530 /// This is useful for initializing types which lazily allocate, like
2531 /// `Vec::new` does.
2532 // FIXME: rename to dangling() to match NonNull?
2533 pub fn empty() -> Self {
2535 let ptr = mem::align_of::<T>() as *mut T;
2536 Unique::new_unchecked(ptr)
2541 #[unstable(feature = "ptr_internals", issue = "0")]
2542 impl<T: ?Sized> Unique<T> {
2543 /// Creates a new `Unique`.
2547 /// `ptr` must be non-null.
2548 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2549 Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
2552 /// Creates a new `Unique` if `ptr` is non-null.
2553 pub fn new(ptr: *mut T) -> Option<Self> {
2554 NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
2557 /// Acquires the underlying `*mut` pointer.
2558 pub fn as_ptr(self) -> *mut T {
2559 self.pointer.get() as *mut T
2562 /// Dereferences the content.
2564 /// The resulting lifetime is bound to self so this behaves "as if"
2565 /// it were actually an instance of T that is getting borrowed. If a longer
2566 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2567 pub unsafe fn as_ref(&self) -> &T {
2571 /// Mutably dereferences the content.
2573 /// The resulting lifetime is bound to self so this behaves "as if"
2574 /// it were actually an instance of T that is getting borrowed. If a longer
2575 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2576 pub unsafe fn as_mut(&mut self) -> &mut T {
2581 #[unstable(feature = "ptr_internals", issue = "0")]
2582 impl<T: ?Sized> Clone for Unique<T> {
2583 fn clone(&self) -> Self {
2588 #[unstable(feature = "ptr_internals", issue = "0")]
2589 impl<T: ?Sized> Copy for Unique<T> { }
2591 #[unstable(feature = "ptr_internals", issue = "0")]
2592 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2594 #[unstable(feature = "ptr_internals", issue = "0")]
2595 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2596 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2597 fmt::Pointer::fmt(&self.as_ptr(), f)
2601 #[unstable(feature = "ptr_internals", issue = "0")]
2602 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2603 fn from(reference: &'a mut T) -> Self {
2604 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2608 #[unstable(feature = "ptr_internals", issue = "0")]
2609 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2610 fn from(reference: &'a T) -> Self {
2611 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2615 #[unstable(feature = "ptr_internals", issue = "0")]
2616 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2617 fn from(p: NonNull<T>) -> Self {
2618 Unique { pointer: p.pointer, _marker: PhantomData }
2622 /// Previous name of `NonNull`.
2623 #[rustc_deprecated(since = "1.25.0", reason = "renamed to `NonNull`")]
2624 #[unstable(feature = "shared", issue = "27730")]
2625 pub type Shared<T> = NonNull<T>;
2627 /// `*mut T` but non-zero and covariant.
2629 /// This is often the correct thing to use when building data structures using
2630 /// raw pointers, but is ultimately more dangerous to use because of its additional
2631 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2633 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2634 /// is never dereferenced. This is so that enums may use this forbidden value
2635 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `NonNull<T>`.
2636 /// However the pointer may still dangle if it isn't dereferenced.
2638 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2639 /// for your use case, you should include some PhantomData in your type to
2640 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2641 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2642 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2643 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
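///
/// A small sketch of the points above (the local `x` is illustrative):
///
/// ```
/// use std::mem::size_of;
/// use std::ptr::NonNull;
///
/// // The forbidden null value leaves a niche, so the `Option` costs nothing.
/// assert_eq!(size_of::<Option<NonNull<i32>>>(), size_of::<NonNull<i32>>());
///
/// let mut x = 10;
/// let ptr = NonNull::new(&mut x as *mut i32).expect("pointer is null");
/// assert_eq!(unsafe { *ptr.as_ptr() }, 10);
/// ```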
2644 #[stable(feature = "nonnull", since = "1.25.0")]
2645 pub struct NonNull<T: ?Sized> {
2646 pointer: NonZero<*const T>,
2649 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2650 // NB: This impl is unnecessary, but should provide better error messages.
2651 #[stable(feature = "nonnull", since = "1.25.0")]
2652 impl<T: ?Sized> !Send for NonNull<T> { }
2654 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2655 // NB: This impl is unnecessary, but should provide better error messages.
2656 #[stable(feature = "nonnull", since = "1.25.0")]
2657 impl<T: ?Sized> !Sync for NonNull<T> { }
2659 impl<T: Sized> NonNull<T> {
2660 /// Creates a new `NonNull` that is dangling, but well-aligned.
2662 /// This is useful for initializing types which lazily allocate, like
2663 /// `Vec::new` does.
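///
/// A small sketch (the pointee type `u32` is illustrative):
///
/// ```
/// use std::ptr::NonNull;
///
/// let ptr = NonNull::<u32>::dangling();
/// // Non-null and well-aligned for `u32`, but it does not point to
/// // allocated memory and must never be dereferenced.
/// assert!(!ptr.as_ptr().is_null());
/// ```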
2664 #[stable(feature = "nonnull", since = "1.25.0")]
2665 pub fn dangling() -> Self {
2667 let ptr = mem::align_of::<T>() as *mut T;
2668 NonNull::new_unchecked(ptr)
2673 impl<T: ?Sized> NonNull<T> {
2674 /// Creates a new `NonNull`.
2678 /// `ptr` must be non-null.
2679 #[stable(feature = "nonnull", since = "1.25.0")]
2680 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2681 NonNull { pointer: NonZero::new_unchecked(ptr) }
2684 /// Creates a new `NonNull` if `ptr` is non-null.
2685 #[stable(feature = "nonnull", since = "1.25.0")]
2686 pub fn new(ptr: *mut T) -> Option<Self> {
2687 NonZero::new(ptr as *const T).map(|nz| NonNull { pointer: nz })
2690 /// Acquires the underlying `*mut` pointer.
2691 #[stable(feature = "nonnull", since = "1.25.0")]
2692 pub fn as_ptr(self) -> *mut T {
2693 self.pointer.get() as *mut T
2696 /// Dereferences the content.
2698 /// The resulting lifetime is bound to self so this behaves "as if"
2699 /// it were actually an instance of T that is getting borrowed. If a longer
2700 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2701 #[stable(feature = "nonnull", since = "1.25.0")]
2702 pub unsafe fn as_ref(&self) -> &T {
2706 /// Mutably dereferences the content.
2708 /// The resulting lifetime is bound to self so this behaves "as if"
2709 /// it were actually an instance of T that is getting borrowed. If a longer
2710 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2711 #[stable(feature = "nonnull", since = "1.25.0")]
2712 pub unsafe fn as_mut(&mut self) -> &mut T {
2716 /// Cast to a pointer of another type
2717 #[unstable(feature = "nonnull_cast", issue = "47653")]
2718 pub fn cast<U>(self) -> NonNull<U> {
2720 NonNull::new_unchecked(self.as_ptr() as *mut U)
2725 #[stable(feature = "nonnull", since = "1.25.0")]
2726 impl<T: ?Sized> Clone for NonNull<T> {
2727 fn clone(&self) -> Self {
2732 #[stable(feature = "nonnull", since = "1.25.0")]
2733 impl<T: ?Sized> Copy for NonNull<T> { }
2735 #[unstable(feature = "coerce_unsized", issue = "27732")]
2736 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2738 #[stable(feature = "nonnull", since = "1.25.0")]
2739 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2740 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2741 fmt::Pointer::fmt(&self.as_ptr(), f)
2745 #[stable(feature = "nonnull", since = "1.25.0")]
2746 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2747 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2748 fmt::Pointer::fmt(&self.as_ptr(), f)
2752 #[stable(feature = "nonnull", since = "1.25.0")]
2753 impl<T: ?Sized> Eq for NonNull<T> {}
2755 #[stable(feature = "nonnull", since = "1.25.0")]
2756 impl<T: ?Sized> PartialEq for NonNull<T> {
2757 fn eq(&self, other: &Self) -> bool {
2758 self.as_ptr() == other.as_ptr()
2762 #[stable(feature = "nonnull", since = "1.25.0")]
2763 impl<T: ?Sized> Ord for NonNull<T> {
2764 fn cmp(&self, other: &Self) -> Ordering {
2765 self.as_ptr().cmp(&other.as_ptr())
2769 #[stable(feature = "nonnull", since = "1.25.0")]
2770 impl<T: ?Sized> PartialOrd for NonNull<T> {
2771 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2772 self.as_ptr().partial_cmp(&other.as_ptr())
2776 #[stable(feature = "nonnull", since = "1.25.0")]
2777 impl<T: ?Sized> hash::Hash for NonNull<T> {
2778 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2779 self.as_ptr().hash(state)
2783 #[unstable(feature = "ptr_internals", issue = "0")]
2784 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2785 fn from(unique: Unique<T>) -> Self {
2786 NonNull { pointer: unique.pointer }
2790 #[stable(feature = "nonnull", since = "1.25.0")]
2791 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
2792 fn from(reference: &'a mut T) -> Self {
2793 NonNull { pointer: NonZero::from(reference) }
2797 #[stable(feature = "nonnull", since = "1.25.0")]
2798 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
2799 fn from(reference: &'a T) -> Self {
2800 NonNull { pointer: NonZero::from(reference) }