1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
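//!
//! A brief sketch (not exhaustive) of the bulk helpers re-exported below,
//! `copy_nonoverlapping` and `write_bytes`; the buffers are stand-ins chosen
//! purely for illustration:
//!
//! ```
//! use std::ptr;
//!
//! let src = [1u8, 2, 3, 4];
//! let mut dst = [0u8; 4];
//! unsafe {
//!     // memcpy-style copy; the two ranges must not overlap.
//!     ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 4);
//!     // memset-style fill of the first two bytes.
//!     ptr::write_bytes(dst.as_mut_ptr(), 0, 2);
//! }
//! assert_eq!(dst, [0, 0, 3, 4]);
//! ```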
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and dropped normally.
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the copy.
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
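///
/// # Examples
///
/// A minimal sketch of the `Vec`-style use case mentioned above (the element
/// type and values here are chosen purely for illustration):
///
/// ```
/// use std::ptr;
///
/// let mut v = vec![String::from("a"), String::from("b")];
/// unsafe {
///     // Drop the last element in place, then shrink the length so that
///     // `Vec` will not try to drop it a second time.
///     ptr::drop_in_place(&mut v[1]);
///     v.set_len(1);
/// }
/// assert_eq!(v, vec![String::from("a")]);
/// ```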
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
62 drop_in_place(to_drop);
65 /// Creates a null raw pointer.
72 /// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
86 /// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
103 /// This function copies the memory through the raw pointers passed to it
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
115 /// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
122 /// assert_eq!([2, 3, 0, 1], array);
126 /// Swapping two overlapping regions:
131 /// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
138 /// assert_eq!([1, 0, 1, 2], array);
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
// `y` and `tmp` now hold the same value, but we need to completely forget `tmp`
153 // because it's no longer relevant.
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
169 /// #![feature(swap_nonoverlapping)]
173 /// let mut x = [1, 2, 3, 4];
174 /// let mut y = [7, 8, 9];
177 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
180 /// assert_eq!(x, [7, 8, 3, 4]);
181 /// assert_eq!(y, [1, 2, 9]);
184 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
185 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
186 let x = x as *mut u8;
187 let y = y as *mut u8;
188 let len = mem::size_of::<T>() * count;
189 swap_nonoverlapping_bytes(x, y, len)
193 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
// The approach here is to use SIMD to swap x & y efficiently. Testing reveals
// that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
// Haswell E processors. LLVM can optimize better if we give the struct a
// #[repr(simd)] attribute, even if we don't actually use this struct directly.
199 // FIXME repr(simd) broken on emscripten and redox
200 // It's also broken on big-endian powerpc64 and s390x. #42778
201 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
202 target_endian = "big")),
204 struct Block(u64, u64, u64, u64);
205 struct UnalignedBlock(u64, u64, u64, u64);
207 let block_size = mem::size_of::<Block>();
209 // Loop through x & y, copying them `Block` at a time
210 // The optimizer should unroll the loop fully for most types
211 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
213 while i + block_size <= len {
214 // Create some uninitialized memory as scratch space
215 // Declaring `t` here avoids aligning the stack when this loop is unused
216 let mut t: Block = mem::uninitialized();
217 let t = &mut t as *mut _ as *mut u8;
218 let x = x.offset(i as isize);
219 let y = y.offset(i as isize);
221 // Swap a block of bytes of x & y, using t as a temporary buffer
222 // This should be optimized into efficient SIMD operations where available
223 copy_nonoverlapping(x, t, block_size);
224 copy_nonoverlapping(y, x, block_size);
225 copy_nonoverlapping(t, y, block_size);
230 // Swap any remaining bytes
231 let mut t: UnalignedBlock = mem::uninitialized();
234 let t = &mut t as *mut _ as *mut u8;
235 let x = x.offset(i as isize);
236 let y = y.offset(i as isize);
238 copy_nonoverlapping(x, t, rem);
239 copy_nonoverlapping(y, x, rem);
240 copy_nonoverlapping(t, y, rem);
244 /// Replaces the value at `dest` with `src`, returning the old
245 /// value, without dropping either.
249 /// This is only unsafe because it accepts a raw pointer.
250 /// Otherwise, this operation is identical to `mem::replace`.
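///
/// # Examples
///
/// A minimal sketch mirroring `mem::replace`, but through a raw pointer
/// (the values here are purely illustrative):
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
/// // Swap in 'r' and take ownership of the old 'b' without dropping it.
/// let b = unsafe { ptr::replace(&mut rust[0], 'r') };
/// assert_eq!(b, 'b');
/// assert_eq!(rust, ['r', 'u', 's', 't']);
/// ```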
252 #[stable(feature = "rust1", since = "1.0.0")]
253 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
254 mem::swap(&mut *dest, &mut src); // cannot overlap
258 /// Reads the value from `src` without moving it. This leaves the
259 /// memory in `src` unchanged.
263 /// Beyond accepting a raw pointer, this is unsafe because it semantically
264 /// moves the value out of `src` without preventing further usage of `src`.
265 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
266 /// `src` is not used before the data is overwritten again (e.g. with `write`,
267 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
268 /// because it will attempt to drop the value previously at `*src`.
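///
/// For a non-`Copy` type this usually means pairing `read` with a later
/// `write` (or a `mem::forget`); a minimal sketch, using `String` purely
/// for illustration:
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // Move the string out; `s` must not be used again until it is
///     // overwritten, or its heap buffer would be freed twice.
///     let moved = ptr::read(&s);
///     assert_eq!(moved, "foo");
///     // Write a fresh value back so that `s` is valid (and droppable) again.
///     ptr::write(&mut s, String::from("bar"));
/// }
/// assert_eq!(s, "bar");
/// ```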
270 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
278 /// let y = &x as *const i32;
281 /// assert_eq!(std::ptr::read(y), 12);
285 #[stable(feature = "rust1", since = "1.0.0")]
286 pub unsafe fn read<T>(src: *const T) -> T {
287 let mut tmp: T = mem::uninitialized();
288 copy_nonoverlapping(src, &mut tmp, 1);
292 /// Reads the value from `src` without moving it. This leaves the
293 /// memory in `src` unchanged.
295 /// Unlike `read`, the pointer may be unaligned.
299 /// Beyond accepting a raw pointer, this is unsafe because it semantically
300 /// moves the value out of `src` without preventing further usage of `src`.
301 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
302 /// `src` is not used before the data is overwritten again (e.g. with `write`,
303 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
304 /// because it will attempt to drop the value previously at `*src`.
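///
/// A sketch of the typical use case, reading a `u32` from an arbitrary
/// (and possibly unaligned) byte offset into a buffer; the buffer contents
/// are chosen purely for illustration:
///
/// ```
/// use std::ptr;
///
/// let bytes = [0u8, 0x78, 0x56, 0x34, 0x12];
/// let value = unsafe {
///     // `bytes.as_ptr().offset(1)` has no alignment guarantee for `u32`,
///     // so plain `read` would not be allowed here.
///     ptr::read_unaligned(bytes.as_ptr().offset(1) as *const u32)
/// };
/// if cfg!(target_endian = "little") {
///     assert_eq!(value, 0x12345678);
/// }
/// ```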
312 /// let y = &x as *const i32;
315 /// assert_eq!(std::ptr::read_unaligned(y), 12);
319 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
320 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
321 let mut tmp: T = mem::uninitialized();
322 copy_nonoverlapping(src as *const u8,
323 &mut tmp as *mut T as *mut u8,
324 mem::size_of::<T>());
328 /// Overwrites a memory location with the given value without reading or
329 /// dropping the old value.
333 /// This operation is marked unsafe because it accepts a raw pointer.
335 /// It does not drop the contents of `dst`. This is safe, but it could leak
336 /// allocations or resources, so care must be taken not to overwrite an object
337 /// that should be dropped.
339 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
340 /// location pointed to by `dst`.
342 /// This is appropriate for initializing uninitialized memory, or overwriting
343 /// memory that has previously been `read` from.
345 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
353 /// let y = &mut x as *mut i32;
357 /// std::ptr::write(y, z);
358 /// assert_eq!(std::ptr::read(y), 12);
362 #[stable(feature = "rust1", since = "1.0.0")]
363 pub unsafe fn write<T>(dst: *mut T, src: T) {
364 intrinsics::move_val_init(&mut *dst, src)
367 /// Overwrites a memory location with the given value without reading or
368 /// dropping the old value.
370 /// Unlike `write`, the pointer may be unaligned.
374 /// This operation is marked unsafe because it accepts a raw pointer.
376 /// It does not drop the contents of `dst`. This is safe, but it could leak
377 /// allocations or resources, so care must be taken not to overwrite an object
378 /// that should be dropped.
380 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
381 /// location pointed to by `dst`.
383 /// This is appropriate for initializing uninitialized memory, or overwriting
384 /// memory that has previously been `read` from.
392 /// let y = &mut x as *mut i32;
396 /// std::ptr::write_unaligned(y, z);
397 /// assert_eq!(std::ptr::read_unaligned(y), 12);
401 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
402 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
403 copy_nonoverlapping(&src as *const T as *const u8,
405 mem::size_of::<T>());
409 /// Performs a volatile read of the value from `src` without moving it. This
410 /// leaves the memory in `src` unchanged.
412 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
418 /// Rust does not currently have a rigorously and formally defined memory model,
419 /// so the precise semantics of what "volatile" means here is subject to change
420 /// over time. That being said, the semantics will almost always end up pretty
421 /// similar to [C11's definition of volatile][c11].
423 /// The compiler shouldn't change the relative order or number of volatile
424 /// memory operations. However, volatile memory operations on zero-sized types
425 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
426 /// and may be ignored.
428 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
432 /// Beyond accepting a raw pointer, this is unsafe because it semantically
433 /// moves the value out of `src` without preventing further usage of `src`.
434 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
435 /// `src` is not used before the data is overwritten again (e.g. with `write`,
436 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
437 /// because it will attempt to drop the value previously at `*src`.
445 /// let y = &x as *const i32;
448 /// assert_eq!(std::ptr::read_volatile(y), 12);
452 #[stable(feature = "volatile", since = "1.9.0")]
453 pub unsafe fn read_volatile<T>(src: *const T) -> T {
454 intrinsics::volatile_load(src)
457 /// Performs a volatile write of a memory location with the given value without
458 /// reading or dropping the old value.
460 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
466 /// Rust does not currently have a rigorously and formally defined memory model,
467 /// so the precise semantics of what "volatile" means here is subject to change
468 /// over time. That being said, the semantics will almost always end up pretty
469 /// similar to [C11's definition of volatile][c11].
471 /// The compiler shouldn't change the relative order or number of volatile
472 /// memory operations. However, volatile memory operations on zero-sized types
473 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
474 /// and may be ignored.
476 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
480 /// This operation is marked unsafe because it accepts a raw pointer.
482 /// It does not drop the contents of `dst`. This is safe, but it could leak
483 /// allocations or resources, so care must be taken not to overwrite an object
484 /// that should be dropped.
486 /// This is appropriate for initializing uninitialized memory, or overwriting
487 /// memory that has previously been `read` from.
495 /// let y = &mut x as *mut i32;
499 /// std::ptr::write_volatile(y, z);
500 /// assert_eq!(std::ptr::read_volatile(y), 12);
504 #[stable(feature = "volatile", since = "1.9.0")]
505 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
506 intrinsics::volatile_store(dst, src);
509 #[lang = "const_ptr"]
510 impl<T: ?Sized> *const T {
511 /// Returns `true` if the pointer is null.
513 /// Note that unsized types have many possible null pointers, as only the
514 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to each other.
523 /// let s: &str = "Follow the rabbit";
524 /// let ptr: *const u8 = s.as_ptr();
525 /// assert!(!ptr.is_null());
527 #[stable(feature = "rust1", since = "1.0.0")]
529 pub fn is_null(self) -> bool {
530 // Compare via a cast to a thin pointer, so fat pointers are only
531 // considering their "data" part for null-ness.
532 (self as *const u8) == null()
535 /// Returns `None` if the pointer is null, or else returns a reference to
536 /// the value wrapped in `Some`.
540 /// While this method and its mutable counterpart are useful for
541 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid memory.
545 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
546 /// not necessarily reflect the actual lifetime of the data.
553 /// let ptr: *const u8 = &10u8 as *const u8;
556 /// if let Some(val_back) = ptr.as_ref() {
557 /// println!("We got back the value: {}!", val_back);
561 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
563 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
571 /// Calculates the offset from a pointer.
573 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
574 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
581 /// * Both the starting and resulting pointer must be either in bounds or one
582 /// byte past the end of an allocated object.
584 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
586 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
590 /// never reach a size where an offset is a concern. For instance, `Vec`
591 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
592 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
594 /// Most platforms fundamentally can't even construct such an allocation.
595 /// For instance, no known 64-bit platform can ever serve a request
596 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
597 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
598 /// more than `isize::MAX` bytes with things like Physical Address
599 /// Extension. As such, memory acquired directly from allocators or memory
600 /// mapped files *may* be too large to handle with this function.
602 /// Consider using `wrapping_offset` instead if these constraints are
603 /// difficult to satisfy. The only advantage of this method is that it
604 /// enables more aggressive compiler optimizations.
611 /// let s: &str = "123";
612 /// let ptr: *const u8 = s.as_ptr();
615 /// println!("{}", *ptr.offset(1) as char);
616 /// println!("{}", *ptr.offset(2) as char);
619 #[stable(feature = "rust1", since = "1.0.0")]
621 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
622 intrinsics::offset(self, count)
625 /// Calculates the offset from a pointer using wrapping arithmetic.
627 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
628 /// offset of `3 * size_of::<T>()` bytes.
632 /// The resulting pointer does not need to be in bounds, but it is
633 /// potentially hazardous to dereference (which requires `unsafe`).
635 /// Always use `.offset(count)` instead when possible, because `offset`
636 /// allows the compiler to optimize better.
643 /// // Iterate using a raw pointer in increments of two elements
644 /// let data = [1u8, 2, 3, 4, 5];
645 /// let mut ptr: *const u8 = data.as_ptr();
647 /// let end_rounded_up = ptr.wrapping_offset(6);
649 /// // This loop prints "1, 3, 5, "
650 /// while ptr != end_rounded_up {
652 /// print!("{}, ", *ptr);
654 /// ptr = ptr.wrapping_offset(step);
657 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
659 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
661 intrinsics::arith_offset(self, count)
665 /// Calculates the distance between two pointers. The returned value is in
666 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
672 /// This function returns `None` if `T` is a zero-sized type.
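///
/// For instance, with a zero-sized type there is no meaningful element
/// distance, so `None` is returned; a one-line sketch of that case:
///
/// ```
/// #![feature(offset_to)]
///
/// let p: *const () = &();
/// assert_eq!(p.offset_to(p), None);
/// ```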
679 /// #![feature(offset_to)]
683 /// let ptr1: *const i32 = &a[1];
684 /// let ptr2: *const i32 = &a[3];
685 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
686 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
687 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
688 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
691 #[unstable(feature = "offset_to", issue = "41079")]
693 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
694 let size = mem::size_of::<T>();
698 let diff = (other as isize).wrapping_sub(self as isize);
699 Some(diff / size as isize)
703 /// Calculates the distance between two pointers. The returned value is in
704 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
706 /// This function is the inverse of [`offset`].
708 /// [`offset`]: #method.offset
709 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
/// If any of the following conditions are violated, the result is Undefined Behavior:
716 /// * Both the starting and other pointer must be either in bounds or one
717 /// byte past the end of the same allocated object.
719 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
721 /// * The distance between the pointers, in bytes, must be an exact multiple
722 /// of the size of `T`.
724 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
726 /// The compiler and standard library generally try to ensure allocations
727 /// never reach a size where an offset is a concern. For instance, `Vec`
728 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
729 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
731 /// Most platforms fundamentally can't even construct such an allocation.
732 /// For instance, no known 64-bit platform can ever serve a request
733 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
734 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
735 /// more than `isize::MAX` bytes with things like Physical Address
736 /// Extension. As such, memory acquired directly from allocators or memory
737 /// mapped files *may* be too large to handle with this function.
739 /// Consider using [`wrapping_offset_from`] instead if these constraints are
740 /// difficult to satisfy. The only advantage of this method is that it
741 /// enables more aggressive compiler optimizations.
745 /// This function panics if `T` is a Zero-Sized Type ("ZST").
752 /// #![feature(ptr_offset_from)]
755 /// let ptr1: *const i32 = &a[1];
756 /// let ptr2: *const i32 = &a[3];
758 /// assert_eq!(ptr2.offset_from(ptr1), 2);
759 /// assert_eq!(ptr1.offset_from(ptr2), -2);
760 /// assert_eq!(ptr1.offset(2), ptr2);
761 /// assert_eq!(ptr2.offset(-2), ptr1);
764 #[unstable(feature = "ptr_offset_from", issue = "41079")]
766 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
767 let pointee_size = mem::size_of::<T>();
768 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
770 // This is the same sequence that Clang emits for pointer subtraction.
771 // It can be neither `nsw` nor `nuw` because the input is treated as
772 // unsigned but then the output is treated as signed, so neither works.
773 let d = isize::wrapping_sub(self as _, origin as _);
774 intrinsics::exact_div(d, pointee_size as _)
777 /// Calculates the distance between two pointers. The returned value is in
778 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
784 /// Though this method is safe for any two pointers, note that its result
785 /// will be mostly useless if the two pointers aren't into the same allocated
786 /// object, for example if they point to two different local variables.
790 /// This function panics if `T` is a zero-sized type.
797 /// #![feature(ptr_wrapping_offset_from)]
800 /// let ptr1: *const i32 = &a[1];
801 /// let ptr2: *const i32 = &a[3];
802 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
803 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
804 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
805 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
807 /// let ptr1: *const i32 = 3 as _;
808 /// let ptr2: *const i32 = 13 as _;
809 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
811 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
813 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
814 let pointee_size = mem::size_of::<T>();
815 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
817 let d = isize::wrapping_sub(self as _, origin as _);
818 d.wrapping_div(pointee_size as _)
821 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
823 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
824 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
831 /// * Both the starting and resulting pointer must be either in bounds or one
832 /// byte past the end of an allocated object.
834 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
836 /// * The offset being in bounds cannot rely on "wrapping around" the address
837 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
840 /// never reach a size where an offset is a concern. For instance, `Vec`
841 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
842 /// `vec.as_ptr().add(vec.len())` is always safe.
844 /// Most platforms fundamentally can't even construct such an allocation.
845 /// For instance, no known 64-bit platform can ever serve a request
846 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
847 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
848 /// more than `isize::MAX` bytes with things like Physical Address
849 /// Extension. As such, memory acquired directly from allocators or memory
850 /// mapped files *may* be too large to handle with this function.
852 /// Consider using `wrapping_offset` instead if these constraints are
853 /// difficult to satisfy. The only advantage of this method is that it
854 /// enables more aggressive compiler optimizations.
861 /// let s: &str = "123";
862 /// let ptr: *const u8 = s.as_ptr();
865 /// println!("{}", *ptr.add(1) as char);
866 /// println!("{}", *ptr.add(2) as char);
869 #[stable(feature = "pointer_methods", since = "1.26.0")]
871 pub unsafe fn add(self, count: usize) -> Self
874 self.offset(count as isize)
877 /// Calculates the offset from a pointer (convenience for
878 /// `.offset((count as isize).wrapping_neg())`).
880 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
881 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
888 /// * Both the starting and resulting pointer must be either in bounds or one
889 /// byte past the end of an allocated object.
891 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
893 /// * The offset being in bounds cannot rely on "wrapping around" the address
894 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
897 /// never reach a size where an offset is a concern. For instance, `Vec`
898 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
899 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
901 /// Most platforms fundamentally can't even construct such an allocation.
902 /// For instance, no known 64-bit platform can ever serve a request
903 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
904 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
905 /// more than `isize::MAX` bytes with things like Physical Address
906 /// Extension. As such, memory acquired directly from allocators or memory
907 /// mapped files *may* be too large to handle with this function.
909 /// Consider using `wrapping_offset` instead if these constraints are
910 /// difficult to satisfy. The only advantage of this method is that it
911 /// enables more aggressive compiler optimizations.
918 /// let s: &str = "123";
921 /// let end: *const u8 = s.as_ptr().add(3);
922 /// println!("{}", *end.sub(1) as char);
923 /// println!("{}", *end.sub(2) as char);
926 #[stable(feature = "pointer_methods", since = "1.26.0")]
928 pub unsafe fn sub(self, count: usize) -> Self
931 self.offset((count as isize).wrapping_neg())
934 /// Calculates the offset from a pointer using wrapping arithmetic.
935 /// (convenience for `.wrapping_offset(count as isize)`)
937 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
938 /// offset of `3 * size_of::<T>()` bytes.
942 /// The resulting pointer does not need to be in bounds, but it is
943 /// potentially hazardous to dereference (which requires `unsafe`).
945 /// Always use `.add(count)` instead when possible, because `add`
946 /// allows the compiler to optimize better.
953 /// // Iterate using a raw pointer in increments of two elements
954 /// let data = [1u8, 2, 3, 4, 5];
955 /// let mut ptr: *const u8 = data.as_ptr();
957 /// let end_rounded_up = ptr.wrapping_add(6);
959 /// // This loop prints "1, 3, 5, "
960 /// while ptr != end_rounded_up {
962 /// print!("{}, ", *ptr);
964 /// ptr = ptr.wrapping_add(step);
967 #[stable(feature = "pointer_methods", since = "1.26.0")]
969 pub fn wrapping_add(self, count: usize) -> Self
972 self.wrapping_offset(count as isize)
975 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
978 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
979 /// offset of `3 * size_of::<T>()` bytes.
983 /// The resulting pointer does not need to be in bounds, but it is
984 /// potentially hazardous to dereference (which requires `unsafe`).
986 /// Always use `.sub(count)` instead when possible, because `sub`
987 /// allows the compiler to optimize better.
994 /// // Iterate using a raw pointer in increments of two elements (backwards)
995 /// let data = [1u8, 2, 3, 4, 5];
996 /// let mut ptr: *const u8 = data.as_ptr();
997 /// let start_rounded_down = ptr.wrapping_sub(2);
998 /// ptr = ptr.wrapping_add(4);
1000 /// // This loop prints "5, 3, 1, "
1001 /// while ptr != start_rounded_down {
1003 /// print!("{}, ", *ptr);
1005 /// ptr = ptr.wrapping_sub(step);
1008 #[stable(feature = "pointer_methods", since = "1.26.0")]
1010 pub fn wrapping_sub(self, count: usize) -> Self
1013 self.wrapping_offset((count as isize).wrapping_neg())
1016 /// Reads the value from `self` without moving it. This leaves the
1017 /// memory in `self` unchanged.
1021 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1022 /// moves the value out of `self` without preventing further usage of `self`.
1023 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1024 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1025 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1026 /// because it will attempt to drop the value previously at `*self`.
1028 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1036 /// let y = &x as *const i32;
1039 /// assert_eq!(y.read(), 12);
1042 #[stable(feature = "pointer_methods", since = "1.26.0")]
1044 pub unsafe fn read(self) -> T
1050 /// Performs a volatile read of the value from `self` without moving it. This
1051 /// leaves the memory in `self` unchanged.
1053 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
1059 /// Rust does not currently have a rigorously and formally defined memory model,
1060 /// so the precise semantics of what "volatile" means here is subject to change
1061 /// over time. That being said, the semantics will almost always end up pretty
1062 /// similar to [C11's definition of volatile][c11].
1064 /// The compiler shouldn't change the relative order or number of volatile
1065 /// memory operations. However, volatile memory operations on zero-sized types
1066 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1067 /// and may be ignored.
1069 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1073 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1074 /// moves the value out of `self` without preventing further usage of `self`.
1075 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1076 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1077 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1078 /// because it will attempt to drop the value previously at `*self`.
1086 /// let y = &x as *const i32;
1089 /// assert_eq!(y.read_volatile(), 12);
1092 #[stable(feature = "pointer_methods", since = "1.26.0")]
1094 pub unsafe fn read_volatile(self) -> T
1100 /// Reads the value from `self` without moving it. This leaves the
1101 /// memory in `self` unchanged.
1103 /// Unlike `read`, the pointer may be unaligned.
1107 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1108 /// moves the value out of `self` without preventing further usage of `self`.
1109 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1110 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1111 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1112 /// because it will attempt to drop the value previously at `*self`.
1120 /// let y = &x as *const i32;
1123 /// assert_eq!(y.read_unaligned(), 12);
1126 #[stable(feature = "pointer_methods", since = "1.26.0")]
1128 pub unsafe fn read_unaligned(self) -> T
1131 read_unaligned(self)
1134 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1135 /// and destination may overlap.
1137 /// NOTE: this has the *same* argument order as `ptr::copy`.
1139 /// This is semantically equivalent to C's `memmove`.
1143 /// Care must be taken with the ownership of `self` and `dest`.
1144 /// This method semantically moves the values of `self` into `dest`.
1145 /// However it does not drop the contents of `self`, or prevent the contents
1146 /// of `dest` from being dropped or used.
1150 /// Efficiently create a Rust vector from an unsafe buffer:
1153 /// # #[allow(dead_code)]
1154 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1155 /// let mut dst = Vec::with_capacity(elts);
1156 /// dst.set_len(elts);
1157 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1161 #[stable(feature = "pointer_methods", since = "1.26.0")]
1163 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1166 copy(self, dest, count)
1169 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1170 /// and destination may *not* overlap.
1172 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1174 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1178 /// Beyond requiring that the program must be allowed to access both regions
1179 /// of memory, it is Undefined Behavior for source and destination to
1180 /// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1182 /// However it does not drop the contents of `dest`, or prevent the contents
1183 /// of `self` from being dropped or used.
1187 /// Efficiently create a Rust vector from an unsafe buffer:
1190 /// # #[allow(dead_code)]
1191 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1192 /// let mut dst = Vec::with_capacity(elts);
1193 /// dst.set_len(elts);
1194 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1198 #[stable(feature = "pointer_methods", since = "1.26.0")]
1200 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1203 copy_nonoverlapping(self, dest, count)
1206 /// Computes the byte offset that needs to be applied in order to
1207 /// make the pointer aligned to `align`.
1208 /// If it is not possible to align the pointer, the implementation returns
1209 /// `usize::max_value()`.
/// There are no guarantees whatsoever that offsetting the pointer will not
1212 /// overflow or go beyond the allocation that the pointer points into.
1213 /// It is up to the caller to ensure that the returned offset is correct
1214 /// in all terms other than alignment.
1218 /// Accessing adjacent `u8` as `u16`
1221 /// # #![feature(align_offset)]
1222 /// # fn foo(n: usize) {
1223 /// # use std::mem::align_of;
1225 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1226 /// let ptr = &x[n] as *const u8;
1227 /// let offset = ptr.align_offset(align_of::<u16>());
1228 /// if offset < x.len() - n - 1 {
1229 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1230 /// assert_ne!(*u16_ptr, 500);
1232 /// // while the pointer can be aligned via `offset`, it would point
1233 /// // outside the allocation
1237 #[unstable(feature = "align_offset", issue = "44488")]
1238 pub fn align_offset(self, align: usize) -> usize {
1240 intrinsics::align_offset(self as *const _, align)
1246 impl<T: ?Sized> *mut T {
1247 /// Returns `true` if the pointer is null.
1249 /// Note that unsized types have many possible null pointers, as only the
1250 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to each other.
1259 /// let mut s = [1, 2, 3];
1260 /// let ptr: *mut u32 = s.as_mut_ptr();
1261 /// assert!(!ptr.is_null());
1263 #[stable(feature = "rust1", since = "1.0.0")]
1265 pub fn is_null(self) -> bool {
1266 // Compare via a cast to a thin pointer, so fat pointers are only
1267 // considering their "data" part for null-ness.
1268 (self as *mut u8) == null_mut()
1271 /// Returns `None` if the pointer is null, or else returns a reference to
1272 /// the value wrapped in `Some`.
1276 /// While this method and its mutable counterpart are useful for
1277 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid memory.
1281 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1282 /// not necessarily reflect the actual lifetime of the data.
1289 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1292 /// if let Some(val_back) = ptr.as_ref() {
1293 /// println!("We got back the value: {}!", val_back);
1297 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1299 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
1307 /// Calculates the offset from a pointer.
1309 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1310 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1317 /// * Both the starting and resulting pointer must be either in bounds or one
1318 /// byte past the end of an allocated object.
1320 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1322 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1326 /// never reach a size where an offset is a concern. For instance, `Vec`
1327 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1328 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1330 /// Most platforms fundamentally can't even construct such an allocation.
1331 /// For instance, no known 64-bit platform can ever serve a request
1332 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1333 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1334 /// more than `isize::MAX` bytes with things like Physical Address
1335 /// Extension. As such, memory acquired directly from allocators or memory
1336 /// mapped files *may* be too large to handle with this function.
1338 /// Consider using `wrapping_offset` instead if these constraints are
1339 /// difficult to satisfy. The only advantage of this method is that it
1340 /// enables more aggressive compiler optimizations.
1347 /// let mut s = [1, 2, 3];
1348 /// let ptr: *mut u32 = s.as_mut_ptr();
1351 /// println!("{}", *ptr.offset(1));
1352 /// println!("{}", *ptr.offset(2));
1355 #[stable(feature = "rust1", since = "1.0.0")]
1357 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1358 intrinsics::offset(self, count) as *mut T
1361 /// Calculates the offset from a pointer using wrapping arithmetic.
1362 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1363 /// offset of `3 * size_of::<T>()` bytes.
1367 /// The resulting pointer does not need to be in bounds, but it is
1368 /// potentially hazardous to dereference (which requires `unsafe`).
1370 /// Always use `.offset(count)` instead when possible, because `offset`
1371 /// allows the compiler to optimize better.
1378 /// // Iterate using a raw pointer in increments of two elements
1379 /// let mut data = [1u8, 2, 3, 4, 5];
1380 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1382 /// let end_rounded_up = ptr.wrapping_offset(6);
1384 /// while ptr != end_rounded_up {
1388 /// ptr = ptr.wrapping_offset(step);
1390 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1392 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1394 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1396 intrinsics::arith_offset(self, count) as *mut T
1400 /// Returns `None` if the pointer is null, or else returns a mutable
1401 /// reference to the value wrapped in `Some`.
1405 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1406 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1407 /// returned is indeed a valid lifetime for the contained data.
1414 /// let mut s = [1, 2, 3];
1415 /// let ptr: *mut u32 = s.as_mut_ptr();
1416 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1417 /// *first_value = 4;
1418 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1420 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1422 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
1430 /// Calculates the distance between two pointers. The returned value is in
1431 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
1437 /// This function returns `None` if `T` is a zero-sized type.
1444 /// #![feature(offset_to)]
1447 /// let mut a = [0; 5];
1448 /// let ptr1: *mut i32 = &mut a[1];
1449 /// let ptr2: *mut i32 = &mut a[3];
1450 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1451 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1452 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1453 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1456 #[unstable(feature = "offset_to", issue = "41079")]
1458 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1459 let size = mem::size_of::<T>();
1463 let diff = (other as isize).wrapping_sub(self as isize);
1464 Some(diff / size as isize)
1468 /// Calculates the distance between two pointers. The returned value is in
1469 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1471 /// This function is the inverse of [`offset`].
1473 /// [`offset`]: #method.offset-1
1474 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
/// If any of the following conditions are violated, the result is Undefined Behavior:
1481 /// * Both the starting and other pointer must be either in bounds or one
1482 /// byte past the end of the same allocated object.
1484 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1486 /// * The distance between the pointers, in bytes, must be an exact multiple
1487 /// of the size of `T`.
1489 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1491 /// The compiler and standard library generally try to ensure allocations
1492 /// never reach a size where an offset is a concern. For instance, `Vec`
1493 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1494 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1496 /// Most platforms fundamentally can't even construct such an allocation.
1497 /// For instance, no known 64-bit platform can ever serve a request
1498 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1499 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1500 /// more than `isize::MAX` bytes with things like Physical Address
1501 /// Extension. As such, memory acquired directly from allocators or memory
1502 /// mapped files *may* be too large to handle with this function.
1504 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1505 /// difficult to satisfy. The only advantage of this method is that it
1506 /// enables more aggressive compiler optimizations.
1510 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1517 /// #![feature(ptr_offset_from)]
1520 /// let ptr1: *mut i32 = &mut a[1];
1521 /// let ptr2: *mut i32 = &mut a[3];
1523 /// assert_eq!(ptr2.offset_from(ptr1), 2);
1524 /// assert_eq!(ptr1.offset_from(ptr2), -2);
1525 /// assert_eq!(ptr1.offset(2), ptr2);
1526 /// assert_eq!(ptr2.offset(-2), ptr1);
1529 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1531 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1532 (self as *const T).offset_from(origin)
1535 /// Calculates the distance between two pointers. The returned value is in
1536 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()`, then the result of the division is rounded towards zero.
1542 /// Though this method is safe for any two pointers, note that its result
1543 /// will be mostly useless if the two pointers aren't into the same allocated
1544 /// object, for example if they point to two different local variables.
1548 /// This function panics if `T` is a zero-sized type.
1555 /// #![feature(ptr_wrapping_offset_from)]
1558 /// let ptr1: *mut i32 = &mut a[1];
1559 /// let ptr2: *mut i32 = &mut a[3];
1560 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1561 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1562 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1563 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1565 /// let ptr1: *mut i32 = 3 as _;
1566 /// let ptr2: *mut i32 = 13 as _;
1567 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1569 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1571 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1572 (self as *const T).wrapping_offset_from(origin)
1575 /// Computes the byte offset that needs to be applied in order to
1576 /// make the pointer aligned to `align`.
1577 /// If it is not possible to align the pointer, the implementation returns
1578 /// `usize::max_value()`.
/// There are no guarantees whatsoever that offsetting the pointer will not
1581 /// overflow or go beyond the allocation that the pointer points into.
1582 /// It is up to the caller to ensure that the returned offset is correct
1583 /// in all terms other than alignment.
1587 /// Accessing adjacent `u8` as `u16`
1590 /// # #![feature(align_offset)]
1591 /// # fn foo(n: usize) {
1592 /// # use std::mem::align_of;
1594 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1595 /// let ptr = &x[n] as *const u8;
1596 /// let offset = ptr.align_offset(align_of::<u16>());
1597 /// if offset < x.len() - n - 1 {
1598 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
1599 /// assert_ne!(*u16_ptr, 500);
1601 /// // while the pointer can be aligned via `offset`, it would point
1602 /// // outside the allocation
1606 #[unstable(feature = "align_offset", issue = "44488")]
1607 pub fn align_offset(self, align: usize) -> usize {
1609 intrinsics::align_offset(self as *const _, align)
1613 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1615 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1616 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1623 /// * Both the starting and resulting pointer must be either in bounds or one
1624 /// byte past the end of an allocated object.
1626 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1628 /// * The offset being in bounds cannot rely on "wrapping around" the address
1629 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1632 /// never reach a size where an offset is a concern. For instance, `Vec`
1633 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1634 /// `vec.as_ptr().add(vec.len())` is always safe.
1636 /// Most platforms fundamentally can't even construct such an allocation.
1637 /// For instance, no known 64-bit platform can ever serve a request
1638 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1639 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1640 /// more than `isize::MAX` bytes with things like Physical Address
1641 /// Extension. As such, memory acquired directly from allocators or memory
1642 /// mapped files *may* be too large to handle with this function.
1644 /// Consider using `wrapping_offset` instead if these constraints are
1645 /// difficult to satisfy. The only advantage of this method is that it
1646 /// enables more aggressive compiler optimizations.
1653 /// let s: &str = "123";
1654 /// let ptr: *const u8 = s.as_ptr();
1657 /// println!("{}", *ptr.add(1) as char);
1658 /// println!("{}", *ptr.add(2) as char);
1661 #[stable(feature = "pointer_methods", since = "1.26.0")]
1663 pub unsafe fn add(self, count: usize) -> Self
1666 self.offset(count as isize)
1669 /// Calculates the offset from a pointer (convenience for
1670 /// `.offset((count as isize).wrapping_neg())`).
1672 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1673 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined Behavior:
1680 /// * Both the starting and resulting pointer must be either in bounds or one
1681 /// byte past the end of an allocated object.
1683 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1685 /// * The offset being in bounds cannot rely on "wrapping around" the address
1686 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1689 /// never reach a size where an offset is a concern. For instance, `Vec`
1690 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1691 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1693 /// Most platforms fundamentally can't even construct such an allocation.
1694 /// For instance, no known 64-bit platform can ever serve a request
1695 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1696 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1697 /// more than `isize::MAX` bytes with things like Physical Address
1698 /// Extension. As such, memory acquired directly from allocators or memory
1699 /// mapped files *may* be too large to handle with this function.
1701 /// Consider using `wrapping_offset` instead if these constraints are
1702 /// difficult to satisfy. The only advantage of this method is that it
1703 /// enables more aggressive compiler optimizations.
1710 /// let s: &str = "123";
1713 /// let end: *const u8 = s.as_ptr().add(3);
1714 /// println!("{}", *end.sub(1) as char);
1715 /// println!("{}", *end.sub(2) as char);
1718 #[stable(feature = "pointer_methods", since = "1.26.0")]
1720 pub unsafe fn sub(self, count: usize) -> Self
1723 self.offset((count as isize).wrapping_neg())
1726 /// Calculates the offset from a pointer using wrapping arithmetic.
1727 /// (convenience for `.wrapping_offset(count as isize)`)
1729 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1730 /// offset of `3 * size_of::<T>()` bytes.
1734 /// The resulting pointer does not need to be in bounds, but it is
1735 /// potentially hazardous to dereference (which requires `unsafe`).
1737 /// Always use `.add(count)` instead when possible, because `add`
1738 /// allows the compiler to optimize better.
1745 /// // Iterate using a raw pointer in increments of two elements
1746 /// let data = [1u8, 2, 3, 4, 5];
1747 /// let mut ptr: *const u8 = data.as_ptr();
1749 /// let end_rounded_up = ptr.wrapping_add(6);
1751 /// // This loop prints "1, 3, 5, "
1752 /// while ptr != end_rounded_up {
1754 /// print!("{}, ", *ptr);
1756 /// ptr = ptr.wrapping_add(step);
1759 #[stable(feature = "pointer_methods", since = "1.26.0")]
1761 pub fn wrapping_add(self, count: usize) -> Self
1764 self.wrapping_offset(count as isize)
1767 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1770 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1771 /// offset of `3 * size_of::<T>()` bytes.
1775 /// The resulting pointer does not need to be in bounds, but it is
1776 /// potentially hazardous to dereference (which requires `unsafe`).
1778 /// Always use `.sub(count)` instead when possible, because `sub`
1779 /// allows the compiler to optimize better.
1786 /// // Iterate using a raw pointer in increments of two elements (backwards)
1787 /// let data = [1u8, 2, 3, 4, 5];
1788 /// let mut ptr: *const u8 = data.as_ptr();
1789 /// let start_rounded_down = ptr.wrapping_sub(2);
1790 /// ptr = ptr.wrapping_add(4);
1792 /// // This loop prints "5, 3, 1, "
1793 /// while ptr != start_rounded_down {
1795 /// print!("{}, ", *ptr);
1797 /// ptr = ptr.wrapping_sub(step);
1800 #[stable(feature = "pointer_methods", since = "1.26.0")]
1802 pub fn wrapping_sub(self, count: usize) -> Self
1805 self.wrapping_offset((count as isize).wrapping_neg())
1808 /// Reads the value from `self` without moving it. This leaves the
1809 /// memory in `self` unchanged.
1813 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1814 /// moves the value out of `self` without preventing further usage of `self`.
1815 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1816 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1817 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1818 /// because it will attempt to drop the value previously at `*self`.
1820 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1828 /// let y = &x as *const i32;
1831 /// assert_eq!(y.read(), 12);
1834 #[stable(feature = "pointer_methods", since = "1.26.0")]
1836 pub unsafe fn read(self) -> T
1842 /// Performs a volatile read of the value from `self` without moving it. This
1843 /// leaves the memory in `self` unchanged.
1845 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile operations.
1851 /// Rust does not currently have a rigorously and formally defined memory model,
1852 /// so the precise semantics of what "volatile" means here is subject to change
1853 /// over time. That being said, the semantics will almost always end up pretty
1854 /// similar to [C11's definition of volatile][c11].
1856 /// The compiler shouldn't change the relative order or number of volatile
1857 /// memory operations. However, volatile memory operations on zero-sized types
1858 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1859 /// and may be ignored.
1861 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1865 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1866 /// moves the value out of `self` without preventing further usage of `self`.
1867 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1868 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1869 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1870 /// because it will attempt to drop the value previously at `*self`.
1878 /// let y = &x as *const i32;
1881 /// assert_eq!(y.read_volatile(), 12);
1884 #[stable(feature = "pointer_methods", since = "1.26.0")]
1886 pub unsafe fn read_volatile(self) -> T
1892 /// Reads the value from `self` without moving it. This leaves the
1893 /// memory in `self` unchanged.
1895 /// Unlike `read`, the pointer may be unaligned.
1899 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1900 /// moves the value out of `self` without preventing further usage of `self`.
1901 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1902 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1903 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1904 /// because it will attempt to drop the value previously at `*self`.
1912 /// let x = 12; let y = &x as *const i32;
1915 /// unsafe { assert_eq!(y.read_unaligned(), 12); }
1918 #[stable(feature = "pointer_methods", since = "1.26.0")]
1920 pub unsafe fn read_unaligned(self) -> T
1923 read_unaligned(self)
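// A minimal sketch of why `read_unaligned` exists: fields of a `#[repr(packed)]` struct
// may not be aligned, so they must be read with `read_unaligned` rather than `read`.
// (Newer compilers warn about taking references to packed fields; this is illustrative only.)
//
//     #[repr(packed)]
//     struct Packed { _pad: u8, value: u32 }
//
//     let p = Packed { _pad: 0, value: 0x0102_0304 };
//     let field = &p.value as *const u32;              // very likely misaligned
//     assert_eq!(unsafe { field.read_unaligned() }, 0x0102_0304);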
1926 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1927 /// and destination may overlap.
1929 /// NOTE: this has the *same* argument order as `ptr::copy`.
1931 /// This is semantically equivalent to C's `memmove`.
1935 /// Care must be taken with the ownership of `self` and `dest`.
1936 /// This method semantically moves the values of `self` into `dest`.
1937 /// However it does not drop the contents of `dest`, or prevent the contents
1938 /// of `self` from being dropped or used.
1942 /// Efficiently create a Rust vector from an unsafe buffer:
1945 /// # #[allow(dead_code)]
1946 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1947 /// let mut dst = Vec::with_capacity(elts);
1948 /// dst.set_len(elts);
1949 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1953 #[stable(feature = "pointer_methods", since = "1.26.0")]
1955 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1958 copy(self, dest, count)
1961 /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1962 /// and destination may *not* overlap.
1964 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1966 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1970 /// Beyond requiring that the program must be allowed to access both regions
1971 /// of memory, it is Undefined Behavior for source and destination to
1972 /// overlap. Care must also be taken with the ownership of `self` and
1973 /// `dest`. This method semantically moves the values of `self` into `dest`.
1974 /// However it does not drop the contents of `dest`, or prevent the contents
1975 /// of `self` from being dropped or used.
1979 /// Efficiently create a Rust vector from an unsafe buffer:
1982 /// # #[allow(dead_code)]
1983 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1984 /// let mut dst = Vec::with_capacity(elts);
1985 /// dst.set_len(elts);
1986 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1990 #[stable(feature = "pointer_methods", since = "1.26.0")]
1992 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1995 copy_nonoverlapping(self, dest, count)
1998 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1999 /// and destination may overlap.
2001 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
2003 /// This is semantically equivalent to C's `memmove`.
2007 /// Care must be taken with the ownership of `src` and `self`.
2008 /// This method semantically moves the values of `src` into `self`.
2009 /// However it does not drop the contents of `self`, or prevent the contents
2010 /// of `src` from being dropped or used.
2014 /// Efficiently create a Rust vector from an unsafe buffer:
2017 /// # #[allow(dead_code)]
2018 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
2019 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
2020 /// dst.set_len(elts);
2021 /// dst.as_mut_ptr().copy_from(ptr, elts);
2025 #[stable(feature = "pointer_methods", since = "1.26.0")]
2027 pub unsafe fn copy_from(self, src: *const T, count: usize)
2030 copy(src, self, count)
2033 /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2034 /// and destination may *not* overlap.
2036 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
2038 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
2042 /// Beyond requiring that the program must be allowed to access both regions
2043 /// of memory, it is Undefined Behavior for source and destination to
2044 /// overlap. Care must also be taken with the ownership of `src` and
2045 /// `self`. This method semantically moves the values of `src` into `self`.
2046 /// However it does not drop the contents of `self`, or prevent the contents
2047 /// of `src` from being dropped or used.
2051 /// Efficiently create a Rust vector from an unsafe buffer:
2054 /// # #[allow(dead_code)]
2055 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
2056 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
2057 /// dst.set_len(elts);
2058 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
2062 #[stable(feature = "pointer_methods", since = "1.26.0")]
2064 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2067 copy_nonoverlapping(src, self, count)
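// A minimal sketch of how the `copy_from*` methods relate to the `copy_to*` methods above:
// they perform the same copy with the argument order flipped, so the two calls below are
// interchangeable (assuming the regions are valid and do not overlap).
//
//     let src = [1u8, 2, 3];
//     let mut dst = [0u8; 3];
//     unsafe {
//         dst.as_mut_ptr().copy_from_nonoverlapping(src.as_ptr(), 3);
//         // equivalently: src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 3);
//     }
//     assert_eq!(dst, src);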
2070 /// Executes the destructor (if any) of the pointed-to value.
2072 /// This has two use cases:
2074 /// * It is *required* to use `drop_in_place` to drop unsized types like
2075 /// trait objects, because they can't be read out onto the stack and
2076 /// dropped normally.
2078 /// * It is friendlier to the optimizer to do this over `ptr::read` when
2079 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
2080 /// as the compiler doesn't need to prove that it's sound to elide the
2085 /// This has all the same safety problems as `ptr::read` with respect to
2086 /// invalid pointers, types, and double drops.
2087 #[stable(feature = "pointer_methods", since = "1.26.0")]
2089 pub unsafe fn drop_in_place(self) {
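// A minimal sketch of dropping a value in place through a raw pointer. `ManuallyDrop` is
// used so the destructor does not run a second time when the variable goes out of scope.
//
//     use std::mem::ManuallyDrop;
//
//     let mut s = ManuallyDrop::new(String::from("hello"));
//     let p: *mut String = &mut *s;
//     unsafe { p.drop_in_place(); }   // frees the String's heap buffer
//     // `s` must not be used again after this point.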
2093 /// Overwrites a memory location with the given value without reading or
2094 /// dropping the old value.
2098 /// This operation is marked unsafe because it writes through a raw pointer.
2100 /// It does not drop the contents of `self`. This is safe, but it could leak
2101 /// allocations or resources, so care must be taken not to overwrite an object
2102 /// that should be dropped.
2104 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2105 /// location pointed to by `self`.
2107 /// This is appropriate for initializing uninitialized memory, or overwriting
2108 /// memory that has previously been `read` from.
2110 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
2118 /// let mut x = 0; let y = &mut x as *mut i32;
2123 /// unsafe { y.write(12); assert_eq!(y.read(), 12); }
2126 #[stable(feature = "pointer_methods", since = "1.26.0")]
2128 pub unsafe fn write(self, val: T)
2134 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2135 /// bytes of memory starting at `self` to `val`.
2140 /// let mut vec = vec![0; 4];
2142 /// let vec_ptr = vec.as_mut_ptr();
2143 /// unsafe { vec_ptr.write_bytes(b'a', 2); }
2145 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
2147 #[stable(feature = "pointer_methods", since = "1.26.0")]
2149 pub unsafe fn write_bytes(self, val: u8, count: usize)
2152 write_bytes(self, val, count)
2155 /// Performs a volatile write of a memory location with the given value without
2156 /// reading or dropping the old value.
2158 /// Volatile operations are intended to act on I/O memory, and are guaranteed
2159 /// to not be elided or reordered by the compiler across other volatile
2164 /// Rust does not currently have a rigorously and formally defined memory model,
2165 /// so the precise semantics of what "volatile" means here is subject to change
2166 /// over time. That being said, the semantics will almost always end up pretty
2167 /// similar to [C11's definition of volatile][c11].
2169 /// The compiler shouldn't change the relative order or number of volatile
2170 /// memory operations. However, volatile memory operations on zero-sized types
2171 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
2172 /// and may be ignored.
2174 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
2178 /// This operation is marked unsafe because it accepts a raw pointer.
2180 /// It does not drop the contents of `self`. This is safe, but it could leak
2181 /// allocations or resources, so care must be taken not to overwrite an object
2182 /// that should be dropped.
2184 /// This is appropriate for initializing uninitialized memory, or overwriting
2185 /// memory that has previously been `read` from.
2193 /// let mut x = 0; let y = &mut x as *mut i32;
2197 /// unsafe { y.write_volatile(12); }
2198 /// unsafe { assert_eq!(y.read_volatile(), 12); }
2201 #[stable(feature = "pointer_methods", since = "1.26.0")]
2203 pub unsafe fn write_volatile(self, val: T)
2206 write_volatile(self, val)
2209 /// Overwrites a memory location with the given value without reading or
2210 /// dropping the old value.
2212 /// Unlike `write`, the pointer may be unaligned.
2216 /// This operation is marked unsafe because it writes through a raw pointer.
2218 /// It does not drop the contents of `self`. This is safe, but it could leak
2219 /// allocations or resources, so care must be taken not to overwrite an object
2220 /// that should be dropped.
2222 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2223 /// location pointed to by `self`.
2225 /// This is appropriate for initializing uninitialized memory, or overwriting
2226 /// memory that has previously been `read` from.
2234 /// let mut x = 0; let y = &mut x as *mut i32;
2238 /// unsafe { y.write_unaligned(12); }
2239 /// unsafe { assert_eq!(y.read_unaligned(), 12); }
2242 #[stable(feature = "pointer_methods", since = "1.26.0")]
2244 pub unsafe fn write_unaligned(self, val: T)
2247 write_unaligned(self, val)
2250 /// Replaces the value at `self` with `src`, returning the old
2251 /// value, without dropping either.
2255 /// This is only unsafe because it accepts a raw pointer.
2256 /// Otherwise, this operation is identical to `mem::replace`.
2257 #[stable(feature = "pointer_methods", since = "1.26.0")]
2259 pub unsafe fn replace(self, src: T) -> T
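// A minimal sketch of `replace` through a raw pointer, mirroring `mem::replace`:
//
//     let mut x = 5;
//     let old = unsafe { (&mut x as *mut i32).replace(10) };
//     assert_eq!(old, 5);
//     assert_eq!(x, 10);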
2265 /// Swaps the values at two mutable locations of the same type, without
2266 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2267 /// otherwise equivalent.
2271 /// This function copies the memory through the raw pointers passed to it
2274 /// Ensure that these pointers are valid before calling `swap`.
2275 #[stable(feature = "pointer_methods", since = "1.26.0")]
2277 pub unsafe fn swap(self, with: *mut T)
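// A minimal sketch of swapping two (non-overlapping) values through raw pointers:
//
//     let mut a = 1;
//     let mut b = 2;
//     unsafe { (&mut a as *mut i32).swap(&mut b as *mut i32); }
//     assert_eq!((a, b), (2, 1));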
2284 // Equality for pointers
2285 #[stable(feature = "rust1", since = "1.0.0")]
2286 impl<T: ?Sized> PartialEq for *const T {
2288 fn eq(&self, other: &*const T) -> bool { *self == *other }
2291 #[stable(feature = "rust1", since = "1.0.0")]
2292 impl<T: ?Sized> Eq for *const T {}
2294 #[stable(feature = "rust1", since = "1.0.0")]
2295 impl<T: ?Sized> PartialEq for *mut T {
2297 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2300 #[stable(feature = "rust1", since = "1.0.0")]
2301 impl<T: ?Sized> Eq for *mut T {}
2303 /// Compare raw pointers for equality.
2305 /// This is the same as using the `==` operator, but less generic:
2306 /// the arguments have to be `*const T` raw pointers,
2307 /// not anything that implements `PartialEq`.
2309 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2310 /// by their address rather than comparing the values they point to
2311 /// (which is what the `PartialEq for &T` implementation does).
2319 /// let five = 5; let other_five = 5;
2320 /// let five_ref = &five;
2321 /// let same_five_ref = &five;
2322 /// let other_five_ref = &other_five;
2324 /// assert!(five_ref == same_five_ref);
2325 /// assert!(five_ref == other_five_ref);
2327 /// assert!(ptr::eq(five_ref, same_five_ref));
2328 /// assert!(!ptr::eq(five_ref, other_five_ref));
2330 #[stable(feature = "ptr_eq", since = "1.17.0")]
2332 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2336 // Impls for function pointers
2337 macro_rules! fnptr_impls_safety_abi {
2338 ($FnTy: ty, $($Arg: ident),*) => {
2339 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2340 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2342 fn eq(&self, other: &Self) -> bool {
2343 *self as usize == *other as usize
2347 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2348 impl<Ret, $($Arg),*> Eq for $FnTy {}
2350 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2351 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2353 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2354 (*self as usize).partial_cmp(&(*other as usize))
2358 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2359 impl<Ret, $($Arg),*> Ord for $FnTy {
2361 fn cmp(&self, other: &Self) -> Ordering {
2362 (*self as usize).cmp(&(*other as usize))
2366 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2367 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2368 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2369 state.write_usize(*self as usize)
2373 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2374 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2375 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2376 fmt::Pointer::fmt(&(*self as *const ()), f)
2380 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2381 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2382 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2383 fmt::Pointer::fmt(&(*self as *const ()), f)
2389 macro_rules! fnptr_impls_args {
2390 ($($Arg: ident),+) => {
2391 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2392 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2393 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2394 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2395 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2396 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2399 // No variadic functions with 0 parameters
2400 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2401 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2402 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2403 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2407 fnptr_impls_args! { }
2408 fnptr_impls_args! { A }
2409 fnptr_impls_args! { A, B }
2410 fnptr_impls_args! { A, B, C }
2411 fnptr_impls_args! { A, B, C, D }
2412 fnptr_impls_args! { A, B, C, D, E }
2413 fnptr_impls_args! { A, B, C, D, E, F }
2414 fnptr_impls_args! { A, B, C, D, E, F, G }
2415 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2416 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2417 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2418 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2419 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
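// A minimal sketch of what the impls generated above provide: function pointers compare,
// order, hash, and format by code address rather than by the function's behaviour.
//
//     fn answer() -> i32 { 42 }
//
//     let f: fn() -> i32 = answer;
//     let g: fn() -> i32 = answer;
//     assert!(f == g);          // same underlying function, same address
//     println!("{:p}", f);      // printed via the fmt::Pointer impl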
2421 // Comparison for pointers
2422 #[stable(feature = "rust1", since = "1.0.0")]
2423 impl<T: ?Sized> Ord for *const T {
2425 fn cmp(&self, other: &*const T) -> Ordering {
2428 } else if self == other {
2436 #[stable(feature = "rust1", since = "1.0.0")]
2437 impl<T: ?Sized> PartialOrd for *const T {
2439 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2440 Some(self.cmp(other))
2444 fn lt(&self, other: &*const T) -> bool { *self < *other }
2447 fn le(&self, other: &*const T) -> bool { *self <= *other }
2450 fn gt(&self, other: &*const T) -> bool { *self > *other }
2453 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2456 #[stable(feature = "rust1", since = "1.0.0")]
2457 impl<T: ?Sized> Ord for *mut T {
2459 fn cmp(&self, other: &*mut T) -> Ordering {
2462 } else if self == other {
2470 #[stable(feature = "rust1", since = "1.0.0")]
2471 impl<T: ?Sized> PartialOrd for *mut T {
2473 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2474 Some(self.cmp(other))
2478 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2481 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2484 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2487 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
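// A minimal sketch of the ordering impls above: raw pointers compare by address, so
// pointers into the same array order by element position.
//
//     let arr = [1, 2, 3];
//     let first = &arr[0] as *const i32;
//     let last = &arr[2] as *const i32;
//     assert!(first < last);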
2490 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2491 /// of this wrapper owns the referent. Useful for building abstractions like
2492 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2494 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2495 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2496 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2497 /// the referent of the pointer should not be modified without a unique path to
2498 /// its owning Unique.
2500 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2501 /// consider using `NonNull`, which has weaker semantics.
2503 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2504 /// is never dereferenced. This is so that enums may use this forbidden value
2505 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2506 /// However the pointer may still dangle if it isn't dereferenced.
2508 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2509 /// for any type which upholds Unique's aliasing requirements.
2510 #[unstable(feature = "ptr_internals", issue = "0",
2511 reason = "use NonNull instead and consider PhantomData<T> \
2512 (if you also use #[may_dangle]), Send, and/or Sync")]
2513 pub struct Unique<T: ?Sized> {
2514 pointer: NonZero<*const T>,
2515 // NOTE: this marker has no consequences for variance, but is necessary
2516 // for dropck to understand that we logically own a `T`.
2518 // For details, see:
2519 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2520 _marker: PhantomData<T>,
2523 #[unstable(feature = "ptr_internals", issue = "0")]
2524 impl<T: ?Sized> fmt::Debug for Unique<T> {
2525 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2526 fmt::Pointer::fmt(&self.as_ptr(), f)
2530 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2531 /// reference is unaliased. Note that this aliasing invariant is
2532 /// unenforced by the type system; the abstraction using the
2533 /// `Unique` must enforce it.
2534 #[unstable(feature = "ptr_internals", issue = "0")]
2535 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2537 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2538 /// reference is unaliased. Note that this aliasing invariant is
2539 /// unenforced by the type system; the abstraction using the
2540 /// `Unique` must enforce it.
2541 #[unstable(feature = "ptr_internals", issue = "0")]
2542 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2544 #[unstable(feature = "ptr_internals", issue = "0")]
2545 impl<T: Sized> Unique<T> {
2546 /// Creates a new `Unique` that is dangling, but well-aligned.
2548 /// This is useful for initializing types which lazily allocate, like
2549 /// `Vec::new` does.
2550 // FIXME: rename to dangling() to match NonNull?
2551 pub fn empty() -> Self {
2553 let ptr = mem::align_of::<T>() as *mut T;
2554 Unique::new_unchecked(ptr)
2559 #[unstable(feature = "ptr_internals", issue = "0")]
2560 impl<T: ?Sized> Unique<T> {
2561 /// Creates a new `Unique`.
2565 /// `ptr` must be non-null.
2566 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2567 Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
2570 /// Creates a new `Unique` if `ptr` is non-null.
2571 pub fn new(ptr: *mut T) -> Option<Self> {
2572 NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
2575 /// Acquires the underlying `*mut` pointer.
2576 pub fn as_ptr(self) -> *mut T {
2577 self.pointer.get() as *mut T
2580 /// Dereferences the content.
2582 /// The resulting lifetime is bound to self so this behaves "as if"
2583 /// it were actually an instance of T that is getting borrowed. If a longer
2584 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2585 pub unsafe fn as_ref(&self) -> &T {
2589 /// Mutably dereferences the content.
2591 /// The resulting lifetime is bound to self so this behaves "as if"
2592 /// it were actually an instance of T that is getting borrowed. If a longer
2593 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2594 pub unsafe fn as_mut(&mut self) -> &mut T {
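// A minimal sketch of the `Unique` API above. `Unique` is an internal, unstable type, so
// this only compiles on nightly with `#![feature(ptr_internals)]`:
//
//     let mut x = 5;
//     let mut u = Unique::new(&mut x as *mut i32).expect("pointer is non-null");
//     unsafe { *u.as_mut() += 1; }
//     assert_eq!(x, 6);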
2599 #[unstable(feature = "ptr_internals", issue = "0")]
2600 impl<T: ?Sized> Clone for Unique<T> {
2601 fn clone(&self) -> Self {
2606 #[unstable(feature = "ptr_internals", issue = "0")]
2607 impl<T: ?Sized> Copy for Unique<T> { }
2609 #[unstable(feature = "ptr_internals", issue = "0")]
2610 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2612 #[unstable(feature = "ptr_internals", issue = "0")]
2613 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2614 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2615 fmt::Pointer::fmt(&self.as_ptr(), f)
2619 #[unstable(feature = "ptr_internals", issue = "0")]
2620 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2621 fn from(reference: &'a mut T) -> Self {
2622 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2626 #[unstable(feature = "ptr_internals", issue = "0")]
2627 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2628 fn from(reference: &'a T) -> Self {
2629 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2633 #[unstable(feature = "ptr_internals", issue = "0")]
2634 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2635 fn from(p: NonNull<T>) -> Self {
2636 Unique { pointer: p.pointer, _marker: PhantomData }
2640 /// Previous name of `NonNull`.
2641 #[rustc_deprecated(since = "1.25.0", reason = "renamed to `NonNull`")]
2642 #[unstable(feature = "shared", issue = "27730")]
2643 pub type Shared<T> = NonNull<T>;
2645 /// `*mut T` but non-zero and covariant.
2647 /// This is often the correct thing to use when building data structures using
2648 /// raw pointers, but is ultimately more dangerous to use because of its additional
2649 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2651 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2652 /// is never dereferenced. This is so that enums may use this forbidden value
2653 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `NonNull<T>`.
2654 /// However the pointer may still dangle if it isn't dereferenced.
2656 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2657 /// for your use case, you should include some PhantomData in your type to
2658 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2659 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2660 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2661 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
2662 #[stable(feature = "nonnull", since = "1.25.0")]
2663 pub struct NonNull<T: ?Sized> {
2664 pointer: NonZero<*const T>,
2667 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2668 // NB: This impl is unnecessary, but should provide better error messages.
2669 #[stable(feature = "nonnull", since = "1.25.0")]
2670 impl<T: ?Sized> !Send for NonNull<T> { }
2672 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2673 // NB: This impl is unnecessary, but should provide better error messages.
2674 #[stable(feature = "nonnull", since = "1.25.0")]
2675 impl<T: ?Sized> !Sync for NonNull<T> { }
2677 impl<T: Sized> NonNull<T> {
2678 /// Creates a new `NonNull` that is dangling, but well-aligned.
2680 /// This is useful for initializing types which lazily allocate, like
2681 /// `Vec::new` does.
2682 #[stable(feature = "nonnull", since = "1.25.0")]
2683 pub fn dangling() -> Self {
2685 let ptr = mem::align_of::<T>() as *mut T;
2686 NonNull::new_unchecked(ptr)
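// A minimal sketch: a dangling `NonNull` has the alignment of `T` as its address and must
// never be dereferenced; it only serves as a placeholder until a real allocation exists.
//
//     let p = NonNull::<u64>::dangling();
//     assert_eq!(p.as_ptr() as usize, std::mem::align_of::<u64>());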
2691 impl<T: ?Sized> NonNull<T> {
2692 /// Creates a new `NonNull`.
2696 /// `ptr` must be non-null.
2697 #[stable(feature = "nonnull", since = "1.25.0")]
2698 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2699 NonNull { pointer: NonZero::new_unchecked(ptr) }
2702 /// Creates a new `NonNull` if `ptr` is non-null.
2703 #[stable(feature = "nonnull", since = "1.25.0")]
2704 pub fn new(ptr: *mut T) -> Option<Self> {
2705 NonZero::new(ptr as *const T).map(|nz| NonNull { pointer: nz })
2708 /// Acquires the underlying `*mut` pointer.
2709 #[stable(feature = "nonnull", since = "1.25.0")]
2710 pub fn as_ptr(self) -> *mut T {
2711 self.pointer.get() as *mut T
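// A minimal sketch of constructing and using a `NonNull` (stable since 1.25):
//
//     use std::ptr::{self, NonNull};
//
//     let mut x = 0u32;
//     let nn = NonNull::new(&mut x as *mut u32).expect("pointer is not null");
//     unsafe { *nn.as_ptr() = 7; }
//     assert_eq!(x, 7);
//     assert!(NonNull::<u32>::new(ptr::null_mut()).is_none());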
2714 /// Dereferences the content.
2716 /// The resulting lifetime is bound to self so this behaves "as if"
2717 /// it were actually an instance of T that is getting borrowed. If a longer
2718 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2719 #[stable(feature = "nonnull", since = "1.25.0")]
2720 pub unsafe fn as_ref(&self) -> &T {
2724 /// Mutably dereferences the content.
2726 /// The resulting lifetime is bound to self so this behaves "as if"
2727 /// it were actually an instance of T that is getting borrowed. If a longer
2728 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2729 #[stable(feature = "nonnull", since = "1.25.0")]
2730 pub unsafe fn as_mut(&mut self) -> &mut T {
2734 /// Cast to a pointer of another type
2735 #[unstable(feature = "nonnull_cast", issue = "47653")]
2736 pub fn cast<U>(self) -> NonNull<U> {
2738 NonNull::new_unchecked(self.as_ptr() as *mut U)
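// A minimal sketch of `cast`, which changes the pointee type while keeping the non-null
// guarantee (unstable here behind `feature(nonnull_cast)`; stabilized in a later release):
//
//     let mut x = 1u32;
//     let p: NonNull<u32> = NonNull::from(&mut x);
//     let bytes: NonNull<u8> = p.cast::<u8>();
//     assert_eq!(bytes.as_ptr() as usize, p.as_ptr() as usize);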
2743 #[stable(feature = "nonnull", since = "1.25.0")]
2744 impl<T: ?Sized> Clone for NonNull<T> {
2745 fn clone(&self) -> Self {
2750 #[stable(feature = "nonnull", since = "1.25.0")]
2751 impl<T: ?Sized> Copy for NonNull<T> { }
2753 #[unstable(feature = "coerce_unsized", issue = "27732")]
2754 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2756 #[stable(feature = "nonnull", since = "1.25.0")]
2757 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2758 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2759 fmt::Pointer::fmt(&self.as_ptr(), f)
2763 #[stable(feature = "nonnull", since = "1.25.0")]
2764 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2765 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2766 fmt::Pointer::fmt(&self.as_ptr(), f)
2770 #[stable(feature = "nonnull", since = "1.25.0")]
2771 impl<T: ?Sized> Eq for NonNull<T> {}
2773 #[stable(feature = "nonnull", since = "1.25.0")]
2774 impl<T: ?Sized> PartialEq for NonNull<T> {
2775 fn eq(&self, other: &Self) -> bool {
2776 self.as_ptr() == other.as_ptr()
2780 #[stable(feature = "nonnull", since = "1.25.0")]
2781 impl<T: ?Sized> Ord for NonNull<T> {
2782 fn cmp(&self, other: &Self) -> Ordering {
2783 self.as_ptr().cmp(&other.as_ptr())
2787 #[stable(feature = "nonnull", since = "1.25.0")]
2788 impl<T: ?Sized> PartialOrd for NonNull<T> {
2789 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2790 self.as_ptr().partial_cmp(&other.as_ptr())
2794 #[stable(feature = "nonnull", since = "1.25.0")]
2795 impl<T: ?Sized> hash::Hash for NonNull<T> {
2796 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2797 self.as_ptr().hash(state)
2801 #[unstable(feature = "ptr_internals", issue = "0")]
2802 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2803 fn from(unique: Unique<T>) -> Self {
2804 NonNull { pointer: unique.pointer }
2808 #[stable(feature = "nonnull", since = "1.25.0")]
2809 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
2810 fn from(reference: &'a mut T) -> Self {
2811 NonNull { pointer: NonZero::from(reference) }
2815 #[stable(feature = "nonnull", since = "1.25.0")]
2816 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
2817 fn from(reference: &'a T) -> Self {
2818 NonNull { pointer: NonZero::from(reference) }