1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
// FIXME: talk about offset, copy, copy_nonoverlapping
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
26 #[allow(deprecated)] use nonzero::NonZero;
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
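// For orientation, the three re-exports above correspond to C's `memcpy`,
// `memmove`, and `memset`. A quick sketch of the distinction (illustrative
// only; assumes `use std::ptr;` on the caller's side):
//
//     let src = [1u8, 2, 3, 4];
//     let mut dst = [0u8; 4];
//     let p = dst.as_mut_ptr();
//     unsafe {
//         // `copy_nonoverlapping` ~ memcpy: the buffers must be disjoint.
//         ptr::copy_nonoverlapping(src.as_ptr(), p, 4);
//         // `copy` ~ memmove: the two ranges may overlap.
//         ptr::copy(p, p.offset(1), 3);
//         // `write_bytes` ~ memset: fill a range with a byte value.
//         ptr::write_bytes(p, 0, 4);
//     }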
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and
/// dropped normally.
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// copy.
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
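///
/// # Examples
///
/// A sketch of manually dropping the tail element of a vector (illustrative;
/// the `Rc` is only there to observe that the destructor really ran):
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
/// let mut v = vec![Rc::new(0), last];
///
/// unsafe {
///     // Truncate `v` by hand: drop the last element in place, then
///     // shrink the length so the slot is never touched again.
///     ptr::drop_in_place(&mut v[1]);
///     v.set_len(1);
/// }
/// assert!(weak.upgrade().is_none());
/// ```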
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
62 drop_in_place(to_drop);
65 /// Creates a null raw pointer.
72 /// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
86 /// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
/// This function copies the memory through the raw pointers passed to it
/// as arguments.
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
115 /// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
118 /// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
///
/// unsafe {
///     ptr::swap(x, y);
/// }
///
/// assert_eq!([2, 3, 0, 1], array);
126 /// Swapping two overlapping regions:
131 /// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
134 /// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
///
/// unsafe {
///     ptr::swap(x, y);
/// }
///
/// assert_eq!([1, 0, 1, 2], array);
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
// `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
171 /// let mut x = [1, 2, 3, 4];
172 /// let mut y = [7, 8, 9];
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
178 /// assert_eq!(x, [7, 8, 3, 4]);
179 /// assert_eq!(y, [1, 2, 9]);
182 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
183 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
184 let x = x as *mut u8;
185 let y = y as *mut u8;
186 let len = mem::size_of::<T>() * count;
187 swap_nonoverlapping_bytes(x, y, len)
191 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
// The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
// that swapping either 32 bytes or 64 bytes at a time is most efficient for
// Intel Haswell E processors. LLVM is more able to optimize if we give a struct a
195 // #[repr(simd)], even if we don't actually use this struct directly.
197 // FIXME repr(simd) broken on emscripten and redox
198 // It's also broken on big-endian powerpc64 and s390x. #42778
#[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                   target_endian = "big")),
           repr(simd))]
struct Block(u64, u64, u64, u64);
203 struct UnalignedBlock(u64, u64, u64, u64);
205 let block_size = mem::size_of::<Block>();
207 // Loop through x & y, copying them `Block` at a time
208 // The optimizer should unroll the loop fully for most types
209 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
let mut i = 0;
while i + block_size <= len {
    // Create some uninitialized memory as scratch space
    // Declaring `t` here avoids aligning the stack when this loop is unused
    let mut t: Block = mem::uninitialized();
    let t = &mut t as *mut _ as *mut u8;
    let x = x.offset(i as isize);
    let y = y.offset(i as isize);

    // Swap a block of bytes of x & y, using t as a temporary buffer
    // This should be optimized into efficient SIMD operations where available
    copy_nonoverlapping(x, t, block_size);
    copy_nonoverlapping(y, x, block_size);
    copy_nonoverlapping(t, y, block_size);

    i += block_size;
}
// Swap any remaining bytes
if i < len {
    let mut t: UnalignedBlock = mem::uninitialized();
    let rem = len - i;

    let t = &mut t as *mut _ as *mut u8;
    let x = x.offset(i as isize);
    let y = y.offset(i as isize);

    copy_nonoverlapping(x, t, rem);
    copy_nonoverlapping(y, x, rem);
    copy_nonoverlapping(t, y, rem);
}
}
242 /// Replaces the value at `dest` with `src`, returning the old
243 /// value, without dropping either.
247 /// This is only unsafe because it accepts a raw pointer.
248 /// Otherwise, this operation is identical to `mem::replace`.
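///
/// # Examples
///
/// A minimal sketch of the semantics:
///
/// ```
/// use std::ptr;
///
/// let mut x = 5;
/// let old = unsafe { ptr::replace(&mut x, 10) };
/// assert_eq!(old, 5);
/// assert_eq!(x, 10);
/// ```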
250 #[stable(feature = "rust1", since = "1.0.0")]
251 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
256 /// Reads the value from `src` without moving it. This leaves the
257 /// memory in `src` unchanged.
261 /// Beyond accepting a raw pointer, this is unsafe because it semantically
262 /// moves the value out of `src` without preventing further usage of `src`.
263 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
264 /// `src` is not used before the data is overwritten again (e.g. with `write`,
265 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
266 /// because it will attempt to drop the value previously at `*src`.
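///
/// How a non-`Copy` value is typically moved out and replaced (a sketch of
/// `mem::replace`-like behavior built from raw parts):
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("old");
/// let moved_out = unsafe {
///     let val = ptr::read(&s);
///     // Overwrite `s` without reading or dropping the duplicate that
///     // `read` left behind, so nothing is dropped twice.
///     ptr::write(&mut s, String::from("new"));
///     val
/// };
/// assert_eq!(moved_out, "old");
/// assert_eq!(s, "new");
/// ```
///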
268 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
283 #[stable(feature = "rust1", since = "1.0.0")]
284 pub unsafe fn read<T>(src: *const T) -> T {
285 let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
290 /// Reads the value from `src` without moving it. This leaves the
291 /// memory in `src` unchanged.
293 /// Unlike `read`, the pointer may be unaligned.
297 /// Beyond accepting a raw pointer, this is unsafe because it semantically
298 /// moves the value out of `src` without preventing further usage of `src`.
299 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
300 /// `src` is not used before the data is overwritten again (e.g. with `write`,
301 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
302 /// because it will attempt to drop the value previously at `*src`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
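///
/// A genuinely misaligned read out of a byte buffer (a sketch; the byte
/// values are arbitrary):
///
/// ```
/// let data = [0u8, 1, 2, 3, 4];
/// let ptr = &data[1] as *const u8 as *const u32;
///
/// // `ptr` need not be 4-byte aligned, so `read` could be undefined
/// // behavior here; `read_unaligned` handles any alignment.
/// let _n = unsafe { std::ptr::read_unaligned(ptr) };
/// ```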
317 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
318 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
319 let mut tmp: T = mem::uninitialized();
320 copy_nonoverlapping(src as *const u8,
321 &mut tmp as *mut T as *mut u8,
mem::size_of::<T>());
tmp
}
326 /// Overwrites a memory location with the given value without reading or
327 /// dropping the old value.
331 /// This operation is marked unsafe because it accepts a raw pointer.
333 /// It does not drop the contents of `dst`. This is safe, but it could leak
334 /// allocations or resources, so care must be taken not to overwrite an object
335 /// that should be dropped.
337 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
338 /// location pointed to by `dst`.
340 /// This is appropriate for initializing uninitialized memory, or overwriting
341 /// memory that has previously been `read` from.
343 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
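///
/// `write` is also the right tool for initializing memory that is not yet
/// initialized, for example the spare capacity of a `Vec` (an illustrative
/// sketch):
///
/// ```
/// let mut v: Vec<i32> = Vec::with_capacity(1);
///
/// unsafe {
///     // Write into the uninitialized slot, then make it visible.
///     std::ptr::write(v.as_mut_ptr(), 42);
///     v.set_len(1);
/// }
/// assert_eq!(v, [42]);
/// ```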
360 #[stable(feature = "rust1", since = "1.0.0")]
361 pub unsafe fn write<T>(dst: *mut T, src: T) {
362 intrinsics::move_val_init(&mut *dst, src)
365 /// Overwrites a memory location with the given value without reading or
366 /// dropping the old value.
368 /// Unlike `write`, the pointer may be unaligned.
372 /// This operation is marked unsafe because it accepts a raw pointer.
374 /// It does not drop the contents of `dst`. This is safe, but it could leak
375 /// allocations or resources, so care must be taken not to overwrite an object
376 /// that should be dropped.
378 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
379 /// location pointed to by `dst`.
381 /// This is appropriate for initializing uninitialized memory, or overwriting
382 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_unaligned(y, z);
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
399 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
400 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
copy_nonoverlapping(&src as *const T as *const u8,
                    dst as *mut u8,
                    mem::size_of::<T>());
mem::forget(src);
}
407 /// Performs a volatile read of the value from `src` without moving it. This
408 /// leaves the memory in `src` unchanged.
410 /// Volatile operations are intended to act on I/O memory, and are guaranteed
411 /// to not be elided or reordered by the compiler across other volatile
416 /// Rust does not currently have a rigorously and formally defined memory model,
417 /// so the precise semantics of what "volatile" means here is subject to change
418 /// over time. That being said, the semantics will almost always end up pretty
419 /// similar to [C11's definition of volatile][c11].
421 /// The compiler shouldn't change the relative order or number of volatile
422 /// memory operations. However, volatile memory operations on zero-sized types
423 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
424 /// and may be ignored.
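///
/// For example, a busy-wait on a hypothetical memory-mapped status register
/// (a sketch only; the address is made up and platform-specific):
///
/// ```no_run
/// use std::ptr;
///
/// let status = 0x4000_0000 as *const u32;
///
/// // Each iteration performs a real load from `status`; the compiler may
/// // not collapse the loop into a single read.
/// while unsafe { ptr::read_volatile(status) } & 1 == 0 {}
/// ```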
426 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
430 /// Beyond accepting a raw pointer, this is unsafe because it semantically
431 /// moves the value out of `src` without preventing further usage of `src`.
432 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
433 /// `src` is not used before the data is overwritten again (e.g. with `write`,
434 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
435 /// because it will attempt to drop the value previously at `*src`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
450 #[stable(feature = "volatile", since = "1.9.0")]
451 pub unsafe fn read_volatile<T>(src: *const T) -> T {
452 intrinsics::volatile_load(src)
455 /// Performs a volatile write of a memory location with the given value without
456 /// reading or dropping the old value.
458 /// Volatile operations are intended to act on I/O memory, and are guaranteed
459 /// to not be elided or reordered by the compiler across other volatile
464 /// Rust does not currently have a rigorously and formally defined memory model,
465 /// so the precise semantics of what "volatile" means here is subject to change
466 /// over time. That being said, the semantics will almost always end up pretty
467 /// similar to [C11's definition of volatile][c11].
469 /// The compiler shouldn't change the relative order or number of volatile
470 /// memory operations. However, volatile memory operations on zero-sized types
471 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
472 /// and may be ignored.
474 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
478 /// This operation is marked unsafe because it accepts a raw pointer.
480 /// It does not drop the contents of `dst`. This is safe, but it could leak
481 /// allocations or resources, so care must be taken not to overwrite an object
482 /// that should be dropped.
484 /// This is appropriate for initializing uninitialized memory, or overwriting
485 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
502 #[stable(feature = "volatile", since = "1.9.0")]
503 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
504 intrinsics::volatile_store(dst, src);
507 #[lang = "const_ptr"]
508 impl<T: ?Sized> *const T {
509 /// Returns `true` if the pointer is null.
511 /// Note that unsized types have many possible null pointers, as only the
512 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
521 /// let s: &str = "Follow the rabbit";
522 /// let ptr: *const u8 = s.as_ptr();
523 /// assert!(!ptr.is_null());
525 #[stable(feature = "rust1", since = "1.0.0")]
527 pub fn is_null(self) -> bool {
528 // Compare via a cast to a thin pointer, so fat pointers are only
529 // considering their "data" part for null-ness.
530 (self as *const u8) == null()
533 /// Returns `None` if the pointer is null, or else returns a reference to
534 /// the value wrapped in `Some`.
538 /// While this method and its mutable counterpart are useful for
539 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
543 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
544 /// not necessarily reflect the actual lifetime of the data.
551 /// let ptr: *const u8 = &10u8 as *const u8;
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
559 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}
569 /// Calculates the offset from a pointer.
571 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
572 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
579 /// * Both the starting and resulting pointer must be either in bounds or one
580 /// byte past the end of an allocated object.
582 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
584 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a
/// `usize`.
/// The compiler and standard library generally try to ensure allocations
588 /// never reach a size where an offset is a concern. For instance, `Vec`
589 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
590 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
592 /// Most platforms fundamentally can't even construct such an allocation.
593 /// For instance, no known 64-bit platform can ever serve a request
594 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
595 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
596 /// more than `isize::MAX` bytes with things like Physical Address
597 /// Extension. As such, memory acquired directly from allocators or memory
598 /// mapped files *may* be too large to handle with this function.
600 /// Consider using `wrapping_offset` instead if these constraints are
601 /// difficult to satisfy. The only advantage of this method is that it
602 /// enables more aggressive compiler optimizations.
609 /// let s: &str = "123";
610 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.offset(1) as char);
///     println!("{}", *ptr.offset(2) as char);
/// }
617 #[stable(feature = "rust1", since = "1.0.0")]
619 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
620 intrinsics::offset(self, count)
623 /// Calculates the offset from a pointer using wrapping arithmetic.
625 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
626 /// offset of `3 * size_of::<T>()` bytes.
630 /// The resulting pointer does not need to be in bounds, but it is
631 /// potentially hazardous to dereference (which requires `unsafe`).
633 /// Always use `.offset(count)` instead when possible, because `offset`
634 /// allows the compiler to optimize better.
641 /// // Iterate using a raw pointer in increments of two elements
642 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_offset(step);
/// }
655 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
657 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
659 intrinsics::arith_offset(self, count)
663 /// Calculates the distance between two pointers. The returned value is in
664 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
670 /// This function returns `None` if `T` is a zero-sized type.
677 /// #![feature(offset_to)]
678 /// #![allow(deprecated)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
683 /// let ptr2: *const i32 = &a[3];
684 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
685 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
686 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
687 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
690 #[unstable(feature = "offset_to", issue = "41079")]
691 #[rustc_deprecated(since = "1.27.0", reason = "Replaced by `wrapping_offset_from`, with the \
692 opposite argument order. If you're writing unsafe code, consider `offset_from`.")]
694 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
let size = mem::size_of::<T>();
if size == 0 {
    None
} else {
    Some(other.wrapping_offset_from(self))
}
}
703 /// Calculates the distance between two pointers. The returned value is in
704 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
706 /// This function is the inverse of [`offset`].
708 /// [`offset`]: #method.offset
709 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
716 /// * Both the starting and other pointer must be either in bounds or one
717 /// byte past the end of the same allocated object.
719 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
721 /// * The distance between the pointers, in bytes, must be an exact multiple
722 /// of the size of `T`.
724 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
726 /// The compiler and standard library generally try to ensure allocations
727 /// never reach a size where an offset is a concern. For instance, `Vec`
728 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
729 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
731 /// Most platforms fundamentally can't even construct such an allocation.
732 /// For instance, no known 64-bit platform can ever serve a request
733 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
734 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
735 /// more than `isize::MAX` bytes with things like Physical Address
736 /// Extension. As such, memory acquired directly from allocators or memory
737 /// mapped files *may* be too large to handle with this function.
739 /// Consider using [`wrapping_offset_from`] instead if these constraints are
740 /// difficult to satisfy. The only advantage of this method is that it
741 /// enables more aggressive compiler optimizations.
745 /// This function panics if `T` is a Zero-Sized Type ("ZST").
752 /// #![feature(ptr_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
/// let ptr2: *const i32 = &a[3];
///
/// unsafe {
///     assert_eq!(ptr2.offset_from(ptr1), 2);
///     assert_eq!(ptr1.offset_from(ptr2), -2);
///     assert_eq!(ptr1.offset(2), ptr2);
///     assert_eq!(ptr2.offset(-2), ptr1);
/// }
764 #[unstable(feature = "ptr_offset_from", issue = "41079")]
766 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
767 let pointee_size = mem::size_of::<T>();
768 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
770 // This is the same sequence that Clang emits for pointer subtraction.
771 // It can be neither `nsw` nor `nuw` because the input is treated as
772 // unsigned but then the output is treated as signed, so neither works.
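// For example (hypothetical addresses): with `T = u32`, `self` at 0x108
// and `origin` at 0x100 give `d = 8` bytes and a result of 2 elements.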
773 let d = isize::wrapping_sub(self as _, origin as _);
774 intrinsics::exact_div(d, pointee_size as _)
777 /// Calculates the distance between two pointers. The returned value is in
778 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
784 /// Though this method is safe for any two pointers, note that its result
785 /// will be mostly useless if the two pointers aren't into the same allocated
786 /// object, for example if they point to two different local variables.
790 /// This function panics if `T` is a zero-sized type.
797 /// #![feature(ptr_wrapping_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
801 /// let ptr2: *const i32 = &a[3];
802 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
803 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
804 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
805 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
807 /// let ptr1: *const i32 = 3 as _;
808 /// let ptr2: *const i32 = 13 as _;
809 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
811 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
813 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
814 let pointee_size = mem::size_of::<T>();
815 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
817 let d = isize::wrapping_sub(self as _, origin as _);
818 d.wrapping_div(pointee_size as _)
821 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
823 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
824 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
831 /// * Both the starting and resulting pointer must be either in bounds or one
832 /// byte past the end of an allocated object.
834 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
836 /// * The offset being in bounds cannot rely on "wrapping around" the address
837 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
840 /// never reach a size where an offset is a concern. For instance, `Vec`
841 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
842 /// `vec.as_ptr().add(vec.len())` is always safe.
844 /// Most platforms fundamentally can't even construct such an allocation.
845 /// For instance, no known 64-bit platform can ever serve a request
846 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
847 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
848 /// more than `isize::MAX` bytes with things like Physical Address
849 /// Extension. As such, memory acquired directly from allocators or memory
850 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_add` instead if these constraints are
853 /// difficult to satisfy. The only advantage of this method is that it
854 /// enables more aggressive compiler optimizations.
861 /// let s: &str = "123";
862 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.add(1) as char);
///     println!("{}", *ptr.add(2) as char);
/// }
869 #[stable(feature = "pointer_methods", since = "1.26.0")]
871 pub unsafe fn add(self, count: usize) -> Self
874 self.offset(count as isize)
877 /// Calculates the offset from a pointer (convenience for
878 /// `.offset((count as isize).wrapping_neg())`).
880 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
881 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
888 /// * Both the starting and resulting pointer must be either in bounds or one
889 /// byte past the end of an allocated object.
891 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
893 /// * The offset being in bounds cannot rely on "wrapping around" the address
894 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
897 /// never reach a size where an offset is a concern. For instance, `Vec`
898 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
899 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
901 /// Most platforms fundamentally can't even construct such an allocation.
902 /// For instance, no known 64-bit platform can ever serve a request
903 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
904 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
905 /// more than `isize::MAX` bytes with things like Physical Address
906 /// Extension. As such, memory acquired directly from allocators or memory
907 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_sub` instead if these constraints are
910 /// difficult to satisfy. The only advantage of this method is that it
911 /// enables more aggressive compiler optimizations.
918 /// let s: &str = "123";
/// unsafe {
///     let end: *const u8 = s.as_ptr().add(3);
///     println!("{}", *end.sub(1) as char);
///     println!("{}", *end.sub(2) as char);
/// }
926 #[stable(feature = "pointer_methods", since = "1.26.0")]
928 pub unsafe fn sub(self, count: usize) -> Self
931 self.offset((count as isize).wrapping_neg())
934 /// Calculates the offset from a pointer using wrapping arithmetic.
935 /// (convenience for `.wrapping_offset(count as isize)`)
937 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
938 /// offset of `3 * size_of::<T>()` bytes.
942 /// The resulting pointer does not need to be in bounds, but it is
943 /// potentially hazardous to dereference (which requires `unsafe`).
945 /// Always use `.add(count)` instead when possible, because `add`
946 /// allows the compiler to optimize better.
953 /// // Iterate using a raw pointer in increments of two elements
954 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
967 #[stable(feature = "pointer_methods", since = "1.26.0")]
969 pub fn wrapping_add(self, count: usize) -> Self
972 self.wrapping_offset(count as isize)
975 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
978 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
979 /// offset of `3 * size_of::<T>()` bytes.
983 /// The resulting pointer does not need to be in bounds, but it is
984 /// potentially hazardous to dereference (which requires `unsafe`).
986 /// Always use `.sub(count)` instead when possible, because `sub`
987 /// allows the compiler to optimize better.
994 /// // Iterate using a raw pointer in increments of two elements (backwards)
995 /// let data = [1u8, 2, 3, 4, 5];
996 /// let mut ptr: *const u8 = data.as_ptr();
997 /// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
///
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
1008 #[stable(feature = "pointer_methods", since = "1.26.0")]
1010 pub fn wrapping_sub(self, count: usize) -> Self
1013 self.wrapping_offset((count as isize).wrapping_neg())
1016 /// Reads the value from `self` without moving it. This leaves the
1017 /// memory in `self` unchanged.
1021 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1022 /// moves the value out of `self` without preventing further usage of `self`.
1023 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1024 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1025 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1026 /// because it will attempt to drop the value previously at `*self`.
1028 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read(), 12);
/// }
1042 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read(self) -> T
    where T: Sized,
{
    read(self)
}
1050 /// Performs a volatile read of the value from `self` without moving it. This
1051 /// leaves the memory in `self` unchanged.
1053 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1054 /// to not be elided or reordered by the compiler across other volatile
1059 /// Rust does not currently have a rigorously and formally defined memory model,
1060 /// so the precise semantics of what "volatile" means here is subject to change
1061 /// over time. That being said, the semantics will almost always end up pretty
1062 /// similar to [C11's definition of volatile][c11].
1064 /// The compiler shouldn't change the relative order or number of volatile
1065 /// memory operations. However, volatile memory operations on zero-sized types
1066 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1067 /// and may be ignored.
1069 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1073 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1074 /// moves the value out of `self` without preventing further usage of `self`.
1075 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1076 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1077 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1078 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_volatile(), 12);
/// }
1092 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_volatile(self) -> T
    where T: Sized,
{
    read_volatile(self)
}
1100 /// Reads the value from `self` without moving it. This leaves the
1101 /// memory in `self` unchanged.
1103 /// Unlike `read`, the pointer may be unaligned.
1107 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1108 /// moves the value out of `self` without preventing further usage of `self`.
1109 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1110 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1111 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1112 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_unaligned(), 12);
/// }
1126 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_unaligned(self) -> T
    where T: Sized,
{
    read_unaligned(self)
}
1134 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1135 /// and destination may overlap.
1137 /// NOTE: this has the *same* argument order as `ptr::copy`.
1139 /// This is semantically equivalent to C's `memmove`.
1143 /// Care must be taken with the ownership of `self` and `dest`.
1144 /// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
1150 /// Efficiently create a Rust vector from an unsafe buffer:
1153 /// # #[allow(dead_code)]
1154 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1155 /// let mut dst = Vec::with_capacity(elts);
1156 /// dst.set_len(elts);
/// ptr.copy_to(dst.as_mut_ptr(), elts);
/// dst
/// }
1161 #[stable(feature = "pointer_methods", since = "1.26.0")]
1163 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1166 copy(self, dest, count)
1169 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1170 /// and destination may *not* overlap.
1172 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1174 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1178 /// Beyond requiring that the program must be allowed to access both regions
1179 /// of memory, it is Undefined Behavior for source and destination to
1180 /// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1182 /// However it does not drop the contents of `dest`, or prevent the contents
1183 /// of `self` from being dropped or used.
1187 /// Efficiently create a Rust vector from an unsafe buffer:
1190 /// # #[allow(dead_code)]
1191 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1192 /// let mut dst = Vec::with_capacity(elts);
1193 /// dst.set_len(elts);
/// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
/// dst
/// }
1198 #[stable(feature = "pointer_methods", since = "1.26.0")]
1200 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1203 copy_nonoverlapping(self, dest, count)
1206 /// Computes the byte offset that needs to be applied in order to
1207 /// make the pointer aligned to `align`.
1208 /// If it is not possible to align the pointer, the implementation returns
1209 /// `usize::max_value()`.
/// There are no guarantees whatsoever that offsetting the pointer will not
1212 /// overflow or go beyond the allocation that the pointer points into.
1213 /// It is up to the caller to ensure that the returned offset is correct
1214 /// in all terms other than alignment.
1218 /// Accessing adjacent `u8` as `u16`
1221 /// # #![feature(align_offset)]
1222 /// # fn foo(n: usize) {
1223 /// # use std::mem::align_of;
1225 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1226 /// let ptr = &x[n] as *const u8;
1227 /// let offset = ptr.align_offset(align_of::<u16>());
1228 /// if offset < x.len() - n - 1 {
1229 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
/// assert_ne!(*u16_ptr, 500);
/// } else {
///     // while the pointer can be aligned via `offset`, it would point
///     // outside the allocation
/// }
1237 #[unstable(feature = "align_offset", issue = "44488")]
1238 pub fn align_offset(self, align: usize) -> usize {
1240 intrinsics::align_offset(self as *const _, align)
#[lang = "mut_ptr"]
impl<T: ?Sized> *mut T {
1247 /// Returns `true` if the pointer is null.
1249 /// Note that unsized types have many possible null pointers, as only the
1250 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
1259 /// let mut s = [1, 2, 3];
1260 /// let ptr: *mut u32 = s.as_mut_ptr();
1261 /// assert!(!ptr.is_null());
1263 #[stable(feature = "rust1", since = "1.0.0")]
1265 pub fn is_null(self) -> bool {
1266 // Compare via a cast to a thin pointer, so fat pointers are only
1267 // considering their "data" part for null-ness.
1268 (self as *mut u8) == null_mut()
1271 /// Returns `None` if the pointer is null, or else returns a reference to
1272 /// the value wrapped in `Some`.
1276 /// While this method and its mutable counterpart are useful for
1277 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
1281 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1282 /// not necessarily reflect the actual lifetime of the data.
1289 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
1297 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}
1307 /// Calculates the offset from a pointer.
1309 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1310 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1317 /// * Both the starting and resulting pointer must be either in bounds or one
1318 /// byte past the end of an allocated object.
1320 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1322 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a
/// `usize`.
/// The compiler and standard library generally try to ensure allocations
1326 /// never reach a size where an offset is a concern. For instance, `Vec`
1327 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1328 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1330 /// Most platforms fundamentally can't even construct such an allocation.
1331 /// For instance, no known 64-bit platform can ever serve a request
1332 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1333 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1334 /// more than `isize::MAX` bytes with things like Physical Address
1335 /// Extension. As such, memory acquired directly from allocators or memory
1336 /// mapped files *may* be too large to handle with this function.
1338 /// Consider using `wrapping_offset` instead if these constraints are
1339 /// difficult to satisfy. The only advantage of this method is that it
1340 /// enables more aggressive compiler optimizations.
1347 /// let mut s = [1, 2, 3];
1348 /// let ptr: *mut u32 = s.as_mut_ptr();
/// unsafe {
///     println!("{}", *ptr.offset(1));
///     println!("{}", *ptr.offset(2));
/// }
1355 #[stable(feature = "rust1", since = "1.0.0")]
1357 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1358 intrinsics::offset(self, count) as *mut T
1361 /// Calculates the offset from a pointer using wrapping arithmetic.
1362 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1363 /// offset of `3 * size_of::<T>()` bytes.
1367 /// The resulting pointer does not need to be in bounds, but it is
1368 /// potentially hazardous to dereference (which requires `unsafe`).
1370 /// Always use `.offset(count)` instead when possible, because `offset`
1371 /// allows the compiler to optimize better.
1378 /// // Iterate using a raw pointer in increments of two elements
1379 /// let mut data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *mut u8 = data.as_mut_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// while ptr != end_rounded_up {
///     unsafe {
///         *ptr = 0;
///     }
///     ptr = ptr.wrapping_offset(step);
/// }
///
/// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1392 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1394 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1396 intrinsics::arith_offset(self, count) as *mut T
1400 /// Returns `None` if the pointer is null, or else returns a mutable
1401 /// reference to the value wrapped in `Some`.
1405 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1406 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1407 /// returned is indeed a valid lifetime for the contained data.
1414 /// let mut s = [1, 2, 3];
1415 /// let ptr: *mut u32 = s.as_mut_ptr();
1416 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1417 /// *first_value = 4;
1418 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1420 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
    if self.is_null() {
        None
    } else {
        Some(&mut *self)
    }
}
1430 /// Calculates the distance between two pointers. The returned value is in
1431 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
1437 /// This function returns `None` if `T` is a zero-sized type.
1444 /// #![feature(offset_to)]
1445 /// #![allow(deprecated)]
1448 /// let mut a = [0; 5];
1449 /// let ptr1: *mut i32 = &mut a[1];
1450 /// let ptr2: *mut i32 = &mut a[3];
1451 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1452 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1453 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1454 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1457 #[unstable(feature = "offset_to", issue = "41079")]
1458 #[rustc_deprecated(since = "1.27.0", reason = "Replaced by `wrapping_offset_from`, with the \
1459 opposite argument order. If you're writing unsafe code, consider `offset_from`.")]
1461 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
let size = mem::size_of::<T>();
if size == 0 {
    None
} else {
    Some(other.wrapping_offset_from(self))
}
}
1470 /// Calculates the distance between two pointers. The returned value is in
1471 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1473 /// This function is the inverse of [`offset`].
1475 /// [`offset`]: #method.offset-1
1476 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1483 /// * Both the starting and other pointer must be either in bounds or one
1484 /// byte past the end of the same allocated object.
1486 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1488 /// * The distance between the pointers, in bytes, must be an exact multiple
1489 /// of the size of `T`.
1491 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1493 /// The compiler and standard library generally try to ensure allocations
1494 /// never reach a size where an offset is a concern. For instance, `Vec`
1495 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1496 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1498 /// Most platforms fundamentally can't even construct such an allocation.
1499 /// For instance, no known 64-bit platform can ever serve a request
1500 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1501 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1502 /// more than `isize::MAX` bytes with things like Physical Address
1503 /// Extension. As such, memory acquired directly from allocators or memory
1504 /// mapped files *may* be too large to handle with this function.
1506 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1507 /// difficult to satisfy. The only advantage of this method is that it
1508 /// enables more aggressive compiler optimizations.
1512 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1519 /// #![feature(ptr_offset_from)]
1521 /// let mut a = [0; 5];
1522 /// let ptr1: *mut i32 = &mut a[1];
1523 /// let ptr2: *mut i32 = &mut a[3];
/// unsafe {
///     assert_eq!(ptr2.offset_from(ptr1), 2);
///     assert_eq!(ptr1.offset_from(ptr2), -2);
///     assert_eq!(ptr1.offset(2), ptr2);
///     assert_eq!(ptr2.offset(-2), ptr1);
/// }
1531 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1533 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
1534 (self as *const T).offset_from(origin)
1537 /// Calculates the distance between two pointers. The returned value is in
1538 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
1544 /// Though this method is safe for any two pointers, note that its result
1545 /// will be mostly useless if the two pointers aren't into the same allocated
1546 /// object, for example if they point to two different local variables.
1550 /// This function panics if `T` is a zero-sized type.
1557 /// #![feature(ptr_wrapping_offset_from)]
1559 /// let mut a = [0; 5];
1560 /// let ptr1: *mut i32 = &mut a[1];
1561 /// let ptr2: *mut i32 = &mut a[3];
1562 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1563 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1564 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1565 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1567 /// let ptr1: *mut i32 = 3 as _;
1568 /// let ptr2: *mut i32 = 13 as _;
1569 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1571 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1573 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
1574 (self as *const T).wrapping_offset_from(origin)
1577 /// Computes the byte offset that needs to be applied in order to
1578 /// make the pointer aligned to `align`.
1579 /// If it is not possible to align the pointer, the implementation returns
1580 /// `usize::max_value()`.
/// There are no guarantees whatsoever that offsetting the pointer will not
1583 /// overflow or go beyond the allocation that the pointer points into.
1584 /// It is up to the caller to ensure that the returned offset is correct
1585 /// in all terms other than alignment.
1589 /// Accessing adjacent `u8` as `u16`
1592 /// # #![feature(align_offset)]
1593 /// # fn foo(n: usize) {
1594 /// # use std::mem::align_of;
1596 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1597 /// let ptr = &x[n] as *const u8;
1598 /// let offset = ptr.align_offset(align_of::<u16>());
1599 /// if offset < x.len() - n - 1 {
1600 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
/// assert_ne!(*u16_ptr, 500);
/// } else {
///     // while the pointer can be aligned via `offset`, it would point
///     // outside the allocation
/// }
1608 #[unstable(feature = "align_offset", issue = "44488")]
1609 pub fn align_offset(self, align: usize) -> usize {
1611 intrinsics::align_offset(self as *const _, align)
1615 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1617 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1618 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1625 /// * Both the starting and resulting pointer must be either in bounds or one
1626 /// byte past the end of an allocated object.
1628 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1630 /// * The offset being in bounds cannot rely on "wrapping around" the address
1631 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1634 /// never reach a size where an offset is a concern. For instance, `Vec`
1635 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1636 /// `vec.as_ptr().add(vec.len())` is always safe.
1638 /// Most platforms fundamentally can't even construct such an allocation.
1639 /// For instance, no known 64-bit platform can ever serve a request
1640 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1641 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1642 /// more than `isize::MAX` bytes with things like Physical Address
1643 /// Extension. As such, memory acquired directly from allocators or memory
1644 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_add` instead if these constraints are
1647 /// difficult to satisfy. The only advantage of this method is that it
1648 /// enables more aggressive compiler optimizations.
1655 /// let s: &str = "123";
1656 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.add(1) as char);
///     println!("{}", *ptr.add(2) as char);
/// }
1663 #[stable(feature = "pointer_methods", since = "1.26.0")]
1665 pub unsafe fn add(self, count: usize) -> Self
1668 self.offset(count as isize)
1671 /// Calculates the offset from a pointer (convenience for
1672 /// `.offset((count as isize).wrapping_neg())`).
1674 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1675 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1682 /// * Both the starting and resulting pointer must be either in bounds or one
1683 /// byte past the end of an allocated object.
1685 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1687 /// * The offset being in bounds cannot rely on "wrapping around" the address
1688 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1691 /// never reach a size where an offset is a concern. For instance, `Vec`
1692 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1693 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1695 /// Most platforms fundamentally can't even construct such an allocation.
1696 /// For instance, no known 64-bit platform can ever serve a request
1697 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1698 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1699 /// more than `isize::MAX` bytes with things like Physical Address
1700 /// Extension. As such, memory acquired directly from allocators or memory
1701 /// mapped files *may* be too large to handle with this function.
/// Consider using `wrapping_sub` instead if these constraints are
1704 /// difficult to satisfy. The only advantage of this method is that it
1705 /// enables more aggressive compiler optimizations.
1712 /// let s: &str = "123";
/// unsafe {
///     let end: *const u8 = s.as_ptr().add(3);
///     println!("{}", *end.sub(1) as char);
///     println!("{}", *end.sub(2) as char);
/// }
1720 #[stable(feature = "pointer_methods", since = "1.26.0")]
1722 pub unsafe fn sub(self, count: usize) -> Self
1725 self.offset((count as isize).wrapping_neg())
1728 /// Calculates the offset from a pointer using wrapping arithmetic.
1729 /// (convenience for `.wrapping_offset(count as isize)`)
1731 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1732 /// offset of `3 * size_of::<T>()` bytes.
1736 /// The resulting pointer does not need to be in bounds, but it is
1737 /// potentially hazardous to dereference (which requires `unsafe`).
1739 /// Always use `.add(count)` instead when possible, because `add`
1740 /// allows the compiler to optimize better.
1747 /// // Iterate using a raw pointer in increments of two elements
1748 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
1761 #[stable(feature = "pointer_methods", since = "1.26.0")]
1763 pub fn wrapping_add(self, count: usize) -> Self
1766 self.wrapping_offset(count as isize)
1769 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1772 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1773 /// offset of `3 * size_of::<T>()` bytes.
1777 /// The resulting pointer does not need to be in bounds, but it is
1778 /// potentially hazardous to dereference (which requires `unsafe`).
1780 /// Always use `.sub(count)` instead when possible, because `sub`
1781 /// allows the compiler to optimize better.
1788 /// // Iterate using a raw pointer in increments of two elements (backwards)
1789 /// let data = [1u8, 2, 3, 4, 5];
1790 /// let mut ptr: *const u8 = data.as_ptr();
1791 /// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
///
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
1802 #[stable(feature = "pointer_methods", since = "1.26.0")]
1804 pub fn wrapping_sub(self, count: usize) -> Self
1807 self.wrapping_offset((count as isize).wrapping_neg())
1810 /// Reads the value from `self` without moving it. This leaves the
1811 /// memory in `self` unchanged.
1815 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1816 /// moves the value out of `self` without preventing further usage of `self`.
1817 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1818 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1819 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1820 /// because it will attempt to drop the value previously at `*self`.
1822 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read(), 12);
/// }
1836 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read(self) -> T
    where T: Sized,
{
    read(self)
}
1844 /// Performs a volatile read of the value from `self` without moving it. This
1845 /// leaves the memory in `self` unchanged.
1847 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1848 /// to not be elided or reordered by the compiler across other volatile
1853 /// Rust does not currently have a rigorously and formally defined memory model,
1854 /// so the precise semantics of what "volatile" means here is subject to change
1855 /// over time. That being said, the semantics will almost always end up pretty
1856 /// similar to [C11's definition of volatile][c11].
1858 /// The compiler shouldn't change the relative order or number of volatile
1859 /// memory operations. However, volatile memory operations on zero-sized types
1860 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1861 /// and may be ignored.
1863 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1867 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1868 /// moves the value out of `self` without preventing further usage of `self`.
1869 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1870 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1871 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1872 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_volatile(), 12);
/// }
1886 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_volatile(self) -> T
    where T: Sized,
{
    read_volatile(self)
}
1894 /// Reads the value from `self` without moving it. This leaves the
1895 /// memory in `self` unchanged.
1897 /// Unlike `read`, the pointer may be unaligned.
1901 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1902 /// moves the value out of `self` without preventing further usage of `self`.
1903 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1904 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1905 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1906 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_unaligned(), 12);
/// }
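///
/// Reading from a byte buffer at an offset with no particular alignment,
/// as a sketch:
///
/// ```
/// let data = [0u8; 8];
/// // `data[1..]` is not guaranteed to be suitably aligned for `u32`.
/// let unaligned = data[1..].as_ptr() as *const u32;
///
/// let v = unsafe { unaligned.read_unaligned() };
/// assert_eq!(v, 0);
/// ```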
1920 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_unaligned(self) -> T
    where T: Sized,
{
    read_unaligned(self)
}
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1929 /// and destination may overlap.
1931 /// NOTE: this has the *same* argument order as `ptr::copy`.
1933 /// This is semantically equivalent to C's `memmove`.
1937 /// Care must be taken with the ownership of `self` and `dest`.
1938 /// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
1944 /// Efficiently create a Rust vector from an unsafe buffer:
1947 /// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
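///
/// A possible use of the sketch above; `from_buf_raw` is the example
/// helper defined here, not a std API:
///
/// ```
/// # unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
/// #     let mut dst = Vec::with_capacity(elts);
/// #     dst.set_len(elts);
/// #     ptr.copy_to(dst.as_mut_ptr(), elts);
/// #     dst
/// # }
/// let src = [1, 2, 3];
/// let copy = unsafe { from_buf_raw(src.as_ptr(), src.len()) };
/// assert_eq!(copy, [1, 2, 3]);
/// ```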
1955 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1964 /// and destination may *not* overlap.
1966 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1968 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1972 /// Beyond requiring that the program must be allowed to access both regions
1973 /// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1976 /// However it does not drop the contents of `dest`, or prevent the contents
1977 /// of `self` from being dropped or used.
1981 /// Efficiently create a Rust vector from an unsafe buffer:
1984 /// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
///     dst
/// }
1992 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(self, dest, count)
}
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2001 /// and destination may overlap.
2003 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
2005 /// This is semantically equivalent to C's `memmove`.
2009 /// Care must be taken with the ownership of `src` and `self`.
2010 /// This method semantically moves the values of `src` into `self`.
2011 /// However it does not drop the contents of `self`, or prevent the contents
2012 /// of `src` from being dropped or used.
2016 /// Efficiently create a Rust vector from an unsafe buffer:
2019 /// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst: Vec<T> = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     dst.as_mut_ptr().copy_from(ptr, elts);
///     dst
/// }
2027 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn copy_from(self, src: *const T, count: usize)
    where T: Sized,
{
    copy(src, self, count)
}
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2036 /// and destination may *not* overlap.
2038 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
2040 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
2044 /// Beyond requiring that the program must be allowed to access both regions
2045 /// of memory, it is Undefined Behavior for source and destination to
2046 /// overlap. Care must also be taken with the ownership of `src` and
2047 /// `self`. This method semantically moves the values of `src` into `self`.
2048 /// However it does not drop the contents of `self`, or prevent the contents
2049 /// of `src` from being dropped or used.
2053 /// Efficiently create a Rust vector from an unsafe buffer:
2056 /// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst: Vec<T> = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
///     dst
/// }
2064 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(src, self, count)
}
2072 /// Executes the destructor (if any) of the pointed-to value.
2074 /// This has two use cases:
2076 /// * It is *required* to use `drop_in_place` to drop unsized types like
2077 /// trait objects, because they can't be read out onto the stack and
2078 /// dropped normally.
2080 /// * It is friendlier to the optimizer to do this over `ptr::read` when
2081 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
2082 /// as the compiler doesn't need to prove that it's sound to elide the
2087 /// This has all the same safety problems as `ptr::read` with respect to
2088 /// invalid pointers, types, and double drops.
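///
/// A sketch of running a destructor in place and then re-initializing
/// the storage before it is used again:
///
/// ```
/// let mut s = String::from("in place");
/// let p = &mut s as *mut String;
///
/// unsafe {
///     // Run the destructor without reading the value onto the stack.
///     p.drop_in_place();
///     // The storage now holds an invalid `String`; overwrite it
///     // before `s` is used or dropped again.
///     p.write(String::new());
/// }
/// ```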
2089 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn drop_in_place(self) {
    drop_in_place(self)
}
2095 /// Overwrites a memory location with the given value without reading or
2096 /// dropping the old value.
2100 /// This operation is marked unsafe because it writes through a raw pointer.
2102 /// It does not drop the contents of `self`. This is safe, but it could leak
2103 /// allocations or resources, so care must be taken not to overwrite an object
2104 /// that should be dropped.
2106 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2107 /// location pointed to by `self`.
2109 /// This is appropriate for initializing uninitialized memory, or overwriting
2110 /// memory that has previously been `read` from.
2112 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write(z);
///     assert_eq!(y.read(), 12);
/// }
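///
/// A sketch of a `mem::swap`-like operation built from `read`, `copy_to`,
/// and `write`:
///
/// ```
/// fn swap<T>(a: &mut T, b: &mut T) {
///     let pa = a as *mut T;
///     let pb = b as *mut T;
///     unsafe {
///         // Move `*a` out without invalidating its storage.
///         let tmp = pa.read();
///         // Bitwise-copy `*b` over `*a`; nothing is dropped.
///         pb.copy_to(pa, 1);
///         // Move `tmp` into `*b` without dropping the duplicated bits.
///         pb.write(tmp);
///     }
/// }
///
/// let mut x = 1;
/// let mut y = 2;
/// swap(&mut x, &mut y);
/// assert_eq!((x, y), (2, 1));
/// ```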
2128 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write(self, val: T)
    where T: Sized,
{
    write(self, val)
}
2136 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2137 /// bytes of memory starting at `self` to `val`.
2142 /// let mut vec = vec![0; 4];
/// unsafe {
///     let vec_ptr = vec.as_mut_ptr();
///     vec_ptr.write_bytes(b'a', 2);
/// }
2147 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
2149 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write_bytes(self, val: u8, count: usize)
    where T: Sized,
{
    write_bytes(self, val, count)
}
2157 /// Performs a volatile write of a memory location with the given value without
2158 /// reading or dropping the old value.
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
2166 /// Rust does not currently have a rigorously and formally defined memory model,
2167 /// so the precise semantics of what "volatile" means here is subject to change
2168 /// over time. That being said, the semantics will almost always end up pretty
2169 /// similar to [C11's definition of volatile][c11].
2171 /// The compiler shouldn't change the relative order or number of volatile
2172 /// memory operations. However, volatile memory operations on zero-sized types
2173 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
2174 /// and may be ignored.
2176 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
2180 /// This operation is marked unsafe because it accepts a raw pointer.
2182 /// It does not drop the contents of `self`. This is safe, but it could leak
2183 /// allocations or resources, so care must be taken not to overwrite an object
2184 /// that should be dropped.
2186 /// This is appropriate for initializing uninitialized memory, or overwriting
2187 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_volatile(z);
///     assert_eq!(y.read_volatile(), 12);
/// }
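///
/// A hypothetical memory-mapped I/O sketch; the device address is made up
/// for illustration and must not be written on a real machine:
///
/// ```no_run
/// let ctrl = 0xFFFF_0004usize as *mut u32;
///
/// unsafe {
///     // The store is performed exactly once, even though the written
///     // value is never read back by the program.
///     ctrl.write_volatile(1);
/// }
/// ```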
2203 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write_volatile(self, val: T)
    where T: Sized,
{
    write_volatile(self, val)
}
2211 /// Overwrites a memory location with the given value without reading or
2212 /// dropping the old value.
2214 /// Unlike `write`, the pointer may be unaligned.
2218 /// This operation is marked unsafe because it writes through a raw pointer.
2220 /// It does not drop the contents of `self`. This is safe, but it could leak
2221 /// allocations or resources, so care must be taken not to overwrite an object
2222 /// that should be dropped.
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
2227 /// This is appropriate for initializing uninitialized memory, or overwriting
2228 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_unaligned(z);
///     assert_eq!(y.read_unaligned(), 12);
/// }
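///
/// Writing through a pointer with no particular alignment, as a sketch:
///
/// ```
/// let mut buf = [1u8; 8];
///
/// unsafe {
///     // `buf[1..]` is not guaranteed to be suitably aligned for `u32`.
///     (buf[1..].as_mut_ptr() as *mut u32).write_unaligned(0);
/// }
/// assert_eq!(buf, [1, 0, 0, 0, 0, 1, 1, 1]);
/// ```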
2244 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn write_unaligned(self, val: T)
    where T: Sized,
{
    write_unaligned(self, val)
}
2252 /// Replaces the value at `self` with `src`, returning the old
2253 /// value, without dropping either.
2257 /// This is only unsafe because it accepts a raw pointer.
2258 /// Otherwise, this operation is identical to `mem::replace`.
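///
/// # Examples
///
/// A small illustration:
///
/// ```
/// let mut x = 1;
/// let y = &mut x as *mut i32;
///
/// let old = unsafe { y.replace(2) };
/// assert_eq!(old, 1);
/// assert_eq!(x, 2);
/// ```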
2259 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn replace(self, src: T) -> T
    where T: Sized,
{
    replace(self, src)
}
2267 /// Swaps the values at two mutable locations of the same type, without
2268 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2269 /// otherwise equivalent.
/// This function copies the memory through the raw pointers passed to it
/// as arguments.
2276 /// Ensure that these pointers are valid before calling `swap`.
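///
/// # Examples
///
/// A small illustration:
///
/// ```
/// let mut a = [1, 2];
/// {
///     let (x, y) = a.split_at_mut(1);
///     unsafe { x.as_mut_ptr().swap(y.as_mut_ptr()); }
/// }
/// assert_eq!(a, [2, 1]);
/// ```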
2277 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn swap(self, with: *mut T)
    where T: Sized,
{
    swap(self, with)
}
2286 // Equality for pointers
2287 #[stable(feature = "rust1", since = "1.0.0")]
2288 impl<T: ?Sized> PartialEq for *const T {
2290 fn eq(&self, other: &*const T) -> bool { *self == *other }
2293 #[stable(feature = "rust1", since = "1.0.0")]
2294 impl<T: ?Sized> Eq for *const T {}
2296 #[stable(feature = "rust1", since = "1.0.0")]
2297 impl<T: ?Sized> PartialEq for *mut T {
2299 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2302 #[stable(feature = "rust1", since = "1.0.0")]
2303 impl<T: ?Sized> Eq for *mut T {}
2305 /// Compare raw pointers for equality.
2307 /// This is the same as using the `==` operator, but less generic:
2308 /// the arguments have to be `*const T` raw pointers,
2309 /// not anything that implements `PartialEq`.
2311 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2312 /// by their address rather than comparing the values they point to
2313 /// (which is what the `PartialEq for &T` implementation does).
/// let five = 5;
/// let other_five = 5;
2322 /// let five_ref = &five;
2323 /// let same_five_ref = &five;
2324 /// let other_five_ref = &other_five;
2326 /// assert!(five_ref == same_five_ref);
2327 /// assert!(five_ref == other_five_ref);
2329 /// assert!(ptr::eq(five_ref, same_five_ref));
2330 /// assert!(!ptr::eq(five_ref, other_five_ref));
2332 #[stable(feature = "ptr_eq", since = "1.17.0")]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    a == b
}
2338 // Impls for function pointers
2339 macro_rules! fnptr_impls_safety_abi {
2340 ($FnTy: ty, $($Arg: ident),*) => {
2341 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2342 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2344 fn eq(&self, other: &Self) -> bool {
2345 *self as usize == *other as usize
2349 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2350 impl<Ret, $($Arg),*> Eq for $FnTy {}
2352 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2353 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2355 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2356 (*self as usize).partial_cmp(&(*other as usize))
2360 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2361 impl<Ret, $($Arg),*> Ord for $FnTy {
2363 fn cmp(&self, other: &Self) -> Ordering {
2364 (*self as usize).cmp(&(*other as usize))
2368 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2369 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2370 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2371 state.write_usize(*self as usize)
2375 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2376 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2377 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2378 fmt::Pointer::fmt(&(*self as *const ()), f)
2382 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2383 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2384 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2385 fmt::Pointer::fmt(&(*self as *const ()), f)
2391 macro_rules! fnptr_impls_args {
2392 ($($Arg: ident),+) => {
2393 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2394 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2395 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2396 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2397 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2398 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2401 // No variadic functions with 0 parameters
2402 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2403 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2404 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2405 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2409 fnptr_impls_args! { }
2410 fnptr_impls_args! { A }
2411 fnptr_impls_args! { A, B }
2412 fnptr_impls_args! { A, B, C }
2413 fnptr_impls_args! { A, B, C, D }
2414 fnptr_impls_args! { A, B, C, D, E }
2415 fnptr_impls_args! { A, B, C, D, E, F }
2416 fnptr_impls_args! { A, B, C, D, E, F, G }
2417 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2418 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2419 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2420 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2421 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2423 // Comparison for pointers
2424 #[stable(feature = "rust1", since = "1.0.0")]
2425 impl<T: ?Sized> Ord for *const T {
fn cmp(&self, other: &*const T) -> Ordering {
    if self < other {
        Less
    } else if self == other {
        Equal
    } else {
        Greater
    }
}
2438 #[stable(feature = "rust1", since = "1.0.0")]
2439 impl<T: ?Sized> PartialOrd for *const T {
2441 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2442 Some(self.cmp(other))
2446 fn lt(&self, other: &*const T) -> bool { *self < *other }
2449 fn le(&self, other: &*const T) -> bool { *self <= *other }
2452 fn gt(&self, other: &*const T) -> bool { *self > *other }
2455 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2458 #[stable(feature = "rust1", since = "1.0.0")]
2459 impl<T: ?Sized> Ord for *mut T {
fn cmp(&self, other: &*mut T) -> Ordering {
    if self < other {
        Less
    } else if self == other {
        Equal
    } else {
        Greater
    }
}
2472 #[stable(feature = "rust1", since = "1.0.0")]
2473 impl<T: ?Sized> PartialOrd for *mut T {
2475 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2476 Some(self.cmp(other))
2480 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2483 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2486 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2489 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2492 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2493 /// of this wrapper owns the referent. Useful for building abstractions like
2494 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2496 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2497 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2498 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2499 /// the referent of the pointer should not be modified without a unique path to
2500 /// its owning Unique.
2502 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2503 /// consider using `NonNull`, which has weaker semantics.
2505 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2506 /// is never dereferenced. This is so that enums may use this forbidden value
2507 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2508 /// However the pointer may still dangle if it isn't dereferenced.
2510 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2511 /// for any type which upholds Unique's aliasing requirements.
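///
/// A sketch of how an owning container might hold its allocation through
/// `Unique`; `MyBox` is a hypothetical type for illustration, a real
/// container would also manage the allocation itself, and the example is
/// nightly-only since `Unique` is unstable:
///
/// ```
/// #![feature(ptr_internals)]
/// use core::ptr::Unique;
///
/// struct MyBox<T> {
///     ptr: Unique<T>,
/// }
///
/// impl<T> MyBox<T> {
///     /// Safety: `raw` must be non-null, properly aligned, and uniquely owned.
///     unsafe fn from_raw(raw: *mut T) -> MyBox<T> {
///         MyBox { ptr: Unique::new_unchecked(raw) }
///     }
/// }
/// ```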
2512 #[unstable(feature = "ptr_internals", issue = "0",
2513 reason = "use NonNull instead and consider PhantomData<T> \
2514 (if you also use #[may_dangle]), Send, and/or Sync")]
2515 #[allow(deprecated)]
2516 pub struct Unique<T: ?Sized> {
2517 pointer: NonZero<*const T>,
2518 // NOTE: this marker has no consequences for variance, but is necessary
2519 // for dropck to understand that we logically own a `T`.
2521 // For details, see:
2522 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2523 _marker: PhantomData<T>,
2526 #[unstable(feature = "ptr_internals", issue = "0")]
2527 impl<T: ?Sized> fmt::Debug for Unique<T> {
2528 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2529 fmt::Pointer::fmt(&self.as_ptr(), f)
2533 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2534 /// reference is unaliased. Note that this aliasing invariant is
2535 /// unenforced by the type system; the abstraction using the
2536 /// `Unique` must enforce it.
2537 #[unstable(feature = "ptr_internals", issue = "0")]
2538 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2540 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2541 /// reference is unaliased. Note that this aliasing invariant is
2542 /// unenforced by the type system; the abstraction using the
2543 /// `Unique` must enforce it.
2544 #[unstable(feature = "ptr_internals", issue = "0")]
2545 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2547 #[unstable(feature = "ptr_internals", issue = "0")]
2548 impl<T: Sized> Unique<T> {
2549 /// Creates a new `Unique` that is dangling, but well-aligned.
2551 /// This is useful for initializing types which lazily allocate, like
2552 /// `Vec::new` does.
2553 // FIXME: rename to dangling() to match NonNull?
pub const fn empty() -> Self {
    unsafe {
        Unique::new_unchecked(mem::align_of::<T>() as *mut T)
    }
}
2561 #[unstable(feature = "ptr_internals", issue = "0")]
2562 #[allow(deprecated)]
2563 impl<T: ?Sized> Unique<T> {
2564 /// Creates a new `Unique`.
2568 /// `ptr` must be non-null.
2569 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2570 Unique { pointer: NonZero(ptr as _), _marker: PhantomData }
2573 /// Creates a new `Unique` if `ptr` is non-null.
pub fn new(ptr: *mut T) -> Option<Self> {
    if !ptr.is_null() {
        Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData })
    } else {
        None
    }
}
2582 /// Acquires the underlying `*mut` pointer.
2583 pub fn as_ptr(self) -> *mut T {
2584 self.pointer.0 as *mut T
2587 /// Dereferences the content.
2589 /// The resulting lifetime is bound to self so this behaves "as if"
2590 /// it were actually an instance of T that is getting borrowed. If a longer
2591 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
pub unsafe fn as_ref(&self) -> &T {
    &*self.as_ptr()
}
2596 /// Mutably dereferences the content.
2598 /// The resulting lifetime is bound to self so this behaves "as if"
2599 /// it were actually an instance of T that is getting borrowed. If a longer
2600 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
pub unsafe fn as_mut(&mut self) -> &mut T {
    &mut *self.as_ptr()
}
2606 #[unstable(feature = "ptr_internals", issue = "0")]
2607 impl<T: ?Sized> Clone for Unique<T> {
fn clone(&self) -> Self {
    *self
}
2613 #[unstable(feature = "ptr_internals", issue = "0")]
2614 impl<T: ?Sized> Copy for Unique<T> { }
2616 #[unstable(feature = "ptr_internals", issue = "0")]
2617 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2619 #[unstable(feature = "ptr_internals", issue = "0")]
2620 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2621 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2622 fmt::Pointer::fmt(&self.as_ptr(), f)
2626 #[unstable(feature = "ptr_internals", issue = "0")]
2627 #[allow(deprecated)]
2628 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2629 fn from(reference: &'a mut T) -> Self {
2630 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2634 #[unstable(feature = "ptr_internals", issue = "0")]
2635 #[allow(deprecated)]
2636 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2637 fn from(reference: &'a T) -> Self {
2638 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2642 #[unstable(feature = "ptr_internals", issue = "0")]
2643 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2644 fn from(p: NonNull<T>) -> Self {
2645 Unique { pointer: p.pointer, _marker: PhantomData }
2649 /// `*mut T` but non-zero and covariant.
2651 /// This is often the correct thing to use when building data structures using
2652 /// raw pointers, but is ultimately more dangerous to use because of its additional
2653 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2655 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2656 /// is never dereferenced. This is so that enums may use this forbidden value
2657 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
2658 /// However the pointer may still dangle if it isn't dereferenced.
2660 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2661 /// for your use case, you should include some PhantomData in your type to
2662 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2663 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2664 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2665 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
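///
/// # Examples
///
/// Wrapping a non-null pointer and reading it back, plus the size
/// guarantee mentioned above:
///
/// ```
/// use std::mem::size_of;
/// use std::ptr::NonNull;
///
/// let mut x = 7u32;
/// let ptr = NonNull::new(&mut x as *mut u32).expect("pointer is null");
/// assert_eq!(unsafe { *ptr.as_ref() }, 7);
///
/// // A null pointer is rejected.
/// assert!(NonNull::<u32>::new(std::ptr::null_mut()).is_none());
///
/// // The non-null niche makes `Option<NonNull<T>>` pointer-sized.
/// assert_eq!(size_of::<Option<NonNull<u32>>>(), size_of::<*mut u32>());
/// ```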
2666 #[stable(feature = "nonnull", since = "1.25.0")]
2667 pub struct NonNull<T: ?Sized> {
2668 #[allow(deprecated)] pointer: NonZero<*const T>,
2671 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2672 // NB: This impl is unnecessary, but should provide better error messages.
2673 #[stable(feature = "nonnull", since = "1.25.0")]
2674 impl<T: ?Sized> !Send for NonNull<T> { }
2676 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2677 // NB: This impl is unnecessary, but should provide better error messages.
2678 #[stable(feature = "nonnull", since = "1.25.0")]
2679 impl<T: ?Sized> !Sync for NonNull<T> { }
2681 impl<T: Sized> NonNull<T> {
2682 /// Creates a new `NonNull` that is dangling, but well-aligned.
2684 /// This is useful for initializing types which lazily allocate, like
2685 /// `Vec::new` does.
2686 #[stable(feature = "nonnull", since = "1.25.0")]
pub fn dangling() -> Self {
    unsafe {
        let ptr = mem::align_of::<T>() as *mut T;
        NonNull::new_unchecked(ptr)
    }
}
2695 #[allow(deprecated)]
2696 impl<T: ?Sized> NonNull<T> {
2697 /// Creates a new `NonNull`.
2701 /// `ptr` must be non-null.
2702 #[stable(feature = "nonnull", since = "1.25.0")]
2703 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2704 NonNull { pointer: NonZero(ptr as _) }
2707 /// Creates a new `NonNull` if `ptr` is non-null.
2708 #[stable(feature = "nonnull", since = "1.25.0")]
pub fn new(ptr: *mut T) -> Option<Self> {
    if !ptr.is_null() {
        Some(NonNull { pointer: NonZero(ptr as _) })
    } else {
        None
    }
}
2717 /// Acquires the underlying `*mut` pointer.
2718 #[stable(feature = "nonnull", since = "1.25.0")]
2719 pub fn as_ptr(self) -> *mut T {
2720 self.pointer.0 as *mut T
2723 /// Dereferences the content.
2725 /// The resulting lifetime is bound to self so this behaves "as if"
2726 /// it were actually an instance of T that is getting borrowed. If a longer
2727 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2728 #[stable(feature = "nonnull", since = "1.25.0")]
pub unsafe fn as_ref(&self) -> &T {
    &*self.as_ptr()
}
2733 /// Mutably dereferences the content.
2735 /// The resulting lifetime is bound to self so this behaves "as if"
2736 /// it were actually an instance of T that is getting borrowed. If a longer
2737 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2738 #[stable(feature = "nonnull", since = "1.25.0")]
pub unsafe fn as_mut(&mut self) -> &mut T {
    &mut *self.as_ptr()
}
2743 /// Cast to a pointer of another type
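///
/// For example, erasing the pointee type while keeping the address,
/// as a sketch:
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut x = 7i32;
/// let p = NonNull::from(&mut x);
/// let erased: NonNull<u8> = p.cast();
/// assert_eq!(erased.as_ptr() as usize, p.as_ptr() as usize);
/// ```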
2744 #[stable(feature = "nonnull_cast", since = "1.27.0")]
pub fn cast<U>(self) -> NonNull<U> {
    unsafe {
        NonNull::new_unchecked(self.as_ptr() as *mut U)
    }
}
2751 /// Cast to an `Opaque` pointer
2752 #[unstable(feature = "allocator_api", issue = "32838")]
pub fn as_opaque(self) -> NonNull<::alloc::Opaque> {
    unsafe {
        NonNull::new_unchecked(self.as_ptr() as _)
    }
}
2760 #[stable(feature = "nonnull", since = "1.25.0")]
2761 impl<T: ?Sized> Clone for NonNull<T> {
fn clone(&self) -> Self {
    *self
}
2767 #[stable(feature = "nonnull", since = "1.25.0")]
2768 impl<T: ?Sized> Copy for NonNull<T> { }
2770 #[unstable(feature = "coerce_unsized", issue = "27732")]
2771 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2773 #[stable(feature = "nonnull", since = "1.25.0")]
2774 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2775 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2776 fmt::Pointer::fmt(&self.as_ptr(), f)
2780 #[stable(feature = "nonnull", since = "1.25.0")]
2781 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2782 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2783 fmt::Pointer::fmt(&self.as_ptr(), f)
2787 #[stable(feature = "nonnull", since = "1.25.0")]
2788 impl<T: ?Sized> Eq for NonNull<T> {}
2790 #[stable(feature = "nonnull", since = "1.25.0")]
2791 impl<T: ?Sized> PartialEq for NonNull<T> {
2792 fn eq(&self, other: &Self) -> bool {
2793 self.as_ptr() == other.as_ptr()
2797 #[stable(feature = "nonnull", since = "1.25.0")]
2798 impl<T: ?Sized> Ord for NonNull<T> {
2799 fn cmp(&self, other: &Self) -> Ordering {
2800 self.as_ptr().cmp(&other.as_ptr())
2804 #[stable(feature = "nonnull", since = "1.25.0")]
2805 impl<T: ?Sized> PartialOrd for NonNull<T> {
2806 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2807 self.as_ptr().partial_cmp(&other.as_ptr())
2811 #[stable(feature = "nonnull", since = "1.25.0")]
2812 impl<T: ?Sized> hash::Hash for NonNull<T> {
2813 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2814 self.as_ptr().hash(state)
2818 #[unstable(feature = "ptr_internals", issue = "0")]
2819 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2820 fn from(unique: Unique<T>) -> Self {
2821 NonNull { pointer: unique.pointer }
2825 #[stable(feature = "nonnull", since = "1.25.0")]
2826 #[allow(deprecated)]
2827 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
2828 fn from(reference: &'a mut T) -> Self {
2829 NonNull { pointer: NonZero(reference as _) }
2833 #[stable(feature = "nonnull", since = "1.25.0")]
2834 #[allow(deprecated)]
2835 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
2836 fn from(reference: &'a T) -> Self {
2837 NonNull { pointer: NonZero(reference as _) }