1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
13 //! Raw, unsafe pointers, `*const T`, and `*mut T`.
15 //! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
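//!
//! # Examples
//!
//! A minimal sketch of the re-exported `copy_nonoverlapping` (the
//! `memcpy`-like primitive; the two regions must not overlap):
//!
//! ```
//! use std::ptr;
//!
//! let src = [1u8, 2, 3, 4];
//! let mut dst = [0u8; 4];
//!
//! unsafe {
//!     // Copy four `u8` elements from `src` into `dst`; the arrays are
//!     // distinct, so the non-overlapping requirement is satisfied.
//!     ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 4);
//! }
//! assert_eq!(src, dst);
//! ```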
17 #![stable(feature = "rust1", since = "1.0.0")]
21 use ops::CoerceUnsized;
24 use marker::{PhantomData, Unsize};
28 use cmp::Ordering::{self, Less, Equal, Greater};
30 #[stable(feature = "rust1", since = "1.0.0")]
31 pub use intrinsics::copy_nonoverlapping;
33 #[stable(feature = "rust1", since = "1.0.0")]
34 pub use intrinsics::copy;
36 #[stable(feature = "rust1", since = "1.0.0")]
37 pub use intrinsics::write_bytes;
39 /// Executes the destructor (if any) of the pointed-to value.
41 /// This has two use cases:
43 /// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and
/// dropped normally.
47 /// * It is friendlier to the optimizer to do this over `ptr::read` when
48 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// copy.
54 /// This has all the same safety problems as `ptr::read` with respect to
55 /// invalid pointers, types, and double drops.
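///
/// # Examples
///
/// An illustrative sketch: drop the last element of a `Vec` in place after
/// detaching it by shrinking the length, so it is not dropped twice.
///
/// ```
/// use std::ptr;
///
/// let mut v = vec![String::from("Hello"), String::from("World")];
///
/// unsafe {
///     // Pointer to the second element.
///     let last = v.as_mut_ptr().offset(1);
///     // Shrink the length first so the element is not dropped again
///     // when the `Vec` itself is dropped.
///     v.set_len(1);
///     // Run the `String` destructor in place.
///     ptr::drop_in_place(last);
/// }
/// assert_eq!(v.len(), 1);
/// ```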
56 #[stable(feature = "drop_in_place", since = "1.8.0")]
57 #[lang = "drop_in_place"]
58 #[allow(unconditional_recursion)]
59 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
60 // Code here does not matter - this is replaced by the
61 // real drop glue by the compiler.
drop_in_place(to_drop);
}
65 /// Creates a null raw pointer.
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
73 /// assert!(p.is_null());
76 #[stable(feature = "rust1", since = "1.0.0")]
77 pub const fn null<T>() -> *const T { 0 as *const T }
79 /// Creates a null mutable raw pointer.
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
87 /// assert!(p.is_null());
90 #[stable(feature = "rust1", since = "1.0.0")]
91 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
93 /// Swaps the values at two mutable locations of the same type, without
94 /// deinitializing either.
96 /// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
97 /// is otherwise equivalent. If the values do overlap, then the overlapping
98 /// region of memory from `x` will be used. This is demonstrated in the
99 /// examples section below.
/// This function copies the memory through the raw pointers passed to it
/// as both `*mut T`.
106 /// Ensure that these pointers are valid before calling `swap`.
110 /// Swapping two non-overlapping regions:
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
117 /// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
126 /// Swapping two overlapping regions:
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
133 /// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
/// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([1, 0, 1, 2], array);
/// }
142 #[stable(feature = "rust1", since = "1.0.0")]
143 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
144 // Give ourselves some scratch space to work with
145 let mut tmp: T = mem::uninitialized();
148 copy_nonoverlapping(x, &mut tmp, 1);
149 copy(y, x, 1); // `x` and `y` may overlap
150 copy_nonoverlapping(&tmp, y, 1);
152 // y and t now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
157 /// Swaps a sequence of values at two mutable locations of the same type.
161 /// The two arguments must each point to the beginning of `count` locations
162 /// of valid memory, and the two memory ranges must not overlap.
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
172 /// let mut y = [7, 8, 9];
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
178 /// assert_eq!(x, [7, 8, 3, 4]);
179 /// assert_eq!(y, [1, 2, 9]);
182 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
183 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
184 let x = x as *mut u8;
185 let y = y as *mut u8;
186 let len = mem::size_of::<T>() * count;
swap_nonoverlapping_bytes(x, y, len)
}
191 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
// The approach here is to utilize SIMD to swap x & y efficiently. Testing reveals
// that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
// Haswell E processors. LLVM is more able to optimize if we give a struct a
195 // #[repr(simd)], even if we don't actually use this struct directly.
197 // FIXME repr(simd) broken on emscripten and redox
198 // It's also broken on big-endian powerpc64 and s390x. #42778
199 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                   target_endian = "big")),
           repr(simd))]
struct Block(u64, u64, u64, u64);
203 struct UnalignedBlock(u64, u64, u64, u64);
205 let block_size = mem::size_of::<Block>();
207 // Loop through x & y, copying them `Block` at a time
208 // The optimizer should unroll the loop fully for most types
209 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
let mut i = 0;
while i + block_size <= len {
212 // Create some uninitialized memory as scratch space
213 // Declaring `t` here avoids aligning the stack when this loop is unused
214 let mut t: Block = mem::uninitialized();
215 let t = &mut t as *mut _ as *mut u8;
216 let x = x.offset(i as isize);
217 let y = y.offset(i as isize);
219 // Swap a block of bytes of x & y, using t as a temporary buffer
220 // This should be optimized into efficient SIMD operations where available
221 copy_nonoverlapping(x, t, block_size);
222 copy_nonoverlapping(y, x, block_size);
223 copy_nonoverlapping(t, y, block_size);
    i += block_size;
}

if i < len {
    // Swap any remaining bytes
    let mut t: UnalignedBlock = mem::uninitialized();
    let rem = len - i;

    let t = &mut t as *mut _ as *mut u8;
    let x = x.offset(i as isize);
    let y = y.offset(i as isize);

    copy_nonoverlapping(x, t, rem);
    copy_nonoverlapping(y, x, rem);
    copy_nonoverlapping(t, y, rem);
}
}
242 /// Replaces the value at `dest` with `src`, returning the old
243 /// value, without dropping either.
247 /// This is only unsafe because it accepts a raw pointer.
248 /// Otherwise, this operation is identical to `mem::replace`.
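///
/// # Examples
///
/// A minimal sketch: swap a new value in through a raw pointer and receive
/// the old one back.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("old");
/// let p: *mut String = &mut s;
///
/// let old = unsafe { ptr::replace(p, String::from("new")) };
/// assert_eq!(old, "old");
/// assert_eq!(s, "new");
/// ```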
250 #[stable(feature = "rust1", since = "1.0.0")]
251 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
256 /// Reads the value from `src` without moving it. This leaves the
257 /// memory in `src` unchanged.
261 /// Beyond accepting a raw pointer, this is unsafe because it semantically
262 /// moves the value out of `src` without preventing further usage of `src`.
263 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
264 /// `src` is not used before the data is overwritten again (e.g. with `write`,
265 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
266 /// because it will attempt to drop the value previously at `*src`.
268 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
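///
/// Since `read` duplicates the value bitwise, a non-`Copy` value must not
/// be left to be dropped twice. A minimal sketch of the usual pattern:
/// move a value out with `read`, then forget the original so only one copy
/// is ever dropped.
///
/// ```
/// use std::{mem, ptr};
///
/// let s = String::from("hello");
///
/// let moved = unsafe {
///     // Bitwise-copy the string out of `s`...
///     let out = ptr::read(&s);
///     // ...and forget `s`, so its destructor never runs on the old bits.
///     mem::forget(s);
///     out
/// };
/// assert_eq!(moved, "hello");
/// ```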
283 #[stable(feature = "rust1", since = "1.0.0")]
284 pub unsafe fn read<T>(src: *const T) -> T {
285 let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
290 /// Reads the value from `src` without moving it. This leaves the
291 /// memory in `src` unchanged.
293 /// Unlike `read`, the pointer may be unaligned.
297 /// Beyond accepting a raw pointer, this is unsafe because it semantically
298 /// moves the value out of `src` without preventing further usage of `src`.
299 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
300 /// `src` is not used before the data is overwritten again (e.g. with `write`,
301 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
302 /// because it will attempt to drop the value previously at `*src`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
317 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
318 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
319 let mut tmp: T = mem::uninitialized();
320 copy_nonoverlapping(src as *const u8,
321 &mut tmp as *mut T as *mut u8,
                    mem::size_of::<T>());
tmp
}
326 /// Overwrites a memory location with the given value without reading or
327 /// dropping the old value.
331 /// This operation is marked unsafe because it accepts a raw pointer.
333 /// It does not drop the contents of `dst`. This is safe, but it could leak
334 /// allocations or resources, so care must be taken not to overwrite an object
335 /// that should be dropped.
337 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
338 /// location pointed to by `dst`.
340 /// This is appropriate for initializing uninitialized memory, or overwriting
341 /// memory that has previously been `read` from.
343 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
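///
/// A hedged sketch of the "initializing uninitialized memory" use mentioned
/// above, using this era's `mem::uninitialized`: build a value in an
/// uninitialized slot without reading or dropping the garbage old contents.
///
/// ```
/// use std::{mem, ptr};
///
/// let v: Vec<i32> = unsafe {
///     // Uninitialized scratch space; reading it before `write` would be UB.
///     let mut slot: Vec<i32> = mem::uninitialized();
///     // Initialize the slot without dropping the (garbage) old value.
///     ptr::write(&mut slot, vec![1, 2, 3]);
///     slot
/// };
/// assert_eq!(v, [1, 2, 3]);
/// ```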
360 #[stable(feature = "rust1", since = "1.0.0")]
361 pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
365 /// Overwrites a memory location with the given value without reading or
366 /// dropping the old value.
368 /// Unlike `write`, the pointer may be unaligned.
372 /// This operation is marked unsafe because it accepts a raw pointer.
374 /// It does not drop the contents of `dst`. This is safe, but it could leak
375 /// allocations or resources, so care must be taken not to overwrite an object
376 /// that should be dropped.
378 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
379 /// location pointed to by `dst`.
381 /// This is appropriate for initializing uninitialized memory, or overwriting
382 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_unaligned(y, z);
///     assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
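///
/// An illustrative sketch of where unaligned access actually arises:
/// writing a `u32` at an odd offset into a byte buffer.
///
/// ```
/// use std::ptr;
///
/// let mut buf = [0u8; 5];
///
/// unsafe {
///     // `buf[1..]` is not suitably aligned for `u32`, so plain
///     // `write`/`read` would not be allowed here.
///     let p = buf.as_mut_ptr().offset(1) as *mut u32;
///     ptr::write_unaligned(p, 0x0102_0304);
///     assert_eq!(ptr::read_unaligned(p), 0x0102_0304);
/// }
/// ```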
399 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
400 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
copy_nonoverlapping(&src as *const T as *const u8,
                    dst as *mut u8,
                    mem::size_of::<T>());
mem::forget(src);
}
407 /// Performs a volatile read of the value from `src` without moving it. This
408 /// leaves the memory in `src` unchanged.
410 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
416 /// Rust does not currently have a rigorously and formally defined memory model,
417 /// so the precise semantics of what "volatile" means here is subject to change
418 /// over time. That being said, the semantics will almost always end up pretty
419 /// similar to [C11's definition of volatile][c11].
421 /// The compiler shouldn't change the relative order or number of volatile
422 /// memory operations. However, volatile memory operations on zero-sized types
423 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
424 /// and may be ignored.
426 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
430 /// Beyond accepting a raw pointer, this is unsafe because it semantically
431 /// moves the value out of `src` without preventing further usage of `src`.
432 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
433 /// `src` is not used before the data is overwritten again (e.g. with `write`,
434 /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
435 /// because it will attempt to drop the value previously at `*src`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
450 #[stable(feature = "volatile", since = "1.9.0")]
451 pub unsafe fn read_volatile<T>(src: *const T) -> T {
intrinsics::volatile_load(src)
}
455 /// Performs a volatile write of a memory location with the given value without
456 /// reading or dropping the old value.
458 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
464 /// Rust does not currently have a rigorously and formally defined memory model,
465 /// so the precise semantics of what "volatile" means here is subject to change
466 /// over time. That being said, the semantics will almost always end up pretty
467 /// similar to [C11's definition of volatile][c11].
469 /// The compiler shouldn't change the relative order or number of volatile
470 /// memory operations. However, volatile memory operations on zero-sized types
471 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
472 /// and may be ignored.
474 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
478 /// This operation is marked unsafe because it accepts a raw pointer.
480 /// It does not drop the contents of `dst`. This is safe, but it could leak
481 /// allocations or resources, so care must be taken not to overwrite an object
482 /// that should be dropped.
484 /// This is appropriate for initializing uninitialized memory, or overwriting
485 /// memory that has previously been `read` from.
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
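///
/// A heavily hedged sketch of the motivating use, memory-mapped I/O; the
/// register address below is hypothetical, so the example is never run.
///
/// ```no_run
/// use std::ptr;
///
/// // Hypothetical address of a device status register.
/// const STATUS: *mut u32 = 0x1000_0000 as *mut u32;
///
/// unsafe {
///     // The compiler must emit exactly one store here and may not
///     // reorder it across other volatile operations.
///     ptr::write_volatile(STATUS, 1);
/// }
/// ```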
502 #[stable(feature = "volatile", since = "1.9.0")]
503 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
intrinsics::volatile_store(dst, src);
}
507 #[lang = "const_ptr"]
508 impl<T: ?Sized> *const T {
509 /// Returns `true` if the pointer is null.
511 /// Note that unsized types have many possible null pointers, as only the
512 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
521 /// let s: &str = "Follow the rabbit";
522 /// let ptr: *const u8 = s.as_ptr();
523 /// assert!(!ptr.is_null());
525 #[stable(feature = "rust1", since = "1.0.0")]
527 pub fn is_null(self) -> bool {
528 // Compare via a cast to a thin pointer, so fat pointers are only
529 // considering their "data" part for null-ness.
(self as *const u8) == null()
}
533 /// Returns `None` if the pointer is null, or else returns a reference to
534 /// the value wrapped in `Some`.
538 /// While this method and its mutable counterpart are useful for
539 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
543 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
544 /// not necessarily reflect the actual lifetime of the data.
551 /// let ptr: *const u8 = &10u8 as *const u8;
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
559 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}
569 /// Calculates the offset from a pointer.
571 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
572 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
579 /// * Both the starting and resulting pointer must be either in bounds or one
580 /// byte past the end of an allocated object.
582 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
584 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
588 /// never reach a size where an offset is a concern. For instance, `Vec`
589 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
590 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
592 /// Most platforms fundamentally can't even construct such an allocation.
593 /// For instance, no known 64-bit platform can ever serve a request
594 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
595 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
596 /// more than `isize::MAX` bytes with things like Physical Address
597 /// Extension. As such, memory acquired directly from allocators or memory
598 /// mapped files *may* be too large to handle with this function.
600 /// Consider using `wrapping_offset` instead if these constraints are
601 /// difficult to satisfy. The only advantage of this method is that it
602 /// enables more aggressive compiler optimizations.
609 /// let s: &str = "123";
610 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.offset(1) as char);
///     println!("{}", *ptr.offset(2) as char);
/// }
617 #[stable(feature = "rust1", since = "1.0.0")]
619 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
intrinsics::offset(self, count)
}
623 /// Calculates the offset from a pointer using wrapping arithmetic.
625 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
626 /// offset of `3 * size_of::<T>()` bytes.
630 /// The resulting pointer does not need to be in bounds, but it is
631 /// potentially hazardous to dereference (which requires `unsafe`).
633 /// Always use `.offset(count)` instead when possible, because `offset`
634 /// allows the compiler to optimize better.
641 /// // Iterate using a raw pointer in increments of two elements
642 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_offset(step);
/// }
655 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
657 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
    unsafe {
        intrinsics::arith_offset(self, count)
    }
}
663 /// Calculates the distance between two pointers. The returned value is in
664 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
670 /// This function returns `None` if `T` is a zero-sized type.
677 /// #![feature(offset_to)]
678 /// #![allow(deprecated)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
683 /// let ptr2: *const i32 = &a[3];
684 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
685 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
686 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
687 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
690 #[unstable(feature = "offset_to", issue = "41079")]
691 #[rustc_deprecated(since = "1.27.0", reason = "Replaced by `wrapping_offset_from`, with the \
692 opposite argument order. If you're writing unsafe code, consider `offset_from`.")]
694 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
695 let size = mem::size_of::<T>();
if size == 0 {
    None
} else {
    Some(other.wrapping_offset_from(self))
}
}
703 /// Calculates the distance between two pointers. The returned value is in
704 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
706 /// This function is the inverse of [`offset`].
708 /// [`offset`]: #method.offset
709 /// [`wrapping_offset_from`]: #method.wrapping_offset_from
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
716 /// * Both the starting and other pointer must be either in bounds or one
717 /// byte past the end of the same allocated object.
719 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
721 /// * The distance between the pointers, in bytes, must be an exact multiple
722 /// of the size of `T`.
724 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
726 /// The compiler and standard library generally try to ensure allocations
727 /// never reach a size where an offset is a concern. For instance, `Vec`
728 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
729 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
731 /// Most platforms fundamentally can't even construct such an allocation.
732 /// For instance, no known 64-bit platform can ever serve a request
733 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
734 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
735 /// more than `isize::MAX` bytes with things like Physical Address
736 /// Extension. As such, memory acquired directly from allocators or memory
737 /// mapped files *may* be too large to handle with this function.
739 /// Consider using [`wrapping_offset_from`] instead if these constraints are
740 /// difficult to satisfy. The only advantage of this method is that it
741 /// enables more aggressive compiler optimizations.
745 /// This function panics if `T` is a Zero-Sized Type ("ZST").
752 /// #![feature(ptr_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
756 /// let ptr2: *const i32 = &a[3];
/// unsafe {
///     assert_eq!(ptr2.offset_from(ptr1), 2);
///     assert_eq!(ptr1.offset_from(ptr2), -2);
///     assert_eq!(ptr1.offset(2), ptr2);
///     assert_eq!(ptr2.offset(-2), ptr1);
/// }
764 #[unstable(feature = "ptr_offset_from", issue = "41079")]
766 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
767 let pointee_size = mem::size_of::<T>();
768 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
770 // This is the same sequence that Clang emits for pointer subtraction.
771 // It can be neither `nsw` nor `nuw` because the input is treated as
772 // unsigned but then the output is treated as signed, so neither works.
773 let d = isize::wrapping_sub(self as _, origin as _);
intrinsics::exact_div(d, pointee_size as _)
}
777 /// Calculates the distance between two pointers. The returned value is in
778 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
784 /// Though this method is safe for any two pointers, note that its result
785 /// will be mostly useless if the two pointers aren't into the same allocated
786 /// object, for example if they point to two different local variables.
790 /// This function panics if `T` is a zero-sized type.
797 /// #![feature(ptr_wrapping_offset_from)]
/// let a = [0; 5];
/// let ptr1: *const i32 = &a[1];
801 /// let ptr2: *const i32 = &a[3];
802 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
803 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
804 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
805 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
807 /// let ptr1: *const i32 = 3 as _;
808 /// let ptr2: *const i32 = 13 as _;
809 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
811 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
813 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
814 let pointee_size = mem::size_of::<T>();
815 assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);
817 let d = isize::wrapping_sub(self as _, origin as _);
d.wrapping_div(pointee_size as _)
}
821 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
823 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
824 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
831 /// * Both the starting and resulting pointer must be either in bounds or one
832 /// byte past the end of an allocated object.
834 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
836 /// * The offset being in bounds cannot rely on "wrapping around" the address
837 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
840 /// never reach a size where an offset is a concern. For instance, `Vec`
841 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
842 /// `vec.as_ptr().add(vec.len())` is always safe.
844 /// Most platforms fundamentally can't even construct such an allocation.
845 /// For instance, no known 64-bit platform can ever serve a request
846 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
847 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
848 /// more than `isize::MAX` bytes with things like Physical Address
849 /// Extension. As such, memory acquired directly from allocators or memory
850 /// mapped files *may* be too large to handle with this function.
852 /// Consider using `wrapping_offset` instead if these constraints are
853 /// difficult to satisfy. The only advantage of this method is that it
854 /// enables more aggressive compiler optimizations.
861 /// let s: &str = "123";
862 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.add(1) as char);
///     println!("{}", *ptr.add(2) as char);
/// }
869 #[stable(feature = "pointer_methods", since = "1.26.0")]
871 pub unsafe fn add(self, count: usize) -> Self
    where T: Sized,
{
    self.offset(count as isize)
}
877 /// Calculates the offset from a pointer (convenience for
878 /// `.offset((count as isize).wrapping_neg())`).
880 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
881 /// offset of `3 * size_of::<T>()` bytes.
885 /// If any of the following conditions are violated, the result is Undefined
888 /// * Both the starting and resulting pointer must be either in bounds or one
889 /// byte past the end of an allocated object.
891 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
893 /// * The offset being in bounds cannot rely on "wrapping around" the address
894 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
897 /// never reach a size where an offset is a concern. For instance, `Vec`
898 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
899 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
901 /// Most platforms fundamentally can't even construct such an allocation.
902 /// For instance, no known 64-bit platform can ever serve a request
903 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
904 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
905 /// more than `isize::MAX` bytes with things like Physical Address
906 /// Extension. As such, memory acquired directly from allocators or memory
907 /// mapped files *may* be too large to handle with this function.
909 /// Consider using `wrapping_offset` instead if these constraints are
910 /// difficult to satisfy. The only advantage of this method is that it
911 /// enables more aggressive compiler optimizations.
918 /// let s: &str = "123";
/// unsafe {
///     let end: *const u8 = s.as_ptr().add(3);
///     println!("{}", *end.sub(1) as char);
///     println!("{}", *end.sub(2) as char);
/// }
926 #[stable(feature = "pointer_methods", since = "1.26.0")]
928 pub unsafe fn sub(self, count: usize) -> Self
    where T: Sized,
{
    self.offset((count as isize).wrapping_neg())
}
934 /// Calculates the offset from a pointer using wrapping arithmetic.
935 /// (convenience for `.wrapping_offset(count as isize)`)
937 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
938 /// offset of `3 * size_of::<T>()` bytes.
942 /// The resulting pointer does not need to be in bounds, but it is
943 /// potentially hazardous to dereference (which requires `unsafe`).
945 /// Always use `.add(count)` instead when possible, because `add`
946 /// allows the compiler to optimize better.
953 /// // Iterate using a raw pointer in increments of two elements
954 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
967 #[stable(feature = "pointer_methods", since = "1.26.0")]
969 pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}
975 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
978 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
979 /// offset of `3 * size_of::<T>()` bytes.
983 /// The resulting pointer does not need to be in bounds, but it is
984 /// potentially hazardous to dereference (which requires `unsafe`).
986 /// Always use `.sub(count)` instead when possible, because `sub`
987 /// allows the compiler to optimize better.
994 /// // Iterate using a raw pointer in increments of two elements (backwards)
995 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
///
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
1008 #[stable(feature = "pointer_methods", since = "1.26.0")]
1010 pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}
1016 /// Reads the value from `self` without moving it. This leaves the
1017 /// memory in `self` unchanged.
1021 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1022 /// moves the value out of `self` without preventing further usage of `self`.
1023 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1024 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1025 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1026 /// because it will attempt to drop the value previously at `*self`.
1028 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read(), 12);
/// }
1042 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read(self) -> T
    where T: Sized,
{
    read(self)
}
1050 /// Performs a volatile read of the value from `self` without moving it. This
1051 /// leaves the memory in `self` unchanged.
1053 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1054 /// to not be elided or reordered by the compiler across other volatile
1059 /// Rust does not currently have a rigorously and formally defined memory model,
1060 /// so the precise semantics of what "volatile" means here is subject to change
1061 /// over time. That being said, the semantics will almost always end up pretty
1062 /// similar to [C11's definition of volatile][c11].
1064 /// The compiler shouldn't change the relative order or number of volatile
1065 /// memory operations. However, volatile memory operations on zero-sized types
1066 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1067 /// and may be ignored.
1069 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1073 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1074 /// moves the value out of `self` without preventing further usage of `self`.
1075 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1076 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1077 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1078 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_volatile(), 12);
/// }
1092 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_volatile(self) -> T
    where T: Sized,
{
    read_volatile(self)
}
1100 /// Reads the value from `self` without moving it. This leaves the
1101 /// memory in `self` unchanged.
1103 /// Unlike `read`, the pointer may be unaligned.
1107 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1108 /// moves the value out of `self` without preventing further usage of `self`.
1109 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1110 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1111 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1112 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_unaligned(), 12);
/// }
1126 #[stable(feature = "pointer_methods", since = "1.26.0")]
1128 pub unsafe fn read_unaligned(self) -> T
    where T: Sized,
{
    read_unaligned(self)
}
1134 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1135 /// and destination may overlap.
1137 /// NOTE: this has the *same* argument order as `ptr::copy`.
1139 /// This is semantically equivalent to C's `memmove`.
1143 /// Care must be taken with the ownership of `self` and `dest`.
1144 /// This method semantically moves the values of `self` into `dest`.
1145 /// However it does not drop the contents of `self`, or prevent the contents
1146 /// of `dest` from being dropped or used.
1150 /// Efficiently create a Rust vector from an unsafe buffer:
1153 /// # #[allow(dead_code)]
1154 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
1161 #[stable(feature = "pointer_methods", since = "1.26.0")]
1163 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}
1169 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1170 /// and destination may *not* overlap.
1172 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1174 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1178 /// Beyond requiring that the program must be allowed to access both regions
1179 /// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1182 /// However it does not drop the contents of `dest`, or prevent the contents
1183 /// of `self` from being dropped or used.
1187 /// Efficiently create a Rust vector from an unsafe buffer:
1190 /// # #[allow(dead_code)]
1191 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
///     dst
/// }
1198 #[stable(feature = "pointer_methods", since = "1.26.0")]
1200 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(self, dest, count)
}
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
1209 /// If it is not possible to align the pointer, the implementation returns
1210 /// `usize::max_value()`.
1212 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
1213 /// used with the `offset` or `offset_to` methods.
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
1216 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
1217 /// the returned offset is correct in all terms other than alignment.
1221 /// The function panics if `align` is not a power-of-two.
1225 /// Accessing adjacent `u8` as `u16`
1228 /// # #![feature(align_offset)]
1229 /// # fn foo(n: usize) {
1230 /// # use std::mem::align_of;
1232 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
1233 /// let ptr = &x[n] as *const u8;
1234 /// let offset = ptr.align_offset(align_of::<u16>());
1235 /// if offset < x.len() - n - 1 {
///     let u16_ptr = ptr.offset(offset as isize) as *const u16;
///     assert_ne!(*u16_ptr, 500);
/// } else {
///     // while the pointer can be aligned via `offset`, it would point
///     // outside the allocation
/// }
1244 #[unstable(feature = "align_offset", issue = "44488")]
1246 pub fn align_offset(self, align: usize) -> usize where T: Sized {
1247 if !align.is_power_of_two() {
1248 panic!("align_offset: align is not a power-of-two");
1251 align_offset(self, align)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
1256 #[unstable(feature = "align_offset", issue = "44488")]
1258 pub fn align_offset(self, align: usize) -> usize where T: Sized {
1259 if !align.is_power_of_two() {
1260 panic!("align_offset: align is not a power-of-two");
1263 intrinsics::align_offset(self as *const (), align)
#[lang = "mut_ptr"]
impl<T: ?Sized> *mut T {
1271 /// Returns `true` if the pointer is null.
1273 /// Note that unsized types have many possible null pointers, as only the
1274 /// raw data pointer is considered, not their length, vtable, etc.
/// Therefore, two pointers that are null may still not compare equal to
/// each other.
1283 /// let mut s = [1, 2, 3];
1284 /// let ptr: *mut u32 = s.as_mut_ptr();
1285 /// assert!(!ptr.is_null());
1287 #[stable(feature = "rust1", since = "1.0.0")]
1289 pub fn is_null(self) -> bool {
1290 // Compare via a cast to a thin pointer, so fat pointers are only
1291 // considering their "data" part for null-ness.
(self as *mut u8) == null_mut()
}
1295 /// Returns `None` if the pointer is null, or else returns a reference to
1296 /// the value wrapped in `Some`.
1300 /// While this method and its mutable counterpart are useful for
1301 /// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
1305 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1306 /// not necessarily reflect the actual lifetime of the data.
1313 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
1321 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}
1331 /// Calculates the offset from a pointer.
1333 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1334 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1341 /// * Both the starting and resulting pointer must be either in bounds or one
1342 /// byte past the end of an allocated object.
1344 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1346 /// * The offset being in bounds cannot rely on "wrapping around" the address
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1350 /// never reach a size where an offset is a concern. For instance, `Vec`
1351 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1352 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1354 /// Most platforms fundamentally can't even construct such an allocation.
1355 /// For instance, no known 64-bit platform can ever serve a request
1356 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1357 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1358 /// more than `isize::MAX` bytes with things like Physical Address
1359 /// Extension. As such, memory acquired directly from allocators or memory
1360 /// mapped files *may* be too large to handle with this function.
1362 /// Consider using `wrapping_offset` instead if these constraints are
1363 /// difficult to satisfy. The only advantage of this method is that it
1364 /// enables more aggressive compiler optimizations.
1371 /// let mut s = [1, 2, 3];
1372 /// let ptr: *mut u32 = s.as_mut_ptr();
/// unsafe {
///     println!("{}", *ptr.offset(1));
///     println!("{}", *ptr.offset(2));
/// }
1379 #[stable(feature = "rust1", since = "1.0.0")]
1381 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
intrinsics::offset(self, count) as *mut T
}
1385 /// Calculates the offset from a pointer using wrapping arithmetic.
1386 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1387 /// offset of `3 * size_of::<T>()` bytes.
1391 /// The resulting pointer does not need to be in bounds, but it is
1392 /// potentially hazardous to dereference (which requires `unsafe`).
1394 /// Always use `.offset(count)` instead when possible, because `offset`
1395 /// allows the compiler to optimize better.
1402 /// // Iterate using a raw pointer in increments of two elements
1403 /// let mut data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *mut u8 = data.as_mut_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_offset(6);
///
/// while ptr != end_rounded_up {
///     unsafe {
///         *ptr = 0;
///     }
///     ptr = ptr.wrapping_offset(step);
/// }
/// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1416 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1418 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
    unsafe {
        intrinsics::arith_offset(self, count) as *mut T
    }
}
1424 /// Returns `None` if the pointer is null, or else returns a mutable
1425 /// reference to the value wrapped in `Some`.
1429 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1430 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1431 /// returned is indeed a valid lifetime for the contained data.
1438 /// let mut s = [1, 2, 3];
1439 /// let ptr: *mut u32 = s.as_mut_ptr();
1440 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1441 /// *first_value = 4;
1442 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1444 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
    if self.is_null() {
        None
    } else {
        Some(&mut *self)
    }
}
1454 /// Calculates the distance between two pointers. The returned value is in
1455 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
1461 /// This function returns `None` if `T` is a zero-sized type.
1468 /// #![feature(offset_to)]
1469 /// #![allow(deprecated)]
1472 /// let mut a = [0; 5];
1473 /// let ptr1: *mut i32 = &mut a[1];
1474 /// let ptr2: *mut i32 = &mut a[3];
1475 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1476 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1477 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1478 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1481 #[unstable(feature = "offset_to", issue = "41079")]
1482 #[rustc_deprecated(since = "1.27.0", reason = "Replaced by `wrapping_offset_from`, with the \
1483 opposite argument order. If you're writing unsafe code, consider `offset_from`.")]
1485 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1486 let size = mem::size_of::<T>();
if size == 0 {
    None
} else {
    Some(other.wrapping_offset_from(self))
}
}
1494 /// Calculates the distance between two pointers. The returned value is in
1495 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1497 /// This function is the inverse of [`offset`].
1499 /// [`offset`]: #method.offset-1
1500 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1507 /// * Both the starting and other pointer must be either in bounds or one
1508 /// byte past the end of the same allocated object.
1510 /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
1512 /// * The distance between the pointers, in bytes, must be an exact multiple
1513 /// of the size of `T`.
1515 /// * The distance being in bounds cannot rely on "wrapping around" the address space.
1517 /// The compiler and standard library generally try to ensure allocations
1518 /// never reach a size where an offset is a concern. For instance, `Vec`
1519 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1520 /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
1522 /// Most platforms fundamentally can't even construct such an allocation.
1523 /// For instance, no known 64-bit platform can ever serve a request
1524 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1525 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1526 /// more than `isize::MAX` bytes with things like Physical Address
1527 /// Extension. As such, memory acquired directly from allocators or memory
1528 /// mapped files *may* be too large to handle with this function.
1530 /// Consider using [`wrapping_offset_from`] instead if these constraints are
1531 /// difficult to satisfy. The only advantage of this method is that it
1532 /// enables more aggressive compiler optimizations.
1536 /// This function panics if `T` is a Zero-Sized Type ("ZST").
1543 /// #![feature(ptr_offset_from)]
1545 /// let mut a = [0; 5];
1546 /// let ptr1: *mut i32 = &mut a[1];
1547 /// let ptr2: *mut i32 = &mut a[3];
/// unsafe {
///     assert_eq!(ptr2.offset_from(ptr1), 2);
///     assert_eq!(ptr1.offset_from(ptr2), -2);
///     assert_eq!(ptr1.offset(2), ptr2);
///     assert_eq!(ptr2.offset(-2), ptr1);
/// }
1555 #[unstable(feature = "ptr_offset_from", issue = "41079")]
1557 pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
(self as *const T).offset_from(origin)
}
1561 /// Calculates the distance between two pointers. The returned value is in
1562 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
1568 /// Though this method is safe for any two pointers, note that its result
1569 /// will be mostly useless if the two pointers aren't into the same allocated
1570 /// object, for example if they point to two different local variables.
1574 /// This function panics if `T` is a zero-sized type.
1581 /// #![feature(ptr_wrapping_offset_from)]
1583 /// let mut a = [0; 5];
1584 /// let ptr1: *mut i32 = &mut a[1];
1585 /// let ptr2: *mut i32 = &mut a[3];
1586 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1587 /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
1588 /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
1589 /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
1591 /// let ptr1: *mut i32 = 3 as _;
1592 /// let ptr2: *mut i32 = 13 as _;
1593 /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
1595 #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
1597 pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
(self as *const T).wrapping_offset_from(origin)
}
1601 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1603 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1604 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1611 /// * Both the starting and resulting pointer must be either in bounds or one
1612 /// byte past the end of an allocated object.
1614 /// * The computed offset, **in bytes**, cannot overflow an `isize`.
1616 /// * The offset being in bounds cannot rely on "wrapping around" the address
1617 /// space. That is, the infinite-precision sum must fit in a `usize`.
/// The compiler and standard library generally try to ensure allocations
1620 /// never reach a size where an offset is a concern. For instance, `Vec`
1621 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1622 /// `vec.as_ptr().add(vec.len())` is always safe.
1624 /// Most platforms fundamentally can't even construct such an allocation.
1625 /// For instance, no known 64-bit platform can ever serve a request
1626 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1627 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1628 /// more than `isize::MAX` bytes with things like Physical Address
1629 /// Extension. As such, memory acquired directly from allocators or memory
1630 /// mapped files *may* be too large to handle with this function.
1632 /// Consider using `wrapping_offset` instead if these constraints are
1633 /// difficult to satisfy. The only advantage of this method is that it
1634 /// enables more aggressive compiler optimizations.
1641 /// let s: &str = "123";
1642 /// let ptr: *const u8 = s.as_ptr();
/// unsafe {
///     println!("{}", *ptr.add(1) as char);
///     println!("{}", *ptr.add(2) as char);
/// }
1649 #[stable(feature = "pointer_methods", since = "1.26.0")]
1651 pub unsafe fn add(self, count: usize) -> Self
    where T: Sized,
{
    self.offset(count as isize)
}
1657 /// Calculates the offset from a pointer (convenience for
1658 /// `.offset((count as isize).wrapping_neg())`).
1660 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1661 /// offset of `3 * size_of::<T>()` bytes.
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
1668 /// * Both the starting and resulting pointer must be either in bounds or one
1669 /// byte past the end of an allocated object.
1671 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1673 /// * The offset being in bounds cannot rely on "wrapping around" the address
1674 /// space. That is, the infinite-precision sum must fit in a usize.
/// The compiler and standard library generally try to ensure allocations
1677 /// never reach a size where an offset is a concern. For instance, `Vec`
1678 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1679 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1681 /// Most platforms fundamentally can't even construct such an allocation.
1682 /// For instance, no known 64-bit platform can ever serve a request
1683 /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
1684 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1685 /// more than `isize::MAX` bytes with things like Physical Address
1686 /// Extension. As such, memory acquired directly from allocators or memory
1687 /// mapped files *may* be too large to handle with this function.
1689 /// Consider using `wrapping_offset` instead if these constraints are
1690 /// difficult to satisfy. The only advantage of this method is that it
1691 /// enables more aggressive compiler optimizations.
1698 /// let s: &str = "123";
/// unsafe {
///     let end: *const u8 = s.as_ptr().add(3);
///     println!("{}", *end.sub(1) as char);
///     println!("{}", *end.sub(2) as char);
/// }
1706 #[stable(feature = "pointer_methods", since = "1.26.0")]
1708 pub unsafe fn sub(self, count: usize) -> Self
    where T: Sized,
{
    self.offset((count as isize).wrapping_neg())
}
1714 /// Calculates the offset from a pointer using wrapping arithmetic.
1715 /// (convenience for `.wrapping_offset(count as isize)`)
1717 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1718 /// offset of `3 * size_of::<T>()` bytes.
1722 /// The resulting pointer does not need to be in bounds, but it is
1723 /// potentially hazardous to dereference (which requires `unsafe`).
1725 /// Always use `.add(count)` instead when possible, because `add`
1726 /// allows the compiler to optimize better.
1733 /// // Iterate using a raw pointer in increments of two elements
1734 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
1747 #[stable(feature = "pointer_methods", since = "1.26.0")]
1749 pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}
1755 /// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1758 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1759 /// offset of `3 * size_of::<T>()` bytes.
1763 /// The resulting pointer does not need to be in bounds, but it is
1764 /// potentially hazardous to dereference (which requires `unsafe`).
1766 /// Always use `.sub(count)` instead when possible, because `sub`
1767 /// allows the compiler to optimize better.
1774 /// // Iterate using a raw pointer in increments of two elements (backwards)
1775 /// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
///
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
1788 #[stable(feature = "pointer_methods", since = "1.26.0")]
1790 pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}
1796 /// Reads the value from `self` without moving it. This leaves the
1797 /// memory in `self` unchanged.
1801 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1802 /// moves the value out of `self` without preventing further usage of `self`.
1803 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1804 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1805 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1806 /// because it will attempt to drop the value previously at `*self`.
1808 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read(), 12);
/// }
1822 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read(self) -> T
    where T: Sized,
{
    read(self)
}
1830 /// Performs a volatile read of the value from `self` without moving it. This
1831 /// leaves the memory in `self` unchanged.
1833 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
1839 /// Rust does not currently have a rigorously and formally defined memory model,
1840 /// so the precise semantics of what "volatile" means here is subject to change
1841 /// over time. That being said, the semantics will almost always end up pretty
1842 /// similar to [C11's definition of volatile][c11].
1844 /// The compiler shouldn't change the relative order or number of volatile
1845 /// memory operations. However, volatile memory operations on zero-sized types
1846 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1847 /// and may be ignored.
1849 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1853 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1854 /// moves the value out of `self` without preventing further usage of `self`.
1855 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1856 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1857 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1858 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_volatile(), 12);
/// }
1872 #[stable(feature = "pointer_methods", since = "1.26.0")]
pub unsafe fn read_volatile(self) -> T
    where T: Sized,
{
    read_volatile(self)
}
1880 /// Reads the value from `self` without moving it. This leaves the
1881 /// memory in `self` unchanged.
1883 /// Unlike `read`, the pointer may be unaligned.
1887 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1888 /// moves the value out of `self` without preventing further usage of `self`.
1889 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1890 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1891 /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
1892 /// because it will attempt to drop the value previously at `*self`.
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(y.read_unaligned(), 12);
/// }
1906 #[stable(feature = "pointer_methods", since = "1.26.0")]
1908 pub unsafe fn read_unaligned(self) -> T
1911 read_unaligned(self)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1915 /// and destination may overlap.
1917 /// NOTE: this has the *same* argument order as `ptr::copy`.
1919 /// This is semantically equivalent to C's `memmove`.
1923 /// Care must be taken with the ownership of `self` and `dest`.
1924 /// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
1930 /// Efficiently create a Rust vector from an unsafe buffer:
1933 /// # #[allow(dead_code)]
1934 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1935 /// let mut dst = Vec::with_capacity(elts);
1936 /// dst.set_len(elts);
1937 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1941 #[stable(feature = "pointer_methods", since = "1.26.0")]
1943 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1946 copy(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1950 /// and destination may *not* overlap.
1952 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1954 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1958 /// Beyond requiring that the program must be allowed to access both regions
1959 /// of memory, it is Undefined Behavior for source and destination to
1960 /// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
1962 /// However it does not drop the contents of `dest`, or prevent the contents
1963 /// of `self` from being dropped or used.
1967 /// Efficiently create a Rust vector from an unsafe buffer:
1970 /// # #[allow(dead_code)]
1971 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1972 /// let mut dst = Vec::with_capacity(elts);
1973 /// dst.set_len(elts);
1974 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1978 #[stable(feature = "pointer_methods", since = "1.26.0")]
1980 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1983 copy_nonoverlapping(self, dest, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1987 /// and destination may overlap.
1989 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1991 /// This is semantically equivalent to C's `memmove`.
1995 /// Care must be taken with the ownership of `src` and `self`.
1996 /// This method semantically moves the values of `src` into `self`.
1997 /// However it does not drop the contents of `self`, or prevent the contents
1998 /// of `src` from being dropped or used.
2002 /// Efficiently create a Rust vector from an unsafe buffer:
2005 /// # #[allow(dead_code)]
2006 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
2007 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
2008 /// dst.set_len(elts);
2009 /// dst.as_mut_ptr().copy_from(ptr, elts);
2013 #[stable(feature = "pointer_methods", since = "1.26.0")]
2015 pub unsafe fn copy_from(self, src: *const T, count: usize)
2018 copy(src, self, count)
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
2022 /// and destination may *not* overlap.
2024 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
2026 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
2030 /// Beyond requiring that the program must be allowed to access both regions
2031 /// of memory, it is Undefined Behavior for source and destination to
2032 /// overlap. Care must also be taken with the ownership of `src` and
2033 /// `self`. This method semantically moves the values of `src` into `self`.
2034 /// However it does not drop the contents of `self`, or prevent the contents
2035 /// of `src` from being dropped or used.
2039 /// Efficiently create a Rust vector from an unsafe buffer:
2042 /// # #[allow(dead_code)]
2043 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
2044 /// let mut dst: Vec<T> = Vec::with_capacity(elts);
2045 /// dst.set_len(elts);
2046 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
2050 #[stable(feature = "pointer_methods", since = "1.26.0")]
2052 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
2055 copy_nonoverlapping(src, self, count)
2058 /// Executes the destructor (if any) of the pointed-to value.
2060 /// This has two use cases:
2062 /// * It is *required* to use `drop_in_place` to drop unsized types like
2063 /// trait objects, because they can't be read out onto the stack and
2064 /// dropped normally.
2066 /// * It is friendlier to the optimizer to do this over `ptr::read` when
2067 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// drop call.
2073 /// This has all the same safety problems as `ptr::read` with respect to
2074 /// invalid pointers, types, and double drops.
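///
/// # Examples
///
/// A minimal sketch: pairing this with `mem::ManuallyDrop` runs the destructor
/// exactly once, since `ManuallyDrop` suppresses the automatic drop.
///
/// ```
/// use std::mem::ManuallyDrop;
///
/// let mut s = ManuallyDrop::new(String::from("example"));
/// let ptr: *mut String = &mut *s;
/// unsafe {
///     // Runs the `String` destructor in place; the compiler will not
///     // drop `s` again when it goes out of scope.
///     ptr.drop_in_place();
/// }
/// ```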
2075 #[stable(feature = "pointer_methods", since = "1.26.0")]
2077 pub unsafe fn drop_in_place(self) {
2081 /// Overwrites a memory location with the given value without reading or
2082 /// dropping the old value.
2086 /// This operation is marked unsafe because it writes through a raw pointer.
2088 /// It does not drop the contents of `self`. This is safe, but it could leak
2089 /// allocations or resources, so care must be taken not to overwrite an object
2090 /// that should be dropped.
2092 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
2093 /// location pointed to by `self`.
2095 /// This is appropriate for initializing uninitialized memory, or overwriting
2096 /// memory that has previously been `read` from.
2098 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
2106 /// let y = &mut x as *mut i32;
2111 /// assert_eq!(y.read(), 12);
2114 #[stable(feature = "pointer_methods", since = "1.26.0")]
2116 pub unsafe fn write(self, val: T)
2122 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
2123 /// bytes of memory starting at `self` to `val`.
2128 /// let mut vec = vec![0; 4];
2130 /// let vec_ptr = vec.as_mut_ptr();
2131 /// vec_ptr.write_bytes(b'a', 2);
2133 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
2135 #[stable(feature = "pointer_methods", since = "1.26.0")]
2137 pub unsafe fn write_bytes(self, val: u8, count: usize)
2140 write_bytes(self, val, count)
2143 /// Performs a volatile write of a memory location with the given value without
2144 /// reading or dropping the old value.
2146 /// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
2152 /// Rust does not currently have a rigorously and formally defined memory model,
2153 /// so the precise semantics of what "volatile" means here is subject to change
2154 /// over time. That being said, the semantics will almost always end up pretty
2155 /// similar to [C11's definition of volatile][c11].
2157 /// The compiler shouldn't change the relative order or number of volatile
2158 /// memory operations. However, volatile memory operations on zero-sized types
2159 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
2160 /// and may be ignored.
2162 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
2166 /// This operation is marked unsafe because it accepts a raw pointer.
2168 /// It does not drop the contents of `self`. This is safe, but it could leak
2169 /// allocations or resources, so care must be taken not to overwrite an object
2170 /// that should be dropped.
2172 /// This is appropriate for initializing uninitialized memory, or overwriting
2173 /// memory that has previously been `read` from.
2181 /// let y = &mut x as *mut i32;
2185 /// y.write_volatile(z);
2186 /// assert_eq!(y.read_volatile(), 12);
2189 #[stable(feature = "pointer_methods", since = "1.26.0")]
2191 pub unsafe fn write_volatile(self, val: T)
2194 write_volatile(self, val)
2197 /// Overwrites a memory location with the given value without reading or
2198 /// dropping the old value.
2200 /// Unlike `write`, the pointer may be unaligned.
2204 /// This operation is marked unsafe because it writes through a raw pointer.
2206 /// It does not drop the contents of `self`. This is safe, but it could leak
2207 /// allocations or resources, so care must be taken not to overwrite an object
2208 /// that should be dropped.
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
2213 /// This is appropriate for initializing uninitialized memory, or overwriting
2214 /// memory that has previously been `read` from.
2222 /// let y = &mut x as *mut i32;
2226 /// y.write_unaligned(z);
2227 /// assert_eq!(y.read_unaligned(), 12);
2230 #[stable(feature = "pointer_methods", since = "1.26.0")]
2232 pub unsafe fn write_unaligned(self, val: T)
2235 write_unaligned(self, val)
2238 /// Replaces the value at `self` with `src`, returning the old
2239 /// value, without dropping either.
2243 /// This is only unsafe because it accepts a raw pointer.
2244 /// Otherwise, this operation is identical to `mem::replace`.
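///
/// # Examples
///
/// A small sketch of the behavior:
///
/// ```
/// let mut x = 5;
/// let ptr = &mut x as *mut i32;
/// unsafe {
///     // The old value is returned; neither value is dropped.
///     assert_eq!(ptr.replace(7), 5);
/// }
/// assert_eq!(x, 7);
/// ```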
2245 #[stable(feature = "pointer_methods", since = "1.26.0")]
2247 pub unsafe fn replace(self, src: T) -> T
2253 /// Swaps the values at two mutable locations of the same type, without
2254 /// deinitializing either. They may overlap, unlike `mem::swap` which is
2255 /// otherwise equivalent.
2259 /// This function copies the memory through the raw pointers passed to it
2262 /// Ensure that these pointers are valid before calling `swap`.
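///
/// # Examples
///
/// A brief sketch with two non-overlapping locations:
///
/// ```
/// let mut a = 1;
/// let mut b = 2;
/// let pa = &mut a as *mut i32;
/// let pb = &mut b as *mut i32;
/// unsafe {
///     pa.swap(pb);
/// }
/// assert_eq!((a, b), (2, 1));
/// ```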
2263 #[stable(feature = "pointer_methods", since = "1.26.0")]
2265 pub unsafe fn swap(self, with: *mut T)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
2274 /// If it is not possible to align the pointer, the implementation returns
2275 /// `usize::max_value()`.
2277 /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
2278 /// used with the `offset` or `offset_to` methods.
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
2281 /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
2282 /// the returned offset is correct in all terms other than alignment.
2286 /// The function panics if `align` is not a power-of-two.
2290 /// Accessing adjacent `u8` as `u16`
2293 /// # #![feature(align_offset)]
2294 /// # fn foo(n: usize) {
2295 /// # use std::mem::align_of;
2297 /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
2298 /// let ptr = &x[n] as *const u8;
2299 /// let offset = ptr.align_offset(align_of::<u16>());
2300 /// if offset < x.len() - n - 1 {
2301 /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
2302 /// assert_ne!(*u16_ptr, 500);
2304 /// // while the pointer can be aligned via `offset`, it would point
2305 /// // outside the allocation
2309 #[unstable(feature = "align_offset", issue = "44488")]
2311 pub fn align_offset(self, align: usize) -> usize where T: Sized {
2312 if !align.is_power_of_two() {
2313 panic!("align_offset: align is not a power-of-two");
2316 align_offset(self, align)
/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`. The semantics and caveats are the same as for the `*const T` method above.
2321 #[unstable(feature = "align_offset", issue = "44488")]
2323 pub fn align_offset(self, align: usize) -> usize where T: Sized {
2324 if !align.is_power_of_two() {
2325 panic!("align_offset: align is not a power-of-two");
2328 intrinsics::align_offset(self as *const (), align)
2333 /// Align pointer `p`.
/// Calculates the offset (in units of elements of size `stride`) that has to be applied
/// to pointer `p` so that `p` becomes aligned to `a`.
2338 /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is a change of `INV_TABLE_MOD_16` and its
/// associated constants.
2343 /// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
2346 /// Any questions go to @nagisa.
2347 #[lang="align_offset"]
2349 pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
2350 /// Calculate multiplicative modular inverse of `x` modulo `m`.
/// This implementation is tailored for `align_offset` and has the following preconditions:
2354 /// * `m` is a power-of-two;
2355 /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
2357 /// Implementation of this function shall not panic. Ever.
2359 fn mod_inv(x: usize, m: usize) -> usize {
2360 /// Multiplicative modular inverse table modulo 2⁴ = 16.
/// Note that this table does not contain values where the inverse does not exist (i.e. for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
2364 const INV_TABLE_MOD_16: [usize; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
2365 /// Modulo for which the `INV_TABLE_MOD_16` is intended.
2366 const INV_TABLE_MOD: usize = 16;
2368 const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
2370 let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1];
2371 if m <= INV_TABLE_MOD {
2372 return table_inverse & (m - 1);
2374 // We iterate "up" using the following formula:
2376 // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
2378 // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
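//
// For instance: 3·11 ≡ 1 (mod 16), and one lifting step gives
// 11·(2 − 3·11) mod 256 = 171, where indeed 3·171 = 513 ≡ 1 (mod 256);
// each step doubles the number of low bits for which the inverse is exact.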
2379 let mut inverse = table_inverse;
2380 let mut going_mod = INV_TABLE_MOD_SQUARED;
2382 // y = y * (2 - xy) mod n
// Note that we use wrapping operations here intentionally – the original formula
2385 // uses e.g. subtraction `mod n`. It is entirely fine to do them `mod
2386 // usize::max_value()` instead, because we take the result `mod n` at the end
2388 inverse = inverse.wrapping_mul(
2389 2usize.wrapping_sub(x.wrapping_mul(inverse))
2390 ) & (going_mod - 1);
2392 return inverse & (m - 1);
2394 going_mod = going_mod.wrapping_mul(going_mod);
2399 let stride = ::mem::size_of::<T>();
2400 let a_minus_one = a.wrapping_sub(1);
2401 let pmoda = p as usize & a_minus_one;
2404 // Already aligned. Yay!
2409 return if stride == 0 {
2410 // If the pointer is not aligned, and the element is zero-sized, then no amount of
2411 // elements will ever align the pointer.
2414 a.wrapping_sub(pmoda)
2418 let smoda = stride & a_minus_one;
2419 // a is power-of-two so cannot be 0. stride = 0 is handled above.
2420 let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
2421 let gcd = 1usize << gcdpow;
// This branch solves for the variable $o$ in the following linear congruence equation:
2426 // ⎰ p + o ≡ 0 (mod a) # $p + o$ must be aligned to specified alignment $a$
2427 // ⎱ o ≡ 0 (mod s) # offset $o$ must be a multiple of stride $s$
2431 // * a, s are co-prime
2433 // This gives us the formula below:
2435 // o = (a - (p mod a)) * (s⁻¹ mod a) * s
// The first term is “the relative alignment of p to a”, the second term is “how much
// incrementing p by one element of size s changes the relative alignment of p”, and the
// third term translates that change in units of s into a byte count.
2441 // Furthermore, the result produced by this solution is not “minimal”, so it is necessary
2442 // to take the result $o mod lcm(s, a)$. Since $s$ and $a$ are co-prime (i.e. $gcd(s, a) =
2443 // 1$) and $lcm(s, a) = s * a / gcd(s, a)$, we can replace $lcm(s, a)$ with just a $s * a$.
2445 // (Author note: we decided later on to express the offset in "elements" rather than bytes,
2446 // which drops the multiplication by `s` on both sides of the modulo.)
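//
// As an illustrative instance: with a = 4, p % a = 1 and s = 3 we have
// s⁻¹ ≡ 3 (mod 4), so the expression below yields (4 - 1) * 3 mod 4 = 1
// element, and indeed p + 1 * 3 ≡ 0 (mod 4).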
2447 return intrinsics::unchecked_rem(a.wrapping_sub(pmoda).wrapping_mul(mod_inv(smoda, a)), a);
2450 if p as usize & (gcd - 1) == 0 {
// This can be aligned, but `a` and `stride` are not co-prime, so a somewhat adapted
// version of the formula above is used.
2453 let j = a.wrapping_sub(pmoda) >> gcdpow;
2454 let k = smoda >> gcdpow;
2455 return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow);
2458 // Cannot be aligned at all.
2459 return usize::max_value();
2464 // Equality for pointers
2465 #[stable(feature = "rust1", since = "1.0.0")]
2466 impl<T: ?Sized> PartialEq for *const T {
2468 fn eq(&self, other: &*const T) -> bool { *self == *other }
2471 #[stable(feature = "rust1", since = "1.0.0")]
2472 impl<T: ?Sized> Eq for *const T {}
2474 #[stable(feature = "rust1", since = "1.0.0")]
2475 impl<T: ?Sized> PartialEq for *mut T {
2477 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2480 #[stable(feature = "rust1", since = "1.0.0")]
2481 impl<T: ?Sized> Eq for *mut T {}
2483 /// Compare raw pointers for equality.
2485 /// This is the same as using the `==` operator, but less generic:
2486 /// the arguments have to be `*const T` raw pointers,
2487 /// not anything that implements `PartialEq`.
2489 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2490 /// by their address rather than comparing the values they point to
2491 /// (which is what the `PartialEq for &T` implementation does).
2499 /// let other_five = 5;
2500 /// let five_ref = &five;
2501 /// let same_five_ref = &five;
2502 /// let other_five_ref = &other_five;
2504 /// assert!(five_ref == same_five_ref);
2505 /// assert!(five_ref == other_five_ref);
2507 /// assert!(ptr::eq(five_ref, same_five_ref));
2508 /// assert!(!ptr::eq(five_ref, other_five_ref));
2510 #[stable(feature = "ptr_eq", since = "1.17.0")]
2512 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2516 // Impls for function pointers
2517 macro_rules! fnptr_impls_safety_abi {
2518 ($FnTy: ty, $($Arg: ident),*) => {
2519 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2520 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2522 fn eq(&self, other: &Self) -> bool {
2523 *self as usize == *other as usize
2527 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2528 impl<Ret, $($Arg),*> Eq for $FnTy {}
2530 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2531 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2533 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2534 (*self as usize).partial_cmp(&(*other as usize))
2538 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2539 impl<Ret, $($Arg),*> Ord for $FnTy {
2541 fn cmp(&self, other: &Self) -> Ordering {
2542 (*self as usize).cmp(&(*other as usize))
2546 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2547 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2548 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2549 state.write_usize(*self as usize)
2553 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2554 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2555 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2556 fmt::Pointer::fmt(&(*self as *const ()), f)
2560 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2561 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2562 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2563 fmt::Pointer::fmt(&(*self as *const ()), f)
2569 macro_rules! fnptr_impls_args {
2570 ($($Arg: ident),+) => {
2571 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2572 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2573 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2574 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2575 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2576 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2579 // No variadic functions with 0 parameters
2580 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2581 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2582 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2583 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2587 fnptr_impls_args! { }
2588 fnptr_impls_args! { A }
2589 fnptr_impls_args! { A, B }
2590 fnptr_impls_args! { A, B, C }
2591 fnptr_impls_args! { A, B, C, D }
2592 fnptr_impls_args! { A, B, C, D, E }
2593 fnptr_impls_args! { A, B, C, D, E, F }
2594 fnptr_impls_args! { A, B, C, D, E, F, G }
2595 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2596 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2597 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2598 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2599 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2601 // Comparison for pointers
2602 #[stable(feature = "rust1", since = "1.0.0")]
2603 impl<T: ?Sized> Ord for *const T {
2605 fn cmp(&self, other: &*const T) -> Ordering {
2608 } else if self == other {
2616 #[stable(feature = "rust1", since = "1.0.0")]
2617 impl<T: ?Sized> PartialOrd for *const T {
2619 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2620 Some(self.cmp(other))
2624 fn lt(&self, other: &*const T) -> bool { *self < *other }
2627 fn le(&self, other: &*const T) -> bool { *self <= *other }
2630 fn gt(&self, other: &*const T) -> bool { *self > *other }
2633 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2636 #[stable(feature = "rust1", since = "1.0.0")]
2637 impl<T: ?Sized> Ord for *mut T {
2639 fn cmp(&self, other: &*mut T) -> Ordering {
2642 } else if self == other {
2650 #[stable(feature = "rust1", since = "1.0.0")]
2651 impl<T: ?Sized> PartialOrd for *mut T {
2653 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2654 Some(self.cmp(other))
2658 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2661 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2664 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2667 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2670 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2671 /// of this wrapper owns the referent. Useful for building abstractions like
2672 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2674 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2675 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2676 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2677 /// the referent of the pointer should not be modified without a unique path to
2678 /// its owning Unique.
2680 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2681 /// consider using `NonNull`, which has weaker semantics.
2683 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2684 /// is never dereferenced. This is so that enums may use this forbidden value
2685 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2686 /// However the pointer may still dangle if it isn't dereferenced.
2688 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2689 /// for any type which upholds Unique's aliasing requirements.
2690 #[unstable(feature = "ptr_internals", issue = "0",
2691 reason = "use NonNull instead and consider PhantomData<T> \
2692 (if you also use #[may_dangle]), Send, and/or Sync")]
2694 pub struct Unique<T: ?Sized> {
2695 pointer: NonZero<*const T>,
2696 // NOTE: this marker has no consequences for variance, but is necessary
2697 // for dropck to understand that we logically own a `T`.
2699 // For details, see:
2700 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2701 _marker: PhantomData<T>,
2704 #[unstable(feature = "ptr_internals", issue = "0")]
2705 impl<T: ?Sized> fmt::Debug for Unique<T> {
2706 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2707 fmt::Pointer::fmt(&self.as_ptr(), f)
2711 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2712 /// reference is unaliased. Note that this aliasing invariant is
2713 /// unenforced by the type system; the abstraction using the
2714 /// `Unique` must enforce it.
2715 #[unstable(feature = "ptr_internals", issue = "0")]
2716 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2718 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2719 /// reference is unaliased. Note that this aliasing invariant is
2720 /// unenforced by the type system; the abstraction using the
2721 /// `Unique` must enforce it.
2722 #[unstable(feature = "ptr_internals", issue = "0")]
2723 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2725 #[unstable(feature = "ptr_internals", issue = "0")]
2726 impl<T: Sized> Unique<T> {
2727 /// Creates a new `Unique` that is dangling, but well-aligned.
2729 /// This is useful for initializing types which lazily allocate, like
2730 /// `Vec::new` does.
2731 // FIXME: rename to dangling() to match NonNull?
2732 pub const fn empty() -> Self {
2734 Unique::new_unchecked(mem::align_of::<T>() as *mut T)
2739 #[unstable(feature = "ptr_internals", issue = "0")]
2740 impl<T: ?Sized> Unique<T> {
2741 /// Creates a new `Unique`.
2745 /// `ptr` must be non-null.
2746 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2747 Unique { pointer: NonZero(ptr as _), _marker: PhantomData }
2750 /// Creates a new `Unique` if `ptr` is non-null.
2751 pub fn new(ptr: *mut T) -> Option<Self> {
2753 Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData })
2759 /// Acquires the underlying `*mut` pointer.
2760 pub fn as_ptr(self) -> *mut T {
2761 self.pointer.0 as *mut T
2764 /// Dereferences the content.
2766 /// The resulting lifetime is bound to self so this behaves "as if"
2767 /// it were actually an instance of T that is getting borrowed. If a longer
2768 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2769 pub unsafe fn as_ref(&self) -> &T {
2773 /// Mutably dereferences the content.
2775 /// The resulting lifetime is bound to self so this behaves "as if"
2776 /// it were actually an instance of T that is getting borrowed. If a longer
2777 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2778 pub unsafe fn as_mut(&mut self) -> &mut T {
2783 #[unstable(feature = "ptr_internals", issue = "0")]
2784 impl<T: ?Sized> Clone for Unique<T> {
2785 fn clone(&self) -> Self {
2790 #[unstable(feature = "ptr_internals", issue = "0")]
2791 impl<T: ?Sized> Copy for Unique<T> { }
2793 #[unstable(feature = "ptr_internals", issue = "0")]
2794 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2796 #[unstable(feature = "ptr_internals", issue = "0")]
2797 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2798 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2799 fmt::Pointer::fmt(&self.as_ptr(), f)
2803 #[unstable(feature = "ptr_internals", issue = "0")]
2804 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2805 fn from(reference: &'a mut T) -> Self {
2806 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2810 #[unstable(feature = "ptr_internals", issue = "0")]
2811 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2812 fn from(reference: &'a T) -> Self {
2813 Unique { pointer: NonZero(reference as _), _marker: PhantomData }
2817 #[unstable(feature = "ptr_internals", issue = "0")]
2818 impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
2819 fn from(p: NonNull<T>) -> Self {
2820 Unique { pointer: p.pointer, _marker: PhantomData }
2824 /// `*mut T` but non-zero and covariant.
2826 /// This is often the correct thing to use when building data structures using
2827 /// raw pointers, but is ultimately more dangerous to use because of its additional
2828 /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
2830 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2831 /// is never dereferenced. This is so that enums may use this forbidden value
2832 /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
2833 /// However the pointer may still dangle if it isn't dereferenced.
2835 /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
2836 /// for your use case, you should include some PhantomData in your type to
2837 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2838 /// Usually this won't be necessary; covariance is correct for most safe abstractions,
2839 /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
2840 /// provide a public API that follows the normal shared XOR mutable rules of Rust.
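///
/// # Examples
///
/// A small demonstration of the size guarantee mentioned above:
///
/// ```
/// use std::mem::size_of;
/// use std::ptr::NonNull;
///
/// // The forbidden null value encodes `None`, so no extra tag is needed.
/// assert_eq!(size_of::<Option<NonNull<i32>>>(), size_of::<*mut i32>());
/// ```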
2841 #[stable(feature = "nonnull", since = "1.25.0")]
2842 pub struct NonNull<T: ?Sized> {
2843 pointer: NonZero<*const T>,
2846 /// `NonNull` pointers are not `Send` because the data they reference may be aliased.
2847 // NB: This impl is unnecessary, but should provide better error messages.
2848 #[stable(feature = "nonnull", since = "1.25.0")]
2849 impl<T: ?Sized> !Send for NonNull<T> { }
2851 /// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
2852 // NB: This impl is unnecessary, but should provide better error messages.
2853 #[stable(feature = "nonnull", since = "1.25.0")]
2854 impl<T: ?Sized> !Sync for NonNull<T> { }
2856 impl<T: Sized> NonNull<T> {
2857 /// Creates a new `NonNull` that is dangling, but well-aligned.
2859 /// This is useful for initializing types which lazily allocate, like
2860 /// `Vec::new` does.
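///
/// # Examples
///
/// A sketch of the stated guarantees; the pointer must never be dereferenced:
///
/// ```
/// use std::mem::align_of;
/// use std::ptr::NonNull;
///
/// let ptr = NonNull::<u32>::dangling();
/// // Non-null and well-aligned, but dangling.
/// assert!(ptr.as_ptr() as usize % align_of::<u32>() == 0);
/// ```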
2861 #[stable(feature = "nonnull", since = "1.25.0")]
2862 pub fn dangling() -> Self {
2864 let ptr = mem::align_of::<T>() as *mut T;
2865 NonNull::new_unchecked(ptr)
2870 impl<T: ?Sized> NonNull<T> {
2871 /// Creates a new `NonNull`.
2875 /// `ptr` must be non-null.
2876 #[stable(feature = "nonnull", since = "1.25.0")]
2877 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2878 NonNull { pointer: NonZero(ptr as _) }
2881 /// Creates a new `NonNull` if `ptr` is non-null.
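///
/// # Examples
///
/// A quick sketch of both outcomes:
///
/// ```
/// use std::ptr::{self, NonNull};
///
/// let mut x = 5u32;
/// assert!(NonNull::new(&mut x as *mut u32).is_some());
/// assert!(NonNull::<u32>::new(ptr::null_mut()).is_none());
/// ```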
2882 #[stable(feature = "nonnull", since = "1.25.0")]
2883 pub fn new(ptr: *mut T) -> Option<Self> {
2885 Some(NonNull { pointer: NonZero(ptr as _) })
2891 /// Acquires the underlying `*mut` pointer.
2892 #[stable(feature = "nonnull", since = "1.25.0")]
2893 pub fn as_ptr(self) -> *mut T {
2894 self.pointer.0 as *mut T
2897 /// Dereferences the content.
2899 /// The resulting lifetime is bound to self so this behaves "as if"
2900 /// it were actually an instance of T that is getting borrowed. If a longer
2901 /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
2902 #[stable(feature = "nonnull", since = "1.25.0")]
2903 pub unsafe fn as_ref(&self) -> &T {
2907 /// Mutably dereferences the content.
2909 /// The resulting lifetime is bound to self so this behaves "as if"
2910 /// it were actually an instance of T that is getting borrowed. If a longer
2911 /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
2912 #[stable(feature = "nonnull", since = "1.25.0")]
2913 pub unsafe fn as_mut(&mut self) -> &mut T {
2917 /// Cast to a pointer of another type
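///
/// # Examples
///
/// A short sketch; only the pointee type changes, not the address:
///
/// ```
/// use std::ptr::NonNull;
///
/// let mut x = 0u32;
/// let ptr = NonNull::from(&mut x);
/// let bytes: NonNull<u8> = ptr.cast();
/// assert_eq!(bytes.as_ptr() as usize, ptr.as_ptr() as usize);
/// ```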
2918 #[stable(feature = "nonnull_cast", since = "1.27.0")]
2919 pub fn cast<U>(self) -> NonNull<U> {
2921 NonNull::new_unchecked(self.as_ptr() as *mut U)
2925 /// Cast to an `Opaque` pointer
2926 #[unstable(feature = "allocator_api", issue = "32838")]
2927 pub fn as_opaque(self) -> NonNull<::alloc::Opaque> {
2929 NonNull::new_unchecked(self.as_ptr() as _)
2934 #[stable(feature = "nonnull", since = "1.25.0")]
2935 impl<T: ?Sized> Clone for NonNull<T> {
2936 fn clone(&self) -> Self {
2941 #[stable(feature = "nonnull", since = "1.25.0")]
2942 impl<T: ?Sized> Copy for NonNull<T> { }
2944 #[unstable(feature = "coerce_unsized", issue = "27732")]
2945 impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
2947 #[stable(feature = "nonnull", since = "1.25.0")]
2948 impl<T: ?Sized> fmt::Debug for NonNull<T> {
2949 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2950 fmt::Pointer::fmt(&self.as_ptr(), f)
2954 #[stable(feature = "nonnull", since = "1.25.0")]
2955 impl<T: ?Sized> fmt::Pointer for NonNull<T> {
2956 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2957 fmt::Pointer::fmt(&self.as_ptr(), f)
2961 #[stable(feature = "nonnull", since = "1.25.0")]
2962 impl<T: ?Sized> Eq for NonNull<T> {}
2964 #[stable(feature = "nonnull", since = "1.25.0")]
2965 impl<T: ?Sized> PartialEq for NonNull<T> {
2966 fn eq(&self, other: &Self) -> bool {
2967 self.as_ptr() == other.as_ptr()
2971 #[stable(feature = "nonnull", since = "1.25.0")]
2972 impl<T: ?Sized> Ord for NonNull<T> {
2973 fn cmp(&self, other: &Self) -> Ordering {
2974 self.as_ptr().cmp(&other.as_ptr())
2978 #[stable(feature = "nonnull", since = "1.25.0")]
2979 impl<T: ?Sized> PartialOrd for NonNull<T> {
2980 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2981 self.as_ptr().partial_cmp(&other.as_ptr())
2985 #[stable(feature = "nonnull", since = "1.25.0")]
2986 impl<T: ?Sized> hash::Hash for NonNull<T> {
2987 fn hash<H: hash::Hasher>(&self, state: &mut H) {
2988 self.as_ptr().hash(state)
2992 #[unstable(feature = "ptr_internals", issue = "0")]
2993 impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
2994 fn from(unique: Unique<T>) -> Self {
2995 NonNull { pointer: unique.pointer }
2999 #[stable(feature = "nonnull", since = "1.25.0")]
3000 impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> {
3001 fn from(reference: &'a mut T) -> Self {
3002 NonNull { pointer: NonZero(reference as _) }
3006 #[stable(feature = "nonnull", since = "1.25.0")]
3007 impl<'a, T: ?Sized> From<&'a T> for NonNull<T> {
3008 fn from(reference: &'a T) -> Self {
3009 NonNull { pointer: NonZero(reference as _) }