// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory

//! Raw, unsafe pointers, `*const T`, and `*mut T`.
//!
//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
17 #![stable(feature = "rust1", since = "1.0.0")]
use cmp::Ordering::{self, Less, Equal, Greater};
use intrinsics;
use marker::{PhantomData, Unsize};
use mem;
use ops::CoerceUnsized;
30 // FIXME #19649: intrinsic docs don't render, so these have no docs :(
32 #[stable(feature = "rust1", since = "1.0.0")]
33 pub use intrinsics::copy_nonoverlapping;
35 #[stable(feature = "rust1", since = "1.0.0")]
36 pub use intrinsics::copy;
38 #[stable(feature = "rust1", since = "1.0.0")]
39 pub use intrinsics::write_bytes;
41 /// Executes the destructor (if any) of the pointed-to value.
43 /// This has two use cases:
45 /// * It is *required* to use `drop_in_place` to drop unsized types like
46 /// trait objects, because they can't be read out onto the stack and
49 /// * It is friendlier to the optimizer to do this over `ptr::read` when
50 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
51 /// as the compiler doesn't need to prove that it's sound to elide the
56 /// This has all the same safety problems as `ptr::read` with respect to
57 /// invalid pointers, types, and double drops.
58 #[stable(feature = "drop_in_place", since = "1.8.0")]
59 #[lang="drop_in_place"]
60 #[allow(unconditional_recursion)]
61 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
62 // Code here does not matter - this is replaced by the
63 // real drop glue by the compiler.
64 drop_in_place(to_drop);
67 /// Creates a null raw pointer.
74 /// let p: *const i32 = ptr::null();
75 /// assert!(p.is_null());
78 #[stable(feature = "rust1", since = "1.0.0")]
79 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_null"))]
80 pub const fn null<T>() -> *const T { 0 as *const T }
82 /// Creates a null mutable raw pointer.
89 /// let p: *mut i32 = ptr::null_mut();
90 /// assert!(p.is_null());
93 #[stable(feature = "rust1", since = "1.0.0")]
94 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_null_mut"))]
95 pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
97 /// Swaps the values at two mutable locations of the same type, without
98 /// deinitializing either. They may overlap, unlike `mem::swap` which is
99 /// otherwise equivalent.
103 /// This function copies the memory through the raw pointers passed to it
106 /// Ensure that these pointers are valid before calling `swap`.
108 #[stable(feature = "rust1", since = "1.0.0")]
109 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
110 // Give ourselves some scratch space to work with
111 let mut tmp: T = mem::uninitialized();
114 copy_nonoverlapping(x, &mut tmp, 1);
115 copy(y, x, 1); // `x` and `y` may overlap
116 copy_nonoverlapping(&tmp, y, 1);
118 // y and t now point to the same thing, but we need to completely forget `tmp`
119 // because it's no longer relevant.
123 /// Swaps a sequence of values at two mutable locations of the same type.
127 /// The two arguments must each point to the beginning of `count` locations
128 /// of valid memory, and the two memory ranges must not overlap.
135 /// #![feature(swap_nonoverlapping)]
139 /// let mut x = [1, 2, 3, 4];
140 /// let mut y = [7, 8, 9];
143 /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
146 /// assert_eq!(x, [7, 8, 3, 4]);
147 /// assert_eq!(y, [1, 2, 9]);
150 #[unstable(feature = "swap_nonoverlapping", issue = "42818")]
151 pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
152 let x = x as *mut u8;
153 let y = y as *mut u8;
154 let len = mem::size_of::<T>() * count;
155 swap_nonoverlapping_bytes(x, y, len)
159 unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
160 // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
161 // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
162 // Haswell E processors. LLVM is more able to optimize if we give a struct a
163 // #[repr(simd)], even if we don't actually use this struct directly.
165 // FIXME repr(simd) broken on emscripten and redox
166 // It's also broken on big-endian powerpc64 and s390x. #42778
167 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
168 target_endian = "big")),
170 struct Block(u64, u64, u64, u64);
171 struct UnalignedBlock(u64, u64, u64, u64);
173 let block_size = mem::size_of::<Block>();
175 // Loop through x & y, copying them `Block` at a time
176 // The optimizer should unroll the loop fully for most types
177 // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
179 while i + block_size <= len {
180 // Create some uninitialized memory as scratch space
181 // Declaring `t` here avoids aligning the stack when this loop is unused
182 let mut t: Block = mem::uninitialized();
183 let t = &mut t as *mut _ as *mut u8;
184 let x = x.offset(i as isize);
185 let y = y.offset(i as isize);
187 // Swap a block of bytes of x & y, using t as a temporary buffer
188 // This should be optimized into efficient SIMD operations where available
189 copy_nonoverlapping(x, t, block_size);
190 copy_nonoverlapping(y, x, block_size);
191 copy_nonoverlapping(t, y, block_size);
196 // Swap any remaining bytes
197 let mut t: UnalignedBlock = mem::uninitialized();
200 let t = &mut t as *mut _ as *mut u8;
201 let x = x.offset(i as isize);
202 let y = y.offset(i as isize);
204 copy_nonoverlapping(x, t, rem);
205 copy_nonoverlapping(y, x, rem);
206 copy_nonoverlapping(t, y, rem);
210 /// Replaces the value at `dest` with `src`, returning the old
211 /// value, without dropping either.
215 /// This is only unsafe because it accepts a raw pointer.
216 /// Otherwise, this operation is identical to `mem::replace`.
218 #[stable(feature = "rust1", since = "1.0.0")]
219 pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
220 mem::swap(&mut *dest, &mut src); // cannot overlap
224 /// Reads the value from `src` without moving it. This leaves the
225 /// memory in `src` unchanged.
229 /// Beyond accepting a raw pointer, this is unsafe because it semantically
230 /// moves the value out of `src` without preventing further usage of `src`.
231 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
232 /// `src` is not used before the data is overwritten again (e.g. with `write`,
233 /// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
234 /// because it will attempt to drop the value previously at `*src`.
236 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
244 /// let y = &x as *const i32;
247 /// assert_eq!(std::ptr::read(y), 12);
251 #[stable(feature = "rust1", since = "1.0.0")]
252 pub unsafe fn read<T>(src: *const T) -> T {
253 let mut tmp: T = mem::uninitialized();
254 copy_nonoverlapping(src, &mut tmp, 1);
258 /// Reads the value from `src` without moving it. This leaves the
259 /// memory in `src` unchanged.
261 /// Unlike `read`, the pointer may be unaligned.
265 /// Beyond accepting a raw pointer, this is unsafe because it semantically
266 /// moves the value out of `src` without preventing further usage of `src`.
267 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
268 /// `src` is not used before the data is overwritten again (e.g. with `write`,
269 /// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
270 /// because it will attempt to drop the value previously at `*src`.
278 /// let y = &x as *const i32;
281 /// assert_eq!(std::ptr::read_unaligned(y), 12);
285 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
286 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
287 let mut tmp: T = mem::uninitialized();
288 copy_nonoverlapping(src as *const u8,
289 &mut tmp as *mut T as *mut u8,
290 mem::size_of::<T>());
294 /// Overwrites a memory location with the given value without reading or
295 /// dropping the old value.
299 /// This operation is marked unsafe because it accepts a raw pointer.
301 /// It does not drop the contents of `dst`. This is safe, but it could leak
302 /// allocations or resources, so care must be taken not to overwrite an object
303 /// that should be dropped.
305 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
306 /// location pointed to by `dst`.
308 /// This is appropriate for initializing uninitialized memory, or overwriting
309 /// memory that has previously been `read` from.
311 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
319 /// let y = &mut x as *mut i32;
323 /// std::ptr::write(y, z);
324 /// assert_eq!(std::ptr::read(y), 12);
328 #[stable(feature = "rust1", since = "1.0.0")]
329 pub unsafe fn write<T>(dst: *mut T, src: T) {
330 intrinsics::move_val_init(&mut *dst, src)
333 /// Overwrites a memory location with the given value without reading or
334 /// dropping the old value.
336 /// Unlike `write`, the pointer may be unaligned.
340 /// This operation is marked unsafe because it accepts a raw pointer.
342 /// It does not drop the contents of `dst`. This is safe, but it could leak
343 /// allocations or resources, so care must be taken not to overwrite an object
344 /// that should be dropped.
346 /// Additionally, it does not drop `src`. Semantically, `src` is moved into the
347 /// location pointed to by `dst`.
349 /// This is appropriate for initializing uninitialized memory, or overwriting
350 /// memory that has previously been `read` from.
358 /// let y = &mut x as *mut i32;
362 /// std::ptr::write_unaligned(y, z);
363 /// assert_eq!(std::ptr::read_unaligned(y), 12);
367 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
368 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
369 copy_nonoverlapping(&src as *const T as *const u8,
371 mem::size_of::<T>());
375 /// Performs a volatile read of the value from `src` without moving it. This
376 /// leaves the memory in `src` unchanged.
378 /// Volatile operations are intended to act on I/O memory, and are guaranteed
379 /// to not be elided or reordered by the compiler across other volatile
384 /// Rust does not currently have a rigorously and formally defined memory model,
385 /// so the precise semantics of what "volatile" means here is subject to change
386 /// over time. That being said, the semantics will almost always end up pretty
387 /// similar to [C11's definition of volatile][c11].
389 /// The compiler shouldn't change the relative order or number of volatile
390 /// memory operations. However, volatile memory operations on zero-sized types
391 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
392 /// and may be ignored.
394 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
398 /// Beyond accepting a raw pointer, this is unsafe because it semantically
399 /// moves the value out of `src` without preventing further usage of `src`.
400 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
401 /// `src` is not used before the data is overwritten again (e.g. with `write`,
402 /// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
403 /// because it will attempt to drop the value previously at `*src`.
411 /// let y = &x as *const i32;
414 /// assert_eq!(std::ptr::read_volatile(y), 12);
418 #[stable(feature = "volatile", since = "1.9.0")]
419 pub unsafe fn read_volatile<T>(src: *const T) -> T {
420 intrinsics::volatile_load(src)
423 /// Performs a volatile write of a memory location with the given value without
424 /// reading or dropping the old value.
426 /// Volatile operations are intended to act on I/O memory, and are guaranteed
427 /// to not be elided or reordered by the compiler across other volatile
432 /// Rust does not currently have a rigorously and formally defined memory model,
433 /// so the precise semantics of what "volatile" means here is subject to change
434 /// over time. That being said, the semantics will almost always end up pretty
435 /// similar to [C11's definition of volatile][c11].
437 /// The compiler shouldn't change the relative order or number of volatile
438 /// memory operations. However, volatile memory operations on zero-sized types
439 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
440 /// and may be ignored.
442 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
446 /// This operation is marked unsafe because it accepts a raw pointer.
448 /// It does not drop the contents of `dst`. This is safe, but it could leak
449 /// allocations or resources, so care must be taken not to overwrite an object
450 /// that should be dropped.
452 /// This is appropriate for initializing uninitialized memory, or overwriting
453 /// memory that has previously been `read` from.
461 /// let y = &mut x as *mut i32;
465 /// std::ptr::write_volatile(y, z);
466 /// assert_eq!(std::ptr::read_volatile(y), 12);
470 #[stable(feature = "volatile", since = "1.9.0")]
471 pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
472 intrinsics::volatile_store(dst, src);
475 #[lang = "const_ptr"]
476 impl<T: ?Sized> *const T {
477 /// Returns `true` if the pointer is null.
484 /// let s: &str = "Follow the rabbit";
485 /// let ptr: *const u8 = s.as_ptr();
486 /// assert!(!ptr.is_null());
488 #[stable(feature = "rust1", since = "1.0.0")]
490 pub fn is_null(self) -> bool where T: Sized {
494 /// Returns `None` if the pointer is null, or else returns a reference to
495 /// the value wrapped in `Some`.
499 /// While this method and its mutable counterpart are useful for
500 /// null-safety, it is important to note that this is still an unsafe
501 /// operation because the returned value could be pointing to invalid
504 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
505 /// not necessarily reflect the actual lifetime of the data.
512 /// let ptr: *const u8 = &10u8 as *const u8;
515 /// if let Some(val_back) = ptr.as_ref() {
516 /// println!("We got back the value: {}!", val_back);
520 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
522 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized {
530 /// Calculates the offset from a pointer.
532 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
533 /// offset of `3 * size_of::<T>()` bytes.
537 /// If any of the following conditions are violated, the result is Undefined
540 /// * Both the starting and resulting pointer must be either in bounds or one
541 /// byte past the end of an allocated object.
543 /// * The computed offset, **in bytes**, cannot overflow or underflow an
546 /// * The offset being in bounds cannot rely on "wrapping around" the address
547 /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
549 /// The compiler and standard library generally tries to ensure allocations
550 /// never reach a size where an offset is a concern. For instance, `Vec`
551 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
552 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
554 /// Most platforms fundamentally can't even construct such an allocation.
555 /// For instance, no known 64-bit platform can ever serve a request
556 /// for 2^63 bytes due to page-table limitations or splitting the address space.
557 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
558 /// more than `isize::MAX` bytes with things like Physical Address
559 /// Extension. As such, memory acquired directly from allocators or memory
560 /// mapped files *may* be too large to handle with this function.
562 /// Consider using `wrapping_offset` instead if these constraints are
563 /// difficult to satisfy. The only advantage of this method is that it
564 /// enables more aggressive compiler optimizations.
571 /// let s: &str = "123";
572 /// let ptr: *const u8 = s.as_ptr();
575 /// println!("{}", *ptr.offset(1) as char);
576 /// println!("{}", *ptr.offset(2) as char);
579 #[stable(feature = "rust1", since = "1.0.0")]
581 pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
582 intrinsics::offset(self, count)
585 /// Calculates the offset from a pointer using wrapping arithmetic.
587 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
588 /// offset of `3 * size_of::<T>()` bytes.
592 /// The resulting pointer does not need to be in bounds, but it is
593 /// potentially hazardous to dereference (which requires `unsafe`).
595 /// Always use `.offset(count)` instead when possible, because `offset`
596 /// allows the compiler to optimize better.
603 /// // Iterate using a raw pointer in increments of two elements
604 /// let data = [1u8, 2, 3, 4, 5];
605 /// let mut ptr: *const u8 = data.as_ptr();
607 /// let end_rounded_up = ptr.wrapping_offset(6);
609 /// // This loop prints "1, 3, 5, "
610 /// while ptr != end_rounded_up {
612 /// print!("{}, ", *ptr);
614 /// ptr = ptr.wrapping_offset(step);
617 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
619 pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
621 intrinsics::arith_offset(self, count)
625 /// Calculates the distance between two pointers. The returned value is in
626 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
628 /// If the address different between the two pointers ia not a multiple of
629 /// `mem::size_of::<T>()` then the result of the division is rounded towards
632 /// This function returns `None` if `T` is a zero-sized typed.
639 /// #![feature(offset_to)]
643 /// let ptr1: *const i32 = &a[1];
644 /// let ptr2: *const i32 = &a[3];
645 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
646 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
647 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
648 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
651 #[unstable(feature = "offset_to", issue = "41079")]
653 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
654 let size = mem::size_of::<T>();
658 let diff = (other as isize).wrapping_sub(self as isize);
659 Some(diff / size as isize)
663 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
665 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
666 /// offset of `3 * size_of::<T>()` bytes.
670 /// If any of the following conditions are violated, the result is Undefined
673 /// * Both the starting and resulting pointer must be either in bounds or one
674 /// byte past the end of an allocated object.
676 /// * The computed offset, **in bytes**, cannot overflow or underflow an
679 /// * The offset being in bounds cannot rely on "wrapping around" the address
680 /// space. That is, the infinite-precision sum must fit in a `usize`.
682 /// The compiler and standard library generally tries to ensure allocations
683 /// never reach a size where an offset is a concern. For instance, `Vec`
684 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
685 /// `vec.as_ptr().add(vec.len())` is always safe.
687 /// Most platforms fundamentally can't even construct such an allocation.
688 /// For instance, no known 64-bit platform can ever serve a request
689 /// for 2^63 bytes due to page-table limitations or splitting the address space.
690 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
691 /// more than `isize::MAX` bytes with things like Physical Address
692 /// Extension. As such, memory acquired directly from allocators or memory
693 /// mapped files *may* be too large to handle with this function.
695 /// Consider using `wrapping_offset` instead if these constraints are
696 /// difficult to satisfy. The only advantage of this method is that it
697 /// enables more aggressive compiler optimizations.
704 /// #![feature(pointer_methods)]
706 /// let s: &str = "123";
707 /// let ptr: *const u8 = s.as_ptr();
710 /// println!("{}", *ptr.add(1) as char);
711 /// println!("{}", *ptr.add(2) as char);
714 #[unstable(feature = "pointer_methods", issue = "43941")]
716 pub unsafe fn add(self, count: usize) -> Self
719 self.offset(count as isize)
722 /// Calculates the offset from a pointer (convenience for
723 /// `.offset((count as isize).wrapping_neg())`).
725 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
726 /// offset of `3 * size_of::<T>()` bytes.
730 /// If any of the following conditions are violated, the result is Undefined
733 /// * Both the starting and resulting pointer must be either in bounds or one
734 /// byte past the end of an allocated object.
736 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
738 /// * The offset being in bounds cannot rely on "wrapping around" the address
739 /// space. That is, the infinite-precision sum must fit in a usize.
741 /// The compiler and standard library generally tries to ensure allocations
742 /// never reach a size where an offset is a concern. For instance, `Vec`
743 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
744 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
746 /// Most platforms fundamentally can't even construct such an allocation.
747 /// For instance, no known 64-bit platform can ever serve a request
748 /// for 2^63 bytes due to page-table limitations or splitting the address space.
749 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
750 /// more than `isize::MAX` bytes with things like Physical Address
751 /// Extension. As such, memory acquired directly from allocators or memory
752 /// mapped files *may* be too large to handle with this function.
754 /// Consider using `wrapping_offset` instead if these constraints are
755 /// difficult to satisfy. The only advantage of this method is that it
756 /// enables more aggressive compiler optimizations.
763 /// #![feature(pointer_methods)]
765 /// let s: &str = "123";
768 /// let end: *const u8 = s.as_ptr().add(3);
769 /// println!("{}", *end.sub(1) as char);
770 /// println!("{}", *end.sub(2) as char);
773 #[unstable(feature = "pointer_methods", issue = "43941")]
775 pub unsafe fn sub(self, count: usize) -> Self
778 self.offset((count as isize).wrapping_neg())
781 /// Calculates the offset from a pointer using wrapping arithmetic.
782 /// (convenience for `.wrapping_offset(count as isize)`)
784 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
785 /// offset of `3 * size_of::<T>()` bytes.
789 /// The resulting pointer does not need to be in bounds, but it is
790 /// potentially hazardous to dereference (which requires `unsafe`).
792 /// Always use `.add(count)` instead when possible, because `add`
793 /// allows the compiler to optimize better.
800 /// #![feature(pointer_methods)]
802 /// // Iterate using a raw pointer in increments of two elements
803 /// let data = [1u8, 2, 3, 4, 5];
804 /// let mut ptr: *const u8 = data.as_ptr();
806 /// let end_rounded_up = ptr.wrapping_add(6);
808 /// // This loop prints "1, 3, 5, "
809 /// while ptr != end_rounded_up {
811 /// print!("{}, ", *ptr);
813 /// ptr = ptr.wrapping_add(step);
816 #[unstable(feature = "pointer_methods", issue = "43941")]
818 pub fn wrapping_add(self, count: usize) -> Self
821 self.wrapping_offset(count as isize)
824 /// Calculates the offset from a pointer using wrapping arithmetic.
825 /// (convenience for `.wrapping_offset((count as isize).wrapping_sub())`)
827 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
828 /// offset of `3 * size_of::<T>()` bytes.
832 /// The resulting pointer does not need to be in bounds, but it is
833 /// potentially hazardous to dereference (which requires `unsafe`).
835 /// Always use `.sub(count)` instead when possible, because `sub`
836 /// allows the compiler to optimize better.
843 /// #![feature(pointer_methods)]
845 /// // Iterate using a raw pointer in increments of two elements (backwards)
846 /// let data = [1u8, 2, 3, 4, 5];
847 /// let mut ptr: *const u8 = data.as_ptr();
848 /// let start_rounded_down = ptr.wrapping_sub(2);
849 /// ptr = ptr.wrapping_add(4);
851 /// // This loop prints "5, 3, 1, "
852 /// while ptr != start_rounded_down {
854 /// print!("{}, ", *ptr);
856 /// ptr = ptr.wrapping_sub(step);
859 #[unstable(feature = "pointer_methods", issue = "43941")]
861 pub fn wrapping_sub(self, count: usize) -> Self
864 self.wrapping_offset((count as isize).wrapping_neg())
867 /// Reads the value from `self` without moving it. This leaves the
868 /// memory in `self` unchanged.
872 /// Beyond accepting a raw pointer, this is unsafe because it semantically
873 /// moves the value out of `self` without preventing further usage of `self`.
874 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
875 /// `self` is not used before the data is overwritten again (e.g. with `write`,
876 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
877 /// because it will attempt to drop the value previously at `*self`.
879 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
886 /// #![feature(pointer_methods)]
889 /// let y = &x as *const i32;
892 /// assert_eq!(y.read(), 12);
895 #[unstable(feature = "pointer_methods", issue = "43941")]
897 pub unsafe fn read(self) -> T
903 /// Performs a volatile read of the value from `self` without moving it. This
904 /// leaves the memory in `self` unchanged.
906 /// Volatile operations are intended to act on I/O memory, and are guaranteed
907 /// to not be elided or reordered by the compiler across other volatile
912 /// Rust does not currently have a rigorously and formally defined memory model,
913 /// so the precise semantics of what "volatile" means here is subject to change
914 /// over time. That being said, the semantics will almost always end up pretty
915 /// similar to [C11's definition of volatile][c11].
917 /// The compiler shouldn't change the relative order or number of volatile
918 /// memory operations. However, volatile memory operations on zero-sized types
919 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
920 /// and may be ignored.
922 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
926 /// Beyond accepting a raw pointer, this is unsafe because it semantically
927 /// moves the value out of `self` without preventing further usage of `self`.
928 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
929 /// `self` is not used before the data is overwritten again (e.g. with `write`,
930 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
931 /// because it will attempt to drop the value previously at `*self`.
938 /// #![feature(pointer_methods)]
941 /// let y = &x as *const i32;
944 /// assert_eq!(y.read_volatile(), 12);
947 #[unstable(feature = "pointer_methods", issue = "43941")]
949 pub unsafe fn read_volatile(self) -> T
955 /// Reads the value from `self` without moving it. This leaves the
956 /// memory in `self` unchanged.
958 /// Unlike `read`, the pointer may be unaligned.
962 /// Beyond accepting a raw pointer, this is unsafe because it semantically
963 /// moves the value out of `self` without preventing further usage of `self`.
964 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
965 /// `self` is not used before the data is overwritten again (e.g. with `write`,
966 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
967 /// because it will attempt to drop the value previously at `*self`.
974 /// #![feature(pointer_methods)]
977 /// let y = &x as *const i32;
980 /// assert_eq!(y.read_unaligned(), 12);
983 #[unstable(feature = "pointer_methods", issue = "43941")]
985 pub unsafe fn read_unaligned(self) -> T
991 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
992 /// and destination may overlap.
994 /// NOTE: this has the *same* argument order as `ptr::copy`.
996 /// This is semantically equivalent to C's `memmove`.
1000 /// Care must be taken with the ownership of `self` and `dest`.
1001 /// This method semantically moves the values of `self` into `dest`.
1002 /// However it does not drop the contents of `self`, or prevent the contents
1003 /// of `dest` from being dropped or used.
1007 /// Efficiently create a Rust vector from an unsafe buffer:
1010 /// #![feature(pointer_methods)]
1012 /// # #[allow(dead_code)]
1013 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1014 /// let mut dst = Vec::with_capacity(elts);
1015 /// dst.set_len(elts);
1016 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1020 #[unstable(feature = "pointer_methods", issue = "43941")]
1022 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1025 copy(self, dest, count)
1028 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1029 /// and destination may *not* overlap.
1031 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1033 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1037 /// Beyond requiring that the program must be allowed to access both regions
1038 /// of memory, it is Undefined Behavior for source and destination to
1039 /// overlap. Care must also be taken with the ownership of `self` and
1040 /// `self`. This method semantically moves the values of `self` into `dest`.
1041 /// However it does not drop the contents of `dest`, or prevent the contents
1042 /// of `self` from being dropped or used.
1046 /// Efficiently create a Rust vector from an unsafe buffer:
1049 /// #![feature(pointer_methods)]
1051 /// # #[allow(dead_code)]
1052 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1053 /// let mut dst = Vec::with_capacity(elts);
1054 /// dst.set_len(elts);
1055 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1059 #[unstable(feature = "pointer_methods", issue = "43941")]
1061 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1064 copy_nonoverlapping(self, dest, count)
1071 impl<T: ?Sized> *mut T {
1072 /// Returns `true` if the pointer is null.
1079 /// let mut s = [1, 2, 3];
1080 /// let ptr: *mut u32 = s.as_mut_ptr();
1081 /// assert!(!ptr.is_null());
1083 #[stable(feature = "rust1", since = "1.0.0")]
1085 pub fn is_null(self) -> bool where T: Sized {
1089 /// Returns `None` if the pointer is null, or else returns a reference to
1090 /// the value wrapped in `Some`.
1094 /// While this method and its mutable counterpart are useful for
1095 /// null-safety, it is important to note that this is still an unsafe
1096 /// operation because the returned value could be pointing to invalid
1099 /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
1100 /// not necessarily reflect the actual lifetime of the data.
1107 /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
1110 /// if let Some(val_back) = ptr.as_ref() {
1111 /// println!("We got back the value: {}!", val_back);
1115 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1117 pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized {
1125 /// Calculates the offset from a pointer.
1127 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1128 /// offset of `3 * size_of::<T>()` bytes.
1132 /// If any of the following conditions are violated, the result is Undefined
1135 /// * Both the starting and resulting pointer must be either in bounds or one
1136 /// byte past the end of an allocated object.
1138 /// * The computed offset, **in bytes**, cannot overflow or underflow an
1141 /// * The offset being in bounds cannot rely on "wrapping around" the address
1142 /// space. That is, the infinite-precision sum, **in bytes**, must fit in a usize.
1144 /// The compiler and standard library generally tries to ensure allocations
1145 /// never reach a size where an offset is a concern. For instance, `Vec`
1146 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1147 /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
1149 /// Most platforms fundamentally can't even construct such an allocation.
1150 /// For instance, no known 64-bit platform can ever serve a request
1151 /// for 2^63 bytes due to page-table limitations or splitting the address space.
1152 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1153 /// more than `isize::MAX` bytes with things like Physical Address
1154 /// Extension. As such, memory acquired directly from allocators or memory
1155 /// mapped files *may* be too large to handle with this function.
1157 /// Consider using `wrapping_offset` instead if these constraints are
1158 /// difficult to satisfy. The only advantage of this method is that it
1159 /// enables more aggressive compiler optimizations.
1166 /// let mut s = [1, 2, 3];
1167 /// let ptr: *mut u32 = s.as_mut_ptr();
1170 /// println!("{}", *ptr.offset(1));
1171 /// println!("{}", *ptr.offset(2));
1174 #[stable(feature = "rust1", since = "1.0.0")]
1176 pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
1177 intrinsics::offset(self, count) as *mut T
1180 /// Calculates the offset from a pointer using wrapping arithmetic.
1181 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1182 /// offset of `3 * size_of::<T>()` bytes.
1186 /// The resulting pointer does not need to be in bounds, but it is
1187 /// potentially hazardous to dereference (which requires `unsafe`).
1189 /// Always use `.offset(count)` instead when possible, because `offset`
1190 /// allows the compiler to optimize better.
1197 /// // Iterate using a raw pointer in increments of two elements
1198 /// let mut data = [1u8, 2, 3, 4, 5];
1199 /// let mut ptr: *mut u8 = data.as_mut_ptr();
1201 /// let end_rounded_up = ptr.wrapping_offset(6);
1203 /// while ptr != end_rounded_up {
1207 /// ptr = ptr.wrapping_offset(step);
1209 /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
1211 #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
1213 pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized {
1215 intrinsics::arith_offset(self, count) as *mut T
1219 /// Returns `None` if the pointer is null, or else returns a mutable
1220 /// reference to the value wrapped in `Some`.
1224 /// As with `as_ref`, this is unsafe because it cannot verify the validity
1225 /// of the returned pointer, nor can it ensure that the lifetime `'a`
1226 /// returned is indeed a valid lifetime for the contained data.
1233 /// let mut s = [1, 2, 3];
1234 /// let ptr: *mut u32 = s.as_mut_ptr();
1235 /// let first_value = unsafe { ptr.as_mut().unwrap() };
1236 /// *first_value = 4;
1237 /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
1239 #[stable(feature = "ptr_as_ref", since = "1.9.0")]
1241 pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> where T: Sized {
1249 /// Calculates the distance between two pointers. The returned value is in
1250 /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
1252 /// If the address difference between the two pointers is not a multiple of
1253 /// `mem::size_of::<T>()` then the result of the division is rounded towards
1256 /// This function returns `None` if `T` is a zero-sized type.
1263 /// #![feature(offset_to)]
1266 /// let mut a = [0; 5];
1267 /// let ptr1: *mut i32 = &mut a[1];
1268 /// let ptr2: *mut i32 = &mut a[3];
1269 /// assert_eq!(ptr1.offset_to(ptr2), Some(2));
1270 /// assert_eq!(ptr2.offset_to(ptr1), Some(-2));
1271 /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2);
1272 /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1);
1275 #[unstable(feature = "offset_to", issue = "41079")]
1277 pub fn offset_to(self, other: *const T) -> Option<isize> where T: Sized {
1278 let size = mem::size_of::<T>();
1282 let diff = (other as isize).wrapping_sub(self as isize);
1283 Some(diff / size as isize)
1288 /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
1290 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1291 /// offset of `3 * size_of::<T>()` bytes.
1295 /// If any of the following conditions are violated, the result is Undefined
1298 /// * Both the starting and resulting pointer must be either in bounds or one
1299 /// byte past the end of an allocated object.
1301 /// * The computed offset, **in bytes**, cannot overflow or underflow an
1304 /// * The offset being in bounds cannot rely on "wrapping around" the address
1305 /// space. That is, the infinite-precision sum must fit in a `usize`.
1307 /// The compiler and standard library generally tries to ensure allocations
1308 /// never reach a size where an offset is a concern. For instance, `Vec`
1309 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1310 /// `vec.as_ptr().add(vec.len())` is always safe.
1312 /// Most platforms fundamentally can't even construct such an allocation.
1313 /// For instance, no known 64-bit platform can ever serve a request
1314 /// for 2^63 bytes due to page-table limitations or splitting the address space.
1315 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1316 /// more than `isize::MAX` bytes with things like Physical Address
1317 /// Extension. As such, memory acquired directly from allocators or memory
1318 /// mapped files *may* be too large to handle with this function.
1320 /// Consider using `wrapping_offset` instead if these constraints are
1321 /// difficult to satisfy. The only advantage of this method is that it
1322 /// enables more aggressive compiler optimizations.
1329 /// #![feature(pointer_methods)]
1331 /// let s: &str = "123";
1332 /// let ptr: *const u8 = s.as_ptr();
1335 /// println!("{}", *ptr.add(1) as char);
1336 /// println!("{}", *ptr.add(2) as char);
1339 #[unstable(feature = "pointer_methods", issue = "43941")]
1341 pub unsafe fn add(self, count: usize) -> Self
1344 self.offset(count as isize)
1347 /// Calculates the offset from a pointer (convenience for
1348 /// `.offset((count as isize).wrapping_neg())`).
1350 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1351 /// offset of `3 * size_of::<T>()` bytes.
1355 /// If any of the following conditions are violated, the result is Undefined
1358 /// * Both the starting and resulting pointer must be either in bounds or one
1359 /// byte past the end of an allocated object.
1361 /// * The computed offset cannot exceed `isize::MAX` **bytes**.
1363 /// * The offset being in bounds cannot rely on "wrapping around" the address
1364 /// space. That is, the infinite-precision sum must fit in a usize.
1366 /// The compiler and standard library generally tries to ensure allocations
1367 /// never reach a size where an offset is a concern. For instance, `Vec`
1368 /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
1369 /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
1371 /// Most platforms fundamentally can't even construct such an allocation.
1372 /// For instance, no known 64-bit platform can ever serve a request
1373 /// for 2^63 bytes due to page-table limitations or splitting the address space.
1374 /// However, some 32-bit and 16-bit platforms may successfully serve a request for
1375 /// more than `isize::MAX` bytes with things like Physical Address
1376 /// Extension. As such, memory acquired directly from allocators or memory
1377 /// mapped files *may* be too large to handle with this function.
1379 /// Consider using `wrapping_offset` instead if these constraints are
1380 /// difficult to satisfy. The only advantage of this method is that it
1381 /// enables more aggressive compiler optimizations.
1388 /// #![feature(pointer_methods)]
1390 /// let s: &str = "123";
1393 /// let end: *const u8 = s.as_ptr().add(3);
1394 /// println!("{}", *end.sub(1) as char);
1395 /// println!("{}", *end.sub(2) as char);
1398 #[unstable(feature = "pointer_methods", issue = "43941")]
1400 pub unsafe fn sub(self, count: usize) -> Self
1403 self.offset((count as isize).wrapping_neg())
1406 /// Calculates the offset from a pointer using wrapping arithmetic.
1407 /// (convenience for `.wrapping_offset(count as isize)`)
1409 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1410 /// offset of `3 * size_of::<T>()` bytes.
1414 /// The resulting pointer does not need to be in bounds, but it is
1415 /// potentially hazardous to dereference (which requires `unsafe`).
1417 /// Always use `.add(count)` instead when possible, because `add`
1418 /// allows the compiler to optimize better.
1425 /// #![feature(pointer_methods)]
1427 /// // Iterate using a raw pointer in increments of two elements
1428 /// let data = [1u8, 2, 3, 4, 5];
1429 /// let mut ptr: *const u8 = data.as_ptr();
1431 /// let end_rounded_up = ptr.wrapping_add(6);
1433 /// // This loop prints "1, 3, 5, "
1434 /// while ptr != end_rounded_up {
1436 /// print!("{}, ", *ptr);
1438 /// ptr = ptr.wrapping_add(step);
1441 #[unstable(feature = "pointer_methods", issue = "43941")]
1443 pub fn wrapping_add(self, count: usize) -> Self
1446 self.wrapping_offset(count as isize)
1449 /// Calculates the offset from a pointer using wrapping arithmetic.
1450 /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
1452 /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
1453 /// offset of `3 * size_of::<T>()` bytes.
1457 /// The resulting pointer does not need to be in bounds, but it is
1458 /// potentially hazardous to dereference (which requires `unsafe`).
1460 /// Always use `.sub(count)` instead when possible, because `sub`
1461 /// allows the compiler to optimize better.
1468 /// #![feature(pointer_methods)]
1470 /// // Iterate using a raw pointer in increments of two elements (backwards)
1471 /// let data = [1u8, 2, 3, 4, 5];
1472 /// let mut ptr: *const u8 = data.as_ptr();
1473 /// let start_rounded_down = ptr.wrapping_sub(2);
1474 /// ptr = ptr.wrapping_add(4);
1476 /// // This loop prints "5, 3, 1, "
1477 /// while ptr != start_rounded_down {
1479 /// print!("{}, ", *ptr);
1481 /// ptr = ptr.wrapping_sub(step);
1484 #[unstable(feature = "pointer_methods", issue = "43941")]
1486 pub fn wrapping_sub(self, count: usize) -> Self
1489 self.wrapping_offset((count as isize).wrapping_neg())
1492 /// Reads the value from `self` without moving it. This leaves the
1493 /// memory in `self` unchanged.
1497 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1498 /// moves the value out of `self` without preventing further usage of `self`.
1499 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1500 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1501 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
1502 /// because it will attempt to drop the value previously at `*self`.
1504 /// The pointer must be aligned; use `read_unaligned` if that is not the case.
1511 /// #![feature(pointer_methods)]
1514 /// let y = &x as *const i32;
1517 /// assert_eq!(y.read(), 12);
1520 #[unstable(feature = "pointer_methods", issue = "43941")]
1522 pub unsafe fn read(self) -> T
1528 /// Performs a volatile read of the value from `self` without moving it. This
1529 /// leaves the memory in `self` unchanged.
1531 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1532 /// to not be elided or reordered by the compiler across other volatile
1537 /// Rust does not currently have a rigorously and formally defined memory model,
1538 /// so the precise semantics of what "volatile" means here is subject to change
1539 /// over time. That being said, the semantics will almost always end up pretty
1540 /// similar to [C11's definition of volatile][c11].
1542 /// The compiler shouldn't change the relative order or number of volatile
1543 /// memory operations. However, volatile memory operations on zero-sized types
1544 /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
1545 /// and may be ignored.
1547 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1551 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1552 /// moves the value out of `self` without preventing further usage of `self`.
1553 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1554 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1555 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
1556 /// because it will attempt to drop the value previously at `*self`.
1563 /// #![feature(pointer_methods)]
1566 /// let y = &x as *const i32;
1569 /// assert_eq!(y.read_volatile(), 12);
1572 #[unstable(feature = "pointer_methods", issue = "43941")]
1574 pub unsafe fn read_volatile(self) -> T
1580 /// Reads the value from `self` without moving it. This leaves the
1581 /// memory in `self` unchanged.
1583 /// Unlike `read`, the pointer may be unaligned.
1587 /// Beyond accepting a raw pointer, this is unsafe because it semantically
1588 /// moves the value out of `self` without preventing further usage of `self`.
1589 /// If `T` is not `Copy`, then care must be taken to ensure that the value at
1590 /// `self` is not used before the data is overwritten again (e.g. with `write`,
1591 /// `zero_memory`, or `copy_memory`). Note that `*self = foo` counts as a use
1592 /// because it will attempt to drop the value previously at `*self`.
1599 /// #![feature(pointer_methods)]
1602 /// let y = &x as *const i32;
1605 /// assert_eq!(y.read_unaligned(), 12);
1608 #[unstable(feature = "pointer_methods", issue = "43941")]
1610 pub unsafe fn read_unaligned(self) -> T
1613 read_unaligned(self)
1616 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1617 /// and destination may overlap.
1619 /// NOTE: this has the *same* argument order as `ptr::copy`.
1621 /// This is semantically equivalent to C's `memmove`.
1625 /// Care must be taken with the ownership of `self` and `dest`.
1626 /// This method semantically moves the values of `self` into `dest`.
1627 /// However it does not drop the contents of `self`, or prevent the contents
1628 /// of `dest` from being dropped or used.
1632 /// Efficiently create a Rust vector from an unsafe buffer:
1635 /// #![feature(pointer_methods)]
1637 /// # #[allow(dead_code)]
1638 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1639 /// let mut dst = Vec::with_capacity(elts);
1640 /// dst.set_len(elts);
1641 /// ptr.copy_to(dst.as_mut_ptr(), elts);
1645 #[unstable(feature = "pointer_methods", issue = "43941")]
1647 pub unsafe fn copy_to(self, dest: *mut T, count: usize)
1650 copy(self, dest, count)
1653 /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
1654 /// and destination may *not* overlap.
1656 /// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
1658 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1662 /// Beyond requiring that the program must be allowed to access both regions
1663 /// of memory, it is Undefined Behavior for source and destination to
1664 /// overlap. Care must also be taken with the ownership of `self` and
1665 /// `dest`. This method semantically moves the values of `self` into `dest`.
1666 /// However it does not drop the contents of `dest`, or prevent the contents
1667 /// of `self` from being dropped or used.
1671 /// Efficiently create a Rust vector from an unsafe buffer:
1674 /// #![feature(pointer_methods)]
1676 /// # #[allow(dead_code)]
1677 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1678 /// let mut dst = Vec::with_capacity(elts);
1679 /// dst.set_len(elts);
1680 /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
1684 #[unstable(feature = "pointer_methods", issue = "43941")]
1686 pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
1689 copy_nonoverlapping(self, dest, count)
1692 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1693 /// and destination may overlap.
1695 /// NOTE: this has the *opposite* argument order of `ptr::copy`.
1697 /// This is semantically equivalent to C's `memmove`.
1701 /// Care must be taken with the ownership of `src` and `self`.
1702 /// This method semantically moves the values of `src` into `self`.
1703 /// However it does not drop the contents of `self`, or prevent the contents
1704 /// of `src` from being dropped or used.
1708 /// Efficiently create a Rust vector from an unsafe buffer:
1711 /// #![feature(pointer_methods)]
1713 /// # #[allow(dead_code)]
1714 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1715 /// let mut dst = Vec::with_capacity(elts);
1716 /// dst.set_len(elts);
1717 /// dst.as_mut_ptr().copy_from(ptr, elts);
1721 #[unstable(feature = "pointer_methods", issue = "43941")]
1723 pub unsafe fn copy_from(self, src: *const T, count: usize)
1726 copy(src, self, count)
1729 /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
1730 /// and destination may *not* overlap.
1732 /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`.
1734 /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
1738 /// Beyond requiring that the program must be allowed to access both regions
1739 /// of memory, it is Undefined Behavior for source and destination to
1740 /// overlap. Care must also be taken with the ownership of `src` and
1741 /// `self`. This method semantically moves the values of `src` into `self`.
1742 /// However it does not drop the contents of `self`, or prevent the contents
1743 /// of `src` from being dropped or used.
1747 /// Efficiently create a Rust vector from an unsafe buffer:
1750 /// #![feature(pointer_methods)]
1752 /// # #[allow(dead_code)]
1753 /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
1754 /// let mut dst = Vec::with_capacity(elts);
1755 /// dst.set_len(elts);
1756 /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts);
1760 #[unstable(feature = "pointer_methods", issue = "43941")]
1762 pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
1765 copy_nonoverlapping(src, self, count)
1768 /// Executes the destructor (if any) of the pointed-to value.
1770 /// This has two use cases:
1772 /// * It is *required* to use `drop_in_place` to drop unsized types like
1773 /// trait objects, because they can't be read out onto the stack and
1774 /// dropped normally.
1776 /// * It is friendlier to the optimizer to do this over `ptr::read` when
1777 /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
1778 /// as the compiler doesn't need to prove that it's sound to elide the
1783 /// This has all the same safety problems as `ptr::read` with respect to
1784 /// invalid pointers, types, and double drops.
1785 #[unstable(feature = "pointer_methods", issue = "43941")]
1787 pub unsafe fn drop_in_place(self) {
1791 /// Overwrites a memory location with the given value without reading or
1792 /// dropping the old value.
1796 /// This operation is marked unsafe because it writes through a raw pointer.
1798 /// It does not drop the contents of `self`. This is safe, but it could leak
1799 /// allocations or resources, so care must be taken not to overwrite an object
1800 /// that should be dropped.
1802 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
1803 /// location pointed to by `self`.
1805 /// This is appropriate for initializing uninitialized memory, or overwriting
1806 /// memory that has previously been `read` from.
1808 /// The pointer must be aligned; use `write_unaligned` if that is not the case.
1815 /// #![feature(pointer_methods)]
1818 /// let y = &mut x as *mut i32;
1823 /// assert_eq!(y.read(), 12);
1826 #[unstable(feature = "pointer_methods", issue = "43941")]
1828 pub unsafe fn write(self, val: T)
1834 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
1835 /// bytes of memory starting at `self` to `val`.
1840 /// #![feature(pointer_methods)]
1842 /// let mut vec = vec![0; 4];
1844 /// let vec_ptr = vec.as_mut_ptr();
1845 /// vec_ptr.write_bytes(b'a', 2);
1847 /// assert_eq!(vec, [b'a', b'a', 0, 0]);
1849 #[unstable(feature = "pointer_methods", issue = "43941")]
1851 pub unsafe fn write_bytes(self, val: u8, count: usize)
1854 write_bytes(self, val, count)
1857 /// Performs a volatile write of a memory location with the given value without
1858 /// reading or dropping the old value.
1860 /// Volatile operations are intended to act on I/O memory, and are guaranteed
1861 /// to not be elided or reordered by the compiler across other volatile
1866 /// Rust does not currently have a rigorously and formally defined memory model,
1867 /// so the precise semantics of what "volatile" means here is subject to change
1868 /// over time. That being said, the semantics will almost always end up pretty
1869 /// similar to [C11's definition of volatile][c11].
1871 /// The compiler shouldn't change the relative order or number of volatile
1872 /// memory operations. However, volatile memory operations on zero-sized types
1873 /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
1874 /// and may be ignored.
1876 /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
1880 /// This operation is marked unsafe because it accepts a raw pointer.
1882 /// It does not drop the contents of `self`. This is safe, but it could leak
1883 /// allocations or resources, so care must be taken not to overwrite an object
1884 /// that should be dropped.
1886 /// This is appropriate for initializing uninitialized memory, or overwriting
1887 /// memory that has previously been `read` from.
1894 /// #![feature(pointer_methods)]
1897 /// let y = &mut x as *mut i32;
1901 /// y.write_volatile(z);
1902 /// assert_eq!(y.read_volatile(), 12);
1905 #[unstable(feature = "pointer_methods", issue = "43941")]
1907 pub unsafe fn write_volatile(self, val: T)
1910 write_volatile(self, val)
1913 /// Overwrites a memory location with the given value without reading or
1914 /// dropping the old value.
1916 /// Unlike `write`, the pointer may be unaligned.
1920 /// This operation is marked unsafe because it writes through a raw pointer.
1922 /// It does not drop the contents of `self`. This is safe, but it could leak
1923 /// allocations or resources, so care must be taken not to overwrite an object
1924 /// that should be dropped.
1926 /// Additionally, it does not drop `val`. Semantically, `val` is moved into the
1927 /// location pointed to by `self`.
1929 /// This is appropriate for initializing uninitialized memory, or overwriting
1930 /// memory that has previously been `read` from.
1937 /// #![feature(pointer_methods)]
1940 /// let y = &mut x as *mut i32;
1944 /// y.write_unaligned(z);
1945 /// assert_eq!(y.read_unaligned(), 12);
1948 #[unstable(feature = "pointer_methods", issue = "43941")]
1950 pub unsafe fn write_unaligned(self, val: T)
1953 write_unaligned(self, val)
1956 /// Replaces the value at `self` with `src`, returning the old
1957 /// value, without dropping either.
1961 /// This is only unsafe because it accepts a raw pointer.
1962 /// Otherwise, this operation is identical to `mem::replace`.
1963 #[unstable(feature = "pointer_methods", issue = "43941")]
1965 pub unsafe fn replace(self, src: T) -> T
1971 /// Swaps the values at two mutable locations of the same type, without
1972 /// deinitializing either. They may overlap, unlike `mem::swap` which is
1973 /// otherwise equivalent.
1977 /// This function copies the memory through the raw pointers passed to it
1980 /// Ensure that these pointers are valid before calling `swap`.
1981 #[unstable(feature = "pointer_methods", issue = "43941")]
1983 pub unsafe fn swap(self, with: *mut T)
1990 // Equality for pointers
1991 #[stable(feature = "rust1", since = "1.0.0")]
1992 impl<T: ?Sized> PartialEq for *const T {
1994 fn eq(&self, other: &*const T) -> bool { *self == *other }
1997 #[stable(feature = "rust1", since = "1.0.0")]
1998 impl<T: ?Sized> Eq for *const T {}
2000 #[stable(feature = "rust1", since = "1.0.0")]
2001 impl<T: ?Sized> PartialEq for *mut T {
2003 fn eq(&self, other: &*mut T) -> bool { *self == *other }
2006 #[stable(feature = "rust1", since = "1.0.0")]
2007 impl<T: ?Sized> Eq for *mut T {}
2009 /// Compare raw pointers for equality.
2011 /// This is the same as using the `==` operator, but less generic:
2012 /// the arguments have to be `*const T` raw pointers,
2013 /// not anything that implements `PartialEq`.
2015 /// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
2016 /// by their address rather than comparing the values they point to
2017 /// (which is what the `PartialEq for &T` implementation does).
2025 /// let other_five = 5;
2026 /// let five_ref = &five;
2027 /// let same_five_ref = &five;
2028 /// let other_five_ref = &other_five;
2030 /// assert!(five_ref == same_five_ref);
2031 /// assert!(five_ref == other_five_ref);
2033 /// assert!(ptr::eq(five_ref, same_five_ref));
2034 /// assert!(!ptr::eq(five_ref, other_five_ref));
2036 #[stable(feature = "ptr_eq", since = "1.17.0")]
2038 pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
2042 // Impls for function pointers
2043 macro_rules! fnptr_impls_safety_abi {
2044 ($FnTy: ty, $($Arg: ident),*) => {
2045 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2046 impl<Ret, $($Arg),*> PartialEq for $FnTy {
2048 fn eq(&self, other: &Self) -> bool {
2049 *self as usize == *other as usize
2053 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2054 impl<Ret, $($Arg),*> Eq for $FnTy {}
2056 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2057 impl<Ret, $($Arg),*> PartialOrd for $FnTy {
2059 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2060 (*self as usize).partial_cmp(&(*other as usize))
2064 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2065 impl<Ret, $($Arg),*> Ord for $FnTy {
2067 fn cmp(&self, other: &Self) -> Ordering {
2068 (*self as usize).cmp(&(*other as usize))
2072 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2073 impl<Ret, $($Arg),*> hash::Hash for $FnTy {
2074 fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
2075 state.write_usize(*self as usize)
2079 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2080 impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
2081 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2082 fmt::Pointer::fmt(&(*self as *const ()), f)
2086 #[stable(feature = "fnptr_impls", since = "1.4.0")]
2087 impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
2088 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2089 fmt::Pointer::fmt(&(*self as *const ()), f)
2095 macro_rules! fnptr_impls_args {
2096 ($($Arg: ident),+) => {
2097 fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2098 fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2099 fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2100 fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
2101 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
2102 fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
2105 // No variadic functions with 0 parameters
2106 fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
2107 fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
2108 fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
2109 fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
2113 fnptr_impls_args! { }
2114 fnptr_impls_args! { A }
2115 fnptr_impls_args! { A, B }
2116 fnptr_impls_args! { A, B, C }
2117 fnptr_impls_args! { A, B, C, D }
2118 fnptr_impls_args! { A, B, C, D, E }
2119 fnptr_impls_args! { A, B, C, D, E, F }
2120 fnptr_impls_args! { A, B, C, D, E, F, G }
2121 fnptr_impls_args! { A, B, C, D, E, F, G, H }
2122 fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
2123 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
2124 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
2125 fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
2127 // Comparison for pointers
2128 #[stable(feature = "rust1", since = "1.0.0")]
2129 impl<T: ?Sized> Ord for *const T {
2131 fn cmp(&self, other: &*const T) -> Ordering {
2134 } else if self == other {
2142 #[stable(feature = "rust1", since = "1.0.0")]
2143 impl<T: ?Sized> PartialOrd for *const T {
2145 fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
2146 Some(self.cmp(other))
2150 fn lt(&self, other: &*const T) -> bool { *self < *other }
2153 fn le(&self, other: &*const T) -> bool { *self <= *other }
2156 fn gt(&self, other: &*const T) -> bool { *self > *other }
2159 fn ge(&self, other: &*const T) -> bool { *self >= *other }
2162 #[stable(feature = "rust1", since = "1.0.0")]
2163 impl<T: ?Sized> Ord for *mut T {
2165 fn cmp(&self, other: &*mut T) -> Ordering {
2168 } else if self == other {
2176 #[stable(feature = "rust1", since = "1.0.0")]
2177 impl<T: ?Sized> PartialOrd for *mut T {
2179 fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
2180 Some(self.cmp(other))
2184 fn lt(&self, other: &*mut T) -> bool { *self < *other }
2187 fn le(&self, other: &*mut T) -> bool { *self <= *other }
2190 fn gt(&self, other: &*mut T) -> bool { *self > *other }
2193 fn ge(&self, other: &*mut T) -> bool { *self >= *other }
2196 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
2197 /// of this wrapper owns the referent. Useful for building abstractions like
2198 /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
2200 /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
2201 /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
2202 /// the kind of strong aliasing guarantees an instance of `T` can expect:
2203 /// the referent of the pointer should not be modified without a unique path to
2204 /// its owning Unique.
2206 /// If you're uncertain of whether it's correct to use `Unique` for your purposes,
2207 /// consider using `Shared`, which has weaker semantics.
2209 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2210 /// is never dereferenced. This is so that enums may use this forbidden value
2211 /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
2212 /// However the pointer may still dangle if it isn't dereferenced.
2214 /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
2215 /// for any type which upholds Unique's aliasing requirements.
2216 #[allow(missing_debug_implementations)]
2217 #[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
2219 pub struct Unique<T: ?Sized> {
2220 pointer: NonZero<*const T>,
2221 // NOTE: this marker has no consequences for variance, but is necessary
2222 // for dropck to understand that we logically own a `T`.
2224 // For details, see:
2225 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2226 _marker: PhantomData<T>,
2229 /// `Unique` pointers are `Send` if `T` is `Send` because the data they
2230 /// reference is unaliased. Note that this aliasing invariant is
2231 /// unenforced by the type system; the abstraction using the
2232 /// `Unique` must enforce it.
2233 #[unstable(feature = "unique", issue = "27730")]
2234 unsafe impl<T: Send + ?Sized> Send for Unique<T> { }
2236 /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
2237 /// reference is unaliased. Note that this aliasing invariant is
2238 /// unenforced by the type system; the abstraction using the
2239 /// `Unique` must enforce it.
2240 #[unstable(feature = "unique", issue = "27730")]
2241 unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
2243 #[unstable(feature = "unique", issue = "27730")]
2244 impl<T: Sized> Unique<T> {
2245 /// Creates a new `Unique` that is dangling, but well-aligned.
2247 /// This is useful for initializing types which lazily allocate, like
2248 /// `Vec::new` does.
2249 pub fn empty() -> Self {
2251 let ptr = mem::align_of::<T>() as *mut T;
2252 Unique::new_unchecked(ptr)
2257 #[unstable(feature = "unique", issue = "27730")]
2258 impl<T: ?Sized> Unique<T> {
2259 /// Creates a new `Unique`.
2263 /// `ptr` must be non-null.
2264 #[unstable(feature = "unique", issue = "27730")]
2265 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_unique_new"))]
2266 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2267 Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
2270 /// Creates a new `Unique` if `ptr` is non-null.
2271 pub fn new(ptr: *mut T) -> Option<Self> {
2272 NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData })
2275 /// Acquires the underlying `*mut` pointer.
2276 pub fn as_ptr(self) -> *mut T {
2277 self.pointer.get() as *mut T
2280 /// Dereferences the content.
2282 /// The resulting lifetime is bound to self so this behaves "as if"
2283 /// it were actually an instance of T that is getting borrowed. If a longer
2284 /// (unbound) lifetime is needed, use `&*my_ptr.ptr()`.
2285 pub unsafe fn as_ref(&self) -> &T {
2289 /// Mutably dereferences the content.
2291 /// The resulting lifetime is bound to self so this behaves "as if"
2292 /// it were actually an instance of T that is getting borrowed. If a longer
2293 /// (unbound) lifetime is needed, use `&mut *my_ptr.ptr()`.
2294 pub unsafe fn as_mut(&mut self) -> &mut T {
2299 #[unstable(feature = "unique", issue = "27730")]
2300 impl<T: ?Sized> Clone for Unique<T> {
2301 fn clone(&self) -> Self {
2306 #[unstable(feature = "unique", issue = "27730")]
2307 impl<T: ?Sized> Copy for Unique<T> { }
2309 #[unstable(feature = "unique", issue = "27730")]
2310 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
2312 #[unstable(feature = "unique", issue = "27730")]
2313 impl<T: ?Sized> fmt::Pointer for Unique<T> {
2314 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2315 fmt::Pointer::fmt(&self.as_ptr(), f)
2319 #[unstable(feature = "unique", issue = "27730")]
2320 impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> {
2321 fn from(reference: &'a mut T) -> Self {
2322 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2326 #[unstable(feature = "unique", issue = "27730")]
2327 impl<'a, T: ?Sized> From<&'a T> for Unique<T> {
2328 fn from(reference: &'a T) -> Self {
2329 Unique { pointer: NonZero::from(reference), _marker: PhantomData }
2333 /// A wrapper around a raw `*mut T` that indicates that the possessor
2334 /// of this wrapper has shared ownership of the referent. Useful for
2335 /// building abstractions like `Rc<T>`, `Arc<T>`, or doubly-linked lists, which
2336 /// internally use aliased raw pointers to manage the memory that they own.
2338 /// This is similar to `Unique`, except that it doesn't make any aliasing
2339 /// guarantees, and doesn't derive Send and Sync. Note that unlike `&T`,
2340 /// Shared has no special mutability requirements. Shared may mutate data
2341 /// aliased by other Shared pointers. More precise rules require Rust to
2342 /// develop an actual aliasing model.
2344 /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
2345 /// is never dereferenced. This is so that enums may use this forbidden value
2346 /// as a discriminant -- `Option<Shared<T>>` has the same size as `Shared<T>`.
2347 /// However the pointer may still dangle if it isn't dereferenced.
2349 /// Unlike `*mut T`, `Shared<T>` is covariant over `T`. If this is incorrect
2350 /// for your use case, you should include some PhantomData in your type to
2351 /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
2352 /// Usually this won't be necessary; covariance is correct for Rc, Arc, and LinkedList
2353 /// because they provide a public API that follows the normal shared XOR mutable
2355 #[allow(missing_debug_implementations)]
2356 #[unstable(feature = "shared", reason = "needs an RFC to flesh out design",
2358 pub struct Shared<T: ?Sized> {
2359 pointer: NonZero<*const T>,
2360 // NOTE: this marker has no consequences for variance, but is necessary
2361 // for dropck to understand that we logically own a `T`.
2363 // For details, see:
2364 // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
2365 _marker: PhantomData<T>,
2368 /// `Shared` pointers are not `Send` because the data they reference may be aliased.
2369 // NB: This impl is unnecessary, but should provide better error messages.
2370 #[unstable(feature = "shared", issue = "27730")]
2371 impl<T: ?Sized> !Send for Shared<T> { }
2373 /// `Shared` pointers are not `Sync` because the data they reference may be aliased.
2374 // NB: This impl is unnecessary, but should provide better error messages.
2375 #[unstable(feature = "shared", issue = "27730")]
2376 impl<T: ?Sized> !Sync for Shared<T> { }
2378 #[unstable(feature = "shared", issue = "27730")]
2379 impl<T: Sized> Shared<T> {
2380 /// Creates a new `Shared` that is dangling, but well-aligned.
2382 /// This is useful for initializing types which lazily allocate, like
2383 /// `Vec::new` does.
2384 pub fn empty() -> Self {
2386 let ptr = mem::align_of::<T>() as *mut T;
2387 Shared::new_unchecked(ptr)
2392 #[unstable(feature = "shared", issue = "27730")]
2393 impl<T: ?Sized> Shared<T> {
2394 /// Creates a new `Shared`.
2398 /// `ptr` must be non-null.
2399 #[unstable(feature = "shared", issue = "27730")]
2400 #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_shared_new"))]
2401 pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
2402 Shared { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData }
2405 /// Creates a new `Shared` if `ptr` is non-null.
2406 pub fn new(ptr: *mut T) -> Option<Self> {
2407 NonZero::new(ptr as *const T).map(|nz| Shared { pointer: nz, _marker: PhantomData })
2410 /// Acquires the underlying `*mut` pointer.
2411 pub fn as_ptr(self) -> *mut T {
2412 self.pointer.get() as *mut T
2415 /// Dereferences the content.
2417 /// The resulting lifetime is bound to self so this behaves "as if"
2418 /// it were actually an instance of T that is getting borrowed. If a longer
2419 /// (unbound) lifetime is needed, use `&*my_ptr.ptr()`.
2420 pub unsafe fn as_ref(&self) -> &T {
2424 /// Mutably dereferences the content.
2426 /// The resulting lifetime is bound to self so this behaves "as if"
2427 /// it were actually an instance of T that is getting borrowed. If a longer
2428 /// (unbound) lifetime is needed, use `&mut *my_ptr.ptr_mut()`.
2429 pub unsafe fn as_mut(&mut self) -> &mut T {
2433 /// Acquires the underlying pointer as a `*mut` pointer.
2434 #[rustc_deprecated(since = "1.19", reason = "renamed to `as_ptr` for ergonomics/consistency")]
2435 #[unstable(feature = "shared", issue = "27730")]
2436 pub unsafe fn as_mut_ptr(&self) -> *mut T {
2441 #[unstable(feature = "shared", issue = "27730")]
2442 impl<T: ?Sized> Clone for Shared<T> {
2443 fn clone(&self) -> Self {
2448 #[unstable(feature = "shared", issue = "27730")]
2449 impl<T: ?Sized> Copy for Shared<T> { }
2451 #[unstable(feature = "shared", issue = "27730")]
2452 impl<T: ?Sized, U: ?Sized> CoerceUnsized<Shared<U>> for Shared<T> where T: Unsize<U> { }
2454 #[unstable(feature = "shared", issue = "27730")]
2455 impl<T: ?Sized> fmt::Pointer for Shared<T> {
2456 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2457 fmt::Pointer::fmt(&self.as_ptr(), f)
2461 #[unstable(feature = "shared", issue = "27730")]
2462 impl<T: ?Sized> From<Unique<T>> for Shared<T> {
2463 fn from(unique: Unique<T>) -> Self {
2464 Shared { pointer: unique.pointer, _marker: PhantomData }
2468 #[unstable(feature = "shared", issue = "27730")]
2469 impl<'a, T: ?Sized> From<&'a mut T> for Shared<T> {
2470 fn from(reference: &'a mut T) -> Self {
2471 Shared { pointer: NonZero::from(reference), _marker: PhantomData }
2475 #[unstable(feature = "shared", issue = "27730")]
2476 impl<'a, T: ?Sized> From<&'a T> for Shared<T> {
2477 fn from(reference: &'a T) -> Self {
2478 Shared { pointer: NonZero::from(reference), _marker: PhantomData }