use core::cell::RefCell;
use core::mem::{self, MaybeUninit};
use core::num::NonZeroUsize;
use core::ptr;
use core::ptr::*;
use std::fmt::{Debug, Display};
fn test_const_from_raw_parts() {
    const SLICE: &[u8] = &[1, 2, 3, 4];
    const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
    assert_eq!(SLICE, FROM_RAW);

    let slice = &[1, 2, 3, 4, 5];
    let from_raw = unsafe { &*slice_from_raw_parts(slice.as_ptr(), 2) };
    assert_eq!(&slice[..2], from_raw);
}
        let mut p = Pair { fst: 10, snd: 20 };
        let pptr: *mut Pair = &mut p;
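        // The cast below reads `fst` through an `isize` pointer. This relies on the
        // first field living at offset 0, which the default (unspecified) layout is
        // not required to guarantee; the test documents de-facto behavior.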
        let iptr: *mut isize = pptr as *mut isize;
        assert_eq!(*iptr, 10);
        *iptr = 30;
        assert_eq!(*iptr, 30);
        assert_eq!(p.fst, 30);

        *pptr = Pair { fst: 50, snd: 60 };
        assert_eq!(*iptr, 50);
        assert_eq!(p.fst, 50);
        assert_eq!(p.snd, 60);
        let v0 = vec![32000u16, 32001u16, 32002u16];
        let mut v1 = vec![0u16, 0u16, 0u16];
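        // `copy` has `memmove` semantics: the source and destination regions may
        // overlap (`copy_nonoverlapping` is the `memcpy` analogue).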
        copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
        assert!(v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16);
        copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
        assert!(v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 0u16);
        copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
        assert!(v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 32000u16);
fn test_is_null() {
    let p: *const isize = null();
    assert!(p.is_null());
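    // `wrapping_offset` may be called on a null pointer without UB; it produces a
    // dangling, non-null pointer that must not be dereferenced: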
    let q = p.wrapping_offset(1);
    assert!(!q.is_null());

    let mp: *mut isize = null_mut();
    assert!(mp.is_null());

    let mq = mp.wrapping_offset(1);
    assert!(!mq.is_null());
    // Pointers to unsized types -- slices
    let s: &mut [u8] = &mut [1, 2, 3];
    let cs: *const [u8] = s;
    assert!(!cs.is_null());

    let ms: *mut [u8] = s;
    assert!(!ms.is_null());

    let cz: *const [u8] = &[];
    assert!(!cz.is_null());

    let mz: *mut [u8] = &mut [];
    assert!(!mz.is_null());
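    // For fat pointers, `is_null` only inspects the data half, so a null data
    // pointer with intact slice metadata is still considered null: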
    let ncs: *const [u8] = null::<[u8; 3]>();
    assert!(ncs.is_null());

    let nms: *mut [u8] = null_mut::<[u8; 3]>();
    assert!(nms.is_null());
    // Pointers to unsized types -- trait objects
    let ci: *const dyn ToString = &3;
    assert!(!ci.is_null());

    let mi: *mut dyn ToString = &mut 3;
    assert!(!mi.is_null());

    let nci: *const dyn ToString = null::<isize>();
    assert!(nci.is_null());

    let nmi: *mut dyn ToString = null_mut::<isize>();
    assert!(nmi.is_null());
    extern "C" {
        type Extern;
    }
    let ec: *const Extern = null::<Extern>();
    assert!(ec.is_null());

    let em: *mut Extern = null_mut::<Extern>();
    assert!(em.is_null());
}
fn test_as_ref() {
    unsafe {
        let p: *const isize = null();
        assert_eq!(p.as_ref(), None);

        let q: *const isize = &2;
        assert_eq!(q.as_ref().unwrap(), &2);

        let p: *mut isize = null_mut();
        assert_eq!(p.as_ref(), None);

        let q: *mut isize = &mut 2;
        assert_eq!(q.as_ref().unwrap(), &2);

        // Lifetime inference
        let u = 2isize;
        let p = &u as *const isize;
        assert_eq!(p.as_ref().unwrap(), &2);
        // Pointers to unsized types -- slices
        let s: &mut [u8] = &mut [1, 2, 3];
        let cs: *const [u8] = s;
        assert_eq!(cs.as_ref(), Some(&*s));

        let ms: *mut [u8] = s;
        assert_eq!(ms.as_ref(), Some(&*s));

        let cz: *const [u8] = &[];
        assert_eq!(cz.as_ref(), Some(&[][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_ref(), Some(&[][..]));

        let ncs: *const [u8] = null::<[u8; 3]>();
        assert_eq!(ncs.as_ref(), None);

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_ref(), None);
        // Pointers to unsized types -- trait objects
        let ci: *const dyn ToString = &3;
        assert!(ci.as_ref().is_some());

        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_ref().is_some());

        let nci: *const dyn ToString = null::<isize>();
        assert!(nci.as_ref().is_none());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_ref().is_none());
    }
}
fn test_as_mut() {
    unsafe {
        let p: *mut isize = null_mut();
        assert!(p.as_mut() == None);

        let q: *mut isize = &mut 2;
        assert!(q.as_mut().unwrap() == &mut 2);

        // Lifetime inference
        let mut u = 2isize;
        let p = &mut u as *mut isize;
        assert!(p.as_mut().unwrap() == &mut 2);
        // Pointers to unsized types -- slices
        let s: &mut [u8] = &mut [1, 2, 3];
        let ms: *mut [u8] = s;
        assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));

        let mz: *mut [u8] = &mut [];
        assert_eq!(mz.as_mut(), Some(&mut [][..]));

        let nms: *mut [u8] = null_mut::<[u8; 3]>();
        assert_eq!(nms.as_mut(), None);

        // Pointers to unsized types -- trait objects
        let mi: *mut dyn ToString = &mut 3;
        assert!(mi.as_mut().is_some());

        let nmi: *mut dyn ToString = null_mut::<isize>();
        assert!(nmi.as_mut().is_none());
    }
}
fn test_ptr_addition() {
    unsafe {
        let xs = vec![5; 16];
        let mut ptr = xs.as_ptr();
        let end = ptr.offset(16);

        while ptr < end {
            assert_eq!(*ptr, 5);
            ptr = ptr.offset(1);
        }

        let mut xs_mut = xs;
        let mut m_ptr = xs_mut.as_mut_ptr();
        let m_end = m_ptr.offset(16);

        while m_ptr < m_end {
            *m_ptr += 5;
            m_ptr = m_ptr.offset(1);
        }

        assert!(xs_mut == vec![10; 16]);
    }
}
fn test_ptr_subtraction() {
    unsafe {
        let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        let ptr = xs.as_ptr();
        let mut idx = 9;
        while idx >= 0 {
            assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
            idx -= 1;
        }

        let mut xs_mut = xs;
        let m_start = xs_mut.as_mut_ptr();
        let mut m_ptr = m_start.offset(9);

        loop {
            *m_ptr += *m_ptr;
            if m_ptr == m_start {
                break;
            }
            m_ptr = m_ptr.offset(-1);
        }

        assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
    }
}
fn test_set_memory() {
    let mut xs = [0u8; 20];
    let ptr = xs.as_mut_ptr();
    unsafe {
        write_bytes(ptr, 5u8, xs.len());
    }
    assert!(xs == [5u8; 20]);
}
fn test_set_memory_const() {
    const XS: [u8; 20] = {
        let mut xs = [0u8; 20];
        let ptr = xs.as_mut_ptr();
        unsafe {
            ptr.write_bytes(5u8, xs.len());
        }
        xs
    };

    assert!(XS == [5u8; 20]);
}
fn test_unsized_nonnull() {
    let xs: &[i32] = &[1, 2, 3];
    let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
    let ys = unsafe { ptr.as_ref() };
    let zs: &[i32] = &[1, 2, 3];
    assert!(ys == zs);
}
fn test_const_nonnull_new() {
    const {
        assert!(NonNull::new(core::ptr::null_mut::<()>()).is_none());

        let value = &mut 0u32;
        let mut ptr = NonNull::new(value).unwrap();
        unsafe { *ptr.as_mut() = 42 };

        let reference = unsafe { &*ptr.as_ref() };
        assert!(*reference == *value);
        assert!(*reference == 42);
    };
}
#[cfg(unix)] // printf may not be available on other platforms
#[allow(deprecated)] // For SipHasher
pub fn test_variadic_fnptr() {
    use core::ffi;
    use core::hash::{Hash, SipHasher};

    extern "C" {
        // This needs to use the correct function signature even though it isn't called, as some
        // codegen backends make it UB to declare a function with multiple conflicting signatures
        // (like LLVM) while others straight up return an error (like Cranelift).
        fn printf(_: *const ffi::c_char, ...) -> ffi::c_int;
    }

    let p: unsafe extern "C" fn(*const ffi::c_char, ...) -> ffi::c_int = printf;
    let q = p.clone();
    let mut s = SipHasher::new();
    assert_eq!(p.hash(&mut s), q.hash(&mut s));
}
fn write_unaligned_drop() {
    thread_local! {
        static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
    }

    struct Dropper(u32);

    impl Drop for Dropper {
        fn drop(&mut self) {
            DROPS.with(|d| d.borrow_mut().push(self.0));
        }
    }

    {
        let c = Dropper(0);
        let mut t = Dropper(1);
        unsafe { write_unaligned(&mut t, c) };
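        // `write_unaligned` moves `c` into `t` without dropping the old `t`, so
        // `Dropper(1)` never runs its destructor; the moved-in `Dropper(0)` is
        // dropped at the end of this scope, giving `[0]` below.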
    }
    DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
}
fn align_offset_zst() {
    // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
    // all, because no amount of elements will align the pointer.
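    // Concretely: `invalid::<()>(8).align_offset(8)` is 0, while
    // `invalid::<()>(9).align_offset(8)` is `usize::MAX`, because stepping by a
    // zero-sized stride never changes the address.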
    let mut p = 1;
    while p < 1024 {
        assert_eq!(ptr::invalid::<()>(p).align_offset(p), 0);
        if p != 1 {
            assert_eq!(ptr::invalid::<()>(p + 1).align_offset(p), !0);
        }
        p = (p + 1).next_power_of_two();
    }
}
#[cfg(not(bootstrap))]
fn align_offset_zst_const() {
    const {
        // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
        // all, because no amount of elements will align the pointer.
        let mut p = 1;
        while p < 1024 {
            assert!(ptr::invalid::<()>(p).align_offset(p) == 0);
            if p != 1 {
                assert!(ptr::invalid::<()>(p + 1).align_offset(p) == !0);
            }
            p = (p + 1).next_power_of_two();
        }
    }
}
fn align_offset_stride_one() {
    // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
    // the number of bytes that must be skipped to reach the next aligned address.
    let mut align = 1;
    while align < 1024 {
        for ptr in 1..2 * align {
            let expected = ptr % align;
            let offset = if expected == 0 { 0 } else { align - expected };
            assert_eq!(
                ptr::invalid::<u8>(ptr).align_offset(align),
                offset,
                "ptr = {}, align = {}, size = 1",
                ptr,
                align
            );
        }
        align = (align + 1).next_power_of_two();
    }
}
#[cfg(not(bootstrap))]
fn align_offset_stride_one_const() {
    const {
        // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
        // the number of bytes that must be skipped to reach the next aligned address.
        let mut align = 1;
        while align < 1024 {
            let mut ptr = 1;
            while ptr < 2 * align {
                let expected = ptr % align;
                let offset = if expected == 0 { 0 } else { align - expected };
                assert!(ptr::invalid::<u8>(ptr).align_offset(align) == offset);
                ptr += 1;
            }
            align = (align + 1).next_power_of_two();
        }
    }
}
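
// A minimal distillation of the stride-one relationship the two tests above
// exercise; this helper is not part of the test suite, just an illustration.
#[allow(dead_code)]
fn stride_one_expected(addr: usize, align: usize) -> usize {
    // Distance in bytes from `addr` to the next multiple of `align`
    // (0 if `addr` is already aligned).
    (align - addr % align) % align
}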
fn align_offset_various_strides() {
    unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
        let numptr = ptr as usize;
        let mut expected = usize::MAX;
        // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
        for el in 0..align {
            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
                expected = el;
                break;
            }
        }

        let got = ptr.align_offset(align);
        if got != expected {
            eprintln!(
                "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
                ptr,
                ::std::mem::size_of::<T>(),
                align,
                expected,
                got
            );
            return true;
        }
        false
    }

    // For pointers of stride != 1, we verify the algorithm against the naivest possible
    // implementation.
    let mut align = 1;
    let mut x = false;
    // Miri is too slow
    let limit = if cfg!(miri) { 32 } else { 1024 };
    while align < limit {
        for ptr in 1usize..4 * align {
            unsafe {
                #[repr(packed)]
                struct A3(u16, u8);
                x |= test_stride::<A3>(ptr::invalid::<A3>(ptr), align);

                struct A4(u32);
                x |= test_stride::<A4>(ptr::invalid::<A4>(ptr), align);

                #[repr(packed)]
                struct A5(u32, u8);
                x |= test_stride::<A5>(ptr::invalid::<A5>(ptr), align);

                #[repr(packed)]
                struct A6(u32, u16);
                x |= test_stride::<A6>(ptr::invalid::<A6>(ptr), align);

                #[repr(packed)]
                struct A7(u32, u16, u8);
                x |= test_stride::<A7>(ptr::invalid::<A7>(ptr), align);

                #[repr(packed)]
                struct A8(u32, u32);
                x |= test_stride::<A8>(ptr::invalid::<A8>(ptr), align);

                #[repr(packed)]
                struct A9(u32, u32, u8);
                x |= test_stride::<A9>(ptr::invalid::<A9>(ptr), align);

                #[repr(packed)]
                struct A10(u32, u32, u16);
                x |= test_stride::<A10>(ptr::invalid::<A10>(ptr), align);

                x |= test_stride::<u32>(ptr::invalid::<u32>(ptr), align);
                x |= test_stride::<u128>(ptr::invalid::<u128>(ptr), align);
            }
        }
        align = (align + 1).next_power_of_two();
    }
    assert!(!x);
}
#[cfg(not(bootstrap))]
fn align_offset_various_strides_const() {
    const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
        let mut expected = usize::MAX;
        // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
        let mut el = 0;
        while el < align {
            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
                expected = el;
                break;
            }
            el += 1;
        }

        let got = ptr.align_offset(align);
        assert!(got == expected);
    }

    // For pointers of stride != 1, we verify the algorithm against the naivest possible
    // implementation.
    let mut align = 1;
    let limit = 32;
    while align < limit {
        for ptr in 1usize..4 * align {
            unsafe {
                #[repr(packed)]
                struct A3(u16, u8);
                test_stride::<A3>(ptr::invalid::<A3>(ptr), ptr, align);

                struct A4(u32);
                test_stride::<A4>(ptr::invalid::<A4>(ptr), ptr, align);

                #[repr(packed)]
                struct A5(u32, u8);
                test_stride::<A5>(ptr::invalid::<A5>(ptr), ptr, align);

                #[repr(packed)]
                struct A6(u32, u16);
                test_stride::<A6>(ptr::invalid::<A6>(ptr), ptr, align);

                #[repr(packed)]
                struct A7(u32, u16, u8);
                test_stride::<A7>(ptr::invalid::<A7>(ptr), ptr, align);

                #[repr(packed)]
                struct A8(u32, u32);
                test_stride::<A8>(ptr::invalid::<A8>(ptr), ptr, align);

                #[repr(packed)]
                struct A9(u32, u32, u8);
                test_stride::<A9>(ptr::invalid::<A9>(ptr), ptr, align);

                #[repr(packed)]
                struct A10(u32, u32, u16);
                test_stride::<A10>(ptr::invalid::<A10>(ptr), ptr, align);

                test_stride::<u32>(ptr::invalid::<u32>(ptr), ptr, align);
                test_stride::<u128>(ptr::invalid::<u128>(ptr), ptr, align);
            }
        }
        align = (align + 1).next_power_of_two();
    }
}
#[cfg(not(bootstrap))]
fn align_offset_with_provenance_const() {
    const {
        let data = 42;
        let ptr: *const i32 = &data;
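        // At const-eval time the absolute address is unknown, so `align_offset`
        // can only use what the allocation guarantees (here the alignment of
        // `i32`); any request it cannot prove satisfiable reports `usize::MAX`.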
        assert!(ptr.align_offset(1) == 0);
        assert!(ptr.align_offset(2) == 0);
        assert!(ptr.align_offset(4) == 0);
        assert!(ptr.align_offset(8) == usize::MAX);
        assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX);
        assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX);
        assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX);

        assert!(ptr.wrapping_add(42).align_offset(4) == 0);
        assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX);

        let ptr1: *const i8 = ptr.cast();
        assert!(ptr1.align_offset(1) == 0);
        assert!(ptr1.align_offset(2) == 0);
        assert!(ptr1.align_offset(4) == 0);
        assert!(ptr1.align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1);
        assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3);
        assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2);
        assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1);
        assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1);
        assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX);

        let ptr2: *const i16 = ptr.cast();
        assert!(ptr2.align_offset(1) == 0);
        assert!(ptr2.align_offset(2) == 0);
        assert!(ptr2.align_offset(4) == 0);
        assert!(ptr2.align_offset(8) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0);
        assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1);
        assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX);
        assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0);
        assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX);

        let ptr3: *const i64 = ptr.cast();
        assert!(ptr3.align_offset(1) == 0);
        assert!(ptr3.align_offset(2) == 0);
        assert!(ptr3.align_offset(4) == 0);
        assert!(ptr3.align_offset(8) == usize::MAX);
        assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0);
        assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX);
    }
}
fn align_offset_issue_103361() {
    #[cfg(target_pointer_width = "64")]
    const SIZE: usize = 1 << 47;
    #[cfg(target_pointer_width = "32")]
    const SIZE: usize = 1 << 30;
    #[cfg(target_pointer_width = "16")]
    const SIZE: usize = 1 << 13;
    struct HugeSize([u8; SIZE - 1]);
    let _ = (SIZE as *const HugeSize).align_offset(SIZE);
}
fn offset_from() {
    let mut a = [0; 5];
    let ptr1: *mut i32 = &mut a[1];
    let ptr2: *mut i32 = &mut a[3];
    unsafe {
        assert_eq!(ptr2.offset_from(ptr1), 2);
        assert_eq!(ptr1.offset_from(ptr2), -2);
        assert_eq!(ptr1.offset(2), ptr2);
        assert_eq!(ptr2.offset(-2), ptr1);
    }
}
fn ptr_metadata() {
    struct Unit;
    struct Pair<A, B: ?Sized>(A, B);
    extern "C" {
        type Extern;
    }
    let () = metadata(&());
    let () = metadata(&Unit);
    let () = metadata(&4_u32);
    let () = metadata(&String::new());
    let () = metadata(&Some(4_u32));
    let () = metadata(&ptr_metadata);
    let () = metadata(&|| {});
    let () = metadata(&[4, 7]);
    let () = metadata(&(4, String::new()));
    let () = metadata(&Pair(4, String::new()));
    let () = metadata(ptr::null::<()>() as *const Extern);
    let () = metadata(ptr::null::<()>() as *const <&u32 as std::ops::Deref>::Target);
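    // Sized types carry `()` metadata; for slices and `str` the metadata is the
    // length (in elements, or in bytes for `str`):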
    assert_eq!(metadata("foo"), 3_usize);
    assert_eq!(metadata(&[4, 7][..]), 2_usize);

    let dst_tuple: &(bool, [u8]) = &(true, [0x66, 0x6F, 0x6F]);
    let dst_struct: &Pair<bool, [u8]> = &Pair(true, [0x66, 0x6F, 0x6F]);
    assert_eq!(metadata(dst_tuple), 3_usize);
    assert_eq!(metadata(dst_struct), 3_usize);
    unsafe {
        let dst_tuple: &(bool, str) = std::mem::transmute(dst_tuple);
        let dst_struct: &Pair<bool, str> = std::mem::transmute(dst_struct);
        assert_eq!(&dst_tuple.1, "foo");
        assert_eq!(&dst_struct.1, "foo");
        assert_eq!(metadata(dst_tuple), 3_usize);
        assert_eq!(metadata(dst_struct), 3_usize);
    }
    let vtable_1: DynMetadata<dyn Debug> = metadata(&4_u16 as &dyn Debug);
    let vtable_2: DynMetadata<dyn Display> = metadata(&4_u16 as &dyn Display);
    let vtable_3: DynMetadata<dyn Display> = metadata(&4_u32 as &dyn Display);
    let vtable_4: DynMetadata<dyn Display> = metadata(&(true, 7_u32) as &(bool, dyn Display));
    let vtable_5: DynMetadata<dyn Display> =
        metadata(&Pair(true, 7_u32) as &Pair<bool, dyn Display>);
    unsafe {
        let address_1: *const () = std::mem::transmute(vtable_1);
        let address_2: *const () = std::mem::transmute(vtable_2);
        let address_3: *const () = std::mem::transmute(vtable_3);
        let address_4: *const () = std::mem::transmute(vtable_4);
        let address_5: *const () = std::mem::transmute(vtable_5);
        // Different trait => different vtable pointer
        assert_ne!(address_1, address_2);
        // Different erased type => different vtable pointer
        assert_ne!(address_2, address_3);
        // Same erased type and same trait => same vtable pointer
        assert_eq!(address_3, address_4);
        assert_eq!(address_3, address_5);
    }
}
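// Note: equal vtable pointers for the same (type, trait) pair reflect current
// compiler behavior, not a language guarantee; vtables may in principle be
// duplicated or merged across codegen units.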
fn ptr_metadata_bounds() {
    fn metadata_eq_method_address<T: ?Sized>() -> usize {
        // The `Metadata` associated type has an `Ord` bound, so this is valid:
        <<T as Pointee>::Metadata as PartialEq>::eq as usize
    }
    // "Synthetic" trait impls generated by the compiler like those of `Pointee`
    // are not checked for the bounds of the associated type.
    // So with a buggy libcore we could have both:
    // * `<dyn Display as Pointee>::Metadata == DynMetadata`
    // * `DynMetadata: !PartialEq`
    // … and cause an ICE here:
    metadata_eq_method_address::<dyn Display>();

    // For this reason, let’s check here that bounds are satisfied:
    let _ = static_assert_expected_bounds_for_metadata::<()>;
    let _ = static_assert_expected_bounds_for_metadata::<usize>;
    let _ = static_assert_expected_bounds_for_metadata::<DynMetadata<dyn Display>>;
    fn _static_assert_associated_type<T: ?Sized>() {
        let _ = static_assert_expected_bounds_for_metadata::<<T as Pointee>::Metadata>;
    }

    fn static_assert_expected_bounds_for_metadata<Meta>()
    where
        // Keep this in sync with the associated type in `library/core/src/ptr/metadata.rs`
        Meta: Copy + Send + Sync + Ord + std::hash::Hash + Unpin,
    {
    }
}
fn dyn_metadata() {
    #[derive(Debug)]
    #[repr(align(32))]
    struct Something([u8; 47]);

    let value = Something([0; 47]);
    let trait_object: &dyn Debug = &value;
    let meta = metadata(trait_object);
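    // 47 bytes of payload rounded up to the 32-byte alignment gives a size of 64.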
    assert_eq!(meta.size_of(), 64);
    assert_eq!(meta.size_of(), std::mem::size_of::<Something>());
    assert_eq!(meta.align_of(), 32);
    assert_eq!(meta.align_of(), std::mem::align_of::<Something>());
    assert_eq!(meta.layout(), std::alloc::Layout::new::<Something>());

    assert!(format!("{meta:?}").starts_with("DynMetadata(0x"));
}
fn from_raw_parts() {
    let mut value = 5_u32;
    let address = &mut value as *mut _ as *mut ();
    let trait_object: &dyn Display = &mut value;
    let vtable = metadata(trait_object);
    let trait_object = NonNull::from(trait_object);

    assert_eq!(ptr::from_raw_parts(address, vtable), trait_object.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, vtable), trait_object.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), vtable), trait_object);

    let mut array = [5_u32, 5, 5, 5, 5];
    let address = &mut array as *mut _ as *mut ();
    let array_ptr = NonNull::from(&mut array);
    let slice_ptr = NonNull::from(&mut array[..]);

    assert_eq!(ptr::from_raw_parts(address, ()), array_ptr.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, ()), array_ptr.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), ()), array_ptr);

    assert_eq!(ptr::from_raw_parts(address, 5), slice_ptr.as_ptr());
    assert_eq!(ptr::from_raw_parts_mut(address, 5), slice_ptr.as_ptr());
    assert_eq!(NonNull::from_raw_parts(NonNull::new(address).unwrap(), 5), slice_ptr);
}
fn thin_box() {
    let foo = ThinBox::<dyn Display>::new(4);
    assert_eq!(foo.to_string(), "4");

    let bar = ThinBox::<dyn Display>::new(7);
    assert_eq!(bar.to_string(), "7");
    // A slightly more interesting library that could be built on top of metadata APIs.
    //
    // * It could be generalized to any `T: ?Sized` (not just trait objects)
    //   if `{size,align}_of_for_meta<T: ?Sized>(T::Metadata)` are added.
    // * Constructing a `ThinBox` without consuming and deallocating a `Box`
    //   requires either the unstable `Unsize` marker trait,
    //   or the unstable `unsized_locals` language feature,
    //   or taking `&dyn T` and restricting to `T: Copy`.
    use std::alloc::*;
    use std::marker::PhantomData;

    struct ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        ptr: NonNull<DynMetadata<T>>,
        phantom: PhantomData<T>,
    }
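    // Single-pointer representation: `ptr` addresses the `DynMetadata<T>` header,
    // and the value lives in the same allocation immediately after it (at the
    // `offset` computed in `new`).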
    impl<T> ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        pub fn new<Value: std::marker::Unsize<T>>(value: Value) -> Self {
            let unsized_: &T = &value;
            let meta = metadata(unsized_);
            let meta_layout = Layout::for_value(&meta);
            let value_layout = Layout::for_value(&value);
            let (layout, offset) = meta_layout.extend(value_layout).unwrap();
            // `DynMetadata` is pointer-sized, so the layout is never zero-sized:
            assert!(layout.size() > 0);
            // If `ThinBox<T>` is generalized to any `T: ?Sized`,
            // handle ZSTs with a dangling pointer without going through `alloc()`,
            // like `Box<T>` does.
            unsafe {
                let ptr = NonNull::new(alloc(layout))
                    .unwrap_or_else(|| handle_alloc_error(layout))
                    .cast::<DynMetadata<T>>();
                ptr.as_ptr().write(meta);
                ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
                Self { ptr, phantom: PhantomData }
            }
        }
        fn meta(&self) -> DynMetadata<T> {
            unsafe { *self.ptr.as_ref() }
        }

        fn layout(&self) -> (Layout, usize) {
            let meta = self.meta();
            Layout::for_value(&meta).extend(meta.layout()).unwrap()
        }

        fn value_ptr(&self) -> *const T {
            let (_, offset) = self.layout();
            let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
            ptr::from_raw_parts(data_ptr.cast(), self.meta())
        }

        fn value_mut_ptr(&mut self) -> *mut T {
            let (_, offset) = self.layout();
            // FIXME: can this line be shared with the same in `value_ptr()`
            // without upsetting Stacked Borrows?
            let data_ptr = unsafe { self.ptr.cast::<u8>().as_ptr().add(offset) };
            from_raw_parts_mut(data_ptr.cast(), self.meta())
        }
    }
    impl<T> std::ops::Deref for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        type Target = T;

        fn deref(&self) -> &T {
            unsafe { &*self.value_ptr() }
        }
    }

    impl<T> std::ops::DerefMut for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        fn deref_mut(&mut self) -> &mut T {
            unsafe { &mut *self.value_mut_ptr() }
        }
    }

    impl<T> std::ops::Drop for ThinBox<T>
    where
        T: ?Sized + Pointee<Metadata = DynMetadata<T>>,
    {
        fn drop(&mut self) {
            let (layout, _) = self.layout();
            unsafe {
                drop_in_place::<T>(&mut **self);
                dealloc(self.ptr.cast().as_ptr(), layout);
            }
        }
    }
}
fn nonnull_tagged_pointer_with_provenance() {
    let raw_pointer = Box::into_raw(Box::new(10));

    let mut p = TaggedPointer::new(raw_pointer).unwrap();
    assert_eq!(p.tag(), 0);

    p.set_tag(1);
    assert_eq!(p.tag(), 1);
    assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);

    p.set_tag(3);
    assert_eq!(p.tag(), 3);
    assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);

    unsafe { Box::from_raw(p.pointer().as_ptr()) };
    /// A non-null pointer type which carries several bits of metadata and maintains provenance.
    #[repr(transparent)]
    pub struct TaggedPointer<T>(NonNull<T>);

    impl<T> Clone for TaggedPointer<T> {
        fn clone(&self) -> Self {
            *self
        }
    }

    impl<T> Copy for TaggedPointer<T> {}
    impl<T> TaggedPointer<T> {
        /// The ABI-required minimum alignment of the `T` type.
        pub const ALIGNMENT: usize = core::mem::align_of::<T>();
        /// A mask for data-carrying bits of the address.
        pub const DATA_MASK: usize = !Self::ADDRESS_MASK;
        /// Number of available bits of storage in the address.
        pub const NUM_BITS: u32 = Self::ALIGNMENT.trailing_zeros();
        /// A mask for the non-data-carrying bits of the address.
        pub const ADDRESS_MASK: usize = usize::MAX << Self::NUM_BITS;
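        // Worked example: for `T = u32` (alignment 4), NUM_BITS is 2, so the two
        // low address bits carry the tag (DATA_MASK == 0b11) and ADDRESS_MASK
        // clears them.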
        /// Create a new tagged pointer from a possibly null pointer.
        pub fn new(pointer: *mut T) -> Option<TaggedPointer<T>> {
            Some(TaggedPointer(NonNull::new(pointer)?))
        }

        /// Consume this tagged pointer and produce a raw mutable pointer to the
        /// memory location.
        pub fn pointer(self) -> NonNull<T> {
            // SAFETY: The `addr` is guaranteed to have bits set in `Self::ADDRESS_MASK`,
            // so the result will be non-null.
            self.0.map_addr(|addr| unsafe {
                NonZeroUsize::new_unchecked(addr.get() & Self::ADDRESS_MASK)
            })
        }

        /// Read the tag data this tagged pointer carries.
        pub fn tag(&self) -> usize {
            self.0.addr().get() & Self::DATA_MASK
        }
        /// Update the data this tagged pointer carries to a new value.
        pub fn set_tag(&mut self, data: usize) {
            assert_eq!(
                data & Self::ADDRESS_MASK,
                0,
                "cannot set data beyond the lowest NUM_BITS"
            );
            let data = data & Self::DATA_MASK;

            // SAFETY: This value will always be non-zero because the upper bits (from
            // ADDRESS_MASK) will always be non-zero. This is a property of the type and
            // its construction.
            self.0 = self.0.map_addr(|addr| unsafe {
                NonZeroUsize::new_unchecked((addr.get() & Self::ADDRESS_MASK) | data)
            })
        }
    }
}
fn swap_copy_untyped() {
    // We call `{swap,copy}{,_nonoverlapping}` at `bool` type on data that is not a valid bool.
    // These should all do untyped copies, so this should work fine.
    let mut x = 5u8;
    let mut y = 6u8;

    let ptr1 = &mut x as *mut u8 as *mut bool;
    let ptr2 = &mut y as *mut u8 as *mut bool;

    unsafe {
        ptr::swap(ptr1, ptr2);
        ptr::swap_nonoverlapping(ptr1, ptr2, 1);
    }
    assert_eq!(x, 5);
    assert_eq!(y, 6);

    unsafe {
        ptr::copy(ptr1, ptr2, 1);
        ptr::copy_nonoverlapping(ptr1, ptr2, 1);
    }
    assert_eq!(x, 5);
    assert_eq!(y, 5);
}
fn test_const_copy() {
    const {
        let ptr1 = &1;
        let mut ptr2 = &666;

        // Copy ptr1 to ptr2, bytewise.
        unsafe {
            ptr::copy(
                &ptr1 as *const _ as *const MaybeUninit<u8>,
                &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
                mem::size_of::<&i32>(),
            );
        }

        // Make sure they still work.
        assert!(*ptr1 == 1);
        assert!(*ptr2 == 1);
    };
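    // The interesting part at const-eval time: the bytewise copy of `ptr1` must
    // preserve its provenance, or the `*ptr2` reads above and below would not be
    // valid. (This is our reading of the test, stated as an assumption.)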
    const {
        let ptr1 = &1;
        let mut ptr2 = &666;

        // Copy ptr1 to ptr2, bytewise.
        unsafe {
            ptr::copy_nonoverlapping(
                &ptr1 as *const _ as *const MaybeUninit<u8>,
                &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
                mem::size_of::<&i32>(),
            );
        }

        // Make sure they still work.
        assert!(*ptr1 == 1);
        assert!(*ptr2 == 1);
    };
}