1 use core::cell::RefCell;
3 use std::fmt::{Debug, Display};
// Checks that `slice_from_raw_parts` round-trips a slice's (ptr, len) back into
// an equal slice, both in a `const` context and at runtime with a shorter length.
// NOTE(review): the embedded numbering has gaps (orig 10, 14 missing), so some
// lines of this function are not visible in this excerpt — confirm against the full file.
6 fn test_const_from_raw_parts() {
7 const SLICE: &[u8] = &[1, 2, 3, 4];
// SAFETY (presumed): ptr/len come from a live slice, so the reconstructed
// raw slice is valid to dereference — TODO confirm a SAFETY comment exists upstream.
8 const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
9 assert_eq!(SLICE, FROM_RAW);
// Runtime variant: reconstruct only a 2-element prefix of the slice.
11 let slice = &[1, 2, 3, 4, 5];
12 let from_raw = unsafe { &*slice_from_raw_parts(slice.as_ptr(), 2) };
13 assert_eq!(&slice[..2], from_raw);
// Fragment of a test whose header (and the `Pair` struct definition, orig 14-22)
// is not visible in this excerpt — NOTE(review): confirm against the full file.
// First part: type-puns a `*mut Pair` as `*mut isize` and checks that reads and
// writes through the punned pointer alias `p.fst` (the first field).
23 let mut p = Pair { fst: 10, snd: 20 };
24 let pptr: *mut Pair = &mut p;
// Casting to `*mut isize` makes the pointer address the first field only
// (presumes `fst: isize` at offset 0 — the hidden `Pair` definition must guarantee this).
25 let iptr: *mut isize = pptr as *mut isize;
26 assert_eq!(*iptr, 10);
// A write through `iptr` (orig 27, not visible) presumably set `*iptr = 30` here.
28 assert_eq!(*iptr, 30);
29 assert_eq!(p.fst, 30);
// Writing a whole `Pair` through `pptr` updates both fields; `iptr` still sees `fst`.
31 *pptr = Pair { fst: 50, snd: 60 };
32 assert_eq!(*iptr, 50);
33 assert_eq!(p.fst, 50);
34 assert_eq!(p.snd, 60);
// Second part: element-wise `ptr::copy` between two Vec<u16> buffers, copying
// one element at a time at various offsets and checking only that slot changed.
36 let v0 = vec![32000u16, 32001u16, 32002u16];
37 let mut v1 = vec![0u16, 0u16, 0u16];
// Copy middle element -> middle slot.
39 copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
40 assert!((v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16));
// Copy last element -> first slot.
41 copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
42 assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 0u16));
// Copy first element -> last slot; v1 is now fully populated.
43 copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
44 assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 32000u16));
// Fragment of the `is_null` test; its header (orig 45-49) is not visible here —
// NOTE(review): confirm against the full file. Exercises `is_null` on thin
// pointers, raw slice pointers, and trait-object pointers.
50 let p: *const isize = null();
// `wrapping_offset` past null yields a non-null (but still invalid) address.
53 let q = p.wrapping_offset(1);
54 assert!(!q.is_null());
56 let mp: *mut isize = null_mut();
57 assert!(mp.is_null());
59 let mq = mp.wrapping_offset(1);
60 assert!(!mq.is_null());
62 // Pointers to unsized types -- slices
63 let s: &mut [u8] = &mut [1, 2, 3];
64 let cs: *const [u8] = s;
65 assert!(!cs.is_null());
67 let ms: *mut [u8] = s;
68 assert!(!ms.is_null());
// Even an empty slice has a non-null (dangling-but-aligned) data pointer.
70 let cz: *const [u8] = &[];
71 assert!(!cz.is_null());
73 let mz: *mut [u8] = &mut [];
74 assert!(!mz.is_null());
// Unsizing a null `*const [u8; 3]` to `*const [u8]` keeps the null data pointer.
76 let ncs: *const [u8] = null::<[u8; 3]>();
77 assert!(ncs.is_null());
79 let nms: *mut [u8] = null_mut::<[u8; 3]>();
80 assert!(nms.is_null());
82 // Pointers to unsized types -- trait objects
83 let ci: *const dyn ToString = &3;
84 assert!(!ci.is_null());
86 let mi: *mut dyn ToString = &mut 3;
87 assert!(!mi.is_null());
// Unsizing a null thin pointer to a trait-object pointer stays null.
89 let nci: *const dyn ToString = null::<isize>();
90 assert!(nci.is_null());
92 let nmi: *mut dyn ToString = null_mut::<isize>();
93 assert!(nmi.is_null());
// Fragment of the `as_ref` test; the header and surrounding `unsafe` context
// (orig 94-98, 101, …) are not visible — NOTE(review): confirm against the full
// file. Checks `ptr::as_ref`: `None` for null, `Some(&T)` otherwise, for thin
// pointers, raw slice pointers, and trait-object pointers.
99 let p: *const isize = null();
100 assert_eq!(p.as_ref(), None);
102 let q: *const isize = &2;
103 assert_eq!(q.as_ref().unwrap(), &2);
105 let p: *mut isize = null_mut();
106 assert_eq!(p.as_ref(), None);
108 let q: *mut isize = &mut 2;
109 assert_eq!(q.as_ref().unwrap(), &2);
111 // Lifetime inference
// `u` is presumably a local `isize` defined on a hidden line (orig ~113) — verify.
114 let p = &u as *const isize;
115 assert_eq!(p.as_ref().unwrap(), &2);
118 // Pointers to unsized types -- slices
119 let s: &mut [u8] = &mut [1, 2, 3];
120 let cs: *const [u8] = s;
121 assert_eq!(cs.as_ref(), Some(&*s));
123 let ms: *mut [u8] = s;
124 assert_eq!(ms.as_ref(), Some(&*s));
// Empty slices: non-null dangling data pointer still yields `Some`.
126 let cz: *const [u8] = &[];
127 assert_eq!(cz.as_ref(), Some(&[][..]));
129 let mz: *mut [u8] = &mut [];
130 assert_eq!(mz.as_ref(), Some(&[][..]));
// Null raw slice pointers yield `None`.
132 let ncs: *const [u8] = null::<[u8; 3]>();
133 assert_eq!(ncs.as_ref(), None);
135 let nms: *mut [u8] = null_mut::<[u8; 3]>();
136 assert_eq!(nms.as_ref(), None);
138 // Pointers to unsized types -- trait objects
139 let ci: *const dyn ToString = &3;
140 assert!(ci.as_ref().is_some());
142 let mi: *mut dyn ToString = &mut 3;
143 assert!(mi.as_ref().is_some());
145 let nci: *const dyn ToString = null::<isize>();
146 assert!(nci.as_ref().is_none());
148 let nmi: *mut dyn ToString = null_mut::<isize>();
149 assert!(nmi.as_ref().is_none());
// Fragment of the `as_mut` test; header and enclosing `unsafe` context
// (orig 150-155) are not visible — NOTE(review): confirm against the full file.
// Mirrors the `as_ref` test for the mutable accessor `ptr::as_mut`.
156 let p: *mut isize = null_mut();
157 assert!(p.as_mut() == None);
159 let q: *mut isize = &mut 2;
160 assert!(q.as_mut().unwrap() == &mut 2);
162 // Lifetime inference
// `u` is presumably a local `mut isize` defined on a hidden line (orig ~164) — verify.
165 let p = &mut u as *mut isize;
166 assert!(p.as_mut().unwrap() == &mut 2);
169 // Pointers to unsized types -- slices
170 let s: &mut [u8] = &mut [1, 2, 3];
171 let ms: *mut [u8] = s;
172 assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));
// Empty slice: dangling-but-non-null data pointer still yields `Some`.
174 let mz: *mut [u8] = &mut [];
175 assert_eq!(mz.as_mut(), Some(&mut [][..]));
177 let nms: *mut [u8] = null_mut::<[u8; 3]>();
178 assert_eq!(nms.as_mut(), None);
180 // Pointers to unsized types -- trait objects
181 let mi: *mut dyn ToString = &mut 3;
182 assert!(mi.as_mut().is_some());
184 let nmi: *mut dyn ToString = null_mut::<isize>();
185 assert!(nmi.as_mut().is_none());
// Walks pointers forward over Vec buffers with `offset(1)` until reaching the
// one-past-the-end pointer. NOTE(review): many interior lines (orig 195-201,
// 204, 206, 208-209 — including loop bodies and `xs_mut`'s definition) are
// missing from this excerpt; confirm against the full file.
190 fn test_ptr_addition() {
192 let xs = vec![5; 16];
193 let mut ptr = xs.as_ptr();
// One-past-the-end pointer: valid to compute and compare, not to deref.
194 let end = ptr.offset(16);
// `xs_mut` is presumably a mutable Vec defined on a hidden line — verify.
202 let mut m_ptr = xs_mut.as_mut_ptr();
203 let m_end = m_ptr.offset(16);
205 while m_ptr < m_end {
// The hidden loop body (orig 206) presumably mutates `*m_ptr` (e.g. `+= 5`),
// since the final assertion expects every element to be 10.
207 m_ptr = m_ptr.offset(1);
210 assert!(xs_mut == vec![10; 16]);
// Walks pointers backward with negative offsets. NOTE(review): interior lines
// (orig 216, 218, 220-221, 223-226, 229-231, 233-234, 236-237 — including the
// enclosing `unsafe`, loop headers/bodies, and `xs_mut`'s definition) are
// missing from this excerpt; confirm against the full file.
215 fn test_ptr_subtraction() {
217 let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
219 let ptr = xs.as_ptr();
// Presumably inside a loop over `idx` (hidden): element i of xs equals i.
222 assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
// `xs_mut` is presumably a mutable copy of the data defined on a hidden line — verify.
227 let m_start = xs_mut.as_mut_ptr();
// Start at the last element and walk backward via `offset(-1)`.
228 let mut m_ptr = m_start.offset(9);
232 if m_ptr == m_start {
235 m_ptr = m_ptr.offset(-1);
// The hidden loop body presumably doubles each element (result is 2*i).
238 assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
// `ptr::write_bytes` fills the whole buffer with the byte 5.
// NOTE(review): orig lines 246/248 (presumably the `unsafe { … }` wrapper) are
// missing from this excerpt — confirm against the full file.
243 fn test_set_memory() {
244 let mut xs = [0u8; 20];
245 let ptr = xs.as_mut_ptr();
// Writes `xs.len()` bytes of value 5 starting at `ptr` (memset-style).
247 write_bytes(ptr, 5u8, xs.len());
249 assert!(xs == [5u8; 20]);
// `NonNull` can wrap a fat (slice) pointer and hand back the slice via `as_ref`.
// NOTE(review): the final assertion comparing `ys`/`zs` (orig ~258) is missing
// from this excerpt — confirm against the full file.
253 fn test_unsized_nonnull() {
254 let xs: &[i32] = &[1, 2, 3];
// SAFETY (presumed): `xs` is a valid non-null reference, so the cast pointer
// satisfies `new_unchecked`'s non-null requirement.
255 let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
// SAFETY (presumed): the pointee is live for the duration of the borrow.
256 let ys = unsafe { ptr.as_ref() };
257 let zs: &[i32] = &[1, 2, 3];
263 // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
264 // ABI, or even point to actual executable code, because the function itself is never invoked.
// Checks that a variadic `extern "C"` fn pointer can be hashed (i.e. that fn
// pointers implement `Hash`). NOTE(review): orig 265, 268, 270, 272-274 are
// missing (including the `extern` block wrapper and, presumably, `q`'s
// definition as a copy of `p`) — confirm against the full file.
266 pub fn test_variadic_fnptr() {
267 use core::hash::{Hash, SipHasher};
// Extern declaration only; never called (see comment above).
269 fn test_variadic_fnptr(_: u64, ...) -> f64;
271 let p: unsafe extern "C" fn(u64, ...) -> f64 = test_variadic_fnptr;
275 let mut s = SipHasher::new();
// NOTE(review): `Hash::hash` returns `()`, so this assert compares unit values;
// the real signal is that both `hash` calls type-check and run.
276 assert_eq!(p.hash(&mut s), q.hash(&mut s));
// Checks that `ptr::write_unaligned` does NOT drop the value previously stored
// at the destination: only the value written over (`Dropper(0)`, presumably
// `c`, defined on hidden lines) ends up dropped — never `Dropper(1)`.
// NOTE(review): orig 281, 283-286, 288, 290-294, 296, 298-299 are missing
// (thread_local wrapper, `Dropper` definition, `c`'s construction, scope
// braces) — confirm against the full file.
280 fn write_unaligned_drop() {
// Records the payload of every `Dropper` that gets dropped, in drop order.
282 static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
287 impl Drop for Dropper {
289 DROPS.with(|d| d.borrow_mut().push(self.0));
// `t` (payload 1) is overwritten without being dropped.
295 let mut t = Dropper(1);
297 write_unaligned(&mut t, c);
// Only the written value's payload (0) was recorded; 1 was never dropped here.
300 DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
// `align_offset` on ZST pointers (stride 0): the pointer is either already
// aligned (offset 0) or can never be aligned (`!0` == usize::MAX).
// NOTE(review): orig 307-308, 310, 312, 314-315 (loop header over `p`, closing
// braces) are missing — confirm against the full file.
304 fn align_offset_zst() {
305 // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
306 // all, because no amount of elements will align the pointer.
// Address `p` (a power of two, per the update below) is aligned to itself…
309 assert_eq!((p as *const ()).align_offset(p), 0);
// …but `p + 1` can never become aligned by adding zero-sized strides.
311 assert_eq!(((p + 1) as *const ()).align_offset(p), !0);
// Advance to the next power-of-two alignment to test.
313 p = (p + 1).next_power_of_two();
// `align_offset` on stride-1 (`u8`) pointers: always alignable; the offset is
// the distance to the next multiple of `align`.
// NOTE(review): orig 320-322, 326, 328, 330-333, 335-336 (align loop header,
// `assert_eq!` open/close, format args) are missing — confirm against the full file.
318 fn align_offset_stride1() {
319 // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// Test addresses 1..2*align so both the wrap-to-next-multiple and
// already-aligned cases are covered.
323 for ptr in 1..2 * align {
324 let expected = ptr % align;
// Distance up to the next multiple of `align` (0 if already aligned).
325 let offset = if expected == 0 { 0 } else { align - expected };
327 (ptr as *const u8).align_offset(align),
329 "ptr = {}, align = {}, size = 1",
// Next power-of-two alignment.
334 align = (align + 1).next_power_of_two();
// Cross-checks `align_offset` for odd-sized types (strides 3..10) against a
// brute-force search for the first aligned element.
// NOTE(review): many orig lines are missing (340-347, 349-351, 353, 355,
// 360, 362-365, 367-368, 370, 372-380, 382-385, 389, 398-399, 401-404):
// the other `A*` struct definitions, the brute-force loop header over `el`,
// the `assert` wrapper, `x`'s definition, and closing braces — confirm
// against the full file.
// 7/9/10-byte structs used to get unusual strides.
348 struct A7(u32, u16, u8);
352 struct A9(u32, u32, u8);
354 struct A10(u32, u32, u16);
// Returns whether `align_offset` DISAGREED with the brute-force expectation
// (presumably; the hidden caller OR-s results into `x` and asserts on it — verify).
356 unsafe fn test_weird_stride<T>(ptr: *const T, align: usize) -> bool {
357 let numptr = ptr as usize;
// usize::MAX == "cannot be aligned", matching `align_offset`'s sentinel.
358 let mut expected = usize::MAX;
359 // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
// Presumably inside a hidden loop over candidate element counts `el`.
361 if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
366 let got = ptr.align_offset(align);
369 "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
371 ::std::mem::size_of::<T>(),
381 // For pointers of stride != 1, we verify the algorithm against the naivest possible
// Miri is slow, so cap the tested alignments much lower under it.
386 let limit = if cfg!(miri) { 32 } else { 1024 };
387 while align < limit {
// Addresses 1..4*align cover several periods of each stride/alignment combo.
388 for ptr in 1usize..4 * align {
390 x |= test_weird_stride::<A3>(ptr as *const A3, align);
391 x |= test_weird_stride::<A4>(ptr as *const A4, align);
392 x |= test_weird_stride::<A5>(ptr as *const A5, align);
393 x |= test_weird_stride::<A6>(ptr as *const A6, align);
394 x |= test_weird_stride::<A7>(ptr as *const A7, align);
395 x |= test_weird_stride::<A8>(ptr as *const A8, align);
396 x |= test_weird_stride::<A9>(ptr as *const A9, align);
397 x |= test_weird_stride::<A10>(ptr as *const A10, align);
400 align = (align + 1).next_power_of_two();
// Fragment of the `offset_from` test; its header and the array `a`'s definition
// (orig 405-407) are not visible — NOTE(review): confirm against the full file.
// `offset_from` counts in elements (not bytes) and is signed, consistent in
// both directions with `offset`.
408 let ptr1: *mut i32 = &mut a[1];
409 let ptr2: *mut i32 = &mut a[3];
// Indices 3 - 1 = 2 elements apart.
411 assert_eq!(ptr2.offset_from(ptr1), 2);
412 assert_eq!(ptr1.offset_from(ptr2), -2);
// `offset` round-trips the same distances.
413 assert_eq!(ptr1.offset(2), ptr2);
414 assert_eq!(ptr2.offset(-2), ptr1);
// Tests `ptr::metadata`: `()` for sized/`extern` pointees, `usize` (length) for
// slice-tailed pointees, and `DynMetadata` (vtable pointer) for trait objects.
// Gated off the bootstrap compiler because `ptr::metadata` is new.
// NOTE(review): orig lines 420-421, 423-425, 438, 441, 446, 453-454, 461,
// 474-475 are missing (the fn header, `Unit`/`Extern` definitions, the
// enclosing `unsafe`, closing braces) — confirm against the full file.
419 #[cfg(not(bootstrap))]
// Struct with an unsized tail `B` — its metadata is the tail's metadata.
422 struct Pair<A, B: ?Sized>(A, B);
// All sized (and `extern type`) pointees have `()` metadata.
426 let () = metadata(&());
427 let () = metadata(&Unit);
428 let () = metadata(&4_u32);
429 let () = metadata(&String::new());
430 let () = metadata(&Some(4_u32));
431 let () = metadata(&ptr_metadata);
432 let () = metadata(&|| {});
433 let () = metadata(&[4, 7]);
434 let () = metadata(&(4, String::new()));
435 let () = metadata(&Pair(4, String::new()));
436 let () = metadata(0 as *const Extern);
437 let () = metadata(0 as *const <&u32 as std::ops::Deref>::Target);
// `str` and slice metadata is the length in elements.
439 assert_eq!(metadata("foo"), 3_usize);
440 assert_eq!(metadata(&[4, 7][..]), 2_usize);
// Slice-tailed tuple/struct: metadata is the tail slice's length.
442 let dst_tuple: &(bool, [u8]) = &(true, [0x66, 0x6F, 0x6F]);
443 let dst_struct: &Pair<bool, [u8]> = &Pair(true, [0x66, 0x6F, 0x6F]);
444 assert_eq!(metadata(dst_tuple), 3_usize);
445 assert_eq!(metadata(dst_struct), 3_usize);
// [u8] -> str transmute: bytes 0x66,0x6F,0x6F spell "foo"; metadata unchanged.
447 let dst_tuple: &(bool, str) = std::mem::transmute(dst_tuple);
448 let dst_struct: &Pair<bool, str> = std::mem::transmute(dst_struct);
449 assert_eq!(&dst_tuple.1, "foo");
450 assert_eq!(&dst_struct.1, "foo");
451 assert_eq!(metadata(dst_tuple), 3_usize);
452 assert_eq!(metadata(dst_struct), 3_usize);
// Trait-object metadata: vtable pointers, compared below by address.
455 let vtable_1: DynMetadata<dyn Debug> = metadata(&4_u16 as &dyn Debug);
456 let vtable_2: DynMetadata<dyn Display> = metadata(&4_u16 as &dyn Display);
457 let vtable_3: DynMetadata<dyn Display> = metadata(&4_u32 as &dyn Display);
// dyn-tailed tuple/struct share the tail's vtable.
458 let vtable_4: DynMetadata<dyn Display> = metadata(&(true, 7_u32) as &(bool, dyn Display));
459 let vtable_5: DynMetadata<dyn Display> =
460 metadata(&Pair(true, 7_u32) as &Pair<bool, dyn Display>);
// Transmute each DynMetadata to a raw address for comparison.
462 let address_1: usize = std::mem::transmute(vtable_1);
463 let address_2: usize = std::mem::transmute(vtable_2);
464 let address_3: usize = std::mem::transmute(vtable_3);
465 let address_4: usize = std::mem::transmute(vtable_4);
466 let address_5: usize = std::mem::transmute(vtable_5);
467 // Different trait => different vtable pointer
468 assert_ne!(address_1, address_2);
469 // Different erased type => different vtable pointer
470 assert_ne!(address_2, address_3);
471 // Same erased type and same trait => same vtable pointer
472 assert_eq!(address_3, address_4);
473 assert_eq!(address_3, address_5);
// Compile-time checks that `<T as Pointee>::Metadata` satisfies the trait
// bounds promised by libcore (Copy + Send + Sync + Ord + Hash + Unpin).
// Nothing here runs meaningfully at test time; the value is that it compiles.
// NOTE(review): this definition continues past the end of this excerpt (orig
// 505+), and orig 477, 483, 491, 493, 499-500, 502 are missing — confirm
// against the full file.
478 #[cfg(not(bootstrap))]
479 fn ptr_metadata_bounds() {
480 fn metadata_eq_method_address<T: ?Sized>() -> usize {
481 // The `Metadata` associated type has an `Ord` bound, so this is valid:
482 <<T as Pointee>::Metadata as PartialEq>::eq as usize
484 // "Synthetic" trait impls generated by the compiler like those of `Pointee`
485 // are not checked for bounds of associated type.
486 // So with a buggy libcore we could have both:
487 // * `<dyn Display as Pointee>::Metadata == DynMetadata`
488 // * `DynMetadata: !PartialEq`
489 // … and cause an ICE here:
490 metadata_eq_method_address::<dyn Display>();
492 // For this reason, let’s check here that bounds are satisfied:
// Instantiating the assert fn for each metadata type forces the bound check.
494 let _ = static_assert_expected_bounds_for_metadata::<()>;
495 let _ = static_assert_expected_bounds_for_metadata::<usize>;
496 let _ = static_assert_expected_bounds_for_metadata::<DynMetadata<dyn Display>>;
// Generic version: must hold for *every* `Pointee::Metadata`.
497 fn _static_assert_associated_type<T: ?Sized>() {
498 let _ = static_assert_expected_bounds_for_metadata::<<T as Pointee>::Metadata>;
501 fn static_assert_expected_bounds_for_metadata<Meta>()
503 // Keep this in sync with the associated type in `library/core/src/ptr/metadata.rs`
504 Meta: Copy + Send + Sync + Ord + std::hash::Hash + Unpin,