2 use core::cell::RefCell;
// Fragment: casting a `*mut Pair` to `*mut isize` must alias the first field
// (`fst`), both for reads through `*iptr` and for whole-struct writes through
// `*pptr`. NOTE(review): this excerpt is sampled — the leading integers on
// each line are extraction artifacts (original line numbers), and interior
// lines are missing (e.g. the write that makes `*iptr` read as 30 between
// original lines 14 and 16, presumably `*iptr = 30;` — confirm upstream).
11 let mut p = Pair {fst: 10, snd: 20};
12 let pptr: *mut Pair = &mut p;
13 let iptr: *mut isize = pptr as *mut isize;
14 assert_eq!(*iptr, 10);
16 assert_eq!(*iptr, 30);
17 assert_eq!(p.fst, 30);
// Writing a whole new Pair through the struct pointer must be visible both
// through the field-aliasing pointer and through `p` itself.
19 *pptr = Pair {fst: 50, snd: 60};
20 assert_eq!(*iptr, 50);
21 assert_eq!(p.fst, 50);
22 assert_eq!(p.snd, 60);
// Fragment: `ptr::copy` of single u16 elements between two vectors, at
// matching and mismatched offsets. Each `assert!` here is truncated — the
// continuation lines checking v1[1]/v1[2] are missing from this excerpt, so
// only the first conjunct of each condition is visible.
24 let v0 = vec![32000u16, 32001u16, 32002u16];
25 let mut v1 = vec![0u16, 0u16, 0u16];
// Copy v0[1] into v1[1]: only the middle element changes.
27 copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
28 assert!((v1[0] == 0u16 &&
// Copy v0[2] into v1[0].
31 copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
32 assert!((v1[0] == 32002u16 &&
// Copy v0[0] into v1[2]; v1[0] keeps the value written above.
35 copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
36 assert!((v1[0] == 32002u16 &&
// Fragment of the `is_null` tests. Covers: thin pointers (null vs.
// wrapping_offset off null is non-null), fat pointers to slices, and fat
// pointers to trait objects — nullness is determined by the data half of a
// fat pointer, so a null thin pointer cast to an unsized type is still null.
44 let p: *const isize = null();
// `wrapping_offset(1)` from null yields a non-null (but invalid) address;
// `is_null` only checks the address, not validity.
47 let q = p.wrapping_offset(1);
48 assert!(!q.is_null());
50 let mp: *mut isize = null_mut();
51 assert!(mp.is_null());
53 let mq = mp.wrapping_offset(1);
54 assert!(!mq.is_null());
56 // Pointers to unsized types -- slices
57 let s: &mut [u8] = &mut [1, 2, 3];
58 let cs: *const [u8] = s;
59 assert!(!cs.is_null());
61 let ms: *mut [u8] = s;
62 assert!(!ms.is_null());
// Even an empty slice comes from a real (dangling-but-nonnull) reference.
64 let cz: *const [u8] = &[];
65 assert!(!cz.is_null());
67 let mz: *mut [u8] = &mut [];
68 assert!(!mz.is_null());
// Unsize-coercing a null array pointer keeps the null data pointer.
70 let ncs: *const [u8] = null::<[u8; 3]>();
71 assert!(ncs.is_null());
73 let nms: *mut [u8] = null_mut::<[u8; 3]>();
74 assert!(nms.is_null());
76 // Pointers to unsized types -- trait objects
77 let ci: *const dyn ToString = &3;
78 assert!(!ci.is_null());
80 let mi: *mut dyn ToString = &mut 3;
81 assert!(!mi.is_null());
83 let nci: *const dyn ToString = null::<isize>();
84 assert!(nci.is_null());
86 let nmi: *mut dyn ToString = null_mut::<isize>();
87 assert!(nmi.is_null());
// Fragment of the `as_ref` tests: `ptr.as_ref()` is `None` for null and
// `Some(&*ptr)` otherwise, for thin pointers, slices, and trait objects.
// NOTE(review): these derefs are inside an `unsafe` block whose framing is
// not visible in this excerpt.
93 let p: *const isize = null();
94 assert_eq!(p.as_ref(), None);
96 let q: *const isize = &2;
97 assert_eq!(q.as_ref().unwrap(), &2);
// `*mut T` also exposes `as_ref` (shared-borrow view of a mut pointer).
99 let p: *mut isize = null_mut();
100 assert_eq!(p.as_ref(), None);
102 let q: *mut isize = &mut 2;
103 assert_eq!(q.as_ref().unwrap(), &2);
105 // Lifetime inference
// `u` is declared on a line missing from this excerpt — presumably a local
// `isize` with value 2; confirm upstream.
108 let p = &u as *const isize;
109 assert_eq!(p.as_ref().unwrap(), &2);
112 // Pointers to unsized types -- slices
113 let s: &mut [u8] = &mut [1, 2, 3];
114 let cs: *const [u8] = s;
115 assert_eq!(cs.as_ref(), Some(&*s));
117 let ms: *mut [u8] = s;
118 assert_eq!(ms.as_ref(), Some(&*s));
120 let cz: *const [u8] = &[];
121 assert_eq!(cz.as_ref(), Some(&[][..]));
123 let mz: *mut [u8] = &mut [];
124 assert_eq!(mz.as_ref(), Some(&[][..]));
// Null fat pointers yield None just like null thin pointers.
126 let ncs: *const [u8] = null::<[u8; 3]>();
127 assert_eq!(ncs.as_ref(), None);
129 let nms: *mut [u8] = null_mut::<[u8; 3]>();
130 assert_eq!(nms.as_ref(), None);
132 // Pointers to unsized types -- trait objects
133 let ci: *const dyn ToString = &3;
134 assert!(ci.as_ref().is_some());
136 let mi: *mut dyn ToString = &mut 3;
137 assert!(mi.as_ref().is_some());
139 let nci: *const dyn ToString = null::<isize>();
140 assert!(nci.as_ref().is_none());
142 let nmi: *mut dyn ToString = null_mut::<isize>();
143 assert!(nmi.as_ref().is_none());
// Fragment of the `as_mut` tests: mirror of `as_ref` for `*mut T` — `None`
// for null, `Some(&mut *ptr)` otherwise, including slice and trait-object
// fat pointers. NOTE(review): enclosing `fn`/`unsafe` framing is missing
// from this excerpt.
150 let p: *mut isize = null_mut();
151 assert!(p.as_mut() == None);
153 let q: *mut isize = &mut 2;
154 assert!(q.as_mut().unwrap() == &mut 2);
156 // Lifetime inference
// `u` is declared on a missing line — presumably `let mut u = 2isize;`
// (the assertion below expects &mut 2); confirm upstream.
159 let p = &mut u as *mut isize;
160 assert!(p.as_mut().unwrap() == &mut 2);
163 // Pointers to unsized types -- slices
164 let s: &mut [u8] = &mut [1, 2, 3];
165 let ms: *mut [u8] = s;
166 assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));
168 let mz: *mut [u8] = &mut [];
169 assert_eq!(mz.as_mut(), Some(&mut [][..]));
171 let nms: *mut [u8] = null_mut::<[u8; 3]>();
172 assert_eq!(nms.as_mut(), None);
174 // Pointers to unsized types -- trait objects
175 let mi: *mut dyn ToString = &mut 3;
176 assert!(mi.as_mut().is_some());
178 let nmi: *mut dyn ToString = null_mut::<isize>();
179 assert!(nmi.as_mut().is_none());
// Walks a Vec with raw pointers: a read loop over `xs` (body lines missing
// from this excerpt) and a mutating loop over `xs_mut` that — given the
// final assertion — doubles each element from 5 to 10. `xs_mut`'s
// declaration and the loop bodies fall in the missing original lines
// 189-195/198/200/202-203; confirm upstream.
184 fn test_ptr_addition() {
186 let xs = vec![5; 16];
187 let mut ptr = xs.as_ptr();
// One-past-the-end pointer is valid to compute and compare against.
188 let end = ptr.offset(16);
196 let mut m_ptr = xs_mut.as_mut_ptr();
197 let m_end = m_ptr.offset(16);
199 while m_ptr < m_end {
201 m_ptr = m_ptr.offset(1);
204 assert!(xs_mut == vec![10; 16]);
// Walks vectors backwards with negative `offset`s: a read check indexed by
// `idx` (its loop header is on a missing line) and a mutating loop from the
// last element down to `m_start`. Given the final assertion, the mutating
// body (missing original lines 223-225, 227-228, 230) doubles each element
// of 0..=9 in place; confirm upstream.
209 fn test_ptr_subtraction() {
211 let xs = vec![0,1,2,3,4,5,6,7,8,9];
213 let ptr = xs.as_ptr();
// Element i of xs equals i, so reading at offset idx must yield idx.
216 assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
221 let m_start = xs_mut.as_mut_ptr();
// Start at the last element (index 9) and walk toward m_start.
222 let mut m_ptr = m_start.offset(9);
226 if m_ptr == m_start {
229 m_ptr = m_ptr.offset(-1);
232 assert_eq!(xs_mut, [0,2,4,6,8,10,12,14,16,18]);
// `ptr::write_bytes` must fill every byte of the 20-byte array with 5.
// (Closing brace is on a line missing from this excerpt.)
237 fn test_set_memory() {
238 let mut xs = [0u8; 20];
239 let ptr = xs.as_mut_ptr();
// SAFETY (as written upstream): ptr points to xs and len is xs.len().
240 unsafe { write_bytes(ptr, 5u8, xs.len()); }
241 assert!(xs == [5u8; 20]);
// `NonNull` must work for unsized (slice) pointees: wrap a valid slice
// pointer and read it back via `as_ref`. The comparison of `ys` against `zs`
// falls on lines missing from this excerpt; presumably it asserts equality —
// confirm upstream.
245 fn test_unsized_nonnull() {
246 let xs: &[i32] = &[1, 2, 3];
// SAFETY (as written upstream): the pointer comes from a live reference.
247 let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
248 let ys = unsafe { ptr.as_ref() };
249 let zs: &[i32] = &[1, 2, 3];
// Two fn pointers to the same variadic extern symbol must hash identically
// (exercises Hash for variadic fn-pointer types). The `extern` block framing
// around the declaration, and `q`'s binding (presumably a second pointer to
// the same symbol), fall on lines missing from this excerpt — confirm
// upstream. SipHasher is deprecated upstream but retained here for the test.
255 // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
256 // ABI, or even point to an actual executable code, because the function itself is never invoked.
258 pub fn test_variadic_fnptr() {
259 use core::hash::{Hash, SipHasher};
261 fn test_variadic_fnptr(_: u64, ...) -> f64;
263 let p: unsafe extern fn(u64, ...) -> f64 = test_variadic_fnptr;
267 let mut s = SipHasher::new();
// Hash::hash returns (), so this asserts both calls complete equally;
// the real check is that both pointers feed the same hasher state.
268 assert_eq!(p.hash(&mut s), q.hash(&mut s));
// `ptr::write_unaligned` must NOT drop the value previously stored at the
// destination: after overwriting Dropper(1) with `c`, only one drop (of the
// written value, pushed as 0 per the final assertion) is recorded. The
// Dropper struct definition and the binding of `c` (presumably Dropper(0))
// fall on lines missing from this excerpt — confirm upstream.
272 fn write_unaligned_drop() {
// thread_local drop log (thread_local! framing is on a missing line).
274 static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
279 impl Drop for Dropper {
281 DROPS.with(|d| d.borrow_mut().push(self.0));
287 let mut t = Dropper(1);
// Overwrite t without reading or dropping its old value.
288 unsafe { write_unaligned(&mut t, c); }
// Only the overwriting value's eventual drop is observed — never t's 1.
290 DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
// align_offset for zero-sized pointees: a ZST pointer is either already
// aligned (offset 0) or can never be aligned (offset !0 == usize::MAX),
// since stepping by a zero-sized stride changes nothing. The surrounding
// loop over power-of-two `p` (header and closing lines) is missing from
// this excerpt.
294 #[cfg(not(miri))] // Miri does not compute a maximal `mid` for `align_offset`
295 fn align_offset_zst() {
296 // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
297 // all, because no amount of elements will align the pointer.
298 assert_eq!((p as *const ()).align_offset(p), 0);
// One past an alignment boundary can never be realigned with stride 0.
302 assert_eq!(((p + 1) as *const ()).align_offset(p), !0);
304 p = (p + 1).next_power_of_two();
// align_offset for stride-1 pointees (u8): always alignable; the offset is
// the distance to the next multiple of `align`. The outer loop over `align`
// (initialization and closing lines) is missing from this excerpt.
309 #[cfg(not(miri))] // Miri does not compute a maximal `mid` for `align_offset`
310 fn align_offset_stride1() {
311 // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
315 for ptr in 1..2*align {
316 let expected = ptr % align;
// Distance to the next aligned address; 0 when already aligned.
317 let offset = if expected == 0 { 0 } else { align - expected };
318 assert_eq!((ptr as *const u8).align_offset(align), offset,
319 "ptr = {}, align = {}, size = 1", ptr, align);
// Advance to the next power-of-two alignment.
321 align = (align + 1).next_power_of_two();
// align_offset for non-power-of-two strides (structs A3..A10 of sizes
// 3..10): validates the optimized implementation against a brute-force
// search for the first aligned element. Several struct definitions
// (A3-A6, A8), the brute-force loop body, the mismatch-reporting branch, and
// the `x`/assert framing fall on lines missing from this excerpt — confirm
// upstream. `test_weird_stride` returns true on mismatch; the results are
// OR-ed into `x` so a single final assert catches any failure.
326 #[cfg(not(miri))] // Miri is too slow
327 fn align_offset_weird_strides() {
336 struct A7(u32, u16, u8);
340 struct A9(u32, u32, u8);
342 struct A10(u32, u32, u16);
344 unsafe fn test_weird_stride<T>(ptr: *const T, align: usize) -> bool {
345 let numptr = ptr as usize;
// Sentinel: stays max_value() if no element count aligns the pointer.
346 let mut expected = usize::max_value();
347 // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
349 if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
354 let got = ptr.align_offset(align);
// Diagnostic printed only on mismatch (guard is on a missing line).
356 eprintln!("aligning {:p} (with stride of {}) to {}, expected {}, got {}", ptr,
357 ::std::mem::size_of::<T>(), align, expected, got);
363 // For pointers of stride != 1, we verify the algorithm against the naivest possible
368 for ptr in 1usize..4*align {
370 x |= test_weird_stride::<A3>(ptr as *const A3, align);
371 x |= test_weird_stride::<A4>(ptr as *const A4, align);
372 x |= test_weird_stride::<A5>(ptr as *const A5, align);
373 x |= test_weird_stride::<A6>(ptr as *const A6, align);
374 x |= test_weird_stride::<A7>(ptr as *const A7, align);
375 x |= test_weird_stride::<A8>(ptr as *const A8, align);
376 x |= test_weird_stride::<A9>(ptr as *const A9, align);
377 x |= test_weird_stride::<A10>(ptr as *const A10, align);
380 align = (align + 1).next_power_of_two();