#![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]
+#![feature(ptr_const_cast)]
use smallvec::SmallVec;
use std::cmp;
use std::marker::{PhantomData, Send};
use std::mem::{self, MaybeUninit};
-use std::ptr;
+use std::ptr::{self, NonNull};
use std::slice;
#[inline(never)]
/// A single chunk of arena-owned storage; `entries` of the slots hold
/// initialized values.
///
/// NOTE(review): this is a patch hunk — `storage` is changed from an owning
/// `Box` to a raw `NonNull`, so the allocation must now be reclaimed manually
/// (the `unsafe impl Drop` added elsewhere in this patch does that).
struct ArenaChunk<T = u8> {
/// The raw storage for the arena chunk.
- storage: Box<[MaybeUninit<T>]>,
+ storage: NonNull<[MaybeUninit<T>]>,
/// The number of valid entries in the chunk.
entries: usize,
}
+/// Manual `Drop` now that `storage` is a raw `NonNull` rather than a `Box`:
+/// reconstitute the `Box` from the raw pointer so the allocation is freed
+/// exactly once.
+///
+/// `#[may_dangle]` asserts to dropck that this drop never reads or drops any
+/// `T` values — only the backing allocation is released — which is why the
+/// impl must be written as `unsafe impl`.
+///
+/// NOTE(review): this assumes any initialized entries have already been
+/// dropped elsewhere (e.g. via the `drop_in_place` path visible below) —
+/// confirm against the full file.
+unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
+ fn drop(&mut self) {
+ unsafe { Box::from_raw(self.storage.as_mut()) };
+ }
+}
+
impl<T> ArenaChunk<T> {
/// Allocates backing storage for `capacity` elements, all uninitialized.
///
/// The freshly allocated `Box` is immediately leaked into a `NonNull` via
/// `Box::into_raw`; the matching `Box::from_raw` lives in this type's `Drop`
/// impl, so the allocation is still reclaimed exactly once.
///
/// NOTE(review): the `unsafe` contract of this constructor is not stated in
/// the visible hunk — presumably callers must uphold capacity/size
/// invariants; confirm against the full file.
#[inline]
unsafe fn new(capacity: usize) -> ArenaChunk<T> {
- ArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 }
+ ArenaChunk {
+ // `Box::into_raw` never returns null, so this `unwrap` cannot fail.
+ storage: NonNull::new(Box::into_raw(Box::new_uninit_slice(capacity))).unwrap(),
+ entries: 0,
+ }
}
/// Destroys this arena chunk.
// The branch on needs_drop() is an -O1 performance optimization.
// Without the branch, dropping TypedArena<u8> takes linear time.
if mem::needs_drop::<T>() {
- ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut self.storage[..len]));
+ let slice = &mut *(self.storage.as_mut());
+ ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&mut self) -> *mut T {
- MaybeUninit::slice_as_mut_ptr(&mut self.storage)
+ // Casting the fat `*mut [MaybeUninit<T>]` slice pointer to a thin
+ // `*mut T` yields a pointer to the first element; this relies on
+ // `MaybeUninit<T>` being `#[repr(transparent)]` over `T`.
+ self.storage.as_ptr() as *mut T
}
// Returns a pointer to the end of the allocated space.
// A pointer as large as possible for zero-sized elements.
ptr::invalid_mut(!0)
} else {
- self.start().add(self.storage.len())
+ self.start().add((*self.storage.as_ptr()).len())
}
}
}
// If the previous chunk's len is less than HUGE_PAGE
// bytes, then this chunk will be least double the previous
// chunk's size.
- new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
+ new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / elem_size / 2);
new_cap *= 2;
} else {
new_cap = PAGE / elem_size;
// If the previous chunk's len is less than HUGE_PAGE
// bytes, then this chunk will be least double the previous
// chunk's size.
- new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
+ new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / 2);
new_cap *= 2;
} else {
new_cap = PAGE;