// except according to those terms.
use core::cmp;
-use core::heap::{Alloc, Layout};
use core::mem;
use core::ops::Drop;
-use core::ptr::{self, Unique};
+use core::ptr::{self, NonNull, Unique};
use core::slice;
-use heap::Heap;
-use super::boxed::Box;
-use super::allocator::CollectionAllocErr;
-use super::allocator::CollectionAllocErr::*;
+
+use alloc::{Alloc, Layout, Global, oom};
+use alloc::CollectionAllocErr;
+use alloc::CollectionAllocErr::*;
+use boxed::Box;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
-pub struct RawVec<T, A: Alloc = Heap> {
+pub struct RawVec<T, A: Alloc = Global> {
ptr: Unique<T>,
cap: usize,
a: A,
}
}
+ /// Like `empty` but parameterized over the choice of allocator for the returned `RawVec`.
+ pub const fn empty_in(a: A) -> Self {
+ // Unique::empty() doubles as "unallocated" and "zero-sized allocation"
+ RawVec {
+ ptr: Unique::empty(),
+ cap: 0,
+ a,
+ }
+ }
+
/// Like `with_capacity` but parameterized over the choice of
/// allocator for the returned RawVec.
#[inline]
unsafe {
let elem_size = mem::size_of::<T>();
- let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
- alloc_guard(alloc_size).expect("capacity overflow");
+ let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
+ alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
- mem::align_of::<T>() as *mut u8
+ NonNull::<T>::dangling().as_opaque()
} else {
let align = mem::align_of::<T>();
let result = if zeroed {
};
match result {
Ok(ptr) => ptr,
- Err(err) => a.oom(err),
+ Err(_) => oom(),
}
};
RawVec {
- ptr: Unique::new_unchecked(ptr as *mut _),
+ ptr: ptr.cast().into(),
cap,
a,
}
}
}
-impl<T> RawVec<T, Heap> {
+impl<T> RawVec<T, Global> {
/// Creates the biggest possible RawVec (on the system heap)
/// without allocating. If T has positive size, then this makes a
/// RawVec with capacity 0. If T has 0 size, then it makes a
/// RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
- Self::new_in(Heap)
+ Self::new_in(Global)
+ }
+
+ /// Create a `RawVec` with capacity 0 (on the system heap), regardless of `T`, without
+ /// allocating.
+ pub fn empty() -> Self {
+ Self::empty_in(Global)
}
/// Creates a RawVec (on the system heap) with exactly the
/// Aborts on OOM
#[inline]
pub fn with_capacity(cap: usize) -> Self {
- RawVec::allocate_in(cap, false, Heap)
+ RawVec::allocate_in(cap, false, Global)
}
/// Like `with_capacity` but guarantees the buffer is zeroed.
#[inline]
pub fn with_capacity_zeroed(cap: usize) -> Self {
- RawVec::allocate_in(cap, true, Heap)
+ RawVec::allocate_in(cap, true, Global)
}
}
}
}
-impl<T> RawVec<T, Heap> {
+impl<T> RawVec<T, Global> {
/// Reconstitutes a RawVec from a pointer, capacity.
///
/// # Undefined Behavior
RawVec {
ptr: Unique::new_unchecked(ptr),
cap,
- a: Heap,
+ a: Global,
}
}
// `from_size_align_unchecked`.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
- let new_layout = Layout::from_size_align_unchecked(new_size, cur.align());
- alloc_guard(new_size).expect("capacity overflow");
- let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8,
+ alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
+ let ptr_res = self.a.realloc(NonNull::from(self.ptr).as_opaque(),
cur,
- new_layout);
+ new_size);
match ptr_res {
- Ok(ptr) => (new_cap, Unique::new_unchecked(ptr as *mut T)),
- Err(e) => self.a.oom(e),
+ Ok(ptr) => (new_cap, ptr.cast().into()),
+ Err(_) => oom(),
}
}
None => {
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
match self.a.alloc_array::<T>(new_cap) {
Ok(ptr) => (new_cap, ptr.into()),
- Err(e) => self.a.oom(e),
+ Err(_) => oom(),
}
}
};
// overflow and the alignment is sufficiently small.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
- alloc_guard(new_size).expect("capacity overflow");
- let ptr = self.ptr() as *mut _;
- let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
- match self.a.grow_in_place(ptr, old_layout, new_layout) {
+ alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
+ match self.a.grow_in_place(NonNull::from(self.ptr).as_opaque(), old_layout, new_size) {
Ok(_) => {
// We can't directly divide `size`.
self.cap = new_cap;
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
- let new_layout = Layout::array::<T>(new_cap).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
- let old_ptr = self.ptr.as_ptr() as *mut u8;
- self.a.realloc(old_ptr, layout, new_layout)
+ debug_assert!(new_layout.align() == layout.align());
+ self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
- self.ptr = Unique::new_unchecked(res? as *mut T);
+ self.ptr = res?.cast().into();
self.cap = new_cap;
Ok(())
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
match self.try_reserve_exact(used_cap, needed_extra_cap) {
- Err(CapacityOverflow) => panic!("capacity overflow"),
- Err(AllocErr(e)) => self.a.oom(e),
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocErr) => oom(),
Ok(()) => { /* yay */ }
}
}
}
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?;
- let new_layout = Layout::array::<T>(new_cap).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
- let old_ptr = self.ptr.as_ptr() as *mut u8;
- self.a.realloc(old_ptr, layout, new_layout)
+ debug_assert!(new_layout.align() == layout.align());
+ self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
- self.ptr = Unique::new_unchecked(res? as *mut T);
+ self.ptr = res?.cast().into();
self.cap = new_cap;
Ok(())
/// The same as try_reserve, but errors are lowered to a call to oom().
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
match self.try_reserve(used_cap, needed_extra_cap) {
- Err(CapacityOverflow) => panic!("capacity overflow"),
- Err(AllocErr(e)) => self.a.oom(e),
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocErr) => oom(),
Ok(()) => { /* yay */ }
}
}
}
let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)
- .expect("capacity overflow");
+ .unwrap_or_else(|_| capacity_overflow());
// Here, `cap < used_cap + needed_extra_cap <= new_cap`
// (regardless of whether `self.cap - used_cap` wrapped).
// Therefore we can safely call grow_in_place.
- let ptr = self.ptr() as *mut _;
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
// FIXME: may crash and burn on over-reserve
- alloc_guard(new_layout.size()).expect("capacity overflow");
- match self.a.grow_in_place(ptr, old_layout, new_layout) {
+ alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
+ match self.a.grow_in_place(
+ NonNull::from(self.ptr).as_opaque(), old_layout, new_layout.size(),
+ ) {
Ok(_) => {
self.cap = new_cap;
true
let new_size = elem_size * amount;
let align = mem::align_of::<T>();
let old_layout = Layout::from_size_align_unchecked(old_size, align);
- let new_layout = Layout::from_size_align_unchecked(new_size, align);
- match self.a.realloc(self.ptr.as_ptr() as *mut u8,
+ match self.a.realloc(NonNull::from(self.ptr).as_opaque(),
old_layout,
- new_layout) {
- Ok(p) => self.ptr = Unique::new_unchecked(p as *mut T),
- Err(err) => self.a.oom(err),
+ new_size) {
+ Ok(p) => self.ptr = p.cast().into(),
+ Err(_) => oom(),
}
}
self.cap = amount;
}
}
-impl<T> RawVec<T, Heap> {
+impl<T> RawVec<T, Global> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behavior to call
let elem_size = mem::size_of::<T>();
if elem_size != 0 {
if let Some(layout) = self.current_layout() {
- let ptr = self.ptr() as *mut u8;
- self.a.dealloc(ptr, layout);
+ self.a.dealloc(NonNull::from(self.ptr).as_opaque(), layout);
}
}
}
}
}
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+fn capacity_overflow() -> ! {
+ panic!("capacity overflow")
+}
+
#[cfg(test)]
mod tests {
use super::*;
+ use alloc::Opaque;
#[test]
fn allocator_param() {
// before allocation attempts start failing.
struct BoundedAlloc { fuel: usize }
unsafe impl Alloc for BoundedAlloc {
- unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+ unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<Opaque>, AllocErr> {
let size = layout.size();
if size > self.fuel {
- return Err(AllocErr::Unsupported { details: "fuel exhausted" });
+ return Err(AllocErr);
}
- match Heap.alloc(layout) {
+ match Global.alloc(layout) {
ok @ Ok(_) => { self.fuel -= size; ok }
err @ Err(_) => err,
}
}
- unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
- Heap.dealloc(ptr, layout)
+ unsafe fn dealloc(&mut self, ptr: NonNull<Opaque>, layout: Layout) {
+ Global.dealloc(ptr, layout)
}
}