//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements `TypedArena`, a simple arena that can only hold
//! objects of a single type.
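//!
//! A minimal usage sketch (an illustrative addition, not part of the original
//! docs; it assumes this crate is available under the name `arena`):
//!
//! ```ignore
//! use arena::TypedArena;
//!
//! // Each `alloc` is just a pointer bump within the current chunk; every
//! // allocated value is dropped together when the arena itself is dropped.
//! let arena: TypedArena<u32> = TypedArena::default();
//! let a = arena.alloc(1);
//! let b = arena.alloc(2);
//! assert_eq!(*a + *b, 3);
//! ```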

#![doc(html_root_url = "https://doc.rust-lang.org/nightly/",
       test(no_crate_inject, attr(deny(warnings))))]

#![deny(rust_2018_idioms)]
#![deny(unused_lifetimes)]

#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(raw_vec_internals)]
#![cfg_attr(test, feature(test))]

extern crate alloc;

use rustc_data_structures::cold_path;
use rustc_data_structures::sync::MTLock;
use smallvec::SmallVec;

use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics;
use std::marker::{PhantomData, Send};
use std::mem;
use std::ptr;
use std::slice;

use alloc::raw_vec::RawVec;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

struct TypedArenaChunk<T> {
    /// The raw storage for the arena chunk.
    storage: RawVec<T>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> TypedArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> TypedArenaChunk<T> {
        TypedArenaChunk {
            storage: RawVec::with_capacity(capacity),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<u8> takes linear time.
        if mem::needs_drop::<T>() {
            let mut start = self.start();
            // Destroy all allocated objects.
            for _ in 0..len {
                ptr::drop_in_place(start);
                start = start.offset(1);
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&self) -> *mut T {
        self.storage.ptr()
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                !0 as *mut T
            } else {
                self.start().add(self.storage.cap())
            }
        }
    }
}

// The minimum size, in bytes, of a freshly allocated chunk.
const PAGE: usize = 4096;

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(0 as *mut T),
            end: Cell::new(0 as *mut T),
            chunks: RefCell::new(vec![]),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    pub fn in_arena(&self, ptr: *const T) -> bool {
        let ptr = ptr as *const T as *mut T;

        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
    }

    /// Allocates an object in the `TypedArena`, returning a reference to it.
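    ///
    /// A brief usage sketch (an illustrative addition, not from the original
    /// docs):
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// // The value is moved into arena-owned memory; the returned `&mut`
    /// // borrows from the arena.
    /// let value = arena.alloc(42i64);
    /// *value += 1;
    /// assert_eq!(*value, 43);
    /// ```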
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
                self.ptr.set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1) as *mut T);
                let ptr = mem::align_of::<T>() as *mut T;
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().offset(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, len: usize) -> bool {
        let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
        let at_least_bytes = len.checked_mul(mem::size_of::<T>()).unwrap();
        available_capacity_bytes >= at_least_bytes
    }

    /// Ensures there's enough space in the current chunk to fit `len` objects.
    #[inline]
    fn ensure_capacity(&self, len: usize) {
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }
    }

    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        self.ensure_capacity(len);

        let start_ptr = self.ptr.get();
        self.ptr.set(start_ptr.add(len));
        start_ptr
    }

    /// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
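    ///
    /// A usage sketch (an illustrative addition, not from the original docs):
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// // The slice contents are copied into arena-owned memory.
    /// let copied = arena.alloc_slice(&[1u8, 2, 3]);
    /// copied[0] = 9;
    /// assert_eq!(copied, &[9, 2, 3]);
    /// ```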
    #[inline]
    pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        unsafe {
            let len = slice.len();
            let start_ptr = self.alloc_raw_slice(len);
            slice.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
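
    /// Allocates the items yielded by an iterator into the arena, returning a
    /// mutable slice. (This doc comment and example are illustrative additions,
    /// not from the original source.)
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// // Exact-size iterators take the fast path that writes values in place;
    /// // all other iterators are first collected into a `SmallVec`.
    /// let squares = arena.alloc_from_iter((1u32..4).map(|x| x * x));
    /// assert_eq!(squares, &[1, 4, 9]);
    /// ```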
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        assert!(mem::size_of::<T>() != 0);
        let mut iter = iter.into_iter();
        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator will produce here
                let len = min;
                if len == 0 {
                    return &mut [];
                }

                self.ensure_capacity(len);

                let slice = self.ptr.get();

                unsafe {
                    let mut ptr = self.ptr.get();
                    for _ in 0..len {
                        // Write into uninitialized memory.
                        ptr::write(ptr, iter.next().unwrap());
                        // Advance the pointer.
                        ptr = ptr.offset(1);
                        // Update the pointer per iteration so if `iter.next()` panics
                        // we destroy the correct amount
                        self.ptr.set(ptr);
                    }
                    slice::from_raw_parts_mut(slice, len)
                }
            }
            _ => {
                cold_path(move || -> &mut [T] {
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr = self.alloc_raw_slice(len);
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, n: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let (chunk, mut new_capacity);
            if let Some(last_chunk) = chunks.last_mut() {
                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                let currently_used_cap = used_bytes / mem::size_of::<T>();
                last_chunk.entries = currently_used_cap;
                if last_chunk.storage.reserve_in_place(currently_used_cap, n) {
                    self.end.set(last_chunk.end());
                    return;
                } else {
                    new_capacity = last_chunk.storage.cap();
                    loop {
                        new_capacity = new_capacity.checked_mul(2).unwrap();
                        if new_capacity >= currently_used_cap + n {
                            break;
                        }
                    }
                }
            } else {
                let elem_size = cmp::max(1, mem::size_of::<T>());
                new_capacity = cmp::max(n, PAGE / elem_size);
            }
            chunk = TypedArenaChunk::<T>::new(new_capacity);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    /// Clears the arena. Deallocates all but the longest chunk which may be reused.
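    ///
    /// A usage sketch (an illustrative addition, not from the original docs):
    ///
    /// ```ignore
    /// let mut arena: TypedArena<String> = TypedArena::default();
    /// arena.alloc(String::from("hello"));
    /// // Drops all allocated values; the longest chunk is kept for reuse.
    /// arena.clear();
    /// ```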
    pub fn clear(&mut self) {
        unsafe {
            // Clear the last chunk, which is partially filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.last_mut() {
                self.clear_last_chunk(&mut last_chunk);
                let len = chunks_borrow.len();
                // If `T` is ZST, code below has no effect.
                for mut chunk in chunks_borrow.drain(..len - 1) {
                    chunk.destroy(chunk.entries);
                }
            }
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start() as usize;
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get() as usize;
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // RawVec handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}
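
/// An arena that can hold objects of multiple different types, provided they
/// never need `Drop` to run (the `alloc` methods assert `!mem::needs_drop`).
///
/// A usage sketch (this doc comment and example are illustrative additions,
/// not from the original source):
///
/// ```ignore
/// let arena = DroplessArena::default();
/// // Values of different `Copy`-like types can share one arena, since no
/// // destructors ever have to run.
/// let n = arena.alloc(7u64);
/// let pair = arena.alloc((1u8, 2u8));
/// assert_eq!(*n + pair.0 as u64 + pair.1 as u64, 10);
/// ```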
pub struct DroplessArena {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut u8>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            ptr: Cell::new(0 as *mut u8),
            end: Cell::new(0 as *mut u8),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
        let ptr = ptr as *const u8 as *mut u8;

        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
    }

    #[inline]
    fn align(&self, align: usize) {
        // Round `ptr` up to the next multiple of `align` (which must be a
        // power of two) by adding `align - 1` and masking off the low bits.
        let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
        self.ptr.set(final_address as *mut u8);
        assert!(self.ptr <= self.end);
    }

    #[inline(never)]
    #[cold]
    fn grow(&self, needed_bytes: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let (chunk, mut new_capacity);
            if let Some(last_chunk) = chunks.last_mut() {
                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                if last_chunk.storage.reserve_in_place(used_bytes, needed_bytes) {
                    self.end.set(last_chunk.end());
                    return;
                } else {
                    new_capacity = last_chunk.storage.cap();
                    loop {
                        new_capacity = new_capacity.checked_mul(2).unwrap();
                        if new_capacity >= used_bytes + needed_bytes {
                            break;
                        }
                    }
                }
            } else {
                new_capacity = cmp::max(needed_bytes, PAGE);
            }
            chunk = TypedArenaChunk::<u8>::new(new_capacity);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
        unsafe {
            assert!(bytes != 0);

            self.align(align);

            let future_end = intrinsics::arith_offset(self.ptr.get(), bytes as isize);
            if (future_end as *mut u8) >= self.end.get() {
                self.grow(bytes);
            }

            let ptr = self.ptr.get();
            // Set the pointer past ourselves
            self.ptr.set(intrinsics::arith_offset(self.ptr.get(), bytes as isize) as *mut u8);
            slice::from_raw_parts_mut(ptr, bytes)
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());

        let mem = self.alloc_raw(mem::size_of::<T>(), mem::align_of::<T>()) as *mut _ as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(slice.len() * mem::size_of::<T>(), mem::align_of::<T>())
            as *mut _ as *mut T;

        unsafe {
            let arena_slice = slice::from_raw_parts_mut(mem, slice.len());
            arena_slice.copy_from_slice(slice);
            arena_slice
        }
    }

    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            let value = iter.next();
            if i >= len || value.is_none() {
                // We only return as many items as the iterator gave us, even
                // though it was supposed to give us `len`
                return slice::from_raw_parts_mut(mem, i);
            }
            ptr::write(mem.offset(i as isize), value.unwrap());
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator will produce here
                let len = min;
                if len == 0 {
                    return &mut [];
                }
                let size = len.checked_mul(mem::size_of::<T>()).unwrap();
                let mem = self.alloc_raw(size, mem::align_of::<T>()) as *mut _ as *mut T;
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            _ => {
                cold_path(move || -> &mut [T] {
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr = self.alloc_raw(len * mem::size_of::<T>(), mem::align_of::<T>())
                            as *mut _ as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

// FIXME(@Zoxc): this type is entirely unused in rustc
#[derive(Default)]
pub struct SyncTypedArena<T> {
    lock: MTLock<TypedArena<T>>,
}

impl<T> SyncTypedArena<T> {
    #[inline(always)]
    pub fn alloc(&self, object: T) -> &mut T {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
    }

    #[inline(always)]
    pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
    }

    #[inline(always)]
    pub fn clear(&mut self) {
        self.lock.get_mut().clear();
    }
}

#[derive(Default)]
pub struct SyncDroplessArena {
    lock: MTLock<DroplessArena>,
}

impl SyncDroplessArena {
    #[inline(always)]
    pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
        self.lock.lock().in_arena(ptr)
    }

    #[inline(always)]
    pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_raw(bytes, align) as *mut [u8]) }
    }

    #[inline(always)]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
    }

    #[inline(always)]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
    }
}