//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements `TypedArena`, a simple arena that can only hold
//! objects of a single type.
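//!
//! # Example
//!
//! A minimal usage sketch (the crate name `arena` in the `use` line is an
//! assumption, so the example is not compiled as a doctest):
//!
//! ```ignore
//! use arena::TypedArena;
//!
//! let arena = TypedArena::default();
//! // Allocation is just a pointer bump; the arena owns the value.
//! let x: &mut u32 = arena.alloc(41);
//! *x += 1;
//! assert_eq!(*x, 42);
//! // Everything allocated here is dropped together when `arena` is dropped.
//! ```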

#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(raw_vec_internals)]
#![cfg_attr(test, feature(test))]

extern crate alloc;

use rustc_data_structures::cold_path;
use rustc_data_structures::sync::MTLock;
use smallvec::SmallVec;

use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics;
use std::marker::{PhantomData, Send};
use std::mem;
use std::ptr;
use std::slice;

use alloc::raw_vec::RawVec;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

struct TypedArenaChunk<T> {
    /// The raw storage for the arena chunk.
    storage: RawVec<T>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

impl<T> TypedArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> TypedArenaChunk<T> {
        TypedArenaChunk { storage: RawVec::with_capacity(capacity), entries: 0 }
    }

    /// Destroys this arena chunk.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<u8> takes linear time.
        if mem::needs_drop::<T>() {
            let mut start = self.start();
            // Destroy all allocated objects.
            for _ in 0..len {
                ptr::drop_in_place(start);
                start = start.offset(1);
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&self) -> *mut T {
        self.storage.ptr()
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&self) -> *mut T {
        unsafe {
            if mem::size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                !0 as *mut T
            } else {
                self.start().add(self.storage.capacity())
            }
        }
    }
}
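
/// The target size, in bytes, of the first chunk of an arena (one typical
/// memory page); `grow` uses it when no chunk exists yet.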
const PAGE: usize = 4096;

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: RefCell::new(vec![]),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
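    /// Returns `true` if `ptr` points into memory owned by this arena
    /// (checked with a linear scan over the arena's chunks).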
    pub fn in_arena(&self, ptr: *const T) -> bool {
        let ptr = ptr as *const T as *mut T;

        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
    }

    /// Allocates an object in the `TypedArena`, returning a reference to it.
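    ///
    /// A usage sketch (illustrative, not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// let s: &mut String = arena.alloc(String::from("hello"));
    /// s.push_str(", world");
    /// assert_eq!(s.as_str(), "hello, world");
    /// ```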
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if mem::size_of::<T>() == 0 {
                self.ptr.set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1) as *mut T);
                let ptr = mem::align_of::<T>() as *mut T;
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().offset(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, len: usize) -> bool {
        let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
        let at_least_bytes = len.checked_mul(mem::size_of::<T>()).unwrap();
        available_capacity_bytes >= at_least_bytes
    }

    /// Ensures there's enough space in the current chunk to fit `len` objects.
    #[inline]
    fn ensure_capacity(&self, len: usize) {
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }
    }

    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);

        self.ensure_capacity(len);

        let start_ptr = self.ptr.get();
        self.ptr.set(start_ptr.add(len));
        start_ptr
    }

    /// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
    #[inline]
    pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        unsafe {
            let len = slice.len();
            let start_ptr = self.alloc_raw_slice(len);
            slice.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        assert!(mem::size_of::<T>() != 0);
        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying it and then forgetting
        // the content of the SmallVec
        unsafe {
            let len = vec.len();
            let start_ptr = self.alloc_raw_slice(len);
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, n: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let (chunk, mut new_capacity);
            if let Some(last_chunk) = chunks.last_mut() {
                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                let currently_used_cap = used_bytes / mem::size_of::<T>();
                last_chunk.entries = currently_used_cap;
                if last_chunk.storage.reserve_in_place(currently_used_cap, n) {
                    self.end.set(last_chunk.end());
                    return;
                } else {
                    new_capacity = last_chunk.storage.capacity();
                    loop {
                        new_capacity = new_capacity.checked_mul(2).unwrap();
                        if new_capacity >= currently_used_cap + n {
                            break;
                        }
                    }
                }
            } else {
                let elem_size = cmp::max(1, mem::size_of::<T>());
                new_capacity = cmp::max(n, PAGE / elem_size);
            }
            chunk = TypedArenaChunk::<T>::new(new_capacity);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    /// Clears the arena. Deallocates all but the longest chunk which may be reused.
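    ///
    /// A usage sketch (illustrative, not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut arena = TypedArena::default();
    /// arena.alloc(String::from("temporary"));
    /// arena.clear(); // drops the allocated values, keeps the longest chunk for reuse
    /// let s = arena.alloc(String::from("reuses the retained chunk"));
    /// ```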
    pub fn clear(&mut self) {
        unsafe {
            // Clear the last chunk, which is partially filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.last_mut() {
                self.clear_last_chunk(&mut last_chunk);
                let len = chunks_borrow.len();
                // If `T` is ZST, code below has no effect.
                for mut chunk in chunks_borrow.drain(..len - 1) {
                    chunk.destroy(chunk.entries);
                }
            }
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start() as usize;
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get() as usize;
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if mem::size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            (end - start) / mem::size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // RawVec handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}
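
/// An arena that can hold objects of many different types, but only types
/// that do not need to be dropped: the allocation methods assert
/// `!mem::needs_drop::<T>()`, and no destructors are ever run for values
/// stored here.
///
/// A usage sketch (illustrative, not compiled as a doctest):
///
/// ```ignore
/// let arena = DroplessArena::default();
/// // `u32` and `[u32]` have no destructors, so they can live here.
/// let n: &mut u32 = arena.alloc(7);
/// let xs: &mut [u32] = arena.alloc_slice(&[1, 2, 3]);
/// assert_eq!(*n + xs[2], 10);
/// ```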
pub struct DroplessArena {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut u8>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
        let ptr = ptr as *const u8 as *mut u8;

        self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end())
    }

    #[inline]
    fn align(&self, align: usize) {
        let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
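        // The line above rounds the current pointer up to the next multiple of
        // `align` (which must be a power of two). For example, with `align = 8`
        // (illustrative values): a pointer at 0x1003 is rounded up to 0x1008,
        // while a pointer already at 0x1008 is left unchanged.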
        self.ptr.set(final_address as *mut u8);
        assert!(self.ptr <= self.end);
    }

    #[inline(never)]
    #[cold]
    fn grow(&self, needed_bytes: usize) {
        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let (chunk, mut new_capacity);
            if let Some(last_chunk) = chunks.last_mut() {
                let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
                if last_chunk.storage.reserve_in_place(used_bytes, needed_bytes) {
                    self.end.set(last_chunk.end());
                    return;
                } else {
                    new_capacity = last_chunk.storage.capacity();
                    loop {
                        new_capacity = new_capacity.checked_mul(2).unwrap();
                        if new_capacity >= used_bytes + needed_bytes {
                            break;
                        }
                    }
                }
            } else {
                new_capacity = cmp::max(needed_bytes, PAGE);
            }
            chunk = TypedArenaChunk::<u8>::new(new_capacity);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
        unsafe {
            assert!(bytes != 0);

            self.align(align);

            let future_end = intrinsics::arith_offset(self.ptr.get(), bytes as isize);
            if (future_end as *mut u8) >= self.end.get() {
                self.grow(bytes);
            }

            let ptr = self.ptr.get();
            // Set the pointer past ourselves
            self.ptr.set(intrinsics::arith_offset(self.ptr.get(), bytes as isize) as *mut u8);
            slice::from_raw_parts_mut(ptr, bytes)
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());

        let mem = self.alloc_raw(mem::size_of::<T>(), mem::align_of::<T>()) as *mut _ as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(mem::size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(slice.len() * mem::size_of::<T>(), mem::align_of::<T>()) as *mut _
            as *mut T;

        unsafe {
            let arena_slice = slice::from_raw_parts_mut(mem, slice.len());
            arena_slice.copy_from_slice(slice);
            arena_slice
        }
    }

    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            let value = iter.next();
            if i >= len || value.is_none() {
                // We only return as many items as the iterator gave us, even
                // though it was supposed to give us `len`
                return slice::from_raw_parts_mut(mem, i);
            }
            ptr::write(mem.add(i), value.unwrap());
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        let iter = iter.into_iter();
        assert!(mem::size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator will produce here
                let len = min;

                if len == 0 {
                    return &mut [];
                }
                let size = len.checked_mul(mem::size_of::<T>()).unwrap();
                let mem = self.alloc_raw(size, mem::align_of::<T>()) as *mut _ as *mut T;
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                cold_path(move || -> &mut [T] {
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr = self
                            .alloc_raw(len * mem::size_of::<T>(), mem::align_of::<T>())
                            as *mut _ as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

#[derive(Default)]
// FIXME(@Zoxc): this type is entirely unused in rustc
pub struct SyncTypedArena<T> {
    lock: MTLock<TypedArena<T>>,
}

impl<T> SyncTypedArena<T> {
    #[inline(always)]
    pub fn alloc(&self, object: T) -> &mut T {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
    }

    #[inline(always)]
    pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
    }

    #[inline(always)]
    pub fn clear(&mut self) {
        self.lock.get_mut().clear();
    }
}

#[derive(Default)]
pub struct SyncDroplessArena {
    lock: MTLock<DroplessArena>,
}

impl SyncDroplessArena {
    #[inline(always)]
    pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
        self.lock.lock().in_arena(ptr)
    }

    #[inline(always)]
    pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_raw(bytes, align) as *mut [u8]) }
    }

    #[inline(always)]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
    }

    #[inline(always)]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        // Extend the lifetime of the result since it's limited to the lock guard
        unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
    }
}