// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: `TypedArena`, which is a simpler
//! arena that can only hold objects of a single type, and `Arena`, which is a
//! more complex, slower arena that can hold objects of any type.
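//!
//! A brief usage sketch (illustrative; it assumes the crate is linked under
//! the name given in its `crate_id`, i.e. `arena`):
//!
//! ```
//! extern crate arena;
//!
//! use arena::{Arena, TypedArena};
//!
//! fn main() {
//!     // A TypedArena holds values of a single type.
//!     let ints: TypedArena<int> = TypedArena::new();
//!     let x = ints.alloc(5);
//!
//!     // An Arena holds values of any type; each allocation is
//!     // initialized by a closure.
//!     let misc = Arena::new();
//!     let y: &int = misc.alloc(|| 10);
//!     let s = misc.alloc(|| "hello".to_string());
//!     assert_eq!(*x + *y, 15);
//!     assert_eq!(s.as_slice(), "hello");
//! }
//! ```
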
#![crate_id = "arena#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/")]
#![allow(missing_doc)]

extern crate collections;

use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::heap::allocate;

// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
    data: Rc<RefCell<Vec<u8> >>,
    fill: Cell<uint>,
    is_copy: Cell<bool>,
}

impl Chunk {
    fn capacity(&self) -> uint {
        self.data.borrow().capacity()
    }

    unsafe fn as_ptr(&self) -> *u8 {
        self.data.borrow().as_ptr()
    }
}

/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses Vec<u8> as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object. (Potentially with alignment padding after each
/// element.) When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle failures while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// alloc() can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in
/// different chunks than objects without destructors. This reduces
/// overhead when initializing plain-old-data and means we don't need
/// to waste time running the destructors of POD.
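///
/// A small usage sketch (illustrative, not taken from the original docs;
/// it assumes the crate is available as `arena`):
///
/// ```
/// extern crate arena;
///
/// use arena::Arena;
/// use std::rc::Rc;
///
/// fn main() {
///     let arena = Arena::new();
///     // A plain-old-data value goes into the "copy" chunks...
///     let a: &int = arena.alloc(|| 42);
///     // ...while a value with a destructor goes into the chunks that
///     // record a tydesc pointer, so it can be dropped later.
///     let b: &Rc<int> = arena.alloc(|| Rc::new(7));
///     assert_eq!(*a + **b, 49);
/// }
/// ```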
pub struct Arena {
    // The head is separated out from the list as an unbenchmarked
    // microoptimization, to avoid needing to case on the list to access
    // the most recent chunk.
    head: Chunk,
    copy_head: Chunk,
    chunks: RefCell<Vec<Chunk>>,
}

impl Arena {
    /// Allocate a new Arena with 32 bytes preallocated.
    pub fn new() -> Arena {
        Arena::new_with_size(32u)
    }

    /// Allocate a new Arena with `initial_size` bytes preallocated.
    pub fn new_with_size(initial_size: uint) -> Arena {
        Arena {
            head: chunk(initial_size, false),
            copy_head: chunk(initial_size, true),
            chunks: RefCell::new(Vec::new()),
        }
    }
}

fn chunk(size: uint, is_copy: bool) -> Chunk {
    Chunk {
        data: Rc::new(RefCell::new(Vec::with_capacity(size))),
        fill: Cell::new(0u),
        is_copy: Cell::new(is_copy),
    }
}

impl Drop for Arena {
    fn drop(&mut self) {
        unsafe {
            destroy_chunk(&self.head);
            for chunk in self.chunks.borrow().iter() {
                if !chunk.is_copy.get() {
                    destroy_chunk(chunk);
                }
            }
        }
    }
}

fn round_up(base: uint, align: uint) -> uint {
    (base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
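// Worked example (illustrative): round_up(13, 8) computes (13 + 7) & !7,
// i.e. 20 & !7 = 16, the next multiple of 8 at or above 13, while
// round_up(16, 8) stays at 16. This relies on `align` being a power of two.
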
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
    let mut idx = 0;
    let buf = chunk.as_ptr();
    let fill = chunk.fill.get();

    while idx < fill {
        let tydesc_data: *uint = mem::transmute(buf.offset(idx as int));
        let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
        let (size, align) = ((*tydesc).size, (*tydesc).align);

        let after_tydesc = idx + mem::size_of::<*TyDesc>();

        let start = round_up(after_tydesc, align);

        //debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
        //       start, size, align, is_done);
        if is_done {
            ((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
        }

        // Find where the next tydesc lives
        idx = round_up(start + size, mem::align_of::<*TyDesc>());
    }
}

// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
    p as uint | (is_done as uint)
}

fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
    ((p & !1) as *TyDesc, p & 1 == 1)
}
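// Worked example (illustrative): for a tydesc at address 0x7f00,
// bitpack_tydesc_ptr(p, true) yields 0x7f01, and un_bitpack_tydesc_ptr(0x7f01)
// recovers (0x7f00, true). This works because TyDesc values are more than
// 1-byte aligned, so the low bit of a genuine tydesc pointer is always 0.
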
impl Arena {
    fn chunk_size(&self) -> uint {
        self.copy_head.capacity()
    }

    // Functions for the POD part of the arena
    fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
        // Allocate a new chunk.
        let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
        self.chunks.borrow_mut().push(self.copy_head.clone());
        self.copy_head =
            chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);

        return self.alloc_copy_inner(n_bytes, align);
    }

    fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
        unsafe {
            let start = round_up(self.copy_head.fill.get(), align);
            let end = start + n_bytes;
            if end > self.chunk_size() {
                return self.alloc_copy_grow(n_bytes, align);
            }

            self.copy_head.fill.set(end);

            //debug!("idx = {}, size = {}, align = {}, fill = {}",
            //       start, n_bytes, align, head.fill.get());

            self.copy_head.as_ptr().offset(start as int)
        }
    }

    fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
        unsafe {
            let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
                                            mem::min_align_of::<T>());
            let ptr = ptr as *mut T;
            mem::overwrite(&mut (*ptr), op());
            &*ptr
        }
    }

    // Functions for the non-POD part of the arena
    fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
                          -> (*u8, *u8) {
        // Allocate a new chunk.
        let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
        self.chunks.borrow_mut().push(self.head.clone());
        self.head =
            chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);

        return self.alloc_noncopy_inner(n_bytes, align);
    }

    fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
                           -> (*u8, *u8) {
        unsafe {
            let tydesc_start = self.head.fill.get();
            let after_tydesc = self.head.fill.get() + mem::size_of::<*TyDesc>();
            let start = round_up(after_tydesc, align);
            let end = start + n_bytes;

            if end > self.head.capacity() {
                return self.alloc_noncopy_grow(n_bytes, align);
            }

            self.head.fill.set(round_up(end, mem::align_of::<*TyDesc>()));

            //debug!("idx = {}, size = {}, align = {}, fill = {}",
            //       start, n_bytes, align, head.fill);

            let buf = self.head.as_ptr();
            return (buf.offset(tydesc_start as int), buf.offset(start as int));
        }
    }
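
    // The pair returned by alloc_noncopy_inner is (pointer to the tydesc
    // slot, pointer to the object slot). A single entry in a non-copy chunk
    // therefore looks like (illustrative):
    //
    //   [ tydesc pointer, low bit = "initialized" ][ padding ][ object ]
    //
    // and `fill` is rounded back up to the alignment of *TyDesc so that the
    // next entry's tydesc slot starts at an aligned offset.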
    fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
        unsafe {
            let tydesc = get_tydesc::<T>();
            let (ty_ptr, ptr) =
                self.alloc_noncopy_inner(mem::size_of::<T>(),
                                         mem::min_align_of::<T>());
            let ty_ptr = ty_ptr as *mut uint;
            let ptr = ptr as *mut T;
            // Write in our tydesc along with a bit indicating that it
            // has *not* been initialized yet.
            *ty_ptr = mem::transmute(tydesc);
            // Actually initialize it
            mem::overwrite(&mut(*ptr), op());
            // Now that we are done, update the tydesc to indicate that
            // the object is there.
            *ty_ptr = bitpack_tydesc_ptr(tydesc, true);

            &*ptr
        }
    }

    /// Allocate a new item in the arena, using `op` to initialize the value
    /// and returning a reference to it.
    pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
        unsafe {
            // FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
            let this: &mut Arena = mem::transmute::<&_, &mut _>(self);
            if intrinsics::needs_drop::<T>() {
                this.alloc_noncopy(op)
            } else {
                this.alloc_copy(op)
            }
        }
    }
}

#[test]
fn test_arena_destructors() {
    let arena = Arena::new();
    for i in range(0u, 10) {
        // Arena allocate something with drop glue to make sure it
        // doesn't leak.
        arena.alloc(|| Rc::new(i));
        // Allocate something with funny size and alignment, to keep
        // things interesting.
        arena.alloc(|| [0u8, 1u8, 2u8]);
    }
}

#[test]
#[should_fail]
fn test_arena_destructors_fail() {
    let arena = Arena::new();
    // Put some stuff in the arena.
    for i in range(0u, 10) {
        // Arena allocate something with drop glue to make sure it
        // doesn't leak.
        arena.alloc(|| { Rc::new(i) });
        // Allocate something with funny size and alignment, to keep
        // things interesting.
        arena.alloc(|| { [0u8, 1u8, 2u8] });
    }
    // Now, fail while allocating
    arena.alloc::<Rc<int>>(|| {
        // Now fail.
        fail!()
    });
}

/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
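///
/// A small usage sketch (illustrative, not taken from the original docs;
/// the `Point` type is hypothetical and the crate is assumed to be
/// available as `arena`):
///
/// ```
/// extern crate arena;
///
/// use arena::TypedArena;
///
/// struct Point { x: int, y: int }
///
/// fn main() {
///     let arena = TypedArena::new();
///     let p = arena.alloc(Point { x: 1, y: 2 });
///     assert_eq!(p.x + p.y, 3);
///     // All the Points are destroyed together when `arena` is dropped.
/// }
/// ```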
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: *T,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: *T,

    /// A pointer to the first arena segment.
    first: Option<Box<TypedArenaChunk<T>>>,
}

struct TypedArenaChunk<T> {
    /// Pointer to the next arena segment.
    next: Option<Box<TypedArenaChunk<T>>>,

    /// The number of elements that this chunk can hold.
    capacity: uint,

    // Objects follow here, suitably aligned.
}

impl<T> TypedArenaChunk<T> {
    fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
           -> Box<TypedArenaChunk<T>> {
        let mut size = mem::size_of::<TypedArenaChunk<T>>();
        size = round_up(size, mem::min_align_of::<T>());
        let elem_size = mem::size_of::<T>();
        let elems_size = elem_size.checked_mul(&capacity).unwrap();
        size = size.checked_add(&elems_size).unwrap();

        let mut chunk = unsafe {
            let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
            let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
            mem::overwrite(&mut chunk.next, next);
            chunk
        };

        chunk.capacity = capacity;
        chunk
    }

    /// Destroys this arena chunk, dropping the first `len` objects stored in
    /// it (when `T` has drop glue) and then destroying the following chunks.
    unsafe fn destroy(&mut self, len: uint) {
        // Destroy all the allocated objects.
        if intrinsics::needs_drop::<T>() {
            let mut start = self.start();
            for _ in range(0, len) {
                read(start as *T); // run the destructor on the pointer
                start = start.offset(mem::size_of::<T>() as int)
            }
        }

        // Destroy the next chunk.
        let next_opt = mem::replace(&mut self.next, None);
        match next_opt {
            None => {}
            Some(mut next) => {
                // We assume that the next chunk is completely filled.
                next.destroy(next.capacity)
            }
        }
    }

    // Returns a pointer to the first allocated object.
    fn start(&self) -> *u8 {
        let this: *TypedArenaChunk<T> = self;
        unsafe {
            mem::transmute(round_up(this.offset(1) as uint,
                                    mem::min_align_of::<T>()))
        }
    }

    // Returns a pointer to the end of the allocated space.
    fn end(&self) -> *u8 {
        unsafe {
            let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
            self.start().offset(size as int)
        }
    }
}

impl<T> TypedArena<T> {
    /// Creates a new TypedArena with preallocated space for 8 objects.
    pub fn new() -> TypedArena<T> {
        TypedArena::with_capacity(8)
    }

    /// Creates a new TypedArena with preallocated space for the given number
    /// of objects.
    pub fn with_capacity(capacity: uint) -> TypedArena<T> {
        let chunk = TypedArenaChunk::<T>::new(None, capacity);
        TypedArena {
            ptr: chunk.start() as *T,
            end: chunk.end() as *T,
            first: Some(chunk),
        }
    }

    /// Allocates an object in the TypedArena, returning a reference to it.
    pub fn alloc<'a>(&'a self, object: T) -> &'a T {
        unsafe {
            // FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
            let this: &mut TypedArena<T> = mem::transmute::<&_, &mut _>(self);
            if this.ptr == this.end {
                this.grow()
            }

            let ptr: &'a mut T = mem::transmute(this.ptr);
            mem::overwrite(ptr, object);
            this.ptr = this.ptr.offset(1);
            let ptr: &'a T = ptr;
            ptr
        }
    }

    /// Grows the arena.
    fn grow(&mut self) {
        let chunk = self.first.take_unwrap();
        let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
        let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
        self.ptr = chunk.start() as *T;
        self.end = chunk.end() as *T;
        self.first = Some(chunk)
    }
}

#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        // Determine how much was filled.
        let start = self.first.get_ref().start() as uint;
        let end = self.ptr as uint;
        let diff = (end - start) / mem::size_of::<T>();

        // Pass that to the `destroy` method.
        unsafe {
            self.first.get_mut_ref().destroy(diff)
        }
    }
}

#[cfg(test)]
mod tests {
    extern crate test;
    use self::test::Bencher;
    use super::{Arena, TypedArena};

    struct Point { x: int, y: int, z: int }

    #[test]
    pub fn test_copy() {
        let arena = TypedArena::new();
        for _ in range(0, 100000) {
            arena.alloc(Point { x: 1, y: 2, z: 3 });
        }
    }

    #[bench]
    pub fn bench_copy(b: &mut Bencher) {
        let arena = TypedArena::new();
        b.iter(|| arena.alloc(Point { x: 1, y: 2, z: 3 }))
    }

    #[bench]
    pub fn bench_copy_nonarena(b: &mut Bencher) {
        b.iter(|| box Point { x: 1, y: 2, z: 3 })
    }

    #[bench]
    pub fn bench_copy_old_arena(b: &mut Bencher) {
        let arena = Arena::new();
        b.iter(|| arena.alloc(|| Point { x: 1, y: 2, z: 3 }))
    }

    struct Noncopy {
        string: String,
        array: Vec<int>,
    }

    #[test]
    pub fn test_noncopy() {
        let arena = TypedArena::new();
        for _ in range(0, 100000) {
            arena.alloc(Noncopy {
                string: "hello world".to_string(),
                array: vec!( 1, 2, 3, 4, 5 ),
            });
        }
    }

    #[bench]
    pub fn bench_noncopy(b: &mut Bencher) {
        let arena = TypedArena::new();
        b.iter(|| {
            arena.alloc(Noncopy {
                string: "hello world".to_string(),
                array: vec!( 1, 2, 3, 4, 5 ),
            })
        })
    }

    #[bench]
    pub fn bench_noncopy_nonarena(b: &mut Bencher) {
        b.iter(|| {
            box Noncopy {
                string: "hello world".to_string(),
                array: vec!( 1, 2, 3, 4, 5 ),
            }
        })
    }

    #[bench]
    pub fn bench_noncopy_old_arena(b: &mut Bencher) {
        let arena = Arena::new();
        b.iter(|| {
            arena.alloc(|| Noncopy {
                string: "hello world".to_string(),
                array: vec!( 1, 2, 3, 4, 5 ),
            })
        })
    }
}