1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! The arena, a fast but limited type of allocator.
//! Arenas are a type of allocator that destroy the objects within them
//! all at once, when the arena itself is destroyed. They do not support
//! deallocation of individual objects while the arena itself is still
//! alive. The benefit of an arena is very fast allocation: just a
//! pointer bump.
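//!
//! A minimal usage sketch (illustrative; it relies only on the
//! `Arena::new` and `Arena::alloc` interface defined below):
//!
//! ```rust
//! let arena = Arena::new();
//! // `alloc` takes an initializer closure and hands back a reference
//! // that lives as long as the arena does.
//! let x: &int = arena.alloc(|| 10);
//! assert_eq!(*x, 10);
//! // Everything in the arena is destroyed together when `arena` is dropped.
//! ```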
18 #[crate_id = "arena#0.10-pre"];
19 #[crate_type = "rlib"];
20 #[crate_type = "dylib"];
21 #[license = "MIT/ASL2"];
22 #[allow(missing_doc)];
23 #[feature(managed_boxes)];
27 use extra::list::{List, Cons, Nil};
31 use std::cast::{transmute, transmute_mut, transmute_mut_region};
33 use std::cell::{Cell, RefCell};
36 use std::kinds::marker;
38 use std::rt::global_heap;
39 use std::unstable::intrinsics::{TyDesc, get_tydesc};
40 use std::unstable::intrinsics;
43 // The way arena uses arrays is really deeply awful. The arrays are
44 // allocated, and have capacities reserved, but the fill for the array
45 // will always stay at 0.
53 // Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses @[u8] vectors as a backing store to
55 // allocate objects from. For each allocated object, the arena stores
56 // a pointer to the type descriptor followed by the
57 // object. (Potentially with alignment padding after each of them.)
58 // When the arena is destroyed, it iterates through all of its chunks,
59 // and uses the tydesc information to trace through the objects,
60 // calling the destructors on them.
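//
// For illustration, a (hypothetical) non-POD chunk holding two objects
// might be laid out as:
//
//   [tydesc A][pad][object A][pad][tydesc B][pad][object B][unused ...]
//
// where `fill` marks the end of `object B` (rounded up to the next
// tydesc slot), and the destructor walk (`destroy_chunk` below)
// re-traverses the chunk using the stored tydescs.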
61 // One subtle point that needs to be addressed is how to handle
// failures while running the user-provided initializer function. It
// is important not to run the destructor on uninitialized objects, but
64 // how to detect them is somewhat subtle. Since alloc() can be invoked
65 // recursively, it is not sufficient to simply exclude the most recent
66 // object. To solve this without requiring extra space, we use the low
67 // order bit of the tydesc pointer to encode whether the object it
68 // describes has been fully initialized.
70 // As an optimization, objects with destructors are stored in
71 // different chunks than objects without destructors. This reduces
72 // overhead when initializing plain-old-data and means we don't need
73 // to waste time running the destructors of POD.
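// (For example, allocating a plain `int` takes the POD path, while
// allocating a value that owns an `@`-box or a `~str` takes the non-POD
// path, because such values have destructors to run.)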
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to access
// the head.
80 priv chunks: RefCell<@List<Chunk>>,
81 priv no_freeze: marker::NoFreeze,
85 pub fn new() -> Arena {
86 Arena::new_with_size(32u)
89 pub fn new_with_size(initial_size: uint) -> Arena {
91 head: chunk(initial_size, false),
92 pod_head: chunk(initial_size, true),
93 chunks: RefCell::new(@Nil),
94 no_freeze: marker::NoFreeze,
99 fn chunk(size: uint, is_pod: bool) -> Chunk {
100 let mut v: @[u8] = @[];
101 unsafe { at_vec::raw::reserve(&mut v, size); }
103 data: RefCell::new(unsafe { cast::transmute(v) }),
105 is_pod: Cell::new(is_pod),
110 impl Drop for Arena {
113 destroy_chunk(&self.head);
115 list::each(self.chunks.get(), |chunk| {
116 if !chunk.is_pod.get() {
117 destroy_chunk(chunk);
126 fn round_up(base: uint, align: uint) -> uint {
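// e.g. round_up(13, 8) == 16; `align` is assumed to be a power of two.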
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
130 // Walk down a chunk, running the destructors for any objects stored
132 unsafe fn destroy_chunk(chunk: &Chunk) {
135 let data = chunk.data.borrow();
138 let fill = chunk.fill.get();
141 let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
142 let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
143 let (size, align) = ((*tydesc).size, (*tydesc).align);
145 let after_tydesc = idx + mem::size_of::<*TyDesc>();
147 let start = round_up(after_tydesc, align);
149 //debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
150 // start, size, align, is_done);
152 ((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
155 // Find where the next tydesc lives
156 idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
160 // We encode whether the object a tydesc describes has been
161 // initialized in the arena in the low bit of the tydesc pointer. This
162 // is necessary in order to properly do cleanup if a failure occurs
163 // during an initializer.
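// For example, with a word-aligned tydesc pointer `p` (so bit 0 is free):
//   bitpack_tydesc_ptr(p, true) == (p as uint) | 1
//   un_bitpack_tydesc_ptr((p as uint) | 1) == (p, true)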
165 unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
166 let p_bits: uint = transmute(p);
167 p_bits | (is_done as uint)
170 unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
171 (transmute(p & !1), p & 1 == 1)
175 // Functions for the POD part of the arena
176 fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
177 // Allocate a new chunk.
178 let chunk_size = at_vec::capacity(self.pod_head.data.get());
179 let new_min_chunk_size = num::max(n_bytes, chunk_size);
180 self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
    chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
184 return self.alloc_pod_inner(n_bytes, align);
188 fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
190 let this = transmute_mut_region(self);
191 let start = round_up(this.pod_head.fill.get(), align);
192 let end = start + n_bytes;
193 if end > at_vec::capacity(this.pod_head.data.get()) {
194 return this.alloc_pod_grow(n_bytes, align);
196 this.pod_head.fill.set(end);
198 //debug!("idx = {}, size = {}, align = {}, fill = {}",
199 // start, n_bytes, align, head.fill.get());
201 ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
206 fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
208 let tydesc = get_tydesc::<T>();
209 let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
210 let ptr: *mut T = transmute(ptr);
211 intrinsics::move_val_init(&mut (*ptr), op());
212 return transmute(ptr);
216 // Functions for the non-POD part of the arena
217 fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
219 // Allocate a new chunk.
220 let chunk_size = at_vec::capacity(self.head.data.get());
221 let new_min_chunk_size = num::max(n_bytes, chunk_size);
222 self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
    chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
226 return self.alloc_nonpod_inner(n_bytes, align);
230 fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
239 let head = transmute_mut_region(&mut self.head);
241 tydesc_start = head.fill.get();
242 after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
243 start = round_up(after_tydesc, align);
244 end = start + n_bytes;
247 if end > at_vec::capacity(self.head.data.get()) {
248 return self.alloc_nonpod_grow(n_bytes, align);
251 let head = transmute_mut_region(&mut self.head);
252 head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
254 //debug!("idx = {}, size = {}, align = {}, fill = {}",
255 // start, n_bytes, align, head.fill);
257 let buf = self.head.data.get().as_ptr();
258 return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
263 fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
265 let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
    self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
268 let ty_ptr: *mut uint = transmute(ty_ptr);
269 let ptr: *mut T = transmute(ptr);
270 // Write in our tydesc along with a bit indicating that it
271 // has *not* been initialized yet.
272 *ty_ptr = transmute(tydesc);
273 // Actually initialize it
274 intrinsics::move_val_init(&mut(*ptr), op());
275 // Now that we are done, update the tydesc to indicate that
276 // the object is there.
277 *ty_ptr = bitpack_tydesc_ptr(tydesc, true);
279 return transmute(ptr);
283 // The external interface
285 pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
287 // FIXME: Borrow check
288 let this = transmute_mut(self);
289 if intrinsics::needs_drop::<T>() {
290 this.alloc_nonpod(op)
299 fn test_arena_destructors() {
300 let arena = Arena::new();
301 for i in range(0u, 10) {
302 // Arena allocate something with drop glue to make sure it
305 // Allocate something with funny size and alignment, to keep
306 // things interesting.
307 arena.alloc(|| [0u8, 1u8, 2u8]);
313 fn test_arena_destructors_fail() {
314 let arena = Arena::new();
315 // Put some stuff in the arena.
316 for i in range(0u, 10) {
317 // Arena allocate something with drop glue to make sure it
319 arena.alloc(|| { @i });
320 // Allocate something with funny size and alignment, to keep
321 // things interesting.
322 arena.alloc(|| { [0u8, 1u8, 2u8] });
324 // Now, fail while allocating
325 arena.alloc::<@int>(|| {
331 /// An arena that can hold objects of only one type.
333 /// Safety note: Modifying objects in the arena that have already had their
334 /// `drop` destructors run can cause leaks, because the destructor will not
335 /// run again for these objects.
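///
/// A minimal usage sketch (illustrative; it uses only the `new` and
/// `alloc` methods defined below):
///
/// ```rust
/// let arena: TypedArena<int> = TypedArena::new();
/// let x = arena.alloc(10);
/// assert_eq!(*x, 10);
/// // `x` stays valid until `arena` is dropped.
/// ```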
336 pub struct TypedArena<T> {
337 /// A pointer to the next object to be allocated.
340 /// A pointer to the end of the allocated area. When this pointer is
341 /// reached, a new chunk is allocated.
344 /// The type descriptor of the objects in the arena. This should not be
345 /// necessary, but is until generic destructors are supported.
346 priv tydesc: *TyDesc,
348 /// A pointer to the first arena segment.
349 priv first: Option<~TypedArenaChunk>,
352 struct TypedArenaChunk {
353 /// Pointer to the next arena segment.
354 next: Option<~TypedArenaChunk>,
356 /// The number of elements that this chunk can hold.
359 // Objects follow here, suitably aligned.
362 impl TypedArenaChunk {
364 fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
365 -> ~TypedArenaChunk {
366 let mut size = mem::size_of::<TypedArenaChunk>();
367 size = round_up(size, mem::min_align_of::<T>());
368 let elem_size = mem::size_of::<T>();
369 let elems_size = elem_size.checked_mul(&capacity).unwrap();
370 size = size.checked_add(&elems_size).unwrap();
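// The chunk is a single allocation: the header (`next` + `capacity`),
// alignment padding, and then room for `capacity` elements of `T`.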
372 let mut chunk = unsafe {
373 let chunk = global_heap::exchange_malloc(size);
374 let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
375 intrinsics::move_val_init(&mut chunk.next, next);
379 chunk.capacity = capacity;
383 /// Destroys this arena chunk. If the type descriptor is supplied, the
384 /// drop glue is called; otherwise, drop glue is not called.
386 unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
387 // Destroy all the allocated objects.
391 let mut start = self.start(tydesc);
392 for _ in range(0, len) {
393 ((*tydesc).drop_glue)(start as *i8);
394 start = start.offset((*tydesc).size as int)
399 // Destroy the next chunk.
400 let next_opt = util::replace(&mut self.next, None);
404 // We assume that the next chunk is completely filled.
405 next.destroy(next.capacity, opt_tydesc)
410 // Returns a pointer to the first allocated object.
412 fn start(&self, tydesc: *TyDesc) -> *u8 {
413 let this: *TypedArenaChunk = self;
415 cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
419 // Returns a pointer to the end of the allocated space.
421 fn end(&self, tydesc: *TyDesc) -> *u8 {
423 let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
424 self.start(tydesc).offset(size as int)
429 impl<T> TypedArena<T> {
430 /// Creates a new arena with preallocated space for 8 objects.
432 pub fn new() -> TypedArena<T> {
433 TypedArena::with_capacity(8)
436 /// Creates a new arena with preallocated space for the given number of
439 pub fn with_capacity(capacity: uint) -> TypedArena<T> {
440 let chunk = TypedArenaChunk::new::<T>(None, capacity);
441 let tydesc = unsafe {
442 intrinsics::get_tydesc::<T>()
445 ptr: chunk.start(tydesc) as *T,
446 end: chunk.end(tydesc) as *T,
452 /// Allocates an object into this arena.
454 pub fn alloc<'a>(&'a self, object: T) -> &'a T {
456 let this = cast::transmute_mut(self);
457 if this.ptr == this.end {
461 let ptr: &'a mut T = cast::transmute(this.ptr);
462 intrinsics::move_val_init(ptr, object);
463 this.ptr = this.ptr.offset(1);
464 let ptr: &'a T = ptr;
472 let chunk = self.first.take_unwrap();
473 let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
474 let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
475 self.ptr = chunk.start(self.tydesc) as *T;
476 self.end = chunk.end(self.tydesc) as *T;
477 self.first = Some(chunk)
482 impl<T> Drop for TypedArena<T> {
484 // Determine how much was filled.
485 let start = self.first.get_ref().start(self.tydesc) as uint;
486 let end = self.ptr as uint;
487 let diff = (end - start) / mem::size_of::<T>();
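// `diff` is the number of objects allocated so far in the current
// (first) chunk; older chunks are assumed to be completely filled.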
489 // Pass that to the `destroy` method.
491 let opt_tydesc = if intrinsics::needs_drop::<T>() {
496 self.first.get_mut_ref().destroy(diff, opt_tydesc)
503 use super::{Arena, TypedArena};
504 use extra::test::BenchHarness;
514 let arena = TypedArena::new();
515 for _ in range(0, 100000) {
525 pub fn bench_pod(bh: &mut BenchHarness) {
526 let arena = TypedArena::new();
537 pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
548 pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
549 let arena = Arena::new();
567 pub fn test_nonpod() {
568 let arena = TypedArena::new();
569 for _ in range(0, 100000) {
571 string: ~"hello world",
572 array: ~[ 1, 2, 3, 4, 5 ],
578 pub fn bench_nonpod(bh: &mut BenchHarness) {
579 let arena = TypedArena::new();
582 string: ~"hello world",
583 array: ~[ 1, 2, 3, 4, 5 ],
589 pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
592 string: ~"hello world",
593 array: ~[ 1, 2, 3, 4, 5 ],
599 pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
600 let arena = Arena::new();
602 let _ = arena.alloc(|| Nonpod {
603 string: ~"hello world",
604 array: ~[ 1, 2, 3, 4, 5 ],