// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within them, all
//! at once, when the arena itself is destroyed. They do not support
//! deallocation of individual objects while the arena itself is still alive.
//! The benefit of an arena is very fast allocation: just a pointer bump.
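//!
//! A minimal usage sketch (using the `Arena::alloc` interface defined later
//! in this file; every allocated value lives until the arena is dropped):
//!
//! ```rust
//! use arena::Arena;
//!
//! let arena = Arena::new();
//! let x: &uint = arena.alloc(|| 5u);       // no drop glue: POD path
//! let s: &~str = arena.alloc(|| ~"hello"); // drop glue: non-POD path
//! assert_eq!(*x, 5u);
//! ```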

#[crate_id = "arena#0.10-pre"];
#[crate_type = "rlib"];
#[crate_type = "dylib"];
#[license = "MIT/ASL2"];
#[doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
      html_favicon_url = "http://www.rust-lang.org/favicon.ico",
      html_root_url = "http://static.rust-lang.org/doc/master")];
#[allow(missing_doc)];
#[feature(managed_boxes)];
#[allow(deprecated_owned_vector)]; // NOTE: remove after stage0

extern crate collections;

use collections::list::{List, Cons, Nil};

use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::{cmp, mem, num, slice};
use std::kinds::marker;
use std::ptr::read;
use std::rc::Rc;
use std::rt::global_heap;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;

// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, Eq)]
struct Chunk {
    data: Rc<RefCell<~[u8]>>,
    fill: Cell<uint>,
    is_pod: Cell<bool>,
}

impl Chunk {
    fn capacity(&self) -> uint {
        self.data.deref().borrow().get().capacity()
    }
    unsafe fn as_ptr(&self) -> *u8 {
        self.data.deref().borrow().get().as_ptr()
    }
}

// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
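//
// For example (a hypothetical 64-bit layout with 8-byte tydesc pointers),
// a non-POD chunk holding a u8 followed by a u64 would look like:
//
//     [tydesc][u8][7 pad][tydesc][u64]...
//     0       8   9      16      24
//
// with the fill offset advancing past each (tydesc, padding, object)
// group, rounded up to the tydesc pointer's alignment.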
//
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
//
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
pub struct Arena {
    // The head is separated out from the list as an unbenchmarked
    // micro-optimization, to avoid needing to case on the list to
    // access the head.
    priv head: Chunk,
    priv pod_head: Chunk,
    priv chunks: RefCell<@List<Chunk>>,
    priv no_freeze: marker::NoFreeze,
}

impl Arena {
    pub fn new() -> Arena {
        Arena::new_with_size(32u)
    }

    pub fn new_with_size(initial_size: uint) -> Arena {
        Arena {
            head: chunk(initial_size, false),
            pod_head: chunk(initial_size, true),
            chunks: RefCell::new(@Nil),
            no_freeze: marker::NoFreeze,
        }
    }
}

fn chunk(size: uint, is_pod: bool) -> Chunk {
    Chunk {
        data: Rc::new(RefCell::new(slice::with_capacity(size))),
        fill: Cell::new(0u),
        is_pod: Cell::new(is_pod),
    }
}

impl Drop for Arena {
    fn drop(&mut self) {
        unsafe {
            destroy_chunk(&self.head);
            for chunk in self.chunks.get().iter() {
                if !chunk.is_pod.get() {
                    destroy_chunk(chunk);
                }
            }
        }
    }
}

#[inline]
fn round_up(base: uint, align: uint) -> uint {
    (base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
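// For example, round_up(13, 8) == 16 and round_up(16, 8) == 16: the add
// pushes `base` to or past the next multiple of `align` (which must be a
// power of two), and the mask clears the low bits.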

// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
    let mut idx = 0;
    let buf = chunk.as_ptr();
    let fill = chunk.fill.get();

    while idx < fill {
        let tydesc_data: *uint = transmute(buf.offset(idx as int));
        let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
        let (size, align) = ((*tydesc).size, (*tydesc).align);

        let after_tydesc = idx + mem::size_of::<*TyDesc>();
        let start = round_up(after_tydesc, align);

        //debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
        //       start, size, align, is_done);
        if is_done {
            ((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
        }

        // Find where the next tydesc lives
        idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
    }
}

// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
    p as uint | (is_done as uint)
}

#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
    ((p & !1) as *TyDesc, p & 1 == 1)
}
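
// A round-trip sketch with a hypothetical pointer value (tydescs are at
// least word-aligned, so the low bit is always free for the flag):
//
//     let packed = bitpack_tydesc_ptr(0x1000 as *TyDesc, true); // == 0x1001
//     let (p, done) = un_bitpack_tydesc_ptr(packed);            // (0x1000, true)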

impl Arena {
    fn chunk_size(&self) -> uint {
        self.pod_head.capacity()
    }

    // Functions for the POD part of the arena
    fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
        // Allocate a new chunk.
        let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
        self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
        self.pod_head =
            chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);

        return self.alloc_pod_inner(n_bytes, align);
    }

    #[inline]
    fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
        unsafe {
            let this = transmute_mut_region(self);
            let start = round_up(this.pod_head.fill.get(), align);
            let end = start + n_bytes;
            if end > self.chunk_size() {
                return this.alloc_pod_grow(n_bytes, align);
            }
            this.pod_head.fill.set(end);

            //debug!("idx = {}, size = {}, align = {}, fill = {}",
            //       start, n_bytes, align, this.pod_head.fill.get());

            this.pod_head.as_ptr().offset(start as int)
        }
    }

    #[inline]
    fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
        unsafe {
            let ptr = self.alloc_pod_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
            let ptr: *mut T = transmute(ptr);
            mem::move_val_init(&mut (*ptr), op());
            return transmute(ptr);
        }
    }

    // Functions for the non-POD part of the arena
    fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
                         -> (*u8, *u8) {
        // Allocate a new chunk.
        let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
        self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
        self.head =
            chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);

        return self.alloc_nonpod_inner(n_bytes, align);
    }

    #[inline]
    fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
                          -> (*u8, *u8) {
        unsafe {
            let start;
            let end;
            let tydesc_start;
            let after_tydesc;
            {
                let head = transmute_mut_region(&mut self.head);
                tydesc_start = head.fill.get();
                after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
                start = round_up(after_tydesc, align);
                end = start + n_bytes;
            }

            if end > self.head.capacity() {
                return self.alloc_nonpod_grow(n_bytes, align);
            }

            let head = transmute_mut_region(&mut self.head);
            head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));

            //debug!("idx = {}, size = {}, align = {}, fill = {}",
            //       start, n_bytes, align, head.fill.get());

            let buf = self.head.as_ptr();
            return (buf.offset(tydesc_start as int), buf.offset(start as int));
        }
    }

    #[inline]
    fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
        unsafe {
            let tydesc = get_tydesc::<T>();
            let (ty_ptr, ptr) =
                self.alloc_nonpod_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
            let ty_ptr: *mut uint = transmute(ty_ptr);
            let ptr: *mut T = transmute(ptr);
            // Write in our tydesc along with a bit indicating that it
            // has *not* been initialized yet.
            *ty_ptr = transmute(tydesc);
            // Actually initialize it
            mem::move_val_init(&mut (*ptr), op());
            // Now that we are done, update the tydesc to indicate that
            // the object is there.
            *ty_ptr = bitpack_tydesc_ptr(tydesc, true);

            return transmute(ptr);
        }
    }

    // The external interface
    #[inline]
    pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
        unsafe {
            // FIXME: Borrow check
            let this = transmute_mut(self);
            if intrinsics::needs_drop::<T>() {
                this.alloc_nonpod(op)
            } else {
                this.alloc_pod(op)
            }
        }
    }
}
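
// For example, `arena.alloc(|| 5u)` takes the POD path above (uint has no
// drop glue), while `arena.alloc(|| ~"hello")` takes the non-POD path and
// records a tydesc so the ~str's destructor runs when the arena is dropped.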

#[test]
fn test_arena_destructors() {
    let arena = Arena::new();
    for i in range(0u, 10) {
        // Arena allocate something with drop glue to make sure it
        // doesn't leak.
        arena.alloc(|| @i);
        // Allocate something with funny size and alignment, to keep
        // things interesting.
        arena.alloc(|| [0u8, 1u8, 2u8]);
    }
}

#[test]
#[should_fail]
fn test_arena_destructors_fail() {
    let arena = Arena::new();
    // Put some stuff in the arena.
    for i in range(0u, 10) {
        // Arena allocate something with drop glue to make sure it
        // doesn't leak.
        arena.alloc(|| { @i });
        // Allocate something with funny size and alignment, to keep
        // things interesting.
        arena.alloc(|| { [0u8, 1u8, 2u8] });
    }
    // Now, fail while allocating
    arena.alloc::<@int>(|| {
        // Now fail.
        fail!();
    });
}

/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
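///
/// A minimal usage sketch (using the `alloc` method defined below; the
/// value lives as long as the arena):
///
/// ```rust
/// use arena::TypedArena;
///
/// let arena = TypedArena::new();
/// let x: &int = arena.alloc(5);
/// assert_eq!(*x, 5);
/// ```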
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    priv ptr: *T,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    priv end: *T,

    /// A pointer to the first arena segment.
    priv first: Option<~TypedArenaChunk<T>>,
}

struct TypedArenaChunk<T> {
    /// Pointer to the next arena segment.
    next: Option<~TypedArenaChunk<T>>,

    /// The number of elements that this chunk can hold.
    capacity: uint,

    // Objects follow here, suitably aligned.
}

impl<T> TypedArenaChunk<T> {
    #[inline]
    fn new(next: Option<~TypedArenaChunk<T>>, capacity: uint) -> ~TypedArenaChunk<T> {
        let mut size = mem::size_of::<TypedArenaChunk<T>>();
        size = round_up(size, mem::min_align_of::<T>());
        let elem_size = mem::size_of::<T>();
        let elems_size = elem_size.checked_mul(&capacity).unwrap();
        size = size.checked_add(&elems_size).unwrap();

        let mut chunk = unsafe {
            let chunk = global_heap::exchange_malloc(size);
            let mut chunk: ~TypedArenaChunk<T> = cast::transmute(chunk);
            mem::move_val_init(&mut chunk.next, next);
            chunk
        };

        chunk.capacity = capacity;
        chunk
    }

    /// Destroys this arena chunk: runs the destructor on the first `len`
    /// objects if `T` has drop glue, then destroys the following chunks.
    #[inline]
    unsafe fn destroy(&mut self, len: uint) {
        // Destroy all the allocated objects.
        if intrinsics::needs_drop::<T>() {
            let mut start = self.start();
            for _ in range(0, len) {
                read(start as *T); // run the destructor on the pointer
                start = start.offset(mem::size_of::<T>() as int)
            }
        }

        // Destroy the next chunk.
        let next_opt = mem::replace(&mut self.next, None);
        match next_opt {
            None => {}
            Some(mut next) => {
                // We assume that the next chunk is completely filled.
                next.destroy(next.capacity)
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&self) -> *u8 {
        let this: *TypedArenaChunk<T> = self;
        unsafe {
            cast::transmute(round_up(this.offset(1) as uint, mem::min_align_of::<T>()))
        }
    }
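
    // For example (hypothetical addresses): if the chunk header ends at
    // 0x1001 and T is u32, start() returns 0x1004, the first address past
    // the header that is aligned for T.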

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&self) -> *u8 {
        unsafe {
            let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
            self.start().offset(size as int)
        }
    }
}

impl<T> TypedArena<T> {
    /// Creates a new arena with preallocated space for 8 objects.
    #[inline]
    pub fn new() -> TypedArena<T> {
        TypedArena::with_capacity(8)
    }

    /// Creates a new arena with preallocated space for the given number of
    /// objects.
    #[inline]
    pub fn with_capacity(capacity: uint) -> TypedArena<T> {
        let chunk = TypedArenaChunk::<T>::new(None, capacity);
        TypedArena {
            ptr: chunk.start() as *T,
            end: chunk.end() as *T,
            first: Some(chunk),
        }
    }

    /// Allocates an object into this arena.
    #[inline]
    pub fn alloc<'a>(&'a self, object: T) -> &'a T {
        unsafe {
            let this = cast::transmute_mut(self);
            if this.ptr == this.end {
                this.grow()
            }

            let ptr: &'a mut T = cast::transmute(this.ptr);
            mem::move_val_init(ptr, object);
            this.ptr = this.ptr.offset(1);
            let ptr: &'a T = ptr;
            ptr
        }
    }

    /// Grows the arena by allocating a new chunk with twice the capacity.
    #[inline(never)]
    fn grow(&mut self) {
        let chunk = self.first.take_unwrap();
        let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
        let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
        self.ptr = chunk.start() as *T;
        self.end = chunk.end() as *T;
        self.first = Some(chunk)
    }
}
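
// Note on growth: each new chunk doubles the previous capacity, so with the
// default capacity of 8 the chunks hold 8, 16, 32, ... elements, and n
// allocations create only O(log n) chunks.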

#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        // Determine how much of the first (current) chunk was filled.
        let start = self.first.get_ref().start() as uint;
        let end = self.ptr as uint;
        let diff = (end - start) / mem::size_of::<T>();

        // Pass that to the `destroy` method.
        unsafe {
            self.first.get_mut_ref().destroy(diff)
        }
    }
}

#[cfg(test)]
mod tests {
    extern crate test;
    use self::test::BenchHarness;
    use super::{Arena, TypedArena};

    struct Point {
        x: int,
        y: int,
        z: int,
    }

    #[test]
    pub fn test_pod() {
        let arena = TypedArena::new();
        for _ in range(0, 100000) {
            arena.alloc(Point { x: 1, y: 2, z: 3 });
        }
    }

    #[bench]
    pub fn bench_pod(bh: &mut BenchHarness) {
        let arena = TypedArena::new();
        bh.iter(|| {
            arena.alloc(Point { x: 1, y: 2, z: 3 });
        })
    }

    #[bench]
    pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
        bh.iter(|| {
            let _ = ~Point { x: 1, y: 2, z: 3 };
        })
    }

    #[bench]
    pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
        let arena = Arena::new();
        bh.iter(|| {
            arena.alloc(|| Point { x: 1, y: 2, z: 3 });
        })
    }

    struct Nonpod {
        string: ~str,
        array: ~[int],
    }

    #[test]
    pub fn test_nonpod() {
        let arena = TypedArena::new();
        for _ in range(0, 100000) {
            arena.alloc(Nonpod {
                string: ~"hello world",
                array: ~[ 1, 2, 3, 4, 5 ],
            });
        }
    }

    #[bench]
    pub fn bench_nonpod(bh: &mut BenchHarness) {
        let arena = TypedArena::new();
        bh.iter(|| {
            arena.alloc(Nonpod {
                string: ~"hello world",
                array: ~[ 1, 2, 3, 4, 5 ],
            });
        })
    }

    #[bench]
    pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
        bh.iter(|| {
            let _ = ~Nonpod {
                string: ~"hello world",
                array: ~[ 1, 2, 3, 4, 5 ],
            };
        })
    }

    #[bench]
    pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
        let arena = Arena::new();
        bh.iter(|| {
            arena.alloc(|| Nonpod {
                string: ~"hello world",
                array: ~[ 1, 2, 3, 4, 5 ],
            });
        })
    }
}