TARGET_CRATES := libc std green rustuv native flate arena glob term semver \
uuid serialize sync getopts collections num test time rand \
- workcache url log regex graphviz core rlibc
+ workcache url log regex graphviz core rlibc alloc
HOST_CRATES := syntax rustc rustdoc fourcc hexfloat regex_macros fmt_macros
CRATES := $(TARGET_CRATES) $(HOST_CRATES)
TOOLS := compiletest rustdoc rustc
DEPS_core :=
DEPS_rlibc :=
-DEPS_std := core libc native:rustrt native:compiler-rt native:backtrace native:jemalloc
+DEPS_alloc := core libc native:jemalloc
+DEPS_std := core libc alloc native:rustrt native:backtrace
DEPS_graphviz := std
DEPS_green := std rand native:context_switch
DEPS_rustuv := std native:uv native:uv_support
DEPS_term := std collections log
DEPS_semver := std
DEPS_uuid := std serialize rand
-DEPS_sync := std
+DEPS_sync := std alloc
DEPS_getopts := std
DEPS_collections := std rand
DEPS_fourcc := syntax std
ONLY_RLIB_core := 1
ONLY_RLIB_rlibc := 1
+ONLY_RLIB_alloc := 1
################################################################################
# You should not need to edit below this line
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
+ * between tasks.
+ */
+
+use core::atomics;
+use core::clone::Clone;
+use core::kinds::{Share, Send};
+use core::mem::{min_align_of, size_of, drop};
+use core::mem;
+use core::ops::{Drop, Deref};
+use core::option::{Some, None, Option};
+use core::ptr;
+use core::ptr::RawPtr;
+use heap::deallocate;
+
+/// An atomically reference counted wrapper for shared state.
+///
+/// # Example
+///
+/// In this example, a large vector of floats is shared between several tasks.
+/// With simple pipes, without `Arc`, a copy would have to be made for each
+/// task.
+///
+/// ```rust
+/// extern crate sync;
+///
+/// use sync::Arc;
+///
+/// fn main() {
+/// let numbers = Vec::from_fn(100, |i| i as f32);
+/// let shared_numbers = Arc::new(numbers);
+///
+/// for _ in range(0, 10) {
+/// let child_numbers = shared_numbers.clone();
+///
+/// spawn(proc() {
+/// let local_numbers = child_numbers.as_slice();
+///
+/// // Work with the local numbers
+/// });
+/// }
+/// }
+/// ```
+#[unsafe_no_drop_flag]
+pub struct Arc<T> {
+ x: *mut ArcInner<T>,
+}
+
+/// A weak pointer to an `Arc`.
+///
+/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
+/// used to break cycles between `Arc` pointers.
+#[unsafe_no_drop_flag]
+pub struct Weak<T> {
+ x: *mut ArcInner<T>,
+}
+
+struct ArcInner<T> {
+ strong: atomics::AtomicUint,
+ weak: atomics::AtomicUint,
+ data: T,
+}
+
+impl<T: Share + Send> Arc<T> {
+ /// Create an atomically reference counted wrapper.
+ #[inline]
+ pub fn new(data: T) -> Arc<T> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda); see rc.rs in this crate for more info
+ let x = box ArcInner {
+ strong: atomics::AtomicUint::new(1),
+ weak: atomics::AtomicUint::new(1),
+ data: data,
+ };
+ Arc { x: unsafe { mem::transmute(x) } }
+ }
+
+ #[inline]
+ fn inner<'a>(&'a self) -> &'a ArcInner<T> {
+ // This unsafety is ok because while this arc is alive we're guaranteed
+ // that the inner pointer is valid. Furthermore, we know that the
+ // `ArcInner` structure itself is `Share` because the inner data is
+ // `Share` as well, so we're ok loaning out an immutable pointer to
+ // these contents.
+ unsafe { &*self.x }
+ }
+
+ /// Downgrades a strong pointer to a weak pointer
+ ///
+ /// Weak pointers will not keep the data alive. Once all strong references
+ /// to the underlying data have been dropped, the data itself will be
+ /// destroyed.
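+ ///
+ /// A rough sketch of downgrading and then trying to upgrade (mirroring the
+ /// tests below), assuming the `sync` re-export used in the `Arc` example
+ /// above:
+ ///
+ /// ```rust
+ /// extern crate sync;
+ ///
+ /// use sync::Arc;
+ ///
+ /// fn main() {
+ ///     let strong = Arc::new(5u);
+ ///     let weak = strong.downgrade();
+ ///     assert!(weak.upgrade().is_some()); // the data is still alive
+ ///     drop(strong);
+ ///     assert!(weak.upgrade().is_none()); // the last strong reference is gone
+ /// }
+ /// ```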
+ pub fn downgrade(&self) -> Weak<T> {
+ // See the clone() impl for why this is relaxed
+ self.inner().weak.fetch_add(1, atomics::Relaxed);
+ Weak { x: self.x }
+ }
+}
+
+impl<T: Share + Send> Clone for Arc<T> {
+ /// Duplicate an atomically reference counted wrapper.
+ ///
+ /// The resulting two `Arc` objects will point to the same underlying data
+ /// object. However, one of the `Arc` objects can be sent to another task,
+ /// allowing them to share the underlying data.
+ #[inline]
+ fn clone(&self) -> Arc<T> {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], Increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ self.inner().strong.fetch_add(1, atomics::Relaxed);
+ Arc { x: self.x }
+ }
+}
+
+impl<T: Send + Share> Deref<T> for Arc<T> {
+ #[inline]
+ fn deref<'a>(&'a self) -> &'a T {
+ &self.inner().data
+ }
+}
+
+impl<T: Send + Share + Clone> Arc<T> {
+ /// Acquires a mutable pointer to the inner contents by guaranteeing that
+ /// the reference count is one (no sharing is possible).
+ ///
+ /// This is also referred to as a copy-on-write operation because the inner
+ /// data is cloned if the reference count is greater than one.
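+ ///
+ /// A rough usage sketch (mirroring the tests below); this method is marked
+ /// experimental, so the exact interface may still change:
+ ///
+ /// ```rust
+ /// extern crate sync;
+ ///
+ /// use sync::Arc;
+ ///
+ /// fn main() {
+ ///     let mut a = Arc::new(5u);
+ ///     let b = a.clone();     // `a` and `b` share one allocation
+ ///     *a.make_unique() += 1; // the data is cloned before being mutated
+ ///     assert_eq!(*a, 6);
+ ///     assert_eq!(*b, 5);     // `b` still sees the original value
+ /// }
+ /// ```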
+ #[inline]
+ #[experimental]
+ pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
+ if self.inner().strong.load(atomics::SeqCst) != 1 {
+ *self = Arc::new(self.deref().clone())
+ }
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ unsafe { mem::transmute::<&_, &mut _>(self.deref()) }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Share + Send> Drop for Arc<T> {
+ fn drop(&mut self) {
+ // This structure has #[unsafe_no_drop_flag], so this drop glue may run
+ // more than once (but it is guaranteed to be zeroed after the first if
+ // it's run more than once)
+ if self.x.is_null() { return }
+
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object. The same
+ // logic applies to the `fetch_sub` on the `weak` count below.
+ if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the
+ // decreasing of the reference count synchronizes with this `Acquire`
+ // fence. This means that use of the data happens before decreasing
+ // the reference count, which happens before this fence, which
+ // happens before the deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // It is important to enforce any possible access to the object in
+ // one thread (through an existing reference) to *happen before*
+ // deleting the object in a different thread. This is achieved by a
+ // "release" operation after dropping a reference (any access to the
+ // object through this reference must obviously happened before),
+ // and an "acquire" operation before deleting the object.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ atomics::fence(atomics::Acquire);
+
+ // Destroy the data at this time, even though we may not free the box
+ // allocation itself (there may still be weak pointers lying around).
+ unsafe { drop(ptr::read(&self.inner().data)); }
+
+ if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
+ atomics::fence(atomics::Acquire);
+ unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
+ min_align_of::<ArcInner<T>>()) }
+ }
+ }
+}
+
+impl<T: Share + Send> Weak<T> {
+ /// Attempts to upgrade this weak reference to a strong reference.
+ ///
+ /// This method returns `None` if the strong reference count has already
+ /// reached 0, but if there are still other active strong references this
+ /// method will return a new strong reference to the data.
+ pub fn upgrade(&self) -> Option<Arc<T>> {
+ // We use a CAS loop to increment the strong count instead of a
+ // fetch_add because once the count hits 0 it must never be above 0.
+ let inner = self.inner();
+ loop {
+ let n = inner.strong.load(atomics::SeqCst);
+ if n == 0 { return None }
+ let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
+ if old == n { return Some(Arc { x: self.x }) }
+ }
+ }
+
+ #[inline]
+ fn inner<'a>(&'a self) -> &'a ArcInner<T> {
+ // See comments above for why this is "safe"
+ unsafe { &*self.x }
+ }
+}
+
+impl<T: Share + Send> Clone for Weak<T> {
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ // See comments in Arc::clone() for why this is relaxed
+ self.inner().weak.fetch_add(1, atomics::Relaxed);
+ Weak { x: self.x }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Share + Send> Drop for Weak<T> {
+ fn drop(&mut self) {
+ // see comments above for why this check is here
+ if self.x.is_null() { return }
+
+ // If we find out that we were the last weak pointer, then its time to
+ // deallocate the data entirely. See the discussion in Arc::drop() about
+ // the memory orderings
+ if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
+ atomics::fence(atomics::Acquire);
+ unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
+ min_align_of::<ArcInner<T>>()) }
+ }
+ }
+}
+
+#[cfg(test)]
+#[allow(experimental)]
+mod tests {
+ use std::clone::Clone;
+ use std::comm::channel;
+ use std::mem::drop;
+ use std::ops::{Drop, Deref, DerefMut};
+ use std::option::{Option, Some, None};
+ use std::sync::atomics;
+ use std::task;
+ use std::vec::Vec;
+ use super::{Arc, Weak};
+ use sync::Mutex;
+
+ struct Canary(*mut atomics::AtomicUint);
+
+ impl Drop for Canary {
+ fn drop(&mut self) {
+ unsafe {
+ match *self {
+ Canary(c) => {
+ (*c).fetch_add(1, atomics::SeqCst);
+ }
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn manually_share_arc() {
+ let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+ let arc_v = Arc::new(v);
+
+ let (tx, rx) = channel();
+
+ task::spawn(proc() {
+ let arc_v: Arc<Vec<int>> = rx.recv();
+ assert_eq!(*arc_v.get(3), 4);
+ });
+
+ tx.send(arc_v.clone());
+
+ assert_eq!(*arc_v.get(2), 3);
+ assert_eq!(*arc_v.get(4), 5);
+
+ info!("{:?}", arc_v);
+ }
+
+ #[test]
+ fn test_cowarc_clone_make_unique() {
+ let mut cow0 = Arc::new(75u);
+ let mut cow1 = cow0.clone();
+ let mut cow2 = cow1.clone();
+
+ assert!(75 == *cow0.make_unique());
+ assert!(75 == *cow1.make_unique());
+ assert!(75 == *cow2.make_unique());
+
+ *cow0.make_unique() += 1;
+ *cow1.make_unique() += 2;
+ *cow2.make_unique() += 3;
+
+ assert!(76 == *cow0);
+ assert!(77 == *cow1);
+ assert!(78 == *cow2);
+
+ // none should point to the same backing memory
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 != *cow2);
+ }
+
+ #[test]
+ fn test_cowarc_clone_unique2() {
+ let mut cow0 = Arc::new(75u);
+ let cow1 = cow0.clone();
+ let cow2 = cow1.clone();
+
+ assert!(75 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ *cow0.make_unique() += 1;
+
+ assert!(76 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ // cow1 and cow2 should share the same contents
+ // cow0 should have a unique reference
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 == *cow2);
+ }
+
+ #[test]
+ fn test_live() {
+ let x = Arc::new(5);
+ let y = x.downgrade();
+ assert!(y.upgrade().is_some());
+ }
+
+ #[test]
+ fn test_dead() {
+ let x = Arc::new(5);
+ let y = x.downgrade();
+ drop(x);
+ assert!(y.upgrade().is_none());
+ }
+
+ #[test]
+ fn weak_self_cyclic() {
+ struct Cycle {
+ x: Mutex<Option<Weak<Cycle>>>
+ }
+
+ let a = Arc::new(Cycle { x: Mutex::new(None) });
+ let b = a.clone().downgrade();
+ *a.deref().x.lock().deref_mut() = Some(b);
+
+ // hopefully we don't double-free (or leak)...
+ }
+
+ #[test]
+ fn drop_arc() {
+ let mut canary = atomics::AtomicUint::new(0);
+ let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+ drop(x);
+ assert!(canary.load(atomics::Acquire) == 1);
+ }
+
+ #[test]
+ fn drop_arc_weak() {
+ let mut canary = atomics::AtomicUint::new(0);
+ let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+ let arc_weak = arc.downgrade();
+ assert!(canary.load(atomics::Acquire) == 0);
+ drop(arc);
+ assert!(canary.load(atomics::Acquire) == 1);
+ drop(arc_weak);
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME: #13994: port to the sized deallocation API when available
+// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias`
+
+use core::intrinsics::{abort, cttz32};
+use core::option::{None, Option};
+use core::ptr::{RawPtr, mut_null, null};
+use libc::{c_char, c_int, c_void, size_t};
+
+#[cfg(not(test))] use core::raw;
+#[cfg(not(test))] use util;
+
+#[link(name = "jemalloc", kind = "static")]
+extern {
+ fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
+ fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
+ fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
+ fn je_dallocx(ptr: *mut c_void, flags: c_int);
+ fn je_nallocx(size: size_t, flags: c_int) -> size_t;
+ fn je_malloc_stats_print(write_cb: Option<extern "C" fn(cbopaque: *mut c_void, *c_char)>,
+ cbopaque: *mut c_void,
+ opts: *c_char);
+}
+
+// -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough
+#[cfg(not(windows), not(target_os = "android"))]
+#[link(name = "pthread")]
+extern {}
+
+// MALLOCX_ALIGN(a) macro
+#[inline(always)]
+fn mallocx_align(a: uint) -> c_int { unsafe { cttz32(a as u32) as c_int } }
+
+/// Return a pointer to `size` bytes of memory.
+///
+/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
+/// alignment must be no larger than the largest supported page size on the platform.
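+///
+/// A rough round-trip sketch, assuming this crate is linked as `alloc`; note
+/// that the matching `deallocate` call must be passed the same size and
+/// alignment:
+///
+/// ```rust
+/// extern crate alloc;
+///
+/// use alloc::heap::{allocate, deallocate};
+///
+/// fn main() {
+///     unsafe {
+///         // Room for 16 bytes, aligned to 8 bytes.
+///         let ptr = allocate(16, 8);
+///         *ptr = 7;
+///         assert_eq!(*ptr, 7);
+///         deallocate(ptr, 16, 8);
+///     }
+/// }
+/// ```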
+#[inline]
+pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
+ let ptr = je_mallocx(size as size_t, mallocx_align(align)) as *mut u8;
+ if ptr.is_null() {
+ abort()
+ }
+ ptr
+}
+
+/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory.
+///
+/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
+/// alignment must be no larger than the largest supported page size on the platform.
+///
+/// The `old_size` and `align` parameters are the parameters that were used to create the
+/// allocation referenced by `ptr`. The `old_size` parameter may also be the value returned by
+/// `usable_size` for the requested size.
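+///
+/// A rough sketch of growing an allocation, assuming this crate is linked as
+/// `alloc`:
+///
+/// ```rust
+/// extern crate alloc;
+///
+/// use alloc::heap::{allocate, reallocate, deallocate};
+///
+/// fn main() {
+///     unsafe {
+///         let ptr = allocate(8, 8);
+///         // Grow the allocation from 8 to 32 bytes, keeping 8-byte alignment.
+///         let ptr = reallocate(ptr, 32, 8, 8);
+///         deallocate(ptr, 32, 8);
+///     }
+/// }
+/// ```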
+#[inline]
+#[allow(unused_variable)] // for the parameter names in the documentation
+pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 {
+ let ptr = je_rallocx(ptr as *mut c_void, size as size_t, mallocx_align(align)) as *mut u8;
+ if ptr.is_null() {
+ abort()
+ }
+ ptr
+}
+
+/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory in-place.
+///
+/// Return true if successful, otherwise false if the allocation was not altered.
+///
+/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
+/// alignment must be no larger than the largest supported page size on the platform.
+///
+/// The `old_size` and `align` parameters are the parameters that were used to
+/// create the allocation referenced by `ptr`. The `old_size` parameter may be
+/// any value in range_inclusive(requested_size, usable_size).
+#[inline]
+#[allow(unused_variable)] // for the parameter names in the documentation
+pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool {
+ je_xallocx(ptr as *mut c_void, size as size_t, 0, mallocx_align(align)) == size as size_t
+}
+
+/// Deallocate the memory referenced by `ptr`.
+///
+/// The `ptr` parameter must not be null.
+///
+/// The `size` and `align` parameters are the parameters that were used to create the
+/// allocation referenced by `ptr`. The `size` parameter may also be the value returned by
+/// `usable_size` for the requested size.
+#[inline]
+#[allow(unused_variable)] // for the parameter names in the documentation
+pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) {
+ je_dallocx(ptr as *mut c_void, mallocx_align(align))
+}
+
+/// Return the usable size of an allocation created with the specified `size` and `align`.
+#[inline]
+pub fn usable_size(size: uint, align: uint) -> uint {
+ unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint }
+}
+
+/// Print implementation-defined allocator statistics.
+///
+/// These statistics may be inconsistent if other threads use the allocator during the call.
+#[unstable]
+pub fn stats_print() {
+ unsafe {
+ je_malloc_stats_print(None, mut_null(), null())
+ }
+}
+
+/// The allocator for unique pointers.
+#[cfg(not(test))]
+#[lang="exchange_malloc"]
+#[inline(always)]
+pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
+ exchange_malloc(size, align)
+}
+
+/// The allocator for unique pointers.
+#[inline]
+pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
+ // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
+ // allocations can point to this `static`. It would be incorrect to use a null
+ // pointer, due to enums assuming types like unique pointers are never null.
+ static EMPTY: () = ();
+
+ if size == 0 {
+ &EMPTY as *() as *mut u8
+ } else {
+ allocate(size, align)
+ }
+}
+
+#[cfg(not(test))]
+#[lang="exchange_free"]
+#[inline]
+// FIXME: #13994 (rustc should pass align and size here)
+unsafe fn exchange_free(ptr: *mut u8) {
+ deallocate(ptr, 0, 8);
+}
+
+// FIXME: #7496
+#[cfg(not(test))]
+#[lang="closure_exchange_malloc"]
+#[inline]
+#[allow(deprecated)]
+unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut u8 {
+ let total_size = util::get_box_size(size, align);
+ let p = allocate(total_size, 8);
+
+ let alloc = p as *mut raw::Box<()>;
+ (*alloc).drop_glue = drop_glue;
+
+ alloc as *mut u8
+}
+
+// hack for libcore
+#[no_mangle]
+#[doc(hidden)]
+#[deprecated]
+#[cfg(not(test))]
+pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
+ exchange_malloc(size, align)
+}
+
+// hack for libcore
+#[no_mangle]
+#[doc(hidden)]
+#[deprecated]
+#[cfg(not(test))]
+pub unsafe extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) {
+ deallocate(ptr, size, align)
+}
+
+#[cfg(test)]
+mod bench {
+ extern crate test;
+ use self::test::Bencher;
+
+ #[bench]
+ fn alloc_owned_small(b: &mut Bencher) {
+ b.iter(|| {
+ box 10
+ })
+ }
+
+ #[bench]
+ fn alloc_owned_big(b: &mut Bencher) {
+ b.iter(|| {
+ box [10, ..1000]
+ })
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rust's core allocation library
+//!
+//! This is the lowest level library through which allocation in Rust can be
+//! performed where the allocation is assumed to succeed. The allocation
+//! functions in this library abort the process when allocation fails.
+//!
+//! This library, like libcore, is not intended for general usage, but rather as
+//! a building block of other libraries. The types and interfaces in this
+//! library are reexported through the [standard library](../std/index.html),
+//! and should not be used through this library.
+//!
+//! Currently, there are four major definitions in this library.
+//!
+//! ## Owned pointers
+//!
+//! The [`Box`](owned/index.html) type is the core owned pointer type in Rust.
+//! There can only be one owner of a `Box`, and the owner can decide to mutate
+//! the contents.
+//!
+//! This type can be sent among tasks efficiently as the size of a `Box` value
+//! is just a pointer. Tree-like data structures are often built on owned
+//! pointers because each node often has only one owner, the parent.
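+//!
+//! A rough sketch of sending a `Box` to another task (the doctest runs with
+//! libstd available, which re-exports these types):
+//!
+//! ```rust
+//! fn main() {
+//!     // A `Box` has exactly one owner; sending it to another task moves the
+//!     // pointer rather than copying the pointed-to data.
+//!     let message = box 3u;
+//!     spawn(proc() {
+//!         assert_eq!(*message, 3);
+//!     });
+//! }
+//! ```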
+//!
+//! ## Reference counted pointers
+//!
+//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
+//! type intended for sharing memory within a task. An `Rc` pointer wraps a
+//! type, `T`, and only allows access to `&T`, a shared reference.
+//!
+//! This type is useful when inherited mutability is too constraining for an
+//! application (such as using `Box`), and is often paired with the `Cell` or
+//! `RefCell` types in order to allow mutation.
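+//!
+//! A rough sketch of sharing a value within one task, assuming this crate is
+//! linked as `alloc` (libstd re-exports this module as `std::rc`):
+//!
+//! ```rust
+//! extern crate alloc;
+//!
+//! use alloc::rc::Rc;
+//! use std::cell::RefCell;
+//!
+//! fn main() {
+//!     // Two handles to one task-local value; mutation goes through `RefCell`.
+//!     let shared = Rc::new(RefCell::new(vec!(1i, 2)));
+//!     let other = shared.clone();
+//!     other.borrow_mut().push(3);
+//!     assert_eq!(shared.borrow().len(), 3);
+//! }
+//! ```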
+//!
+//! ## Atomically reference counted pointers
+//!
+//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc`
+//! type. It provides the same functionality as `Rc`, except it requires
+//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
+//! sendable while `Rc<T>` is not.
+//!
+//! This type allows shared access to the contained data, and is often
+//! paired with synchronization primitives such as mutexes to allow mutation of
+//! shared resources.
+//!
+//! ## Heap interfaces
+//!
+//! The [`heap`](heap/index.html) and [`libc_heap`](libc_heap/index.html)
+//! modules are the unsafe interfaces to the underlying allocation systems. The
+//! `heap` module is considered the default heap, and is not necessarily backed
+//! by libc malloc/free. The `libc_heap` module is defined to be wired up to
+//! the system malloc/free.
+
+#![crate_id = "alloc#0.11.0-pre"]
+#![license = "MIT/ASL2"]
+#![crate_type = "rlib"]
+#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+ html_favicon_url = "http://www.rust-lang.org/favicon.ico",
+ html_root_url = "http://static.rust-lang.org/doc/master")]
+
+#![no_std]
+#![feature(phase)]
+
+#[phase(syntax, link)]
+extern crate core;
+extern crate libc;
+
+// Allow testing this library
+
+#[cfg(test)] extern crate sync;
+#[cfg(test)] extern crate native;
+#[cfg(test)] #[phase(syntax, link)] extern crate std;
+#[cfg(test)] #[phase(syntax, link)] extern crate log;
+
+// Heaps provided for low-level allocation strategies
+
+pub mod heap;
+pub mod libc_heap;
+pub mod util;
+
+// Primitive types using the heaps above
+
+#[cfg(not(test))]
+pub mod owned;
+pub mod arc;
+pub mod rc;
+
+#[cfg(not(test))]
+mod std {
+ pub use core::fmt;
+ pub use core::option;
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+//! The global (exchange) heap.
+
+use libc::{c_void, size_t, free, malloc, realloc};
+use core::ptr::{RawPtr, mut_null};
+use core::intrinsics::abort;
+
+/// A wrapper around libc::malloc, aborting on out-of-memory
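+///
+/// A rough sketch of allocating and freeing through the libc heap, assuming
+/// this crate is linked as `alloc`:
+///
+/// ```rust
+/// extern crate alloc;
+/// extern crate libc;
+///
+/// use alloc::libc_heap::malloc_raw;
+/// use libc::{c_void, free};
+///
+/// fn main() {
+///     unsafe {
+///         // Grab 4 bytes from the libc heap and hand them back to libc.
+///         let p = malloc_raw(4);
+///         *p = 42;
+///         free(p as *mut c_void);
+///     }
+/// }
+/// ```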
+#[inline]
+pub unsafe fn malloc_raw(size: uint) -> *mut u8 {
+ // `malloc(0)` may allocate, but it may also return a null pointer
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
+ if size == 0 {
+ mut_null()
+ } else {
+ let p = malloc(size as size_t);
+ if p.is_null() {
+ // we need a non-allocating way to print an error here
+ abort();
+ }
+ p as *mut u8
+ }
+}
+
+/// A wrapper around libc::realloc, aborting on out-of-memory
+#[inline]
+pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 {
+ // `realloc(ptr, 0)` may allocate, but it may also return a null pointer
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/realloc.html
+ if size == 0 {
+ free(ptr as *mut c_void);
+ mut_null()
+ } else {
+ let p = realloc(ptr as *mut c_void, size as size_t);
+ if p.is_null() {
+ // we need a non-allocating way to print an error here
+ abort();
+ }
+ p as *mut u8
+ }
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A unique pointer type
+
+use core::any::{Any, AnyRefExt};
+use core::clone::Clone;
+use core::cmp::{Eq, Ord, TotalEq, TotalOrd, Ordering};
+use core::default::Default;
+use core::fmt;
+use core::intrinsics;
+use core::mem;
+use core::raw::TraitObject;
+use core::result::{Ok, Err, Result};
+
+/// A value that represents the global exchange heap. This is the default
+/// place that the `box` keyword allocates into when no place is supplied.
+///
+/// The following two examples are equivalent:
+///
+/// let foo = box(HEAP) Bar::new(...);
+/// let foo = box Bar::new(...);
+#[lang="exchange_heap"]
+pub static HEAP: () = ();
+
+/// A type that represents a uniquely-owned value.
+#[lang="owned_box"]
+pub struct Box<T>(*T);
+
+impl<T: Default> Default for Box<T> {
+ fn default() -> Box<T> { box Default::default() }
+}
+
+impl<T: Clone> Clone for Box<T> {
+ /// Return a copy of the owned box.
+ #[inline]
+ fn clone(&self) -> Box<T> { box {(**self).clone()} }
+
+ /// Perform copy-assignment from `source` by reusing the existing allocation.
+ #[inline]
+ fn clone_from(&mut self, source: &Box<T>) {
+ (**self).clone_from(&(**source));
+ }
+}
+
+// box pointers
+impl<T:Eq> Eq for Box<T> {
+ #[inline]
+ fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
+ #[inline]
+ fn ne(&self, other: &Box<T>) -> bool { *(*self) != *(*other) }
+}
+impl<T:Ord> Ord for Box<T> {
+ #[inline]
+ fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
+ #[inline]
+ fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
+ #[inline]
+ fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
+ #[inline]
+ fn gt(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
+}
+impl<T: TotalOrd> TotalOrd for Box<T> {
+ #[inline]
+ fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
+}
+impl<T: TotalEq> TotalEq for Box<T> {}
+
+/// Extension methods for an owning `Any` trait object
+pub trait AnyOwnExt {
+ /// Returns the boxed value if it is of type `T`, or
+ /// `Err(Self)` if it isn't.
+ fn move<T: 'static>(self) -> Result<Box<T>, Self>;
+}
+
+impl AnyOwnExt for Box<Any> {
+ #[inline]
+ fn move<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
+ if self.is::<T>() {
+ unsafe {
+ // Get the raw representation of the trait object
+ let to: TraitObject =
+ *mem::transmute::<&Box<Any>, &TraitObject>(&self);
+
+ // Prevent destructor on self being run
+ intrinsics::forget(self);
+
+ // Extract the data pointer
+ Ok(mem::transmute(to.data))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<T: fmt::Show> fmt::Show for Box<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[cfg(not(stage0))]
+impl fmt::Show for Box<Any> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("Box<Any>")
+ }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*! Task-local reference-counted boxes (`Rc` type)
+
+The `Rc` type provides shared ownership of an immutable value. Destruction is deterministic, and
+will occur as soon as the last owner is gone. It is marked as non-sendable because it avoids the
+overhead of atomic reference counting.
+
+The `downgrade` method can be used to create a non-owning `Weak` pointer to the box. A `Weak`
+pointer can be upgraded to an `Rc` pointer, but will return `None` if the value has already been
+freed.
+
+For example, a tree with parent pointers can be represented by putting the nodes behind strong `Rc`
+pointers, and then storing the parent pointers as `Weak` pointers.
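+
+A rough sketch of that parent-pointer pattern, assuming this crate is linked as
+`alloc` (libstd re-exports this module as `std::rc`):
+
+```rust
+extern crate alloc;
+
+use alloc::rc::{Rc, Weak};
+use std::cell::RefCell;
+
+struct Node {
+    value: int,
+    parent: RefCell<Option<Weak<Node>>>,
+}
+
+fn main() {
+    let parent = Rc::new(Node { value: 1, parent: RefCell::new(None) });
+    let child = Rc::new(Node { value: 2, parent: RefCell::new(None) });
+
+    // The child points back at its parent through a `Weak` pointer, so the two
+    // nodes do not keep each other alive in a cycle.
+    *child.parent.borrow_mut() = Some(parent.downgrade());
+
+    match *child.parent.borrow() {
+        Some(ref weak) => assert_eq!(weak.upgrade().unwrap().value, 1),
+        None => fail!("the parent pointer was not set"),
+    }
+}
+```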
+
+*/
+
+use core::mem::transmute;
+use core::cell::Cell;
+use core::clone::Clone;
+use core::cmp::{Eq, Ord, TotalEq, TotalOrd, Ordering};
+use core::kinds::marker;
+use core::ops::{Deref, Drop};
+use core::option::{Option, Some, None};
+use core::ptr;
+use core::ptr::RawPtr;
+use core::mem::{min_align_of, size_of};
+
+use heap::deallocate;
+
+struct RcBox<T> {
+ value: T,
+ strong: Cell<uint>,
+ weak: Cell<uint>
+}
+
+/// Immutable reference counted pointer type
+#[unsafe_no_drop_flag]
+pub struct Rc<T> {
+ ptr: *mut RcBox<T>,
+ nosend: marker::NoSend,
+ noshare: marker::NoShare
+}
+
+impl<T> Rc<T> {
+ /// Construct a new reference-counted box
+ pub fn new(value: T) -> Rc<T> {
+ unsafe {
+ Rc {
+ // there is an implicit weak pointer owned by all the
+ // strong pointers, which ensures that the weak
+ // destructor never frees the allocation while the
+ // strong destructor is running, even if the weak
+ // pointer is stored inside the strong one.
+ ptr: transmute(box RcBox {
+ value: value,
+ strong: Cell::new(1),
+ weak: Cell::new(1)
+ }),
+ nosend: marker::NoSend,
+ noshare: marker::NoShare
+ }
+ }
+ }
+}
+
+impl<T> Rc<T> {
+ /// Downgrade the reference-counted pointer to a weak reference
+ pub fn downgrade(&self) -> Weak<T> {
+ self.inc_weak();
+ Weak {
+ ptr: self.ptr,
+ nosend: marker::NoSend,
+ noshare: marker::NoShare
+ }
+ }
+}
+
+impl<T> Deref<T> for Rc<T> {
+ /// Borrow the value contained in the reference-counted box
+ #[inline(always)]
+ fn deref<'a>(&'a self) -> &'a T {
+ &self.inner().value
+ }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for Rc<T> {
+ fn drop(&mut self) {
+ unsafe {
+ if !self.ptr.is_null() {
+ self.dec_strong();
+ if self.strong() == 0 {
+ ptr::read(self.deref()); // destroy the contained object
+
+ // remove the implicit "strong weak" pointer now
+ // that we've destroyed the contents.
+ self.dec_weak();
+
+ if self.weak() == 0 {
+ deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
+ min_align_of::<RcBox<T>>())
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<T> Clone for Rc<T> {
+ #[inline]
+ fn clone(&self) -> Rc<T> {
+ self.inc_strong();
+ Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
+ }
+}
+
+impl<T: Eq> Eq for Rc<T> {
+ #[inline(always)]
+ fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
+ #[inline(always)]
+ fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
+}
+
+impl<T: TotalEq> TotalEq for Rc<T> {}
+
+impl<T: Ord> Ord for Rc<T> {
+ #[inline(always)]
+ fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
+
+ #[inline(always)]
+ fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
+
+ #[inline(always)]
+ fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
+
+ #[inline(always)]
+ fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
+}
+
+impl<T: TotalOrd> TotalOrd for Rc<T> {
+ #[inline]
+ fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
+}
+
+/// Weak reference to a reference-counted box
+#[unsafe_no_drop_flag]
+pub struct Weak<T> {
+ ptr: *mut RcBox<T>,
+ nosend: marker::NoSend,
+ noshare: marker::NoShare
+}
+
+impl<T> Weak<T> {
+ /// Upgrade a weak reference to a strong reference
+ pub fn upgrade(&self) -> Option<Rc<T>> {
+ if self.strong() == 0 {
+ None
+ } else {
+ self.inc_strong();
+ Some(Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare })
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for Weak<T> {
+ fn drop(&mut self) {
+ unsafe {
+ if !self.ptr.is_null() {
+ self.dec_weak();
+ // the weak count starts at 1, and will only go to
+ // zero if all the strong pointers have disappeared.
+ if self.weak() == 0 {
+ deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
+ min_align_of::<RcBox<T>>())
+ }
+ }
+ }
+ }
+}
+
+impl<T> Clone for Weak<T> {
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ self.inc_weak();
+ Weak { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
+ }
+}
+
+#[doc(hidden)]
+trait RcBoxPtr<T> {
+ fn inner<'a>(&'a self) -> &'a RcBox<T>;
+
+ #[inline]
+ fn strong(&self) -> uint { self.inner().strong.get() }
+
+ #[inline]
+ fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
+
+ #[inline]
+ fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
+
+ #[inline]
+ fn weak(&self) -> uint { self.inner().weak.get() }
+
+ #[inline]
+ fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
+
+ #[inline]
+ fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
+}
+
+impl<T> RcBoxPtr<T> for Rc<T> {
+ #[inline(always)]
+ fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
+}
+
+impl<T> RcBoxPtr<T> for Weak<T> {
+ #[inline(always)]
+ fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Rc, Weak};
+ use std::cell::RefCell;
+ use std::option::{Option, Some, None};
+ use std::mem::drop;
+ use std::clone::Clone;
+
+ #[test]
+ fn test_clone() {
+ let x = Rc::new(RefCell::new(5));
+ let y = x.clone();
+ *x.borrow_mut() = 20;
+ assert_eq!(*y.borrow(), 20);
+ }
+
+ #[test]
+ fn test_simple() {
+ let x = Rc::new(5);
+ assert_eq!(*x, 5);
+ }
+
+ #[test]
+ fn test_simple_clone() {
+ let x = Rc::new(5);
+ let y = x.clone();
+ assert_eq!(*x, 5);
+ assert_eq!(*y, 5);
+ }
+
+ #[test]
+ fn test_destructor() {
+ let x = Rc::new(box 5);
+ assert_eq!(**x, 5);
+ }
+
+ #[test]
+ fn test_live() {
+ let x = Rc::new(5);
+ let y = x.downgrade();
+ assert!(y.upgrade().is_some());
+ }
+
+ #[test]
+ fn test_dead() {
+ let x = Rc::new(5);
+ let y = x.downgrade();
+ drop(x);
+ assert!(y.upgrade().is_none());
+ }
+
+ #[test]
+ fn gc_inside() {
+ // see issue #11532
+ use std::gc::Gc;
+ let a = Rc::new(RefCell::new(Gc::new(1)));
+ assert!(a.try_borrow_mut().is_some());
+ }
+
+ #[test]
+ fn weak_self_cyclic() {
+ struct Cycle {
+ x: RefCell<Option<Weak<Cycle>>>
+ }
+
+ let a = Rc::new(Cycle { x: RefCell::new(None) });
+ let b = a.clone().downgrade();
+ *a.x.borrow_mut() = Some(b);
+
+ // hopefully we don't double-free (or leak)...
+ }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![doc(hidden)]
+
+use core::mem;
+use core::raw;
+
+#[inline]
+#[deprecated]
+pub fn get_box_size(body_size: uint, body_align: uint) -> uint {
+ let header_size = mem::size_of::<raw::Box<()>>();
+ let total_size = align_to(header_size, body_align) + body_size;
+ total_size
+}
+
+// Rounds `size` up to the next multiple of `align`. `align` is required to be
+// a power of two.
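+//
+// For example, align_to(13, 8) == 16 and align_to(16, 8) == 16.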
+#[inline]
+fn align_to(size: uint, align: uint) -> uint {
+ assert!(align != 0);
+ (size + align - 1) & !(align - 1)
+}
// Make and rand accessible for benchmarking/testcases
#[cfg(test)] extern crate rand;
-extern crate libc;
+extern crate alloc;
extern crate core;
+extern crate libc;
// Make std testable by not duplicating lang items. See #2912
#[cfg(test)] extern crate realstd = "std";
-#[cfg(test)] pub use kinds = realstd::kinds;
-#[cfg(test)] pub use ops = realstd::ops;
-#[cfg(test)] pub use cmp = realstd::cmp;
-#[cfg(test)] pub use ty = realstd::ty;
-#[cfg(test)] pub use owned = realstd::owned;
+#[cfg(test)] pub use realstd::kinds;
+#[cfg(test)] pub use realstd::ops;
+#[cfg(test)] pub use realstd::cmp;
+#[cfg(test)] pub use realstd::ty;
-#[cfg(not(test))] pub use cmp = core::cmp;
-#[cfg(not(test))] pub use kinds = core::kinds;
-#[cfg(not(test))] pub use ops = core::ops;
-#[cfg(not(test))] pub use ty = core::ty;
+#[cfg(not(test))] pub use core::cmp;
+#[cfg(not(test))] pub use core::kinds;
+#[cfg(not(test))] pub use core::ops;
+#[cfg(not(test))] pub use core::ty;
pub use core::any;
pub use core::bool;
pub use core::tuple;
pub use core::result;
+pub use alloc::owned;
+pub use alloc::rc;
+
// Run tests with libgreen instead of libnative.
//
// FIXME: This egregiously hacks around starting the test runner in a different
pub mod ascii;
-pub mod rc;
pub mod gc;
-#[cfg(not(test))]
-pub mod owned;
/* Common traits */
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Operations on unique pointer types
-
-use any::{Any, AnyRefExt};
-use clone::Clone;
-use cmp::{Eq, Ord, TotalEq, TotalOrd, Ordering};
-use default::Default;
-use fmt;
-use intrinsics;
-use mem;
-use raw::TraitObject;
-use result::{Ok, Err, Result};
-
-/// A value that represents the global exchange heap. This is the default
-/// place that the `box` keyword allocates into when no place is supplied.
-///
-/// The following two examples are equivalent:
-///
-/// let foo = box(HEAP) Bar::new(...);
-/// let foo = box Bar::new(...);
-#[lang="exchange_heap"]
-pub static HEAP: () = ();
-
-/// A type that represents a uniquely-owned value.
-#[lang="owned_box"]
-pub struct Box<T>(*T);
-
-impl<T: Default> Default for Box<T> {
- fn default() -> Box<T> { box Default::default() }
-}
-
-impl<T: Clone> Clone for Box<T> {
- /// Return a copy of the owned box.
- #[inline]
- fn clone(&self) -> Box<T> { box {(**self).clone()} }
-
- /// Perform copy-assignment from `source` by reusing the existing allocation.
- #[inline]
- fn clone_from(&mut self, source: &Box<T>) {
- (**self).clone_from(&(**source));
- }
-}
-
-// box pointers
-impl<T:Eq> Eq for Box<T> {
- #[inline]
- fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
- #[inline]
- fn ne(&self, other: &Box<T>) -> bool { *(*self) != *(*other) }
-}
-impl<T:Ord> Ord for Box<T> {
- #[inline]
- fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
- #[inline]
- fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
- #[inline]
- fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
- #[inline]
- fn gt(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
-}
-impl<T: TotalOrd> TotalOrd for Box<T> {
- #[inline]
- fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
-}
-impl<T: TotalEq> TotalEq for Box<T> {}
-
-/// Extension methods for an owning `Any` trait object
-pub trait AnyOwnExt {
- /// Returns the boxed value if it is of type `T`, or
- /// `Err(Self)` if it isn't.
- fn move<T: 'static>(self) -> Result<Box<T>, Self>;
-}
-
-impl AnyOwnExt for Box<Any> {
- #[inline]
- fn move<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
- if self.is::<T>() {
- unsafe {
- // Get the raw representation of the trait object
- let to: TraitObject =
- *mem::transmute::<&Box<Any>, &TraitObject>(&self);
-
- // Prevent destructor on self being run
- intrinsics::forget(self);
-
- // Extract the data pointer
- Ok(mem::transmute(to.data))
- }
- } else {
- Err(self)
- }
- }
-}
-
-impl<T: fmt::Show> fmt::Show for Box<T> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[cfg(not(stage0))]
-impl fmt::Show for Box<Any> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad("Box<Any>")
- }
-}
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*! Task-local reference-counted boxes (`Rc` type)
-
-The `Rc` type provides shared ownership of an immutable value. Destruction is deterministic, and
-will occur as soon as the last owner is gone. It is marked as non-sendable because it avoids the
-overhead of atomic reference counting.
-
-The `downgrade` method can be used to create a non-owning `Weak` pointer to the box. A `Weak`
-pointer can be upgraded to an `Rc` pointer, but will return `None` if the value has already been
-freed.
-
-For example, a tree with parent pointers can be represented by putting the nodes behind `Strong`
-pointers, and then storing the parent pointers as `Weak` pointers.
-
-*/
-
-use mem::transmute;
-use cell::Cell;
-use clone::Clone;
-use cmp::{Eq, Ord, TotalEq, TotalOrd, Ordering};
-use kinds::marker;
-use ops::{Deref, Drop};
-use option::{Option, Some, None};
-use ptr;
-use ptr::RawPtr;
-use mem::{min_align_of, size_of};
-use rt::heap::deallocate;
-
-struct RcBox<T> {
- value: T,
- strong: Cell<uint>,
- weak: Cell<uint>
-}
-
-/// Immutable reference counted pointer type
-#[unsafe_no_drop_flag]
-pub struct Rc<T> {
- ptr: *mut RcBox<T>,
- nosend: marker::NoSend,
- noshare: marker::NoShare
-}
-
-impl<T> Rc<T> {
- /// Construct a new reference-counted box
- pub fn new(value: T) -> Rc<T> {
- unsafe {
- Rc {
- // there is an implicit weak pointer owned by all the
- // strong pointers, which ensures that the weak
- // destructor never frees the allocation while the
- // strong destructor is running, even if the weak
- // pointer is stored inside the strong one.
- ptr: transmute(box RcBox {
- value: value,
- strong: Cell::new(1),
- weak: Cell::new(1)
- }),
- nosend: marker::NoSend,
- noshare: marker::NoShare
- }
- }
- }
-}
-
-impl<T> Rc<T> {
- /// Downgrade the reference-counted pointer to a weak reference
- pub fn downgrade(&self) -> Weak<T> {
- self.inc_weak();
- Weak {
- ptr: self.ptr,
- nosend: marker::NoSend,
- noshare: marker::NoShare
- }
- }
-}
-
-impl<T> Deref<T> for Rc<T> {
- /// Borrow the value contained in the reference-counted box
- #[inline(always)]
- fn deref<'a>(&'a self) -> &'a T {
- &self.inner().value
- }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for Rc<T> {
- fn drop(&mut self) {
- unsafe {
- if !self.ptr.is_null() {
- self.dec_strong();
- if self.strong() == 0 {
- ptr::read(self.deref()); // destroy the contained object
-
- // remove the implicit "strong weak" pointer now
- // that we've destroyed the contents.
- self.dec_weak();
-
- if self.weak() == 0 {
- deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
- min_align_of::<RcBox<T>>())
- }
- }
- }
- }
- }
-}
-
-impl<T> Clone for Rc<T> {
- #[inline]
- fn clone(&self) -> Rc<T> {
- self.inc_strong();
- Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
- }
-}
-
-impl<T: Eq> Eq for Rc<T> {
- #[inline(always)]
- fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
- #[inline(always)]
- fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
-}
-
-impl<T: TotalEq> TotalEq for Rc<T> {}
-
-impl<T: Ord> Ord for Rc<T> {
- #[inline(always)]
- fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
-
- #[inline(always)]
- fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
-
- #[inline(always)]
- fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
-
- #[inline(always)]
- fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
-}
-
-impl<T: TotalOrd> TotalOrd for Rc<T> {
- #[inline]
- fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
-}
-
-/// Weak reference to a reference-counted box
-#[unsafe_no_drop_flag]
-pub struct Weak<T> {
- ptr: *mut RcBox<T>,
- nosend: marker::NoSend,
- noshare: marker::NoShare
-}
-
-impl<T> Weak<T> {
- /// Upgrade a weak reference to a strong reference
- pub fn upgrade(&self) -> Option<Rc<T>> {
- if self.strong() == 0 {
- None
- } else {
- self.inc_strong();
- Some(Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare })
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for Weak<T> {
- fn drop(&mut self) {
- unsafe {
- if !self.ptr.is_null() {
- self.dec_weak();
- // the weak count starts at 1, and will only go to
- // zero if all the strong pointers have disappeared.
- if self.weak() == 0 {
- deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
- min_align_of::<RcBox<T>>())
- }
- }
- }
- }
-}
-
-impl<T> Clone for Weak<T> {
- #[inline]
- fn clone(&self) -> Weak<T> {
- self.inc_weak();
- Weak { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
- }
-}
-
-#[doc(hidden)]
-trait RcBoxPtr<T> {
- fn inner<'a>(&'a self) -> &'a RcBox<T>;
-
- #[inline]
- fn strong(&self) -> uint { self.inner().strong.get() }
-
- #[inline]
- fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
-
- #[inline]
- fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
-
- #[inline]
- fn weak(&self) -> uint { self.inner().weak.get() }
-
- #[inline]
- fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
-
- #[inline]
- fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
-}
-
-impl<T> RcBoxPtr<T> for Rc<T> {
- #[inline(always)]
- fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
-}
-
-impl<T> RcBoxPtr<T> for Weak<T> {
- #[inline(always)]
- fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
-}
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
- use super::*;
- use cell::RefCell;
-
- #[test]
- fn test_clone() {
- let x = Rc::new(RefCell::new(5));
- let y = x.clone();
- *x.borrow_mut() = 20;
- assert_eq!(*y.borrow(), 20);
- }
-
- #[test]
- fn test_simple() {
- let x = Rc::new(5);
- assert_eq!(*x, 5);
- }
-
- #[test]
- fn test_simple_clone() {
- let x = Rc::new(5);
- let y = x.clone();
- assert_eq!(*x, 5);
- assert_eq!(*y, 5);
- }
-
- #[test]
- fn test_destructor() {
- let x = Rc::new(box 5);
- assert_eq!(**x, 5);
- }
-
- #[test]
- fn test_live() {
- let x = Rc::new(5);
- let y = x.downgrade();
- assert!(y.upgrade().is_some());
- }
-
- #[test]
- fn test_dead() {
- let x = Rc::new(5);
- let y = x.downgrade();
- drop(x);
- assert!(y.upgrade().is_none());
- }
-
- #[test]
- fn gc_inside() {
- // see issue #11532
- use gc::Gc;
- let a = Rc::new(RefCell::new(Gc::new(1)));
- assert!(a.try_borrow_mut().is_some());
- }
-
- #[test]
- fn weak_self_cyclic() {
- struct Cycle {
- x: RefCell<Option<Weak<Cycle>>>
- }
-
- let a = Rc::new(Cycle { x: RefCell::new(None) });
- let b = a.clone().downgrade();
- *a.x.borrow_mut() = Some(b);
-
- // hopefully we don't double-free (or leak)...
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// FIXME: #13994: port to the sized deallocation API when available
-// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias`
-
-use intrinsics::{abort, cttz32};
-use libc::{c_char, c_int, c_void, size_t};
-use ptr::{RawPtr, mut_null, null};
-use option::{None, Option};
-
-#[link(name = "jemalloc", kind = "static")]
-extern {
- fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
- fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
- fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
- fn je_dallocx(ptr: *mut c_void, flags: c_int);
- fn je_nallocx(size: size_t, flags: c_int) -> size_t;
- fn je_malloc_stats_print(write_cb: Option<extern "C" fn(cbopaque: *mut c_void, *c_char)>,
- cbopaque: *mut c_void,
- opts: *c_char);
-}
-
-// -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough
-#[cfg(not(windows), not(target_os = "android"))]
-#[link(name = "pthread")]
-extern {}
-
-// MALLOCX_ALIGN(a) macro
-#[inline(always)]
-fn mallocx_align(a: uint) -> c_int { unsafe { cttz32(a as u32) as c_int } }
-
-/// Return a pointer to `size` bytes of memory.
-///
-/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
-/// alignment must be no larger than the largest supported page size on the platform.
-#[inline]
-pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
- let ptr = je_mallocx(size as size_t, mallocx_align(align)) as *mut u8;
- if ptr.is_null() {
- abort()
- }
- ptr
-}
-
-/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory.
-///
-/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
-/// alignment must be no larger than the largest supported page size on the platform.
-///
-/// The `old_size` and `align` parameters are the parameters that were used to create the
-/// allocation referenced by `ptr`. The `old_size` parameter may also be the value returned by
-/// `usable_size` for the requested size.
-#[inline]
-#[allow(unused_variable)] // for the parameter names in the documentation
-pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 {
- let ptr = je_rallocx(ptr as *mut c_void, size as size_t, mallocx_align(align)) as *mut u8;
- if ptr.is_null() {
- abort()
- }
- ptr
-}
-
-/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory in-place.
-///
-/// Return true if successful, otherwise false if the allocation was not altered.
-///
-/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The
-/// alignment must be no larger than the largest supported page size on the platform.
-///
-/// The `old_size` and `align` parameters are the parameters that were used to
-/// create the allocation referenced by `ptr`. The `old_size` parameter may be
-/// any value in range_inclusive(requested_size, usable_size).
-#[inline]
-#[allow(unused_variable)] // for the parameter names in the documentation
-pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool {
- je_xallocx(ptr as *mut c_void, size as size_t, 0, mallocx_align(align)) == size as size_t
-}
-
-/// Deallocate the memory referenced by `ptr`.
-///
-/// The `ptr` parameter must not be null.
-///
-/// The `size` and `align` parameters are the parameters that were used to create the
-/// allocation referenced by `ptr`. The `size` parameter may also be the value returned by
-/// `usable_size` for the requested size.
-#[inline]
-#[allow(unused_variable)] // for the parameter names in the documentation
-pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) {
- je_dallocx(ptr as *mut c_void, mallocx_align(align))
-}
-
-/// Return the usable size of an allocation created with the specified the `size` and `align`.
-#[inline]
-pub fn usable_size(size: uint, align: uint) -> uint {
- unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint }
-}
-
-/// Print implementation-defined allocator statistics.
-///
-/// These statistics may be inconsistent if other threads use the allocator during the call.
-#[unstable]
-pub fn stats_print() {
- unsafe {
- je_malloc_stats_print(None, mut_null(), null())
- }
-}
-
-/// The allocator for unique pointers.
-#[cfg(not(test))]
-#[lang="exchange_malloc"]
-#[inline(always)]
-pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
- exchange_malloc(size, align)
-}
-
-/// The allocator for unique pointers.
-#[inline]
-pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
- // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
- // allocations can point to this `static`. It would be incorrect to use a null
- // pointer, due to enums assuming types like unique pointers are never null.
- static EMPTY: () = ();
-
- if size == 0 {
- &EMPTY as *() as *mut u8
- } else {
- allocate(size, align)
- }
-}
-
-#[cfg(not(test))]
-#[lang="exchange_free"]
-#[inline]
-// FIXME: #13994 (rustc should pass align and size here)
-unsafe fn exchange_free(ptr: *mut u8) {
- deallocate(ptr, 0, 8);
-}
-
-// FIXME: #7496
-#[cfg(not(test))]
-#[lang="closure_exchange_malloc"]
-#[inline]
-unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut u8 {
- let total_size = ::rt::util::get_box_size(size, align);
- let p = allocate(total_size, 8);
-
- let alloc = p as *mut ::raw::Box<()>;
- (*alloc).drop_glue = drop_glue;
-
- alloc as *mut u8
-}
-
-// hack for libcore
-#[no_mangle]
-#[doc(hidden)]
-#[deprecated]
-#[cfg(not(test))]
-pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
- exchange_malloc(size, align)
-}
-
-// hack for libcore
-#[no_mangle]
-#[doc(hidden)]
-#[deprecated]
-#[cfg(not(test))]
-pub unsafe extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) {
- deallocate(ptr, size, align)
-}
-
-#[cfg(test)]
-mod bench {
- extern crate test;
- use self::test::Bencher;
-
- #[bench]
- fn alloc_owned_small(b: &mut Bencher) {
- b.iter(|| {
- box 10
- })
- }
-
- #[bench]
- fn alloc_owned_big(b: &mut Bencher) {
- b.iter(|| {
- box [10, ..1000]
- })
- }
-}
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-//! The global (exchange) heap.
-
-use libc::{c_void, size_t, free, malloc, realloc};
-use ptr::{RawPtr, mut_null};
-use intrinsics::abort;
-
-/// A wrapper around libc::malloc, aborting on out-of-memory
-#[inline]
-pub unsafe fn malloc_raw(size: uint) -> *mut u8 {
- // `malloc(0)` may allocate, but it may also return a null pointer
- // http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
- if size == 0 {
- mut_null()
- } else {
- let p = malloc(size as size_t);
- if p.is_null() {
- // we need a non-allocating way to print an error here
- abort();
- }
- p as *mut u8
- }
-}
-
-/// A wrapper around libc::realloc, aborting on out-of-memory
-#[inline]
-pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 {
- // `realloc(ptr, 0)` may allocate, but it may also return a null pointer
- // http://pubs.opengroup.org/onlinepubs/9699919799/functions/realloc.html
- if size == 0 {
- free(ptr as *mut c_void);
- mut_null()
- } else {
- let p = realloc(ptr as *mut c_void, size as size_t);
- if p.is_null() {
- // we need a non-allocating way to print an error here
- abort();
- }
- p as *mut u8
- }
-}
//! The local, garbage collected heap
+use alloc::util;
use iter::Iterator;
use libc::{c_void, free};
use mem;
#[inline]
pub fn alloc(&mut self, drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut Box {
- let total_size = ::rt::util::get_box_size(size, align);
+ let total_size = util::get_box_size(size, align);
let alloc = self.memory_region.malloc(total_size);
{
// Make sure that we can't use `mybox` outside of this scope
pub use self::util::{Stdio, Stdout, Stderr};
+pub use alloc::{heap, libc_heap};
+
// FIXME: these probably shouldn't be public...
#[doc(hidden)]
pub mod shouldnt_be_public {
// Internal macros used by the runtime.
mod macros;
-/// Wrappers around malloc / realloc aborting on out-of-memory.
-pub mod libc_heap;
-
-/// The low-level memory allocation API.
-pub mod heap;
-
/// Implementations of language-critical runtime features like @.
pub mod task;
// FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
-#[deprecated]
-#[doc(hidden)]
-#[inline]
-pub fn get_box_size(body_size: uint, body_align: uint) -> uint {
- let header_size = ::mem::size_of::<::raw::Box<()>>();
- let total_size = align_to(header_size, body_align) + body_size;
- total_size
-}
-
-// Rounds |size| up to the nearest multiple of |alignment|. Invariant: |alignment| is a power
-// of two.
-#[inline]
-fn align_to(size: uint, align: uint) -> uint {
- assert!(align != 0);
- (size + align - 1) & !(align - 1)
-}
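// Worked example for the two helpers above (editor's note, not in the patch),
// assuming a 16-byte `raw::Box<()>` header:
//   align_to(17, 8)      == 24                       (round up to a multiple of 8)
//   align_to(16, 8)      == 16                       (already aligned)
//   get_box_size(24, 8)  == align_to(16, 8)  + 24 == 40
//   get_box_size(24, 32) == align_to(16, 32) + 24 == 56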
-
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!
- * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
- * between tasks.
- */
-
-use std::mem;
-use std::ptr;
-use std::rt::heap::deallocate;
-use std::sync::atomics;
-use std::mem::{min_align_of, size_of};
-
-/// An atomically reference counted wrapper for shared state.
-///
-/// # Example
-///
-/// In this example, a large vector of floats is shared between several tasks.
-/// With simple pipes, without `Arc`, a copy would have to be made for each
-/// task.
-///
-/// ```rust
-/// use sync::Arc;
-///
-/// fn main() {
-/// let numbers = Vec::from_fn(100, |i| i as f32);
-/// let shared_numbers = Arc::new(numbers);
-///
-/// for _ in range(0, 10) {
-/// let child_numbers = shared_numbers.clone();
-///
-/// spawn(proc() {
-/// let local_numbers = child_numbers.as_slice();
-///
-/// // Work with the local numbers
-/// });
-/// }
-/// }
-/// ```
-#[unsafe_no_drop_flag]
-pub struct Arc<T> {
- x: *mut ArcInner<T>,
-}
-
-/// A weak pointer to an `Arc`.
-///
-/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
-/// used to break cycles between `Arc` pointers.
-#[unsafe_no_drop_flag]
-pub struct Weak<T> {
- x: *mut ArcInner<T>,
-}
-
-struct ArcInner<T> {
- strong: atomics::AtomicUint,
- weak: atomics::AtomicUint,
- data: T,
-}
-
-impl<T: Share + Send> Arc<T> {
- /// Create an atomically reference counted wrapper.
- #[inline]
- pub fn new(data: T) -> Arc<T> {
- // Start the weak pointer count as 1 which is the weak pointer that's
- // held by all the strong pointers (kinda), see std/rc.rs for more info
- let x = box ArcInner {
- strong: atomics::AtomicUint::new(1),
- weak: atomics::AtomicUint::new(1),
- data: data,
- };
- Arc { x: unsafe { mem::transmute(x) } }
- }
-
- #[inline]
- fn inner<'a>(&'a self) -> &'a ArcInner<T> {
- // This unsafety is ok because while this arc is alive we're guaranteed
- // that the inner pointer is valid. Furthermore, we know that the
- // `ArcInner` structure itself is `Share` because the inner data is
- // `Share` as well, so we're ok loaning out an immutable pointer to
- // these contents.
- unsafe { &*self.x }
- }
-
- /// Downgrades a strong pointer to a weak pointer
- ///
- /// Weak pointers will not keep the data alive. Once all strong references
- /// to the underlying data have been dropped, the data itself will be
- /// destroyed.
- pub fn downgrade(&self) -> Weak<T> {
- // See the clone() impl for why this is relaxed
- self.inner().weak.fetch_add(1, atomics::Relaxed);
- Weak { x: self.x }
- }
-}
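// Editor's note (illustrative, not in the patch): how the two counters evolve
// for `let a = Arc::new(x); let w = a.downgrade(); drop(a); drop(w);`
//   Arc::new      -> strong = 1, weak = 1   (the weak count held collectively by strong pointers)
//   a.downgrade() -> strong = 1, weak = 2
//   drop(a)       -> strong = 0 (data destroyed), weak = 1
//   drop(w)       -> weak = 0 (allocation freed)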
-
-impl<T: Share + Send> Clone for Arc<T> {
- /// Duplicate an atomically reference counted wrapper.
- ///
- /// The resulting two `Arc` objects will point to the same underlying data
- /// object. However, one of the `Arc` objects can be sent to another task,
- /// allowing them to share the underlying data.
- #[inline]
- fn clone(&self) -> Arc<T> {
- // Using a relaxed ordering is alright here, as knowledge of the
- // original reference prevents other threads from erroneously deleting
- // the object.
- //
- // As explained in the [Boost documentation][1], Increasing the
- // reference counter can always be done with memory_order_relaxed: New
- // references to an object can only be formed from an existing
- // reference, and passing an existing reference from one thread to
- // another must already provide any required synchronization.
- //
- // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- self.inner().strong.fetch_add(1, atomics::Relaxed);
- Arc { x: self.x }
- }
-}
-
-impl<T: Send + Share> Deref<T> for Arc<T> {
- #[inline]
- fn deref<'a>(&'a self) -> &'a T {
- &self.inner().data
- }
-}
-
-impl<T: Send + Share + Clone> Arc<T> {
- /// Acquires a mutable pointer to the inner contents by guaranteeing that
- /// the reference count is one (no sharing is possible).
- ///
- /// This is also referred to as a copy-on-write operation because the inner
- /// data is cloned if the reference count is greater than one.
- #[inline]
- #[experimental]
- pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
- if self.inner().strong.load(atomics::SeqCst) != 1 {
- *self = Arc::new(self.deref().clone())
- }
- // This unsafety is ok because we're guaranteed that the pointer
- // returned is the *only* pointer that will ever be returned to T. Our
- // reference count is guaranteed to be 1 at this point, and we required
- // the Arc itself to be `mut`, so we're returning the only possible
- // reference to the inner data.
- unsafe { mem::transmute::<&_, &mut _>(self.deref()) }
- }
-}
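// Editor's sketch of the copy-on-write behaviour above (not part of the patch;
// `make_unique_example` is hypothetical and mirrors the test_cowarc_* cases below):
fn make_unique_example() {
    let mut a = Arc::new(5u);
    let b = a.clone();          // the value is now shared, strong count = 2
    *a.make_unique() += 1;      // count > 1, so the data is cloned before mutation
    assert!(*a == 6 && *b == 5);
}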
-
-#[unsafe_destructor]
-impl<T: Share + Send> Drop for Arc<T> {
- fn drop(&mut self) {
- // This structure has #[unsafe_no_drop_flag], so this drop glue may run
- // more than once (but it is guaranteed to be zeroed after the first if
- // it's run more than once)
- if self.x.is_null() { return }
-
- // Because `fetch_sub` is already atomic, we do not need to synchronize
- // with other threads unless we are going to delete the object. This
- // same logic applies to the below `fetch_sub` to the `weak` count.
- if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }
-
- // This fence is needed to prevent reordering of use of the data and
- // deletion of the data. Because it is marked `Release`, the
-    // decreasing of the reference count synchronizes with this `Acquire`
- // fence. This means that use of the data happens before decreasing
-    // the reference count, which happens before this fence, which
- // happens before the deletion of the data.
- //
- // As explained in the [Boost documentation][1],
- //
- // It is important to enforce any possible access to the object in
- // one thread (through an existing reference) to *happen before*
- // deleting the object in a different thread. This is achieved by a
- // "release" operation after dropping a reference (any access to the
- // object through this reference must obviously happened before),
- // and an "acquire" operation before deleting the object.
- //
- // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- atomics::fence(atomics::Acquire);
-
- // Destroy the data at this time, even though we may not free the box
- // allocation itself (there may still be weak pointers lying around).
- unsafe { drop(ptr::read(&self.inner().data)); }
-
- if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
- atomics::fence(atomics::Acquire);
- unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
- min_align_of::<ArcInner<T>>()) }
- }
- }
-}
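// Editor's sketch (not part of the patch; `release_and_maybe_free` is a
// hypothetical helper): the release/acquire pairing used above, in isolation.
// The releasing decrement orders all prior uses of the data before the
// acquire fence, which in turn happens before the deallocation.
fn release_and_maybe_free(count: &atomics::AtomicUint) -> bool {
    if count.fetch_sub(1, atomics::Release) != 1 {
        return false;                   // other owners remain; nothing to free
    }
    atomics::fence(atomics::Acquire);   // pairs with every releasing decrement
    true                                // the caller may now destroy the data
}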
-
-impl<T: Share + Send> Weak<T> {
- /// Attempts to upgrade this weak reference to a strong reference.
- ///
- /// This method will fail to upgrade this reference if the strong reference
- /// count has already reached 0, but if there are still other active strong
- /// references this function will return a new strong reference to the data
- pub fn upgrade(&self) -> Option<Arc<T>> {
- // We use a CAS loop to increment the strong count instead of a
-        // fetch_add because once the count hits 0 it must never be above 0.
- let inner = self.inner();
- loop {
- let n = inner.strong.load(atomics::SeqCst);
- if n == 0 { return None }
- let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
- if old == n { return Some(Arc { x: self.x }) }
- }
- }
-
- #[inline]
- fn inner<'a>(&'a self) -> &'a ArcInner<T> {
- // See comments above for why this is "safe"
- unsafe { &*self.x }
- }
-}
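// Editor's note on the CAS loop above (illustrative interleaving, not in the
// patch): a plain fetch_add would be wrong because
//   thread A: drops the last Arc, strong 1 -> 0, begins destroying the data
//   thread B: upgrade() with fetch_add would bump strong 0 -> 1 and hand out
//             an Arc pointing at already-destroyed data
// Re-checking for a non-zero count before every compare_and_swap makes that
// 0 -> 1 transition impossible.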
-
-impl<T: Share + Send> Clone for Weak<T> {
- #[inline]
- fn clone(&self) -> Weak<T> {
- // See comments in Arc::clone() for why this is relaxed
- self.inner().weak.fetch_add(1, atomics::Relaxed);
- Weak { x: self.x }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Share + Send> Drop for Weak<T> {
- fn drop(&mut self) {
- // see comments above for why this check is here
- if self.x.is_null() { return }
-
-        // If we find out that we were the last weak pointer, then it's time to
- // deallocate the data entirely. See the discussion in Arc::drop() about
- // the memory orderings
- if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
- atomics::fence(atomics::Acquire);
- unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
- min_align_of::<ArcInner<T>>()) }
- }
- }
-}
-
-#[cfg(test)]
-#[allow(experimental)]
-mod tests {
- use super::{Arc, Weak};
- use std::sync::atomics;
- use std::task;
- use Mutex;
-
- struct Canary(*mut atomics::AtomicUint);
-
-    impl Drop for Canary {
- fn drop(&mut self) {
- unsafe {
- match *self {
- Canary(c) => {
- (*c).fetch_add(1, atomics::SeqCst);
- }
- }
- }
- }
- }
-
- #[test]
- fn manually_share_arc() {
- let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
- let arc_v = Arc::new(v);
-
- let (tx, rx) = channel();
-
- task::spawn(proc() {
- let arc_v: Arc<Vec<int>> = rx.recv();
- assert_eq!(*arc_v.get(3), 4);
- });
-
- tx.send(arc_v.clone());
-
- assert_eq!(*arc_v.get(2), 3);
- assert_eq!(*arc_v.get(4), 5);
-
- info!("{:?}", arc_v);
- }
-
- #[test]
- fn test_cowarc_clone_make_unique() {
- let mut cow0 = Arc::new(75u);
- let mut cow1 = cow0.clone();
- let mut cow2 = cow1.clone();
-
- assert!(75 == *cow0.make_unique());
- assert!(75 == *cow1.make_unique());
- assert!(75 == *cow2.make_unique());
-
- *cow0.make_unique() += 1;
- *cow1.make_unique() += 2;
- *cow2.make_unique() += 3;
-
- assert!(76 == *cow0);
- assert!(77 == *cow1);
- assert!(78 == *cow2);
-
- // none should point to the same backing memory
- assert!(*cow0 != *cow1);
- assert!(*cow0 != *cow2);
- assert!(*cow1 != *cow2);
- }
-
- #[test]
- fn test_cowarc_clone_unique2() {
- let mut cow0 = Arc::new(75u);
- let cow1 = cow0.clone();
- let cow2 = cow1.clone();
-
- assert!(75 == *cow0);
- assert!(75 == *cow1);
- assert!(75 == *cow2);
-
- *cow0.make_unique() += 1;
-
- assert!(76 == *cow0);
- assert!(75 == *cow1);
- assert!(75 == *cow2);
-
- // cow1 and cow2 should share the same contents
- // cow0 should have a unique reference
- assert!(*cow0 != *cow1);
- assert!(*cow0 != *cow2);
- assert!(*cow1 == *cow2);
- }
-
- #[test]
- fn test_live() {
- let x = Arc::new(5);
- let y = x.downgrade();
- assert!(y.upgrade().is_some());
- }
-
- #[test]
- fn test_dead() {
- let x = Arc::new(5);
- let y = x.downgrade();
- drop(x);
- assert!(y.upgrade().is_none());
- }
-
- #[test]
- fn weak_self_cyclic() {
- struct Cycle {
- x: Mutex<Option<Weak<Cycle>>>
- }
-
- let a = Arc::new(Cycle { x: Mutex::new(None) });
- let b = a.clone().downgrade();
- *a.deref().x.lock().deref_mut() = Some(b);
-
- // hopefully we don't double-free (or leak)...
- }
-
- #[test]
- fn drop_arc() {
- let mut canary = atomics::AtomicUint::new(0);
- let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
- drop(x);
- assert!(canary.load(atomics::Acquire) == 1);
- }
-
- #[test]
- fn drop_arc_weak() {
- let mut canary = atomics::AtomicUint::new(0);
- let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
- let arc_weak = arc.downgrade();
- assert!(canary.load(atomics::Acquire) == 0);
- drop(arc);
- assert!(canary.load(atomics::Acquire) == 1);
- drop(arc_weak);
- }
-}
#[cfg(test)]
#[phase(syntax, link)] extern crate log;
+extern crate alloc;
+
pub use comm::{DuplexStream, duplex};
pub use task_pool::TaskPool;
pub use future::Future;
-pub use arc::{Arc, Weak};
+pub use alloc::arc::{Arc, Weak};
pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
RWLock, RWLockReadGuard, RWLockWriteGuard};
// The mutex/rwlock in this module are not meant for reexport
pub use raw::{Semaphore, SemaphoreGuard};
-mod arc;
mod comm;
mod future;
mod lock;
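// Editor's sketch (not part of the patch): with the `pub use alloc::arc`
// re-export above, downstream users of libsync keep compiling unchanged.
extern crate sync;

use sync::Arc;

fn main() {
    let a = Arc::new(3u);
    let b = a.clone();
    assert_eq!(*a, *b);
}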
use std::task;
use std::task::TaskBuilder;
- use arc::Arc;
+ use Arc;
use super::{Mutex, Barrier, RWLock};
#[test]
#[cfg(test)]
mod tests {
- use arc::Arc;
+ use Arc;
use super::{Semaphore, Mutex, RWLock, Condvar};
use std::mem;
fn main() {
let x = Rc::new(5);
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `std::rc::Rc<int>`,
+ //~^ ERROR instantiating a type parameter with an incompatible type `alloc::rc::Rc<int>`,
// which does not fulfill `Send`
}