//!
//! Sharing some immutable data between tasks:
//!
-//! ```
+//! ```no_run
//! use std::sync::Arc;
//! use std::thread;
//!
//!
//! Sharing mutable data safely between tasks with a `Mutex`:
//!
-//! ```
+//! ```no_run
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
use core::atomic;
use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::fmt;
-use core::cmp::{Ordering};
+use core::cmp::Ordering;
use core::default::Default;
use core::mem::{min_align_of, size_of};
use core::mem;
/// An atomically reference counted wrapper for shared state.
///
-/// # Example
+/// # Examples
///
/// In this example, a large vector of floats is shared between several tasks.
/// With simple pipes, without `Arc`, a copy would have to be made for each
/// task.
///
-/// ```rust
+/// When you clone an `Arc<T>`, it will create another pointer to the data
+/// and increase the reference count.
+///
+/// ```
+/// # #![feature(alloc, core)]
/// use std::sync::Arc;
/// use std::thread;
///
/// let child_numbers = shared_numbers.clone();
///
/// thread::spawn(move || {
-/// let local_numbers = child_numbers.as_slice();
+/// let local_numbers = &child_numbers[..];
///
/// // Work with the local numbers
/// });
/// A weak pointer to an `Arc`.
///
-/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles
-/// between `Arc` pointers.
+/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
+/// used to break cycles between `Arc` pointers.
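+///
+/// For example (a minimal sketch using the `downgrade` and `upgrade`
+/// methods defined below), a weak pointer on its own does not keep the
+/// value alive:
+///
+/// ```
+/// # #![feature(alloc)]
+/// use std::sync::Arc;
+///
+/// let five = Arc::new(5);
+/// let weak_five = five.downgrade();
+///
+/// // Dropping the last strong reference destroys the value, so the
+/// // weak pointer can no longer be upgraded.
+/// drop(five);
+/// assert!(weak_five.upgrade().is_none());
+/// ```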
#[unsafe_no_drop_flag]
#[unstable(feature = "alloc",
reason = "Weak pointers may not belong in this module.")]
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
// contents.
unsafe { &**self._ptr }
}
+
+ // Non-inlined part of `drop`.
+ #[inline(never)]
+ unsafe fn drop_slow(&mut self) {
+ let ptr = *self._ptr;
+
+ // Destroy the data at this time, even though we may not free the box
+ // allocation itself (there may still be weak pointers lying around).
+ drop(ptr::read(&self.inner().data));
+
+ if self.inner().weak.fetch_sub(1, Release) == 1 {
+ atomic::fence(Acquire);
+ deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
+            min_align_of::<ArcInner<T>>())
+ }
+ }
}
/// Get the number of strong references to this value.
#[unstable(feature = "alloc")]
pub fn strong_count<T>(this: &Arc<T>) -> usize { this.inner().strong.load(SeqCst) }
+
+/// Try accessing a mutable reference to the contents behind a unique `Arc<T>`.
+///
+/// The access is granted only if this is the only reference to the object.
+/// Otherwise, `None` is returned.
+///
+/// # Examples
+///
+/// ```
+/// # #![feature(alloc)]
+/// extern crate alloc;
+/// # fn main() {
+/// use alloc::arc;
+///
+/// let mut four = arc::Arc::new(4);
+///
+/// arc::unique(&mut four).map(|num| *num = 5);
+/// # }
+/// ```
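+///
+/// Compare `Arc::make_unique` below, which clones the contents instead
+/// of returning `None` when the reference is shared.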
+#[inline]
+#[unstable(feature = "alloc")]
+pub fn unique<T>(this: &mut Arc<T>) -> Option<&mut T> {
+ if strong_count(this) == 1 && weak_count(this) == 0 {
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ let inner = unsafe { &mut **this._ptr };
+ Some(&mut inner.data)
+ } else {
+ None
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Arc<T> {
/// Makes a clone of the `Arc<T>`.
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
impl<T: Clone> Arc<T> {
/// Make a mutable reference from the given `Arc<T>`.
///
- /// This is also referred to as a copy-on-write operation because the inner data is cloned if
- /// the reference count is greater than one.
+ /// This is also referred to as a copy-on-write operation because the inner
+ /// data is cloned if the reference count is greater than one.
///
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let mut five = Arc::new(5);
#[inline]
#[unstable(feature = "alloc")]
pub fn make_unique(&mut self) -> &mut T {
- // Note that we hold a strong reference, which also counts as a weak reference, so we only
- // clone if there is an additional reference of either kind.
+ // Note that we hold a strong reference, which also counts as a weak
+ // reference, so we only clone if there is an additional reference of
+ // either kind.
if self.inner().strong.load(SeqCst) != 1 ||
self.inner().weak.load(SeqCst) != 1 {
*self = Arc::new((**self).clone())
}
- // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
- // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
- // this point, and we required the Arc itself to be `mut`, so we're returning the only
- // possible reference to the inner data.
+ // As with `unique()`, the unsafety is ok because our reference was
+ // either unique to begin with, or became one upon cloning the contents.
let inner = unsafe { &mut **self._ptr };
&mut inner.data
}
impl<T> Drop for Arc<T> {
/// Drops the `Arc<T>`.
///
- /// This will decrement the strong reference count. If the strong reference count becomes zero
- /// and the only other references are `Weak<T>` ones, `drop`s the inner value.
+ /// This will decrement the strong reference count. If the strong reference
+ /// count becomes zero and the only other references are `Weak<T>` ones,
+ /// `drop`s the inner value.
///
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// {
///
/// } // implicit drop
/// ```
+ #[inline]
fn drop(&mut self) {
- // This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
- // it is guaranteed to be zeroed after the first if it's run more than once)
+ // This structure has #[unsafe_no_drop_flag], so this drop glue may run
+ // more than once (but it is guaranteed to be zeroed after the first if
+ // it's run more than once)
let ptr = *self._ptr;
- if ptr.is_null() { return }
+ if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
- // Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
- // unless we are going to delete the object. This same logic applies to the below
- // `fetch_sub` to the `weak` count.
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object. This
+ // same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().strong.fetch_sub(1, Release) != 1 { return }
- // This fence is needed to prevent reordering of use of the data and deletion of the data.
- // Because it is marked `Release`, the decreasing of the reference count synchronizes with
- // this `Acquire` fence. This means that use of the data happens before decreasing the
- // reference count, which happens before this fence, which happens before the deletion of
- // the data.
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
//
// As explained in the [Boost documentation][1],
//
- // > It is important to enforce any possible access to the object in one thread (through an
- // > existing reference) to *happen before* deleting the object in a different thread. This
- // > is achieved by a "release" operation after dropping a reference (any access to the
- // > object through this reference must obviously happened before), and an "acquire"
- // > operation before deleting the object.
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happened before), and an
+ // > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
- // Destroy the data at this time, even though we may not free the box allocation itself
- // (there may still be weak pointers lying around).
- unsafe { drop(ptr::read(&self.inner().data)); }
-
- if self.inner().weak.fetch_sub(1, Release) == 1 {
- atomic::fence(Acquire);
- unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
- min_align_of::<ArcInner<T>>()) }
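+ // The `Acquire` fence above pairs with every `Release` decrement of
+ // the strong count, so all accesses to the data made through other
+ // references happen before `drop_slow` destroys it below.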
+ unsafe {
+ self.drop_slow()
}
}
}
///
/// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
///
- /// Returns `None` if there were no strong references and the data was destroyed.
+ /// Returns `None` if there were no strong references and the data was
+ /// destroyed.
///
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let strong_five: Option<Arc<_>> = weak_five.upgrade();
/// ```
pub fn upgrade(&self) -> Option<Arc<T>> {
- // We use a CAS loop to increment the strong count instead of a fetch_add because once the
- // count hits 0 is must never be above 0.
+ // We use a CAS loop to increment the strong count instead of a
+ // fetch_add because once the count hits 0 it must never be above 0.
let inner = self.inner();
loop {
let n = inner.strong.load(SeqCst);
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// let weak_five = Arc::new(5).downgrade();
/// # Examples
///
/// ```
+ /// # #![feature(alloc)]
/// use std::sync::Arc;
///
/// {
let ptr = *self._ptr;
// see comments above for why this check is here
- if ptr.is_null() { return }
+ if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }
- // If we find out that we were the last weak pointer, then its time to deallocate the data
- // entirely. See the discussion in Arc::drop() about the memory orderings
+ // If we find out that we were the last weak pointer, then it's time
+ // to deallocate the data entirely. See the discussion in Arc::drop()
+ // about the memory orderings.
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
use std::sync::atomic::Ordering::{Acquire, SeqCst};
use std::thread;
use std::vec::Vec;
- use super::{Arc, Weak, weak_count, strong_count};
+ use super::{Arc, Weak, weak_count, strong_count, unique};
use std::sync::Mutex;
struct Canary(*mut atomic::AtomicUsize);
assert_eq!((*arc_v)[4], 5);
}
+ #[test]
+ fn test_arc_unique() {
+ let mut x = Arc::new(10);
+ assert!(unique(&mut x).is_some());
+ {
+ let _y = x.clone();
+ assert!(unique(&mut x).is_none());
+ }
+ {
+ let _z = x.downgrade();
+ assert!(unique(&mut x).is_none());
+ }
+ assert!(unique(&mut x).is_some());
+ }
+
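+ // Sanity sketch: `strong_count` and `weak_count` should track clones
+ // and downgrades of a single `Arc`.
+ #[test]
+ fn test_strong_and_weak_counts() {
+ let a = Arc::new(5);
+ assert_eq!(strong_count(&a), 1);
+ assert_eq!(weak_count(&a), 0);
+
+ let b = a.clone();
+ let w = a.downgrade();
+ assert_eq!(strong_count(&a), 2);
+ assert_eq!(weak_count(&a), 1);
+
+ drop(b);
+ drop(w);
+ assert_eq!(strong_count(&a), 1);
+ assert_eq!(weak_count(&a), 0);
+ }
+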
#[test]
fn test_cowarc_clone_make_unique() {
let mut cow0 = Arc::new(75);