//! let five = five.clone();
//!
//! Thread::spawn(move || {
-//! let mut number = five.lock();
+//! let mut number = five.lock().unwrap();
//!
//! *number += 1;
//!
//! ```
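//!
//! Note that `five.clone()` in the example above clones the `Arc` handle
//! (bumping a reference count), not the `Mutex` or the integer inside it;
//! every spawned thread increments the same shared value.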
use core::atomic;
+use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::borrow::BorrowFrom;
use core::clone::Clone;
use core::fmt::{mod, Show};
use core::ops::{Drop, Deref};
use core::option::Option;
use core::option::Option::{Some, None};
-use core::ptr::{mod, RawPtr};
+use core::ptr::{mod, PtrExt};
use heap::deallocate;
/// An atomically reference counted wrapper for shared state.
/// use std::thread::Thread;
///
/// fn main() {
-/// let numbers = Vec::from_fn(100, |i| i as f32);
+/// let numbers: Vec<_> = range(0, 100u32).map(|i| i as f32).collect();
/// let shared_numbers = Arc::new(numbers);
///
/// for _ in range(0u, 10) {
#[experimental = "Weak pointers may not belong in this module."]
pub fn downgrade(&self) -> Weak<T> {
// See the clone() impl for why this is relaxed
- self.inner().weak.fetch_add(1, atomic::Relaxed);
+ self.inner().weak.fetch_add(1, Relaxed);
Weak { _ptr: self._ptr }
}
}
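// A minimal usage sketch for `downgrade` (using only items from this module):
//
//     let strong = Arc::new(5i);
//     let weak = strong.downgrade();
//     // A weak pointer does not keep the value alive by itself; it has to be
//     // upgraded back to a strong `Arc` before the data can be used.
//     assert!(weak.upgrade().is_some());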
/// Get the number of weak references to this value.
#[inline]
#[experimental]
-pub fn weak_count<T>(this: &Arc<T>) -> uint { this.inner().weak.load(atomic::SeqCst) - 1 }
+pub fn weak_count<T>(this: &Arc<T>) -> uint { this.inner().weak.load(SeqCst) - 1 }
/// Get the number of strong references to this value.
#[inline]
#[experimental]
-pub fn strong_count<T>(this: &Arc<T>) -> uint { this.inner().strong.load(atomic::SeqCst) }
+pub fn strong_count<T>(this: &Arc<T>) -> uint { this.inner().strong.load(SeqCst) }
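// For example (a sketch, assuming no other threads are mutating the counts):
//
//     let five = Arc::new(5i);
//     assert_eq!(strong_count(&five), 1);
//     let also_five = five.clone();
//     assert_eq!(strong_count(&five), 2);
//     let weak_five = five.downgrade();
//     assert_eq!(weak_count(&five), 1);  // the implicit weak is not counted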
#[stable]
impl<T> Clone for Arc<T> {
// must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- self.inner().strong.fetch_add(1, atomic::Relaxed);
+ self.inner().strong.fetch_add(1, Relaxed);
Arc { _ptr: self._ptr }
}
}
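// Cloning an `Arc` copies the pointer and bumps the strong count; it never
// deep-copies `T`. A quick sketch:
//
//     let numbers = Arc::new(vec![1i, 2, 3]);
//     let other = numbers.clone();           // the vector is not duplicated
//     assert_eq!(strong_count(&numbers), 2);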
pub fn make_unique(&mut self) -> &mut T {
// Note that we hold a strong reference, which also counts as a weak reference, so we only
// clone if there is an additional reference of either kind.
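// E.g. a freshly created `Arc` has strong == 1 and weak == 1 (the strong
// count collectively holds one implicit weak reference), so both checks
// below see 1 and no clone is made.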
- if self.inner().strong.load(atomic::SeqCst) != 1 ||
- self.inner().weak.load(atomic::SeqCst) != 1 {
+ if self.inner().strong.load(SeqCst) != 1 ||
+ self.inner().weak.load(SeqCst) != 1 {
*self = Arc::new((**self).clone())
}
// This unsafety is ok because we're guaranteed that the pointer returned is the *only*
// Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
// unless we are going to delete the object. This same logic applies to the below
// `fetch_sub` to the `weak` count.
- if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return }
+ if self.inner().strong.fetch_sub(1, Release) != 1 { return }
// This fence is needed to prevent reordering of use of the data and deletion of the data.
// Because it is marked `Release`, the decreasing of the reference count synchronizes with
// > operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- atomic::fence(atomic::Acquire);
+ atomic::fence(Acquire);
// Destroy the data at this time, even though we may not free the box allocation itself
// (there may still be weak pointers lying around).
unsafe { drop(ptr::read(&self.inner().data)); }
- if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
- atomic::fence(atomic::Acquire);
+ if self.inner().weak.fetch_sub(1, Release) == 1 {
+ atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
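// The decrement-then-fence pattern above, in isolation (a sketch of the same
// idea, not code from this module):
//
//     if count.fetch_sub(1, Release) == 1 {
//         // Acquire pairs with every earlier Release decrement, so all use
//         // of the data happens-before the destruction below.
//         atomic::fence(Acquire);
//         /* last reference: destroy */
//     }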
// count hits 0 it must never be above 0.
let inner = self.inner();
loop {
- let n = inner.strong.load(atomic::SeqCst);
+ let n = inner.strong.load(SeqCst);
if n == 0 { return None }
- let old = inner.strong.compare_and_swap(n, n + 1, atomic::SeqCst);
+ let old = inner.strong.compare_and_swap(n, n + 1, SeqCst);
if old == n { return Some(Arc { _ptr: self._ptr }) }
}
}
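// Why a CAS loop rather than `fetch_add`? A plain `fetch_add` could observe
// strong == 0 (the value already destroyed) and still bump the count back to
// 1, handing out an `Arc` to freed data. The loop re-reads the count on every
// iteration and bails out with `None` as soon as it sees 0.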
#[inline]
fn clone(&self) -> Weak<T> {
// See comments in Arc::clone() for why this is relaxed
- self.inner().weak.fetch_add(1, atomic::Relaxed);
+ self.inner().weak.fetch_add(1, Relaxed);
Weak { _ptr: self._ptr }
}
}
// If we find out that we were the last weak pointer, then it's time to deallocate the data
// entirely. See the discussion in Arc::drop() about the memory orderings
- if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
- atomic::fence(atomic::Acquire);
+ if self.inner().weak.fetch_sub(1, Release) == 1 {
+ atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
}
-#[unstable = "waiting on PartialEq"]
+#[stable]
impl<T: PartialEq> PartialEq for Arc<T> {
/// Equality for two `Arc<T>`s.
///
/// ```
fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
}
-#[unstable = "waiting on PartialOrd"]
+#[stable]
impl<T: PartialOrd> PartialOrd for Arc<T> {
/// Partial comparison for two `Arc<T>`s.
///
/// ```
fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
}
-#[unstable = "waiting on Ord"]
+#[stable]
impl<T: Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) }
}
-#[unstable = "waiting on Eq"]
+#[stable]
impl<T: Eq> Eq for Arc<T> {}
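// All of the comparison impls above delegate to the pointed-to values, e.g.
// (a sketch):
//
//     let five = Arc::new(5i);
//     assert!(five == Arc::new(5i));  // PartialEq on the inner ints
//     assert!(five < Arc::new(6i));   // PartialOrd likewise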
impl<T: fmt::Show> fmt::Show for Arc<T> {
use std::option::Option::{Some, None};
use std::str::Str;
use std::sync::atomic;
+ use std::sync::atomic::Ordering::{Acquire, SeqCst};
use std::task;
use std::kinds::Send;
use std::vec::Vec;
unsafe {
match *self {
Canary(c) => {
- (*c).fetch_add(1, atomic::SeqCst);
+ (*c).fetch_add(1, SeqCst);
}
}
}
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = a.clone().downgrade();
- *a.x.lock() = Some(b);
+ *a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
}
let mut canary = atomic::AtomicUint::new(0);
let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
drop(x);
- assert!(canary.load(atomic::Acquire) == 1);
+ assert!(canary.load(Acquire) == 1);
}
#[test]
let mut canary = atomic::AtomicUint::new(0);
let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
let arc_weak = arc.downgrade();
- assert!(canary.load(atomic::Acquire) == 0);
+ assert!(canary.load(Acquire) == 0);
drop(arc);
- assert!(canary.load(atomic::Acquire) == 1);
+ assert!(canary.load(Acquire) == 1);
drop(arc_weak);
}
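// A companion sketch, not part of the original suite: upgrading a weak
// pointer after the last strong reference has been dropped must yield `None`.
#[test]
fn sketch_weak_upgrade_after_drop() {
    let strong = Arc::new(5i);
    let weak = strong.downgrade();
    drop(strong);                      // the shared value is destroyed here
    assert!(weak.upgrade().is_none()); // so the weak pointer cannot upgrade
}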