// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Threadsafe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value. Destruction is
//! deterministic, and will occur as soon as the last owner is gone. It is marked as `Send` because
//! it uses atomic reference counting.
//!
//! If you do not need thread-safety, and just need shared ownership, consider the [`Rc<T>`
//! type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but does not use atomics, making it
//! both thread-unsafe as well as significantly faster when updating the reference count.
//!
//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer to the box. A
//! `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but will return `None` if the value
//! has already been dropped.
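//!
//! A minimal sketch of that behavior (illustrative only):
//!
//! ```
//! use std::sync::Arc;
//!
//! let strong = Arc::new(5i);
//! let weak = strong.downgrade();
//! assert!(weak.upgrade().is_some()); // the value is still alive
//!
//! drop(strong);
//! assert!(weak.upgrade().is_none()); // the value has been dropped
//! ```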
//!
//! For example, a tree with parent pointers can be represented by putting the nodes behind strong
//! `Arc<T>` pointers, and then storing the parent pointers as `Weak<T>` pointers, as sketched
//! below.
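//!
//! A sketch of that layout, assuming illustrative `Parent` and `Child` types (they are not part
//! of this module):
//!
//! ```
//! use std::sync::{Arc, Weak};
//!
//! struct Parent;
//!
//! struct Child {
//!     parent: Weak<Parent>,
//! }
//!
//! let parent = Arc::new(Parent);
//! let child = Child { parent: parent.downgrade() };
//!
//! // The weak parent pointer does not keep `parent` alive by itself, but it can be
//! // upgraded to a strong pointer while another owner still exists.
//! assert!(child.parent.upgrade().is_some());
//! ```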
//!
//! # Examples
//!
//! Sharing some immutable data between tasks:
//!
//! ```
//! use std::sync::Arc;
//! use std::thread::Thread;
//!
//! let five = Arc::new(5i);
//!
//! for _ in range(0u, 10) {
//!     let five = five.clone();
//!
//!     Thread::spawn(move || {
//!         println!("{}", five);
//!     });
//! }
//! ```
//!
//! Sharing mutable data safely between tasks with a `Mutex`:
//!
//! ```
//! use std::sync::{Arc, Mutex};
//! use std::thread::Thread;
//!
//! let five = Arc::new(Mutex::new(5i));
//!
//! for _ in range(0u, 10) {
//!     let five = five.clone();
//!
//!     Thread::spawn(move || {
//!         let mut number = five.lock().unwrap();
//!
//!         *number += 1;
//!
//!         println!("{}", *number); // prints 6
//!     });
//! }
//! ```

use core::atomic;
use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::borrow::BorrowFrom;
use core::clone::Clone;
use core::fmt::{self, Show};
use core::cmp::{Eq, Ord, PartialEq, PartialOrd, Ordering};
use core::default::Default;
use core::kinds::{Sync, Send};
use core::mem::{min_align_of, size_of, drop};
use core::mem;
use core::nonzero::NonZero;
use core::ops::{Drop, Deref};
use core::option::Option;
use core::option::Option::{Some, None};
use core::ptr::{self, PtrExt};
use heap::deallocate;

/// An atomically reference counted wrapper for shared state.
///
/// # Example
///
/// In this example, a large vector of floats is shared between several tasks. With simple pipes,
/// without `Arc`, a copy would have to be made for each task.
///
/// ```rust
/// use std::sync::Arc;
/// use std::thread::Thread;
///
/// fn main() {
///     let numbers: Vec<_> = range(0, 100u32).map(|i| i as f32).collect();
///     let shared_numbers = Arc::new(numbers);
///
///     for _ in range(0u, 10) {
///         let child_numbers = shared_numbers.clone();
///
///         Thread::spawn(move || {
///             let local_numbers = child_numbers.as_slice();
///
///             // Work with the local numbers
///         });
///     }
/// }
/// ```
#[unsafe_no_drop_flag]
pub struct Arc<T> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: Sync + Send> Send for Arc<T> { }
unsafe impl<T: Sync + Send> Sync for Arc<T> { }

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles
/// between `Arc` pointers.
#[unsafe_no_drop_flag]
#[experimental = "Weak pointers may not belong in this module."]
pub struct Weak<T> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: Sync + Send> Send for Weak<T> { }
unsafe impl<T: Sync + Send> Sync for Weak<T> { }

// The shared allocation behind both `Arc<T>` and `Weak<T>`: two reference counts plus the data.
struct ArcInner<T> {
    strong: atomic::AtomicUint,
    weak: atomic::AtomicUint,
    data: T,
}

unsafe impl<T: Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    /// ```
    #[inline]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = box ArcInner {
            strong: atomic::AtomicUint::new(1),
            weak: atomic::AtomicUint::new(1),
            data: data,
        };
        Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } }
    }

    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// let weak_five = five.downgrade();
    /// ```
    #[experimental = "Weak pointers may not belong in this module."]
    pub fn downgrade(&self) -> Weak<T> {
        // See the clone() impl for why this is relaxed
        self.inner().weak.fetch_add(1, Relaxed);
        Weak { _ptr: self._ptr }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed that the inner
        // pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync`
        // because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer
        // to these contents.
        unsafe { &**self._ptr }
    }
}

/// Get the number of weak references to this value. Note that this does not count the implicit
/// weak reference held collectively by the strong pointers (hence the `- 1`).
#[experimental]
pub fn weak_count<T>(this: &Arc<T>) -> uint { this.inner().weak.load(SeqCst) - 1 }

/// Get the number of strong references to this value.
#[experimental]
pub fn strong_count<T>(this: &Arc<T>) -> uint { this.inner().strong.load(SeqCst) }

impl<T> Clone for Arc<T> {
    /// Makes a clone of the `Arc<T>`.
    ///
    /// This increases the strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five.clone();
    /// ```
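    ///
    /// Both handles then point at the same allocation, so dropping one does not invalidate the
    /// other. A small illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    /// let also_five = five.clone();
    ///
    /// drop(five);
    /// assert!(*also_five == 5); // the value is kept alive by the remaining clone
    /// ```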
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the original reference
        // prevents other threads from erroneously deleting the object.
        //
        // As explained in the [Boost documentation][1], increasing the reference counter can
        // always be done with memory_order_relaxed: new references to an object can only be formed
        // from an existing reference, and passing an existing reference from one thread to another
        // must already provide any required synchronization.
        //
        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        self.inner().strong.fetch_add(1, Relaxed);
        Arc { _ptr: self._ptr }
    }
}

impl<T> BorrowFrom<Arc<T>> for T {
    fn borrow_from(owned: &Arc<T>) -> &T {
        // Borrowing an `Arc<T>` as a `T` is just a deref of the shared value.
        &**owned
    }
}

#[experimental = "Deref is experimental."]
impl<T> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Send + Sync + Clone> Arc<T> {
    /// Make a mutable reference from the given `Arc<T>`.
    ///
    /// This is also referred to as a copy-on-write operation because the inner data is cloned if
    /// the reference count is greater than one.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::new(5i);
    ///
    /// let mut_five = five.make_unique();
    /// ```
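    ///
    /// A sketch of the copy-on-write behavior (mirroring this module's `test_cowarc_clone_unique2`
    /// test): once another handle exists, `make_unique` clones the data rather than mutating the
    /// shared value in place.
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut cow = Arc::new(75u);
    /// let other = cow.clone();
    ///
    /// *cow.make_unique() += 1;
    ///
    /// assert!(*cow == 76);   // the mutated copy
    /// assert!(*other == 75); // the original shared value is untouched
    /// ```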
    #[inline]
    pub fn make_unique(&mut self) -> &mut T {
        // Note that we hold a strong reference, which also counts as a weak reference, so we only
        // clone if there is an additional reference of either kind.
        if self.inner().strong.load(SeqCst) != 1 ||
           self.inner().weak.load(SeqCst) != 1 {
            *self = Arc::new((**self).clone())
        }
        // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
        // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
        // this point, and we required the Arc itself to be `mut`, so we're returning the only
        // possible reference to the inner data.
        let inner = unsafe { &mut **self._ptr };
        &mut inner.data
    }
}

#[unsafe_destructor]
#[experimental = "waiting on stability of Drop"]
impl<T: Sync + Send> Drop for Arc<T> {
    /// Drops the `Arc<T>`.
    ///
    /// This will decrement the strong reference count. If the strong reference count becomes zero
    /// and the only other references are `Weak<T>` ones, `drop`s the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5i);
    ///
    ///     // stuff
    ///
    ///     drop(five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5i);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
        // it is guaranteed to be zeroed after the first if it's run more than once)
        let ptr = *self._ptr;
        if ptr.is_null() { return }

        // Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
        // unless we are going to delete the object. This same logic applies to the below
        // `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and deletion of the data.
        // Because the decrement is marked `Release`, it synchronizes with this `Acquire` fence.
        // This means that use of the data happens before decreasing the reference count, which
        // happens before this fence, which happens before the deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one thread (through an
        // > existing reference) to *happen before* deleting the object in a different thread. This
        // > is achieved by a "release" operation after dropping a reference (any access to the
        // > object through this reference must obviously happened before), and an "acquire"
        // > operation before deleting the object.
        //
        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        atomic::fence(Acquire);

        // Destroy the data at this time, even though we may not free the box allocation itself
        // (there may still be weak pointers lying around).
        unsafe { drop(ptr::read(&self.inner().data)); }

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
                                min_align_of::<ArcInner<T>>()) }
        }
    }
}

#[experimental = "Weak pointers may not belong in this module."]
impl<T: Sync + Send> Weak<T> {
    /// Upgrades a weak reference to a strong reference.
    ///
    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
    ///
    /// Returns `None` if there were no strong references and the data was destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// let weak_five = five.downgrade();
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// ```
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a fetch_add because once the
        // count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            let n = inner.strong.load(SeqCst);
            if n == 0 { return None }
            let old = inner.strong.compare_and_swap(n, n + 1, SeqCst);
            if old == n { return Some(Arc { _ptr: self._ptr }) }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &**self._ptr }
    }
}

#[experimental = "Weak pointers may not belong in this module."]
impl<T: Sync + Send> Clone for Weak<T> {
    /// Makes a clone of the `Weak<T>`.
    ///
    /// This increases the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let weak_five = Arc::new(5i).downgrade();
    ///
    /// weak_five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed
        self.inner().weak.fetch_add(1, Relaxed);
        Weak { _ptr: self._ptr }
    }
}

#[unsafe_destructor]
#[experimental = "Weak pointers may not belong in this module."]
impl<T: Sync + Send> Drop for Weak<T> {
    /// Drops the `Weak<T>`.
    ///
    /// This will decrement the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5i);
    ///     let weak_five = five.downgrade();
    ///
    ///     // stuff
    ///
    ///     drop(weak_five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5i);
    ///     let weak_five = five.downgrade();
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        let ptr = *self._ptr;

        // see comments above for why this check is here
        if ptr.is_null() { return }

        // If we find out that we were the last weak pointer, then it's time to deallocate the
        // data entirely. See the discussion in Arc::drop() about the memory orderings
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
                                min_align_of::<ArcInner<T>>()) }
        }
    }
}

impl<T: PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five == Arc::new(5i);
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) }

    /// Inequality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five != Arc::new(5i);
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
}

impl<T: PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five.partial_cmp(&Arc::new(5i));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five < Arc::new(5i);
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) }

    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five <= Arc::new(5i);
    /// ```
    fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) }

    /// Greater-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five > Arc::new(5i);
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) }

    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5i);
    ///
    /// five >= Arc::new(5i);
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
}

impl<T: Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) }
}

impl<T: Eq> Eq for Arc<T> {}

impl<T: fmt::Show> fmt::Show for Arc<T> {
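    /// Formats the inner value with its `Show` implementation. A small illustration (mirroring
    /// the `show_arc` test below):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5u32);
    /// assert!(format!("{}", five) == "5");
    /// ```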
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        (**self).fmt(f)
    }
}

impl<T: Default + Sync + Send> Default for Arc<T> {
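    /// Creates a new `Arc<T>` around `T`'s default value. A brief sketch (illustrative only):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let zero: Arc<uint> = Default::default();
    /// assert!(*zero == 0);
    /// ```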
    fn default() -> Arc<T> { Arc::new(Default::default()) }
}

#[cfg(test)]
#[allow(experimental)]
mod tests {
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{Some, None};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread::Thread;
    use std::vec::Vec;

    use super::{Arc, Weak, weak_count, strong_count};
    use std::sync::Mutex;

    struct Canary(*mut atomic::AtomicUint);

    // Increment the pointed-to counter when dropped, so tests can observe destruction.
    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec!(1i, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = Thread::spawn(move || {
            let arc_v: Arc<Vec<int>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_cowarc_clone_make_unique() {
        let mut cow0 = Arc::new(75u);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *cow0.make_unique());
        assert!(75 == *cow1.make_unique());
        assert!(75 == *cow2.make_unique());

        *cow0.make_unique() += 1;
        *cow1.make_unique() += 2;
        *cow2.make_unique() += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75u);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *cow0.make_unique() += 1;

        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75u);
        let cow1_weak = cow0.downgrade();

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *cow0.make_unique() += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5i);
        let y = x.downgrade();
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5i);
        let y = x.downgrade();
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = a.clone().downgrade();
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUint::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUint::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
        let arc_weak = arc.downgrade();
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0u32);
        assert!(strong_count(&a) == 1);
        let w = a.downgrade();
        assert!(strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(strong_count(&b) == 2);
        assert!(strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(strong_count(&b) == 1);
        let c = b.clone();
        assert!(strong_count(&b) == 2);
        assert!(strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0u32);
        assert!(strong_count(&a) == 1);
        assert!(weak_count(&a) == 0);
        let w = a.downgrade();
        assert!(strong_count(&a) == 1);
        assert!(weak_count(&a) == 1);
        let x = w.clone();
        assert!(weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(strong_count(&a) == 1);
        assert!(weak_count(&a) == 0);
        let c = a.clone();
        assert!(strong_count(&a) == 2);
        assert!(weak_count(&a) == 0);
        let d = c.downgrade();
        assert!(weak_count(&c) == 1);
        assert!(strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5u32);
        assert!(format!("{}", a) == "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Show, Default)]
    struct Foo { inner: Arc<int> }
}