// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]
//! Threadsafe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value.
//! Destruction is deterministic, and will occur as soon as the last owner is
//! gone. It is marked as `Send` because it uses atomic reference counting.
//!
//! If you do not need thread-safety, and just need shared ownership, consider
//! the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but
//! does not use atomics, making it both thread-unsafe as well as significantly
//! faster when updating the reference count.
//!
//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer
//! to the box. A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but
//! will return `None` if the value has already been dropped.
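//!
//! A minimal sketch of that round trip (the integer value is arbitrary):
//!
//! ```
//! use std::sync::Arc;
//!
//! let strong = Arc::new(5);
//! let weak = Arc::downgrade(&strong);
//! assert!(weak.upgrade().is_some()); // the value is still alive
//! drop(strong);
//! assert!(weak.upgrade().is_none()); // the value has been dropped
//! ```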
//!
//! For example, a tree with parent pointers can be represented by putting the
//! nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
//! as `Weak<T>` pointers.
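//!
//! A minimal sketch of such a node (the `Node` type and its fields are
//! illustrative, not part of this module):
//!
//! ```
//! use std::sync::{Arc, Weak};
//!
//! struct Node { // illustrative type, not defined by this module
//!     parent: Option<Weak<Node>>, // `Weak` so a child doesn't keep its parent alive
//!     value: i32,
//! }
//!
//! let parent = Arc::new(Node { parent: None, value: 1 });
//! let child = Node { parent: Some(Arc::downgrade(&parent)), value: 2 };
//!
//! // The parent is reachable from the child for as long as it is alive.
//! assert_eq!(child.parent.as_ref().unwrap().upgrade().unwrap().value, 1);
//! ```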
//!
//! # Examples
//!
//! Sharing some immutable data between threads:
//!
//! ```
//! use std::sync::Arc;
//! use std::thread;
//!
//! let five = Arc::new(5);
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         println!("{:?}", five);
//!     });
//! }
//! ```
//!
//! Sharing mutable data safely between threads with a `Mutex`:
//!
//! ```
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let five = Arc::new(Mutex::new(5));
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         let mut number = five.lock().unwrap();
//!
//!         *number += 1;
//!
//!         println!("{}", *number); // prints the incremented value
//!     });
//! }
//! ```

use boxed::Box;

use core::sync::atomic;
use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
use core::intrinsics::{drop_in_place, abort};
use core::mem;
use core::nonzero::NonZero;
use core::ops::{Deref, CoerceUnsized};
use core::ptr;
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::{usize, isize};

use heap::deallocate;

const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// An atomically reference counted wrapper for shared state.
///
/// # Examples
///
/// In this example, a large vector of numbers is shared between several
/// threads. With simple pipes, without `Arc`, a copy would have to be made for
/// each thread.
///
/// When you clone an `Arc<T>`, it will create another pointer to the data and
/// increase the reference counter.
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// fn main() {
///     let numbers: Vec<_> = (0..100u32).collect();
///     let shared_numbers = Arc::new(numbers);
///
///     for _ in 0..10 {
///         let child_numbers = shared_numbers.clone();
///
///         thread::spawn(move || {
///             let local_numbers = &child_numbers[..];
///
///             // Work with the local numbers
///         });
///     }
/// }
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> { }
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> { }

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: NonZero<*mut ArcInner<T>>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> { }
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> { }

impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data: data,
        };
        Arc { _ptr: unsafe { NonZero::new(Box::into_raw(x)) } }
    }

    /// Unwraps the contained value if the `Arc<T>` has only one strong reference.
    /// This will succeed even if there are outstanding weak references.
    ///
    /// Otherwise, an `Err` is returned with the same `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = x.clone();
    /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_and_swap(1, 0, Release) != 1 { return Err(this) }

        atomic::fence(Acquire);

        unsafe {
            let ptr = *this._ptr;
            let elem = ptr::read(&(*ptr).data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { _ptr: this._ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        loop {
            // This Relaxed is OK because we're checking the value in the CAS
            // itself.
            let cur = this.inner().weak.load(Relaxed);

            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX { continue }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
                return Weak { _ptr: this._ptr }
            }
        }
    }

    /// Get the number of weak references to this value.
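    ///
    /// A sketch of what this reports; the count is a racy snapshot, so the
    /// assertion holds only because no other thread is touching the `Arc`
    /// (the unstable `arc_counts` feature gate is required):
    ///
    /// ```
    /// #![feature(arc_counts)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// // No other threads are running, so the snapshot is deterministic here.
    /// assert_eq!(Arc::weak_count(&five), 1);
    /// ```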
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy")]
    pub fn weak_count(this: &Self) -> usize {
        this.inner().weak.load(SeqCst) - 1
    }

    /// Get the number of strong references to this value.
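    ///
    /// Also a racy snapshot; a sketch under the same single-threaded
    /// assumption:
    ///
    /// ```
    /// #![feature(arc_counts)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = five.clone();
    ///
    /// // Both `five` and `_also_five` hold a strong reference.
    /// assert_eq!(Arc::strong_count(&five), 2);
    /// ```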
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &**self._ptr }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let ptr = *self._ptr;

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        drop_in_place(&mut (*ptr).data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc<T>`.
    ///
    /// This increases the strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe { abort(); }
        }

        Arc { _ptr: self._ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    #[unstable(feature = "arc_make_unique", reason = "renamed to Arc::make_mut")]
    #[deprecated(since = "1.4.0", reason = "renamed to Arc::make_mut")]
    pub fn make_unique(this: &mut Self) -> &mut T {
        Arc::make_mut(this)
    }

    /// Make a mutable reference into the given `Arc<T>` by cloning the inner
    /// data if the `Arc<T>` doesn't have exactly one strong reference and no
    /// weak references.
    ///
    /// This is also referred to as copy-on-write.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = data.clone();      // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Note: data and other_data now point to different numbers
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.

            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.

            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.

            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { _ptr: this._ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            let inner = &mut **this._ptr;
            &mut inner.data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the contained value if the `Arc<T>` has
    /// one strong reference and no weak references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = x.clone();
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                let inner = &mut **this._ptr;
                Some(&mut inner.data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` prior to decrements of the `weak` count (via drop,
        // which uses Release).
        if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
            // Due to the previous acquire read, this will observe any writes to
            // `strong` that were due to upgrading weak pointers; only strong
            // clones remain, which require that the strong count is > 1 anyway.
            let unique = self.inner().strong.load(Relaxed) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc<T>`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count becomes zero and the only other references are `Weak<T>` ones,
    /// `drop`s the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    ///     drop(five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    #[inline]
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        let ptr = *self._ptr;
        // if ptr.is_null() { return }
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow()
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Upgrades a weak reference to a strong reference.
    ///
    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
    ///
    /// Returns `None` if there were no strong references and the data was
    /// destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            // Relaxed load because any write of 0 that we can observe
            // leaves the field in a permanently zero state (so a
            // "stale" read of 0 is fine), and any other value is
            // confirmed via the CAS below.
            let n = inner.strong.load(Relaxed);
            if n == 0 { return None }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
            if old == n { return Some(Arc { _ptr: self._ptr }) }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &**self._ptr }
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak<T>`.
    ///
    /// This increases the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// weak_five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = self.inner().weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe { abort(); }
        }

        return Weak { _ptr: self._ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak<T>`.
    ///
    /// This will decrement the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    ///     drop(weak_five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        let ptr = *self._ptr;

        // see comments above for why this check is here
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return
        }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8,
                                size_of_val(&*ptr),
                                align_of_val(&*ptr)) }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five == Arc::new(5);
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) }

    /// Inequality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five != Arc::new(5);
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.partial_cmp(&Arc::new(5));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five < Arc::new(5);
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) }

    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five <= Arc::new(5);
    /// ```
    fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) }

    /// Greater-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five > Arc::new(5);
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) }

    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five >= Arc::new(5);
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&*self._ptr, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    #[stable(feature = "rust1", since = "1.0.0")]
    fn default() -> Arc<T> { Arc::new(Default::default()) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{Some, None};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::vec::Vec;

    use super::{Arc, Weak};
    use std::sync::Mutex;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }

    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }

    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5u32);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo { inner: Arc<i32> }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T { &**self }
}