// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]
//! Thread-safe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value through
//! atomic reference counting.
//!
//! `Weak<T>` is a weak reference to the `Arc<T>` box, and it is created by
//! the `downgrade` method.
//!
//! # Examples
//!
//! Sharing some immutable data between threads:
//!
// Note that we **do not** run these tests here. The Windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
//! ```no_run
//! use std::sync::Arc;
//! use std::thread;
//!
//! let five = Arc::new(5);
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         println!("{:?}", five);
//!     });
//! }
//! ```
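//!
//! An illustrative sketch of `downgrade` and `upgrade`:
//!
//! ```
//! use std::sync::Arc;
//!
//! let five = Arc::new(5);
//! let weak_five = Arc::downgrade(&five);
//!
//! // The weak pointer can be upgraded while `five` is still alive...
//! assert!(weak_five.upgrade().is_some());
//!
//! // ...but not after the last strong reference is dropped.
//! drop(five);
//! assert!(weak_five.upgrade().is_none());
//! ```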

use boxed::Box;

use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
use core::intrinsics::abort;
use core::mem;
use core::mem::uninitialized;
use core::ops::Deref;
use core::ops::CoerceUnsized;
use core::ptr::{self, Shared};
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::From;

use heap::deallocate;
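
// A soft upper bound on the reference count; `clone` aborts past this point
// to keep the count from overflowing (see the comments in `Arc::clone`).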
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// An atomically reference counted wrapper for shared state.
/// Destruction is deterministic, and will occur as soon as the last owner is
/// gone. It is marked as `Send` because it uses atomic reference counting.
///
/// If you do not need thread-safety, and just need shared ownership, consider
/// the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but
/// does not use atomics, making it thread-unsafe but significantly faster when
/// updating the reference count.
///
/// # Examples
///
/// In this example, a large vector of data will be shared by several threads. First, we
/// wrap it with `Arc::new` and then clone the `Arc<T>` reference for every thread (which will
/// increase the reference count atomically).
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let numbers: Vec<_> = (0..100).collect();
/// let shared_numbers = Arc::new(numbers);
///
/// for _ in 0..10 {
///     // prepare a copy of the reference here; it will be moved into the thread
///     let child_numbers = shared_numbers.clone();
///
///     thread::spawn(move || {
///         let local_numbers = &child_numbers[..];
///
///         // Work with the local numbers
///     });
/// }
/// ```
///
/// You can also share mutable data between threads safely
/// by putting it inside a `Mutex` and then sharing the `Mutex` immutably
/// with `Arc<T>` as shown below.
///
// See comment at the top of this file for why the test is no_run
/// ```no_run
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let five = Arc::new(Mutex::new(5));
///
/// for _ in 0..10 {
///     let five = five.clone();
///
///     thread::spawn(move || {
///         let mut number = five.lock().unwrap();
///
///         *number += 1;
///
///         println!("{}", *number); // prints 6
///     });
/// }
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
///
/// A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but
/// will return `None` if the value has already been dropped.
///
/// For example, a tree with parent pointers can be represented by putting the
/// nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
/// as `Weak<T>` pointers.
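///
/// An illustrative sketch of that parent-pointer pattern (`Node` here is
/// hypothetical, not part of this module):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// // The `parent` link is weak so that parents and children do not
/// // keep each other alive in a reference cycle.
/// struct Node {
///     parent: Mutex<Option<Weak<Node>>>,
/// }
///
/// let parent = Arc::new(Node { parent: Mutex::new(None) });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Some(Arc::downgrade(&parent))),
/// });
///
/// // Upgrading succeeds while the parent is still alive.
/// assert!(child.parent.lock().unwrap().as_ref().unwrap().upgrade().is_some());
/// ```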
#[unsafe_no_drop_flag]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data: data,
        };
        Arc { ptr: unsafe { Shared::new(Box::into_raw(x)) } }
    }

    /// Unwraps the contained value if the `Arc<T>` has exactly one strong reference.
    ///
    /// Otherwise, an `Err` is returned with the same `Arc<T>`.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = x.clone();
    /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let ptr = *this.ptr;
            let elem = ptr::read(&(*ptr).data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // itself.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => return Weak { ptr: this.ptr },
                Err(old) => cur = old,
            }
        }
    }

    /// Get the number of weak references to this value.
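    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch; note that the result is racy if other
    /// threads are cloning or dropping pointers concurrently.
    ///
    /// ```
    /// #![feature(arc_counts)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    /// assert_eq!(Arc::weak_count(&five), 1);
    /// ```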
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy",
               issue = "28356")]
    pub fn weak_count(this: &Self) -> usize {
        this.inner().weak.load(SeqCst) - 1
    }

    /// Get the number of strong references to this value.
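    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch; as with `weak_count`, the result is
    /// racy in the presence of other threads.
    ///
    /// ```
    /// #![feature(arc_counts)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = five.clone();
    /// assert_eq!(Arc::strong_count(&five), 2);
    /// ```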
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy",
               issue = "28356")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &**self.ptr }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let ptr = *self.ptr;

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut (*ptr).data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc<T>`.
    ///
    /// This increases the strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Arc { ptr: self.ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Make a mutable reference into the given `Arc<T>`.
    /// If the `Arc<T>` has more than one strong reference, or any weak
    /// references, the inner data is cloned.
    ///
    /// This is also referred to as copy-on-write.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = data.clone();      // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Note: data and other_data now point to different numbers
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.

            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&(**weak.ptr).data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            let inner = &mut **this.ptr;
            &mut inner.data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the contained value if the `Arc<T>` has
    /// one strong reference and no weak references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = x.clone();
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                let inner = &mut **this.ptr;
                Some(&mut inner.data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` prior to decrements of the `weak` count (via drop,
        // which uses Release).
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // Due to the previous acquire read, this will observe any writes to
            // `strong` that were due to upgrading weak pointers; only strong
            // clones remain, which require that the strong count is > 1 anyway.
            let unique = self.inner().strong.load(Relaxed) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc<T>`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count becomes zero and the only other references are `Weak<T>` ones,
    /// `drop`s the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    ///     drop(five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    #[unsafe_destructor_blind_to_params]
    #[inline]
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        let thin = *self.ptr as *const ();

        if thin as usize == mem::POST_DROP_USIZE {
            return;
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T> Weak<T> {
    /// Constructs a new `Weak<T>` without an accompanying instance of `T`.
    ///
    /// This allocates memory for `T`, but does not initialize it. Calling
    /// `Weak<T>::upgrade()` on the return value always gives `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        unsafe {
            Weak {
                ptr: Shared::new(Box::into_raw(box ArcInner {
                    strong: atomic::AtomicUsize::new(0),
                    weak: atomic::AtomicUsize::new(1),
                    data: uninitialized(),
                })),
            }
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Upgrades a weak reference to a strong reference.
    ///
    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
    ///
    /// Returns `None` if there were no strong references and the data was
    /// destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc { ptr: self.ptr }),
                Err(old) => n = old,
            }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &**self.ptr }
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak<T>`.
    ///
    /// This increases the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// weak_five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = self.inner().weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { ptr: self.ptr };
    }
}

#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    fn default() -> Weak<T> {
        Weak::new()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak<T>`.
    ///
    /// This will decrement the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    ///     drop(weak_five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        let ptr = *self.ptr;
        let thin = ptr as *const ();

        // see comments above for why this check is here
        if thin as usize == mem::POST_DROP_USIZE {
            return;
        }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five == Arc::new(5);
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    /// Inequality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five != Arc::new(5);
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.partial_cmp(&Arc::new(5));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five < Arc::new(5);
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five <= Arc::new(5);
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five > Arc::new(5);
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five >= Arc::new(5);
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&*self.ptr, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}
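
/// An illustrative sketch of the `From<T>` conversion, which simply wraps the
/// value in a new `Arc<T>`:
///
/// ```
/// use std::sync::Arc;
///
/// let x: Arc<i32> = Arc::from(123);
/// assert_eq!(*x, 123);
/// ```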
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{None, Some};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::vec::Vec;

    use super::{Arc, Weak};
    use std::sync::Mutex;
    use std::convert::From;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }

    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }

    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;
        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo {
        inner: Arc<i32>,
    }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn test_from_owned() {
        let foo = 123;
        let foo_arc = Arc::from(foo);
        assert!(123 == *foo_arc);
    }

    #[test]
    fn test_new_weak() {
        let foo: Weak<usize> = Weak::new();
        assert!(foo.upgrade().is_none());
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}