// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html

use core::any::Any;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::Ordering;
use core::intrinsics::abort;
use core::mem::{self, align_of_val, size_of_val};
use core::ops::Deref;
use core::ops::CoerceUnsized;
use core::ptr::{self, NonNull};
use core::marker::{Unsize, PhantomData};
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::From;

use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
use boxed::Box;
use rc::is_dangling;
use string::String;
use vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily exactly at `MAX_REFCOUNT + 1` references).
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new pointer to the same value in the heap. When the last `Arc`
/// pointer to a given value is destroyed, the pointed-to value is also
/// dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted values between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` were always
/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
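///
/// As a minimal sketch of the problem (the `shared` name is purely
/// illustrative), the following is expected to fail to compile, because
/// moving an `Arc<RefCell<i32>>` into another thread would require it to
/// be [`Send`]:
///
/// ```compile_fail
/// use std::cell::RefCell;
/// use std::sync::Arc;
/// use std::thread;
///
/// let shared = Arc::new(RefCell::new(0));
/// // `RefCell<i32>` is not `Sync`, so `Arc<RefCell<i32>>` is not `Send`
/// // and cannot be moved across threads.
/// thread::spawn(move || {
///     *shared.borrow_mut() += 1;
/// });
/// ```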
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
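///
/// A minimal sketch of that pairing, with several threads incrementing a
/// shared counter behind a [`Mutex`][mutex]:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = vec![];
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Lock the mutex before touching the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```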
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value has already been
/// dropped.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// pointers from children back to their parents, as sketched below.
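///
/// A minimal sketch of that tree shape (the `Node` type here is purely
/// illustrative, not part of this module):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,       // weak pointer up: breaks the cycle
///     children: Mutex<Vec<Arc<Node>>>, // strong pointers down the tree
/// }
///
/// let root = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(vec![]),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&root)),
///     children: Mutex::new(vec![]),
/// });
/// root.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The child can reach its parent, but the weak pointer does not
/// // keep the parent alive on its own.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```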
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a and b both point to the same memory location as foo.
/// ```
///
/// The [`Arc::clone(&from)`] syntax is the most idiomatic because it conveys more explicitly
/// the meaning of the code. In the example above, this syntax makes it easier to see that
/// this code is creating a new reference rather than copying the whole content of `foo`.
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are [associated
/// functions][assoc], called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the value may have
/// already been destroyed.
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [assoc]: ../../book/first-edition/method-syntax.html#associated-functions
/// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: #method.clone
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The Windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests here.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the inner value from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present and may return [`None`]
/// when [`upgrade`]d.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the value
/// within [`Arc`] without extending its lifetime. It is also used to prevent
/// circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
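///
/// A minimal sketch of obtaining and using one:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new("hello".to_string());
/// let weak = Arc::downgrade(&strong);
/// // The weak pointer can be upgraded only while `strong` is alive.
/// assert!(weak.upgrade().is_some());
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```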
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn't need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because `ArcInner` has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData }
    }

    /// Returns the contained value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// [result]: ../../std/result/enum.Result.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`][from_raw].
    ///
    /// [from_raw]: struct.Arc.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { *x_ptr }, 10);
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr: *const T = &*this;
        mem::forget(this);
        ptr
    }

    /// Constructs an `Arc` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc::into_raw`][into_raw].
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double-free may occur if the function is called twice on the same raw pointer.
    ///
    /// [into_raw]: struct.Arc.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(*x, 10);
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // Align the unsized value to the end of the ArcInner.
        // Because it is `?Sized`, it will always be the last field in memory.
        let align = align_of_val(&*ptr);
        let layout = Layout::new::<ArcInner<()>>();
        let offset = (layout.size() + layout.padding_needed_for(align)) as isize;

        // Reverse the offset to find the original ArcInner.
        let fake_ptr = ptr as *mut ArcInner<T>;
        let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));

        Arc {
            ptr: NonNull::new_unchecked(arc_ptr),
            phantom: PhantomData,
        }
    }

    /// Creates a new [`Weak`][weak] pointer to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // itself.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }

    /// Gets the number of [`Weak`][weak] pointers to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }

    /// Gets the number of strong (`Arc`) pointers to this value.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut self.ptr.as_mut().data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
        }
    }

    #[stable(feature = "ptr_eq", since = "1.17.0")]
    /// Returns true if the two `Arc`s point to the same value (not
    /// just values that compare as equal).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}

impl<T: ?Sized> Arc<T> {
    // Allocates an `ArcInner<T>` with sufficient space for an unsized value
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Create a fake ArcInner to find allocation size and alignment
        let fake_ptr = ptr as *mut ArcInner<T>;

        let layout = Layout::for_value(&*fake_ptr);

        let mem = Global.alloc(layout)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Initialize the real ArcInner
        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;

        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));

        inner
    }

    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let box_unique = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size);

            // Free the allocation without dropping its contents
            box_free(box_unique);

            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
        }
    }
}

// Sets the data pointer of a `?Sized` raw pointer.
//
// For a slice/trait object, this sets the `data` field and leaves the rest
// unchanged. For a sized raw pointer, this simply sets the pointer.
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    ptr
}

impl<T> Arc<[T]> {
    // Copy elements from slice into newly allocated Arc<[T]>
    //
    // Unsafe because the caller must either take ownership or bind `T: Copy`
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        let v_ptr = v as *const [T];
        let ptr = Self::allocate_for_ptr(v_ptr);

        ptr::copy_nonoverlapping(
            v.as_ptr(),
            &mut (*ptr).data as *mut [T] as *mut T,
            v.len());

        Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
    }
}

// Specialization trait used for From<&[T]>
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                use core::slice::from_raw_parts_mut;

                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.dealloc(self.mem.cast(), self.layout.clone());
                }
            }
        }

        unsafe {
            let v_ptr = v as *const [T];
            let ptr = Self::allocate_for_ptr(v_ptr);

            let mem = ptr as *mut _ as *mut u8;
            let layout = Layout::for_value(&*ptr);

            // Pointer to first element
            let elems = &mut (*ptr).data as *mut [T] as *mut T;

            let mut guard = Guard {
                mem: NonNull::new_unchecked(mem),
                elems: elems,
                layout: layout,
                n_elems: 0,
            };

            for (i, item) in v.iter().enumerate() {
                ptr::write(elems.offset(i as isize), item.clone());
                guard.n_elems += 1;
            }

            // All clear. Forget the guard so it doesn't free the new ArcInner.
            mem::forget(guard);

            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
        }
    }
}

impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same inner value, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Arc { ptr: self.ptr, phantom: PhantomData }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`][weak] pointers to the same value,
    /// then `make_mut` will invoke [`clone`][clone] on the inner value to
    /// ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [weak]: struct.Weak.html
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    /// [get_mut]: struct.Arc.html#method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different values.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            &mut this.ptr.as_mut().data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the inner value, if there are
    /// no other `Arc` or [`Weak`][weak] pointers to the same value.
    ///
    /// Returns [`None`][option] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when it's shared.
    ///
    /// [weak]: struct.Weak.html
    /// [option]: ../../std/option/enum.Option.html
    /// [make_mut]: struct.Arc.html#method.make_mut
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                Some(&mut this.ptr.as_mut().data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
            // counter in `drop` -- the only access that happens when any but the last reference
            // is being dropped.
            let unique = self.inner().strong.load(Acquire) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock

            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`][weak], so we `drop` the inner value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo  = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo);    // Doesn't print anything
    /// drop(foo2);   // Prints "dropped!"
    /// ```
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happen before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl Arc<dyn Any + Send + Sync> {
    #[inline]
    #[stable(feature = "rc_downcast", since = "1.29.0")]
    /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    /// use std::sync::Arc;
    ///
    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let my_string = "Hello World".to_string();
    ///     print_if_string(Arc::new(my_string));
    ///     print_if_string(Arc::new(0i8));
    /// }
    /// ```
    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
    where
        T: Any + Send + Sync + 'static,
    {
        if (*self).is::<T>() {
            let ptr = self.ptr.cast::<ArcInner<T>>();
            mem::forget(self);
            Ok(Arc { ptr, phantom: PhantomData })
        } else {
            Err(self)
        }
    }
}

impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating any memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        Weak {
            ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"),
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending
    /// the lifetime of the value if successful.
    ///
    /// Returns [`None`] if the value has since been dropped.
    ///
    /// [`Arc`]: struct.Arc.html
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner()?;

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc {
                    // null checked above
                    ptr: self.ptr,
                    phantom: PhantomData,
                }),
                Err(old) => n = old,
            }
        }
    }

    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`,
    /// i.e., this `Weak` was created by `Weak::new`.
    #[inline]
    fn inner(&self) -> Option<&ArcInner<T>> {
        if is_dangling(self.ptr) {
            None
        } else {
            Some(unsafe { self.ptr.as_ref() })
        }
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return Weak { ptr: self.ptr };
        };
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = inner.weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { ptr: self.ptr };
    }
}

#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo);   // Doesn't print anything
    /// drop(foo);        // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return;
        };

        if inner.weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe {
                Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> {
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<'a> From<&'a str> for Arc<str> {
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
    #[inline]
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // Allow the Vec to free its memory, but not destroy its contents
            v.set_len(0);

            arc
        }
    }
}

#[cfg(test)]
mod tests {
    use std::boxed::Box;
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{None, Some};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::sync::Mutex;
    use std::convert::From;

    use super::{Arc, Weak};
    use vec::Vec;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    fn manually_share_arc() {
        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }

    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }

    #[test]
    fn into_from_raw() {
        let x = Arc::new(box "hello");
        let y = x.clone();

        let x_ptr = Arc::into_raw(x);
        drop(y);
        unsafe {
            assert_eq!(**x_ptr, "hello");

            let x = Arc::from_raw(x_ptr);
            assert_eq!(**x, "hello");

            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
        }
    }

    #[test]
    fn test_into_from_raw_unsized() {
        use std::fmt::Display;
        use std::string::ToString;

        let arc: Arc<str> = Arc::from("foo");

        let ptr = Arc::into_raw(arc.clone());
        let arc2 = unsafe { Arc::from_raw(ptr) };

        assert_eq!(unsafe { &*ptr }, "foo");
        assert_eq!(arc, arc2);

        let arc: Arc<dyn Display> = Arc::new(123);

        let ptr = Arc::into_raw(arc.clone());
        let arc2 = unsafe { Arc::from_raw(ptr) };

        assert_eq!(unsafe { &*ptr }.to_string(), "123");
        assert_eq!(arc2.to_string(), "123");
    }

    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;
        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo {
        inner: Arc<i32>,
    }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn test_from_owned() {
        let foo = 123;
        let foo_arc = Arc::from(foo);
        assert!(123 == *foo_arc);
    }

    #[test]
    fn test_new_weak() {
        let foo: Weak<usize> = Weak::new();
        assert!(foo.upgrade().is_none());
    }

    #[test]
    fn test_ptr_eq() {
        let five = Arc::new(5);
        let same_five = five.clone();
        let other_five = Arc::new(5);

        assert!(Arc::ptr_eq(&five, &same_five));
        assert!(!Arc::ptr_eq(&five, &other_five));
    }

    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    fn test_weak_count_locked() {
        let mut a = Arc::new(atomic::AtomicBool::new(false));
        let a2 = a.clone();
        let t = thread::spawn(move || {
            for _i in 0..1000000 {
                Arc::get_mut(&mut a);
            }
            a.store(true, SeqCst);
        });

        while !a2.load(SeqCst) {
            let n = Arc::weak_count(&a2);
            assert!(n < 2, "bad weak count: {}", n);
        }
        t.join().unwrap();
    }

    #[test]
    fn test_from_str() {
        let r: Arc<str> = Arc::from("foo");

        assert_eq!(&r[..], "foo");
    }

    #[test]
    fn test_copy_from_slice() {
        let s: &[u32] = &[1, 2, 3];
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }

    #[test]
    fn test_clone_from_slice() {
        #[derive(Clone, Debug, Eq, PartialEq)]
        struct X(u32);

        let s: &[X] = &[X(1), X(2), X(3)];
        let r: Arc<[X]> = Arc::from(s);

        assert_eq!(&r[..], s);
    }

    #[test]
    #[should_panic]
    fn test_clone_from_slice_panic() {
        use std::string::{String, ToString};

        struct Fail(u32, String);

        impl Clone for Fail {
            fn clone(&self) -> Fail {
                if self.0 == 2 {
                    panic!();
                }
                Fail(self.0, self.1.clone())
            }
        }

        let s: &[Fail] = &[
            Fail(0, "foo".to_string()),
            Fail(1, "bar".to_string()),
            Fail(2, "baz".to_string()),
        ];

        // Should panic, but not cause memory corruption
        let _r: Arc<[Fail]> = Arc::from(s);
    }

    #[test]
    fn test_from_box() {
        let b: Box<u32> = box 123;
        let r: Arc<u32> = Arc::from(b);

        assert_eq!(*r, 123);
    }

    #[test]
    fn test_from_box_str() {
        use std::string::String;

        let s = String::from("foo").into_boxed_str();
        let r: Arc<str> = Arc::from(s);

        assert_eq!(&r[..], "foo");
    }

    #[test]
    fn test_from_box_slice() {
        let s = vec![1, 2, 3].into_boxed_slice();
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }

    #[test]
    fn test_from_box_trait() {
        use std::fmt::Display;
        use std::string::ToString;

        let b: Box<dyn Display> = box 123;
        let r: Arc<dyn Display> = Arc::from(b);

        assert_eq!(r.to_string(), "123");
    }

    #[test]
    fn test_from_box_trait_zero_sized() {
        use std::fmt::Debug;

        let b: Box<dyn Debug> = box ();
        let r: Arc<dyn Debug> = Arc::from(b);

        assert_eq!(format!("{:?}", r), "()");
    }

    #[test]
    fn test_from_vec() {
        let v = vec![1, 2, 3];
        let r: Arc<[u32]> = Arc::from(v);

        assert_eq!(&r[..], [1, 2, 3]);
    }

    #[test]
    fn test_downcast() {
        use std::any::Any;

        let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value());
        let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");

        assert!(r1.clone().downcast::<u32>().is_err());

        let r1i32 = r1.downcast::<i32>();
        assert!(r1i32.is_ok());
        assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value()));

        assert!(r2.clone().downcast::<i32>().is_err());

        let r2str = r2.downcast::<&'static str>();
        assert!(r2str.is_ok());
        assert_eq!(r2str.unwrap(), Arc::new("abc"));
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}