#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html
use core::any::Any;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::{self, Ordering};
use core::intrinsics::abort;
use core::mem::{self, align_of, align_of_val, size_of_val};
use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::marker::{Unpin, Unsize, PhantomData};
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::From;
use core::slice::from_raw_parts_mut;

use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
use crate::boxed::Box;
use crate::rc::is_dangling;
use crate::string::String;
use crate::vec::Vec;
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at exactly `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same value on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given value is destroyed, the pointed-to value is also
/// destroyed.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted values between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
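///
/// A minimal sketch of that pattern (our illustration, not part of this API):
/// a shared counter behind `Arc<Mutex<_>>`, with every thread joined before
/// the final read:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..4).map(|_| {
///     let counter = Arc::clone(&counter);
///     thread::spawn(move || {
///         // Lock the mutex to mutate through the shared `Arc`.
///         *counter.lock().unwrap() += 1;
///     })
/// }).collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(4, *counter.lock().unwrap());
/// ```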
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value has already been
/// dropped.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// pointers from children back to their parents.
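///
/// One illustrative shape for such a tree (the `Node` type here is ours, not
/// part of this API):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     // Strong pointers parent -> child keep the children alive ...
///     children: Mutex<Vec<Arc<Node>>>,
///     // ... while weak pointers child -> parent avoid a reference cycle.
///     parent: Mutex<Weak<Node>>,
/// }
///
/// let parent = Arc::new(Node {
///     children: Mutex::new(Vec::new()),
///     parent: Mutex::new(Weak::new()),
/// });
/// let child = Arc::new(Node {
///     children: Mutex::new(Vec::new()),
///     parent: Mutex::new(Arc::downgrade(&parent)),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```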
///
/// # Cloning references
///
/// Creating a new reference from an existing reference counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// The [`Arc::clone(&from)`] syntax is the most idiomatic because it conveys more explicitly
/// the meaning of the code. In the example above, this syntax makes it easier to see that
/// this code is creating a new reference rather than copying the whole content of `foo`.
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the value may have
/// already been destroyed.
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: #method.clone
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
#[cfg_attr(not(test), lang = "arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the inner value from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present and may return [`None`]
/// when [`upgrade`]d.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the value
/// within [`Arc`] without extending its lifetime. It is also used to prevent
/// circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because `ArcInner` has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}
struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData }
    }
    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
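    ///
    /// A minimal illustrative example (`Pin<Arc<T>>` still dereferences to
    /// `T`, so the pinned value can be read in place):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```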
    #[stable(feature = "pin", since = "1.33.0")]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }
    /// Returns the contained value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// [result]: ../../std/result/enum.Result.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`][from_raw].
    ///
    /// [from_raw]: struct.Arc.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { *x_ptr }, 10);
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr: *const T = &*this;
        mem::forget(this);
        ptr
    }
    /// Constructs an `Arc` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc::into_raw`][into_raw].
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double-free may occur if the function is called twice on the same raw pointer.
    ///
    /// [into_raw]: struct.Arc.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(*x, 10);
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        let offset = data_offset(ptr);

        // Reverse the offset to find the original ArcInner.
        let fake_ptr = ptr as *mut ArcInner<T>;
        let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));

        Arc {
            ptr: NonNull::new_unchecked(arc_ptr),
            phantom: PhantomData,
        }
    }
    /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(rc_into_raw_non_null)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let ptr = Arc::into_raw_non_null(x);
    /// let deref = unsafe { *ptr.as_ref() };
    /// assert_eq!(deref, 10);
    /// ```
    #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
    #[inline]
    pub fn into_raw_non_null(this: Self) -> NonNull<T> {
        // safe because Arc guarantees its pointer is non-null
        unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) }
    }
    /// Creates a new [`Weak`][weak] pointer to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // itself.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }
    /// Gets the number of [`Weak`][weak] pointers to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }
    /// Gets the number of strong (`Arc`) pointers to this value.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut self.ptr.as_mut().data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
        }
    }
    #[inline]
    #[stable(feature = "ptr_eq", since = "1.17.0")]
    /// Returns `true` if the two `Arc`s point to the same value (not
    /// just values that compare as equal).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}
impl<T: ?Sized> Arc<T> {
    // Allocates an `ArcInner<T>` with sufficient space for an unsized value
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Calculate layout using the given value.
        // Previously, layout was calculated on the expression
        // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
        // reference (see #54908).
        let layout = Layout::new::<ArcInner<()>>()
            .extend(Layout::for_value(&*ptr)).unwrap().0
            .pad_to_align().unwrap();

        let mem = Global.alloc(layout)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Initialize the ArcInner
        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
        debug_assert_eq!(Layout::for_value(&*inner), layout);

        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));

        inner
    }
    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let box_unique = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size);

            // Free the allocation without dropping its contents
            box_free(box_unique);

            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
        }
    }
}
// Sets the data pointer of a `?Sized` raw pointer.
//
// For a slice/trait object, this sets the `data` field and leaves the rest
// unchanged. For a sized raw pointer, this simply sets the pointer.
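//
// For instance (an illustrative description, not part of the original
// comments): given a fat pointer `s: *mut [u8]` and a fresh allocation
// `p: *mut u8`, `set_data_ptr(s, p)` yields a `*mut [u8]` whose data pointer
// is `p` and whose length metadata is carried over from `s` unchanged.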
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    ptr
}
impl<T> Arc<[T]> {
    // Copy elements from slice into newly allocated Arc<[T]>
    //
    // Unsafe because the caller must either take ownership or bind `T: Copy`
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        let v_ptr = v as *const [T];
        let ptr = Self::allocate_for_ptr(v_ptr);

        ptr::copy_nonoverlapping(
            v.as_ptr(),
            &mut (*ptr).data as *mut [T] as *mut T,
            v.len());

        Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
    }
}
// Specialization trait used for From<&[T]>
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}
impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.dealloc(self.mem.cast(), self.layout.clone());
                }
            }
        }

        unsafe {
            let v_ptr = v as *const [T];
            let ptr = Self::allocate_for_ptr(v_ptr);

            let mem = ptr as *mut _ as *mut u8;
            let layout = Layout::for_value(&*ptr);

            // Pointer to first element
            let elems = &mut (*ptr).data as *mut [T] as *mut T;

            let mut guard = Guard {
                mem: NonNull::new_unchecked(mem),
                elems: elems,
                layout: layout,
                n_elems: 0,
            };

            for (i, item) in v.iter().enumerate() {
                ptr::write(elems.add(i), item.clone());
                guard.n_elems += 1;
            }

            // All clear. Forget the guard so it doesn't free the new ArcInner.
            mem::forget(guard);

            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
        }
    }
}
impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same inner value, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let _ = Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Arc { ptr: self.ptr, phantom: PhantomData }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Arc<T> {}
impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`][weak] pointers to the same value,
    /// then `make_mut` will invoke [`clone`][clone] on the inner value to
    /// ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [weak]: struct.Weak.html
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    /// [get_mut]: struct.Arc.html#method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different values.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            &mut this.ptr.as_mut().data
        }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the inner value, if there are
    /// no other `Arc` or [`Weak`][weak] pointers to the same value.
    ///
    /// Returns [`None`][option] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when it's shared.
    ///
    /// [weak]: struct.Weak.html
    /// [option]: ../../std/option/enum.Option.html
    /// [make_mut]: struct.Arc.html#method.make_mut
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                Some(&mut this.ptr.as_mut().data)
            }
        } else {
            None
        }
    }
    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
            // counter in `drop` -- the only access that happens when any but the last reference
            // is being dropped.
            let unique = self.inner().strong.load(Acquire) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`], so we `drop` the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo  = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo);    // Doesn't print anything
    /// drop(foo2);   // Prints "dropped!"
    /// ```
    ///
    /// [`Weak`]: ../../std/sync/struct.Weak.html
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}
impl Arc<dyn Any + Send + Sync> {
    #[inline]
    #[stable(feature = "rc_downcast", since = "1.29.0")]
    /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    /// use std::sync::Arc;
    ///
    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// let my_string = "Hello World".to_string();
    /// print_if_string(Arc::new(my_string));
    /// print_if_string(Arc::new(0i8));
    /// ```
    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
    where
        T: Any + Send + Sync + 'static,
    {
        if (*self).is::<T>() {
            let ptr = self.ptr.cast::<ArcInner<T>>();
            mem::forget(self);
            Ok(Arc { ptr, phantom: PhantomData })
        } else {
            Err(self)
        }
    }
}
impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating any memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        Weak {
            ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"),
        }
    }
    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
    ///
    /// It is up to the caller to ensure that the object is still alive when accessing it through
    /// the pointer.
    ///
    /// The pointer may be [`null`] or be dangling in case the object has already been destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    /// use std::ptr;
    ///
    /// let strong = Arc::new(42);
    /// let weak = Arc::downgrade(&strong);
    /// // Both point to the same object
    /// assert!(ptr::eq(&*strong, Weak::as_raw(&weak)));
    /// // The strong here keeps it alive, so we can still access the object.
    /// assert_eq!(42, unsafe { *Weak::as_raw(&weak) });
    ///
    /// drop(strong);
    /// // But not any more. We can do Weak::as_raw(&weak), but accessing the pointer would lead to
    /// // undefined behaviour.
    /// // assert_eq!(42, unsafe { *Weak::as_raw(&weak) });
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn as_raw(this: &Self) -> *const T {
        match this.inner() {
            None => ptr::null(),
            Some(inner) => {
                let offset = data_offset_sized::<T>();
                let ptr = inner as *const ArcInner<T>;
                // Note: while the pointer we create may already point to dropped value, the
                // allocation still lives (it must hold the weak count as long as we are alive).
                // Therefore, the offset is OK to do, it won't get out of the allocation.
                let ptr = unsafe { (ptr as *const u8).offset(offset) };
                ptr as *const T
            }
        }
    }
    /// Consumes the `Weak<T>` and turns it into a raw pointer.
    ///
    /// This converts the weak pointer into a raw pointer, preserving the original weak count. It
    /// can be turned back into the `Weak<T>` with [`from_raw`].
    ///
    /// The same restrictions of accessing the target of the pointer as with
    /// [`as_raw`] apply.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new(42);
    /// let weak = Arc::downgrade(&strong);
    /// let raw = Weak::into_raw(weak);
    ///
    /// assert_eq!(1, Arc::weak_count(&strong));
    /// assert_eq!(42, unsafe { *raw });
    ///
    /// drop(unsafe { Weak::from_raw(raw) });
    /// assert_eq!(0, Arc::weak_count(&strong));
    /// ```
    ///
    /// [`from_raw`]: struct.Weak.html#method.from_raw
    /// [`as_raw`]: struct.Weak.html#method.as_raw
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn into_raw(this: Self) -> *const T {
        let result = Self::as_raw(&this);
        mem::forget(this);
        result
    }
    /// Converts a raw pointer previously created by [`into_raw`] back into
    /// `Weak<T>`.
    ///
    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
    ///
    /// It takes ownership of one weak count. In case a [`null`] is passed, a dangling [`Weak`] is
    /// returned.
    ///
    /// # Safety
    ///
    /// The pointer must represent one valid weak count. In other words, it must point to `T` which
    /// is or *was* managed by an [`Arc`] and the weak count of that [`Arc`] must not have reached
    /// 0. It is allowed for the strong count to be 0.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new(42);
    ///
    /// let raw_1 = Weak::into_raw(Arc::downgrade(&strong));
    /// let raw_2 = Weak::into_raw(Arc::downgrade(&strong));
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!(42, *Weak::upgrade(&unsafe { Weak::from_raw(raw_1) }).unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(Weak::upgrade(&unsafe { Weak::from_raw(raw_2) }).is_none());
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    /// [`into_raw`]: struct.Weak.html#method.into_raw
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`Weak`]: struct.Weak.html
    /// [`Arc`]: struct.Arc.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        if ptr.is_null() {
            Self::new()
        } else {
            // See Arc::from_raw for details
            let offset = data_offset(ptr);
            let fake_ptr = ptr as *mut ArcInner<T>;
            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
            Weak {
                ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"),
            }
        }
    }
}
impl<T: ?Sized> Weak<T> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending
    /// the lifetime of the value if successful.
    ///
    /// Returns [`None`] if the value has since been dropped.
    ///
    /// [`Arc`]: struct.Arc.html
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner()?;

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc {
                    // null checked above
                    ptr: self.ptr,
                    phantom: PhantomData,
                }),
                Err(old) => n = old,
            }
        }
    }
    /// Gets the number of strong (`Arc`) pointers pointing to this value.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0.
    ///
    /// [`Weak::new`]: #method.new
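    ///
    /// An illustrative example (this method currently sits behind the
    /// unstable `weak_counts` feature gate):
    ///
    /// ```
    /// #![feature(weak_counts)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```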
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn strong_count(&self) -> usize {
        if let Some(inner) = self.inner() {
            inner.strong.load(SeqCst)
        } else {
            0
        }
    }
    /// Gets an approximation of the number of `Weak` pointers pointing to this
    /// value.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0. If not,
    /// the returned value is at least 1, since `self` still points to the
    /// value.
    ///
    /// # Accuracy
    ///
    /// Due to implementation details, the returned value can be off by 1 in
    /// either direction when other threads are manipulating any `Arc`s or
    /// `Weak`s pointing to the same value.
    ///
    /// [`Weak::new`]: #method.new
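    ///
    /// An illustrative example (again behind the unstable `weak_counts`
    /// feature gate):
    ///
    /// ```
    /// #![feature(weak_counts)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// assert_eq!(None, Weak::<u32>::new().weak_count());
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(Some(1), weak_five.weak_count());
    /// ```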
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn weak_count(&self) -> Option<usize> {
        // Due to the implicit weak pointer added when any strong pointers are
        // around, we cannot implement `weak_count` correctly since it
        // necessarily requires accessing the strong count and weak count in an
        // unsynchronized fashion. So this version is a bit racy.
        self.inner().map(|inner| {
            let strong = inner.strong.load(SeqCst);
            let weak = inner.weak.load(SeqCst);
            if strong == 0 {
                // If the last `Arc` has *just* been dropped, it might not yet
                // have removed the implicit weak count, so the value we get
                // here might be 1 too high.
                weak
            } else {
                // As long as there's still at least 1 `Arc` around, subtract
                // the implicit weak pointer.
                // Note that the last `Arc` might get dropped between the 2
                // loads we do above, removing the implicit weak pointer. This
                // means that the value might be 1 too low here. In order to not
                // return 0 here (which would happen if we're the only weak
                // pointer), we guard against that specifically.
                cmp::max(1, weak - 1)
            }
        })
    }
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
    #[inline]
    fn inner(&self) -> Option<&ArcInner<T>> {
        if is_dangling(self.ptr) {
            None
        } else {
            Some(unsafe { self.ptr.as_ref() })
        }
    }
    /// Returns `true` if the two `Weak`s point to the same value (not just values
    /// that compare as equal).
    ///
    /// # Notes
    ///
    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will
    /// compare equal to each other, even though they don't point to any value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_ptr_eq)]
    /// use std::sync::{Arc, Weak};
    ///
    /// let first_rc = Arc::new(5);
    /// let first = Arc::downgrade(&first_rc);
    /// let second = Arc::downgrade(&first_rc);
    ///
    /// assert!(Weak::ptr_eq(&first, &second));
    ///
    /// let third_rc = Arc::new(5);
    /// let third = Arc::downgrade(&third_rc);
    ///
    /// assert!(!Weak::ptr_eq(&first, &third));
    /// ```
    ///
    /// Comparing `Weak::new`.
    ///
    /// ```
    /// #![feature(weak_ptr_eq)]
    /// use std::sync::{Arc, Weak};
    ///
    /// let first = Weak::new();
    /// let second = Weak::new();
    /// assert!(Weak::ptr_eq(&first, &second));
    ///
    /// let third_rc = Arc::new(());
    /// let third = Arc::downgrade(&third_rc);
    /// assert!(!Weak::ptr_eq(&first, &third));
    /// ```
    #[inline]
    #[unstable(feature = "weak_ptr_eq", issue = "55981")]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// let _ = Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return Weak { ptr: self.ptr };
        };
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = inner.weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { ptr: self.ptr };
    }
}
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always
    /// gives [`None`].
    ///
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    /// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo);   // Doesn't print anything
    /// drop(foo);        // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return
        };

        if inner.weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe {
                Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
            }
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq> {
    fn eq(&self, other: &Arc<T>) -> bool;
    fn ne(&self, other: &Arc<T>) -> bool;
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    default fn eq(&self, other: &Arc<T>) -> bool {
        **self == **other
    }

    #[inline]
    default fn ne(&self, other: &Arc<T>) -> bool {
        **self != **other
    }
}
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone, but also heavy to check for equality, causing this
/// cost to pay off more easily. It's also more likely to have two `Arc` clones, that point to
/// the same value, than two `&T`s.
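///
/// For illustration (our example, not from the original docs): `f64` is
/// `PartialEq` but not `Eq` because `NaN != NaN`, so `Arc<f64>` must take the
/// generic value-comparison path above instead of the pointer shortcut below.
///
/// ```
/// use std::sync::Arc;
///
/// let nan = Arc::new(std::f64::NAN);
/// // Same allocation, yet the comparison must still consult the values:
/// assert!(nan != Arc::clone(&nan));
/// ```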
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        Arc::ptr_eq(self, other) || **self == **other
    }

    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        !Arc::ptr_eq(self, other) && **self != **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal.
    ///
    /// If `T` also implements `Eq`, two `Arc`s that point to the same value are
    /// always equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::eq(self, other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// If `T` also implements `Eq`, two `Arc`s that point to the same value are
    /// never unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::ne(self, other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}
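
/// Moving a value into an `Arc` via `From` is equivalent to `Arc::new`; a
/// short illustrative example:
///
/// ```
/// use std::sync::Arc;
///
/// let x: Arc<i32> = Arc::from(5);
/// assert_eq!(5, *x);
/// ```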
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}
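
/// The `&str` conversion below reuses the `Arc<[u8]>` machinery and then
/// reinterprets the allocation as `Arc<str>`, which is sound because `str`
/// has the same layout as `[u8]`. A short illustrative example:
///
/// ```
/// use std::sync::Arc;
///
/// let s: Arc<str> = Arc::from("hello");
/// assert_eq!("hello", &s[..]);
/// ```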
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
    #[inline]
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // Allow the Vec to free its memory, but not destroy its contents
            v.set_len(0);

            arc
        }
    }
}
#[cfg(test)]
mod tests {
    use std::boxed::Box;
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option::{self, None, Some};
    use std::sync::atomic::{self, Ordering::{Acquire, SeqCst}};
    use std::thread;
    use std::sync::Mutex;
    use std::convert::From;

    use super::{Arc, Weak};
    use crate::vec::Vec;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }
    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    #[cfg(not(miri))] // Miri does not support threads
    fn manually_share_arc() {
        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }
    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }
    #[test]
    fn weak_counts() {
        assert_eq!(Weak::weak_count(&Weak::<u64>::new()), None);
        assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);

        let a = Arc::new(0);
        let w = Arc::downgrade(&a);
        assert_eq!(Weak::strong_count(&w), 1);
        assert_eq!(Weak::weak_count(&w), Some(1));
        let w2 = w.clone();
        assert_eq!(Weak::strong_count(&w), 1);
        assert_eq!(Weak::weak_count(&w), Some(2));
        assert_eq!(Weak::strong_count(&w2), 1);
        assert_eq!(Weak::weak_count(&w2), Some(2));
        drop(w);
        assert_eq!(Weak::strong_count(&w2), 1);
        assert_eq!(Weak::weak_count(&w2), Some(1));
        let a2 = a.clone();
        assert_eq!(Weak::strong_count(&w2), 2);
        assert_eq!(Weak::weak_count(&w2), Some(1));
        drop(a2);
        drop(a);
        assert_eq!(Weak::strong_count(&w2), 0);
        assert_eq!(Weak::weak_count(&w2), Some(1));
        drop(w2);
    }
    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }
    #[test]
    fn into_from_raw() {
        let x = Arc::new(box "hello");
        let y = x.clone();

        let x_ptr = Arc::into_raw(x);
        drop(y);
        unsafe {
            assert_eq!(**x_ptr, "hello");

            let x = Arc::from_raw(x_ptr);
            assert_eq!(**x, "hello");

            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
        }
    }
    #[test]
    fn test_into_from_raw_unsized() {
        use std::fmt::Display;
        use std::string::ToString;

        let arc: Arc<str> = Arc::from("foo");

        let ptr = Arc::into_raw(arc.clone());
        let arc2 = unsafe { Arc::from_raw(ptr) };

        assert_eq!(unsafe { &*ptr }, "foo");
        assert_eq!(arc, arc2);

        let arc: Arc<dyn Display> = Arc::new(123);

        let ptr = Arc::into_raw(arc.clone());
        let arc2 = unsafe { Arc::from_raw(ptr) };

        assert_eq!(unsafe { &*ptr }.to_string(), "123");
        assert_eq!(arc2.to_string(), "123");
    }
    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }
    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;
        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }
    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }
    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }
    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }
    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }
    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }
    #[test]
    fn test_strong_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }
    #[test]
    fn test_weak_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }
    #[test]
    fn show_arc() {
        let a = Arc::new(5);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo;
    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }
    #[test]
    fn test_from_owned() {
        let foo = 123;
        let foo_arc = Arc::from(foo);
        assert!(123 == *foo_arc);
    }
    #[test]
    fn test_new_weak() {
        let foo: Weak<usize> = Weak::new();
        assert!(foo.upgrade().is_none());
    }
    #[test]
    fn test_ptr_eq() {
        let five = Arc::new(5);
        let same_five = five.clone();
        let other_five = Arc::new(5);

        assert!(Arc::ptr_eq(&five, &same_five));
        assert!(!Arc::ptr_eq(&five, &other_five));
    }
    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    #[cfg(not(miri))] // Miri does not support threads
    fn test_weak_count_locked() {
        let mut a = Arc::new(atomic::AtomicBool::new(false));
        let a2 = a.clone();
        let t = thread::spawn(move || {
            for _i in 0..1000000 {
                Arc::get_mut(&mut a);
            }
            a.store(true, SeqCst);
        });

        while !a2.load(SeqCst) {
            let n = Arc::weak_count(&a2);
            assert!(n < 2, "bad weak count: {}", n);
        }
        t.join().unwrap();
    }
    #[test]
    fn test_from_str() {
        let r: Arc<str> = Arc::from("foo");

        assert_eq!(&r[..], "foo");
    }
    #[test]
    fn test_copy_from_slice() {
        let s: &[u32] = &[1, 2, 3];
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }
    #[test]
    fn test_clone_from_slice() {
        #[derive(Clone, Debug, Eq, PartialEq)]
        struct X(u32);

        let s: &[X] = &[X(1), X(2), X(3)];
        let r: Arc<[X]> = Arc::from(s);

        assert_eq!(&r[..], s);
    }
    #[test]
    #[should_panic]
    fn test_clone_from_slice_panic() {
        use std::string::{String, ToString};

        struct Fail(u32, String);

        impl Clone for Fail {
            fn clone(&self) -> Fail {
                if self.0 == 2 {
                    panic!();
                }
                Fail(self.0, self.1.clone())
            }
        }

        let s = [
            Fail(0, "foo".to_string()),
            Fail(1, "bar".to_string()),
            Fail(2, "baz".to_string()),
        ];

        // Should panic, but not cause memory corruption
        let _r: Arc<[Fail]> = Arc::from(&s[..]);
    }
    #[test]
    fn test_from_box() {
        let b: Box<u32> = box 123;
        let r: Arc<u32> = Arc::from(b);

        assert_eq!(*r, 123);
    }
    #[test]
    fn test_from_box_str() {
        use std::string::String;

        let s = String::from("foo").into_boxed_str();
        let r: Arc<str> = Arc::from(s);

        assert_eq!(&r[..], "foo");
    }
    #[test]
    fn test_from_box_slice() {
        let s = vec![1, 2, 3].into_boxed_slice();
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }
    #[test]
    fn test_from_box_trait() {
        use std::fmt::Display;
        use std::string::ToString;

        let b: Box<dyn Display> = box 123;
        let r: Arc<dyn Display> = Arc::from(b);

        assert_eq!(r.to_string(), "123");
    }
    #[test]
    fn test_from_box_trait_zero_sized() {
        use std::fmt::Debug;

        let b: Box<dyn Debug> = box ();
        let r: Arc<dyn Debug> = Arc::from(b);

        assert_eq!(format!("{:?}", r), "()");
    }
    #[test]
    fn test_from_vec() {
        let v = vec![1, 2, 3];
        let r: Arc<[u32]> = Arc::from(v);

        assert_eq!(&r[..], [1, 2, 3]);
    }
    #[test]
    fn test_downcast() {
        use std::any::Any;

        let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::max_value());
        let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");

        assert!(r1.clone().downcast::<u32>().is_err());

        let r1i32 = r1.downcast::<i32>();
        assert!(r1i32.is_ok());
        assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value()));

        assert!(r2.clone().downcast::<i32>().is_err());

        let r2str = r2.downcast::<&'static str>();
        assert!(r2str.is_ok());
        assert_eq!(r2str.unwrap(), Arc::new("abc"));
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Arc<T> { }
/// Computes the offset of the data field within `ArcInner`.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
    // Align the unsized value to the end of the `ArcInner`.
    // Because it is `?Sized`, it will always be the last field in memory.
    let align = align_of_val(&*ptr);
    let layout = Layout::new::<ArcInner<()>>();
    (layout.size() + layout.padding_needed_for(align)) as isize
}
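
// A worked example of the computation above (illustrative; the exact numbers
// are target-dependent): on a typical 64-bit target `ArcInner<()>` is two
// `AtomicUsize` counters, i.e. size 16 with alignment 8. For data whose
// alignment is <= 8, no padding is needed and the data offset is 16; for
// data with alignment 32, the offset is rounded up to 32.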
/// Computes the offset of the data field within `ArcInner`.
///
/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
fn data_offset_sized<T>() -> isize {
    let align = align_of::<T>();
    let layout = Layout::new::<ArcInner<()>>();
    (layout.size() + layout.padding_needed_for(align)) as isize
}