#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html

use core::any::Any;
use core::array::LengthAtMost32;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::{self, Ordering};
use core::iter;
use core::intrinsics::abort;
use core::mem::{self, align_of, align_of_val, size_of_val};
use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::marker::{Unpin, Unsize, PhantomData};
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::{From, TryFrom};
use core::slice::{self, from_raw_parts_mut};

use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
use crate::boxed::Box;
use crate::rc::is_dangling;
use crate::string::String;
use crate::vec::Vec;
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
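///
/// For example (a minimal sketch of the compile-time check; the closure body
/// is illustrative):
///
/// ```compile_fail
/// use std::rc::Rc;
/// use std::thread;
///
/// let rc = Rc::new(5);
/// // `Rc<i32>` is not `Send`, so the compiler rejects moving it into
/// // another thread:
/// thread::spawn(move || println!("{}", rc));
/// ```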
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
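///
/// A sketch of how that plays out in practice (the compiler rejects this):
///
/// ```compile_fail
/// use std::cell::RefCell;
/// use std::sync::Arc;
/// use std::thread;
///
/// let shared = Arc::new(RefCell::new(5));
/// // `RefCell<i32>` is not `Sync`, so `Arc<RefCell<i32>>` is not `Send`
/// // and cannot be moved into another thread:
/// thread::spawn(move || { *shared.borrow_mut() += 1; });
/// ```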
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
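///
/// For example, a shared counter guarded by a [`Mutex`][mutex] (a minimal
/// sketch of the pattern):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..4).map(|_| {
///     let counter = Arc::clone(&counter);
///     thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     })
/// }).collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(4, *counter.lock().unwrap());
/// ```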
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// pointers from children back to their parents.
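///
/// A minimal sketch of that parent/child shape (the field names here are
/// illustrative):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The child reaches its parent through the `Weak` pointer, and no
/// // strong cycle is formed, so both nodes can still be freed.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```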
///
/// # Cloning references
///
/// Creating a new reference from an existing reference counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
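///
/// To reach the value through a [`Weak<T>`][weak], it first has to be
/// [`upgrade`][upgrade]d (a small sketch of that pattern):
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(5);
/// let my_weak = Arc::downgrade(&my_arc);
///
/// // Upgrading yields an `Option<Arc<T>>` through which the value is reachable.
/// assert_eq!(*my_weak.upgrade().unwrap(), 5);
/// ```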
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: #method.clone
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
#[cfg_attr(not(test), lang = "arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
impl<T: ?Sized> Arc<T> {
    fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        Self {
            ptr,
            phantom: PhantomData,
        }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        Self::from_inner(NonNull::new_unchecked(ptr))
    }
}
/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
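///
/// For example (a minimal sketch of this behavior):
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new("hello".to_owned());
/// let weak = Arc::downgrade(&strong);
///
/// drop(strong);
/// // The value has been dropped, so the `Weak` can no longer be upgraded...
/// assert!(weak.upgrade().is_none());
/// // ...but the `Weak` itself remains valid to use and to drop.
/// ```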
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn't need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because `ArcInner` has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}
struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Self::from_inner(Box::into_raw_non_null(x))
    }
    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }
    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
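    ///
    /// For example (an illustrative sketch):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// // The value can still be read through the `Pin`.
    /// assert_eq!(*pinned, 5);
    /// ```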
    #[stable(feature = "pin", since = "1.33.0")]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }
    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// [result]: ../../std/result/enum.Result.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}
impl<T> Arc<[T]> {
    /// Constructs a new reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_slice(len))
        }
    }
}
impl<T> Arc<mem::MaybeUninit<T>> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast())
    }
}
impl<T> Arc<[mem::MaybeUninit<T>]> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _)
    }
}
impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`][from_raw].
    ///
    /// [from_raw]: struct.Arc.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr: *const T = &*this;
        mem::forget(this);
        ptr
    }
    /// Constructs an `Arc` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc::into_raw`][into_raw].
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double-free may occur if the function is called twice on the same raw pointer.
    ///
    /// [into_raw]: struct.Arc.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        let offset = data_offset(ptr);

        // Reverse the offset to find the original ArcInner.
        let fake_ptr = ptr as *mut ArcInner<T>;
        let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));

        Self::from_ptr(arc_ptr)
    }
    /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(rc_into_raw_non_null)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let ptr = Arc::into_raw_non_null(x);
    /// let deref = unsafe { ptr.as_ref() };
    /// assert_eq!(deref, "hello");
    /// ```
    #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
    #[inline]
    pub fn into_raw_non_null(this: Self) -> NonNull<T> {
        // safe because Arc guarantees its pointer is non-null
        unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) }
    }
    /// Creates a new [`Weak`][weak] pointer to this allocation.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // itself.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }
    /// Gets the number of [`Weak`][weak] pointers to this allocation.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }
    /// Gets the number of strong (`Arc`) pointers to this allocation.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut self.ptr.as_mut().data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
        }
    }
    #[inline]
    #[stable(feature = "ptr_eq", since = "1.17.0")]
    /// Returns `true` if the two `Arc`s point to the same allocation
    /// (in a vein similar to [`ptr::eq`]).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    ///
    /// [`ptr::eq`]: ../../std/ptr/fn.eq.html
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}
impl<T: ?Sized> Arc<T> {
    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
    unsafe fn allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>
    ) -> *mut ArcInner<T> {
        // Calculate layout using the given value layout.
        // Previously, layout was calculated on the expression
        // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
        // reference (see #54908).
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout).unwrap().0
            .pad_to_align().unwrap();

        let mem = Global.alloc(layout)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Initialize the ArcInner
        let inner = mem_to_arcinner(mem.as_ptr());
        debug_assert_eq!(Layout::for_value(&*inner), layout);

        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));

        inner
    }
    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Allocate for the `ArcInner<T>` using the given value.
        Self::allocate_for_layout(
            Layout::for_value(&*ptr),
            |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
        )
    }

    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let box_unique = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size);

            // Free the allocation without dropping its contents
            box_free(box_unique);

            Self::from_ptr(ptr)
        }
    }
}
impl<T> Arc<[T]> {
    /// Allocates an `ArcInner<[T]>` with the given length.
    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
        Self::allocate_for_layout(
            Layout::array::<T>(len).unwrap(),
            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
        )
    }
}
/// Sets the data pointer of a `?Sized` raw pointer.
///
/// For a slice/trait object, this sets the `data` field and leaves the rest
/// unchanged. For a sized raw pointer, this simply sets the pointer.
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    ptr
}
impl<T> Arc<[T]> {
    /// Copy elements from slice into newly allocated Arc<[T]>
    ///
    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        let ptr = Self::allocate_for_slice(v.len());

        ptr::copy_nonoverlapping(
            v.as_ptr(),
            &mut (*ptr).data as *mut [T] as *mut T,
            v.len());

        Self::from_ptr(ptr)
    }

    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
    ///
    /// Behavior is undefined should the size be wrong.
    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.dealloc(self.mem.cast(), self.layout);
                }
            }
        }

        let ptr = Self::allocate_for_slice(len);

        let mem = ptr as *mut _ as *mut u8;
        let layout = Layout::for_value(&*ptr);

        // Pointer to first element
        let elems = &mut (*ptr).data as *mut [T] as *mut T;

        let mut guard = Guard {
            mem: NonNull::new_unchecked(mem),
            elems,
            layout,
            n_elems: 0,
        };

        for (i, item) in iter.enumerate() {
            ptr::write(elems.add(i), item);
            guard.n_elems += 1;
        }

        // All clear. Forget the guard so it doesn't free the new ArcInner.
        mem::forget(guard);

        Self::from_ptr(ptr)
    }
}
/// Specialization trait used for `From<&[T]>`.
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        unsafe {
            Self::from_iter_exact(v.iter().cloned(), v.len())
        }
    }
}

impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same allocation, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let _ = Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Self::from_inner(self.ptr)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Arc<T> {}
impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`][weak] pointers to the same allocation,
    /// then `make_mut` will create a new allocation and invoke [`clone`][clone] on the inner value
    /// to ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// Note that this differs from the behavior of [`Rc::make_mut`] which disassociates
    /// any remaining `Weak` pointers.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [weak]: struct.Weak.html
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    /// [get_mut]: struct.Arc.html#method.get_mut
    /// [`Rc::make_mut`]: ../rc/struct.Rc.html#method.make_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different allocations.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            &mut this.ptr.as_mut().data
        }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference into the given `Arc`, if there are
    /// no other `Arc` or [`Weak`][weak] pointers to the same allocation.
    ///
    /// Returns [`None`][option] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when there are other pointers.
    ///
    /// [weak]: struct.Weak.html
    /// [option]: ../../std/option/enum.Option.html
    /// [make_mut]: struct.Arc.html#method.make_mut
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                Some(Arc::get_mut_unchecked(this))
            }
        } else {
            None
        }
    }
    /// Returns a mutable reference into the given `Arc`,
    /// without any check.
    ///
    /// See also [`get_mut`], which is safe and does appropriate checks.
    ///
    /// [`get_mut`]: struct.Arc.html#method.get_mut
    ///
    /// # Safety
    ///
    /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
    /// for the duration of the returned borrow.
    /// This is trivially the case if no such pointers exist,
    /// for example immediately after `Arc::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(String::new());
    /// unsafe {
    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
    /// }
    /// assert_eq!(*x, "foo");
    /// ```
    ///
    /// [`Weak`]: struct.Weak.html
    #[inline]
    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
        &mut this.ptr.as_mut().data
    }
    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
            // counter in `drop` -- the only access that happens when any but the last reference
            // is being dropped.
            let unique = self.inner().strong.load(Acquire) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`], so we `drop` the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo  = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo);    // Doesn't print anything
    /// drop(foo2);   // Prints "dropped!"
    /// ```
    ///
    /// [`Weak`]: ../../std/sync/struct.Weak.html
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}
impl Arc<dyn Any + Send + Sync> {
    #[inline]
    #[stable(feature = "rc_downcast", since = "1.29.0")]
    /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    /// use std::sync::Arc;
    ///
    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// let my_string = "Hello World".to_string();
    /// print_if_string(Arc::new(my_string));
    /// print_if_string(Arc::new(0i8));
    /// ```
    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
    where
        T: Any + Send + Sync + 'static,
    {
        if (*self).is::<T>() {
            let ptr = self.ptr.cast::<ArcInner<T>>();
            mem::forget(self);
            Ok(Arc::from_inner(ptr))
        } else {
            Err(self)
        }
    }
}
impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating any memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        Weak {
            ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"),
        }
    }
    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
    ///
    /// It is up to the caller to ensure that the object is still alive when accessing it through
    /// the pointer.
    ///
    /// The pointer may be [`null`] or be dangling in case the object has already been destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::Arc;
    /// use std::ptr;
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// // Both point to the same object
    /// assert!(ptr::eq(&*strong, weak.as_raw()));
    /// // The strong here keeps it alive, so we can still access the object.
    /// assert_eq!("hello", unsafe { &*weak.as_raw() });
    ///
    /// drop(strong);
    /// // But not any more. We can do weak.as_raw(), but accessing the pointer would lead to
    /// // undefined behaviour.
    /// // assert_eq!("hello", unsafe { &*weak.as_raw() });
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn as_raw(&self) -> *const T {
        match self.inner() {
            None => ptr::null(),
            Some(inner) => {
                let offset = data_offset_sized::<T>();
                let ptr = inner as *const ArcInner<T>;
                // Note: while the pointer we create may already point to dropped value, the
                // allocation still lives (it must hold the weak count as long as we are alive).
                // Therefore, the offset is OK to do, it won't get out of the allocation.
                let ptr = unsafe { (ptr as *const u8).offset(offset) };
                ptr as *const T
            }
        }
    }
    /// Consumes the `Weak<T>` and turns it into a raw pointer.
    ///
    /// This converts the weak pointer into a raw pointer, preserving the original weak count. It
    /// can be turned back into the `Weak<T>` with [`from_raw`].
    ///
    /// The same restrictions of accessing the target of the pointer as with
    /// [`as_raw`] apply.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// let raw = weak.into_raw();
    ///
    /// assert_eq!(1, Arc::weak_count(&strong));
    /// assert_eq!("hello", unsafe { &*raw });
    ///
    /// drop(unsafe { Weak::from_raw(raw) });
    /// assert_eq!(0, Arc::weak_count(&strong));
    /// ```
    ///
    /// [`from_raw`]: struct.Weak.html#method.from_raw
    /// [`as_raw`]: struct.Weak.html#method.as_raw
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn into_raw(self) -> *const T {
        let result = self.as_raw();
        mem::forget(self);
        result
    }
    /// Converts a raw pointer previously created by [`into_raw`] back into
    /// `Weak<T>`.
    ///
    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
    ///
    /// It takes ownership of one weak count. In case a [`null`] is passed, a dangling [`Weak`] is
    /// returned.
    ///
    /// # Safety
    ///
    /// The pointer must represent one valid weak count. In other words, it must point to `T` which
    /// is or *was* managed by an [`Arc`] and the weak count of that [`Arc`] must not have reached
    /// 0. It is allowed for the strong count to be 0.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    ///
    /// let raw_1 = Arc::downgrade(&strong).into_raw();
    /// let raw_2 = Arc::downgrade(&strong).into_raw();
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    /// [`into_raw`]: struct.Weak.html#method.into_raw
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`Weak`]: struct.Weak.html
    /// [`Arc`]: struct.Arc.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        if ptr.is_null() {
            Self::new()
        } else {
            // See Arc::from_raw for details
            let offset = data_offset(ptr);
            let fake_ptr = ptr as *mut ArcInner<T>;
            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
            Weak {
                ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"),
            }
        }
    }
}
impl<T: ?Sized> Weak<T> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
    /// dropping of the inner value if successful.
    ///
    /// Returns [`None`] if the inner value has since been dropped.
    ///
    /// [`Arc`]: struct.Arc.html
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner()?;

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
                Err(old) => n = old,
            }
        }
    }
    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0.
    ///
    /// [`Weak::new`]: #method.new
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn strong_count(&self) -> usize {
        if let Some(inner) = self.inner() {
            inner.strong.load(SeqCst)
        } else {
            0
        }
    }
    /// Gets an approximation of the number of `Weak` pointers pointing to this
    /// allocation.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0. If not,
    /// the returned value is at least 1, since `self` still points to the
    /// allocation.
    ///
    /// # Accuracy
    ///
    /// Due to implementation details, the returned value can be off by 1 in
    /// either direction when other threads are manipulating any `Arc`s or
    /// `Weak`s pointing to the same allocation.
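    ///
    /// # Examples
    ///
    /// A single-threaded sketch, where the count is exact:
    ///
    /// ```
    /// #![feature(weak_counts)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// // One real `Weak` exists; the implicit weak count held by the
    /// // strong pointers is not included.
    /// assert_eq!(Some(1), weak_five.weak_count());
    /// ```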
    ///
    /// [`Weak::new`]: #method.new
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn weak_count(&self) -> Option<usize> {
        // Due to the implicit weak pointer added when any strong pointers are
        // around, we cannot implement `weak_count` correctly since it
        // necessarily requires accessing the strong count and weak count in an
        // unsynchronized fashion. So this version is a bit racy.
        self.inner().map(|inner| {
            let strong = inner.strong.load(SeqCst);
            let weak = inner.weak.load(SeqCst);

            if strong == 0 {
                // If the last `Arc` has *just* been dropped, it might not yet
                // have removed the implicit weak count, so the value we get
                // here might be 1 too high.
                weak
            } else {
                // As long as there's still at least 1 `Arc` around, subtract
                // the implicit weak pointer.
                // Note that the last `Arc` might get dropped between the 2
                // loads we do above, removing the implicit weak pointer. This
                // means that the value might be 1 too low here. In order to not
                // return 0 here (which would happen if we're the only weak
                // pointer), we guard against that specifically.
                cmp::max(1, weak - 1)
            }
        })
    }
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
    #[inline]
    fn inner(&self) -> Option<&ArcInner<T>> {
        if is_dangling(self.ptr) {
            None
        } else {
            Some(unsafe { self.ptr.as_ref() })
        }
    }
    /// Returns `true` if the two `Weak`s point to the same allocation (similar to
    /// [`ptr::eq`]), or if both don't point to any allocation
    /// (because they were created with `Weak::new()`).
    ///
    /// # Notes
    ///
    /// Since this compares pointers it means that two `Weak`s created by
    /// `Weak::new()` will compare equal to each other, even though they don't
    /// point to any allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let first_rc = Arc::new(5);
    /// let first = Arc::downgrade(&first_rc);
    /// let second = Arc::downgrade(&first_rc);
    ///
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(5);
    /// let third = Arc::downgrade(&third_rc);
    ///
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// Comparing `Weak::new`:
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let first = Weak::new();
    /// let second = Weak::new();
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(());
    /// let third = Arc::downgrade(&third_rc);
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// [`ptr::eq`]: ../../std/ptr/fn.eq.html
    #[inline]
    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        self.ptr.as_ptr() == other.ptr.as_ptr()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// let _ = Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return Weak { ptr: self.ptr };
        };
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case.)
        let old_size = inner.weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Weak { ptr: self.ptr }
    }
}
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    /// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo);   // Doesn't print anything
    /// drop(foo);        // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return
        };

        if inner.weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe {
                Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
            }
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq> {
    fn eq(&self, other: &Arc<T>) -> bool;
    fn ne(&self, other: &Arc<T>) -> bool;
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    default fn eq(&self, other: &Arc<T>) -> bool {
        **self == **other
    }

    #[inline]
    default fn ne(&self, other: &Arc<T>) -> bool {
        **self != **other
    }
}

/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values, that are slow to clone, but also heavy to check for equality, causing this
/// cost to pay off more easily. It's also more likely to have two `Arc` clones, that point to
/// the same value, than two `&T`s.
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
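///
/// For instance (an illustrative sketch): floats are `PartialEq` but not `Eq`
/// because `NaN != NaN`, so the pointer shortcut would give the wrong answer
/// for two handles to the same allocation:
///
/// ```
/// use std::sync::Arc;
///
/// let nan = Arc::new(std::f64::NAN);
/// let same_nan = Arc::clone(&nan);
/// // Equality goes through the values, so a NaN is unequal to itself even
/// // though both `Arc`s point to the same allocation:
/// assert!(nan != same_nan);
/// ```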
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        Arc::ptr_eq(self, other) || **self == **other
    }

    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        !Arc::ptr_eq(self, other) && **self != **other
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are always equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::eq(self, other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same value are never unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::ne(self, other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}
1934 impl<T: Default> Default for Arc<T> {
1935 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
1940 /// use std::sync::Arc;
1942 /// let x: Arc<i32> = Default::default();
1943 /// assert_eq!(*x, 0);
1945 fn default() -> Arc<T> {
1946 Arc::new(Default::default())
1950 #[stable(feature = "rust1", since = "1.0.0")]
1951 impl<T: ?Sized + Hash> Hash for Arc<T> {
1952 fn hash<H: Hasher>(&self, state: &mut H) {
1953 (**self).hash(state)
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
    #[inline]
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // Allow the Vec to free its memory, but not destroy its contents
            v.set_len(0);

            arc
        }
    }
}
#[unstable(feature = "boxed_slice_try_from", issue = "0")]
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]>
where
    [T; N]: LengthAtMost32,
{
    type Error = Arc<[T]>;

    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
        } else {
            Err(boxed_slice)
        }
    }
}
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> iter::FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
        ArcFromIter::from_iter(iter.into_iter())
    }
}
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ArcFromIter<T, I> {
    fn from_iter(iter: I) -> Self;
}

impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
    default fn from_iter(iter: I) -> Self {
        iter.collect::<Vec<T>>().into()
    }
}

impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
    default fn from_iter(iter: I) -> Self {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = iter.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low,
                high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact length and we have.
                Arc::from_iter_exact(iter, low)
            }
        } else {
            // Fall back to normal implementation.
            iter.collect::<Vec<T>>().into()
        }
    }
}

impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
        //
        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
        // which is even more performant.
        //
        // In the fall-back case we have `T: Clone`. This is still better
        // than the `TrustedLen` implementation as slices have a known length
        // and so we get to avoid calling `size_hint` and avoid the branching.
        iter.as_slice().into()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Arc<T> { }
/// Computes the offset of the data field within `ArcInner`.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
    // Align the unsized value to the end of the `ArcInner`.
    // Because it is `?Sized`, it will always be the last field in memory.
    data_offset_align(align_of_val(&*ptr))
}

/// Computes the offset of the data field within `ArcInner`.
///
/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
fn data_offset_sized<T>() -> isize {
    data_offset_align(align_of::<T>())
}

#[inline]
fn data_offset_align(align: usize) -> isize {
    let layout = Layout::new::<ArcInner<()>>();
    (layout.size() + layout.padding_needed_for(align)) as isize
}
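
// A minimal sanity check of the offset arithmetic above (a sketch; the module
// name is arbitrary). Rust guarantees that a type's size is a multiple of its
// alignment, so for any `align` no larger than `ArcInner`'s own alignment the
// data begins exactly at `layout.size()`, right after the two counters.
#[cfg(test)]
mod data_offset_sanity {
    use super::*;

    #[test]
    fn data_starts_right_after_the_counters() {
        let layout = Layout::new::<ArcInner<()>>();
        // `ArcInner<()>` holds only the `strong` and `weak` counters, so for
        // example the data field of `ArcInner<u8>` begins at `layout.size()`.
        assert_eq!(data_offset_align(1) as usize, layout.size());
        assert_eq!(data_offset_align(layout.align()) as usize, layout.size());
    }
}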