#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::cmp::Ordering;
use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unpin, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw};
use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::Pin;
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::{box_free, WriteCloneIntoRaw};
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
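
// Note on the limit's value: using `isize::MAX` (rather than `usize::MAX`)
// leaves enough headroom that the overflow check in `Clone::clone` can run
// *after* the increment and still abort before the counter can wrap around;
// see the comment inside `Arc::clone` below.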

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in the Arc / Weak implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}
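
// Either definition is used the same way: `acquire!(x)` expands to
// `atomic::fence(Acquire)` on normal builds and to `x.load(Acquire)` under
// ThreadSanitizer. Both are intended to establish the same happens-before
// edge with the `Release` decrements of the reference counts in `drop`.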

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
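///
/// For instance, a counter shared between threads can pair `Arc` with a
/// [`Mutex`][mutex]. (A brief sketch; the counter here is purely
/// illustrative.)
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Lock the mutex to mutate the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```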
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
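///
/// A minimal sketch of that parent/child arrangement (the `Node` type is
/// purely illustrative):
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// struct Node {
///     // A child holds only a `Weak` pointer to its parent, so the
///     // parent/child pair does not form an ownership cycle.
///     parent: Weak<Node>,
///     value: i32,
/// }
///
/// let parent = Arc::new(Node { parent: Weak::new(), value: 1 });
/// let child = Arc::new(Node { parent: Arc::downgrade(&parent), value: 2 });
///
/// assert_eq!(child.parent.upgrade().unwrap().value, 1);
/// drop(parent);
/// assert!(child.parent.upgrade().is_none());
/// ```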
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [`Send`]: core::marker::Send
/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
}
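
// Note on the fields above: `ptr` points at the shared `ArcInner<T>`, and
// `phantom: PhantomData<ArcInner<T>>` informs the drop checker that dropping
// an `Arc<T>` may drop an `ArcInner<T>` (and therefore a `T`), even though
// the struct only stores a raw `NonNull` pointer.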

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        Self { ptr, phantom: PhantomData }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn't need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because `ArcInner` has alignment at least 2.
    // This is only possible when `T: Sized`; unsized `T` never dangle.
    ptr: NonNull<ArcInner<T>>,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

/// Calculate layout for `ArcInner<T>` using the inner value's layout
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}
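
// As a concrete illustration of the computation above: on a typical 64-bit
// target, `Layout::new::<ArcInner<()>>()` is (size 16, align 8) -- the two
// counters -- and extending it with `Layout::new::<u64>()` (size 8, align 8)
// places the value at offset 16 and yields (size 24, align 8) after padding.
// (Illustrative numbers only; the exact layout is target-dependent.)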

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Construct a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Return a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(0),
            weak: atomic::AtomicUsize::new(1),
            data: mem::MaybeUninit::<T>::uninit(),
        }))
        .into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(ptr::addr_of_mut!((*inner).data), data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviours
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            Arc::from_inner(init_ptr)
        };

        // Strong references should collectively own a shared weak reference,
        // so don't run the destructor for our old weak reference.
        mem::forget(weak);
        strong
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
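    ///
    /// # Examples
    ///
    /// A small example of constructing a pinned value:
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let pinned: Pin<Arc<u32>> = Arc::pin(5);
    /// // The value can still be read through the pinned pointer.
    /// assert_eq!(*pinned, 5);
    /// ```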
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
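    ///
    /// # Examples
    ///
    /// A minimal example (like [`Arc::try_new`], this requires the unstable
    /// `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```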
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit, allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit, allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            )?))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    // FIXME: when `Arc::into_inner` is stabilized, add this paragraph:
    /*
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// want to keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`] payload, like in the expression
    /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped:
    /// For instance if two threads execute this expression in parallel, then
    /// there is a race condition. The threads could first both check whether they
    /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
    /// both drop their `Arc` in the call to [`ok`][`Result::ok`],
    /// taking the strong count from two down to zero.
    */
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// The similar expression `Arc::try_unwrap(this).ok()` does not
    /// offer such a guarantee. See the last example below.
    //
    // FIXME: when `Arc::into_inner` is stabilized, add this to end
    // of the previous sentence:
    /*
    /// and the documentation of [`Arc::try_unwrap`].
    */
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// #![feature(arc_into_inner)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// #![feature(arc_into_inner)]
    ///
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    /// #    fn new() -> Self {
    /// #        LinkedList(None)
    /// #    }
    /// #    fn push(&mut self, x: T) {
    /// #        self.0 = Some(Arc::new(Node(x, self.0.take())));
    /// #    }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// for i in 0..100000 {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    //
    // FIXME: when `Arc::into_inner` is stabilized, adjust above documentation
    // and the documentation of `Arc::try_unwrap` according to the `FIXME`s. Also
    // open an issue on rust-lang/rust-clippy, asking for a lint against
    // `Arc::try_unwrap(...).ok()`.
    #[inline]
    #[unstable(feature = "arc_into_inner", issue = "106894")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn't called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.
        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };

        drop(Weak { ptr: this.ptr });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    ///
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }
}

impl<T> Arc<mem::MaybeUninit<T>> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        unsafe { Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) }
    }
}

impl<T> Arc<[mem::MaybeUninit<T>]> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[must_use = "losing the pointer will leak memory"]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr = Self::as_ptr(&this);
        mem::forget(this);
        ptr
    }

    /// Provides a raw pointer to the data.
    ///
    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as there are strong counts in the `Arc`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let y = Arc::clone(&x);
    /// let x_ptr = Arc::as_ptr(&x);
    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[must_use]
    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
    pub fn as_ptr(this: &Self) -> *const T {
        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);

        // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the Arc is recovered through `from_raw`.
        unsafe { ptr::addr_of_mut!((*ptr).data) }
    }

    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
    /// alignment as `T`. This is trivially true if `U` is `T`.
    /// Note that if `U` is not `T` but has the same size and alignment, this is
    /// basically like transmuting references of different types. See
    /// [`mem::transmute`][transmute] for more information on what
    /// restrictions apply in this case.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe {
            let offset = data_offset(ptr);

            // Reverse the offset to find the original ArcInner.
            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;

            Self::from_ptr(arc_ptr)
        }
    }

    /// Creates a new [`Weak`] pointer to this allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[must_use = "this returns a new `Weak` pointer, \
                  without modifying the original `Arc`"]
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // below.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                hint::spin_loop();
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }

    /// Gets the number of [`Weak`] pointers to this allocation.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(Acquire);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }

    /// Gets the number of strong (`Arc`) pointers to this allocation.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(Acquire)
    }

    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
        let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
        // Now increase refcount, but don't drop new refcount either
        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
    }

    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // Those assertions are deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     Arc::decrement_strong_count(ptr);
    ///     assert_eq!(1, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn decrement_strong_count(ptr: *const T) {
        unsafe { mem::drop(Arc::from_raw(ptr)) };
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we must not free the box
        // allocation itself (there might still be weak pointers lying around).
        unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };

        // Drop the weak ref collectively held by all strong references
        drop(Weak { ptr: self.ptr });
    }

    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
    /// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    ///
    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
    #[inline]
    #[must_use]
    #[stable(feature = "ptr_eq", since = "1.17.0")]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}

impl<T: ?Sized> Arc<T> {
    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
    #[cfg(not(no_global_oom_handling))]
    unsafe fn allocate_for_layout(
        value_layout: Layout,
        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> *mut ArcInner<T> {
        let layout = arcinner_layout_for_value_layout(value_layout);
        unsafe {
            Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
                .unwrap_or_else(|_| handle_alloc_error(layout))
        }
    }

    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided,
    /// returning an error if allocation fails.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
    unsafe fn try_allocate_for_layout(
        value_layout: Layout,
        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> Result<*mut ArcInner<T>, AllocError> {
        let layout = arcinner_layout_for_value_layout(value_layout);

        let ptr = allocate(layout)?;

        // Initialize the ArcInner
        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);

        unsafe {
            ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
            ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
        }

        Ok(inner)
    }

    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
    #[cfg(not(no_global_oom_handling))]
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Allocate for the `ArcInner<T>` using the given value.
        unsafe {
            Self::allocate_for_layout(
                Layout::for_value(&*ptr),
                |layout| Global.allocate(layout),
                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
            )
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let (box_unique, alloc) = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size,
            );

            // Free the allocation without dropping its contents
            box_free(box_unique, alloc);

            Self::from_ptr(ptr)
        }
    }
}

impl<T> Arc<[T]> {
    /// Allocates an `ArcInner<[T]>` with the given length.
    #[cfg(not(no_global_oom_handling))]
    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
        unsafe {
            Self::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate(layout),
                |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
            )
        }
    }

    /// Copy elements from slice into newly allocated `Arc<[T]>`
    ///
    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
    #[cfg(not(no_global_oom_handling))]
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        unsafe {
            let ptr = Self::allocate_for_slice(v.len());

            ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());

            Self::from_ptr(ptr)
        }
    }

    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
    ///
    /// Behavior is undefined should the size be wrong.
    #[cfg(not(no_global_oom_handling))]
    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.deallocate(self.mem, self.layout);
                }
            }
        }

        unsafe {
            let ptr = Self::allocate_for_slice(len);

            let mem = ptr as *mut _ as *mut u8;
            let layout = Layout::for_value(&*ptr);

            // Pointer to first element
            let elems = &mut (*ptr).data as *mut [T] as *mut T;

            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };

            for (i, item) in iter.enumerate() {
                ptr::write(elems.add(i), item);
                guard.n_elems += 1;
            }

            // All clear. Forget the guard so it doesn't free the new ArcInner.
            mem::forget(guard);

            Self::from_ptr(ptr)
        }
    }
}

/// Specialization trait used for `From<&[T]>`.
#[cfg(not(no_global_oom_handling))]
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

#[cfg(not(no_global_oom_handling))]
impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same allocation, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let _ = Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
        // Arcs. If we don't do this the count can overflow and users will use-after free. This
        // branch will never be taken in any realistic program. We abort because such a program is
        // incredibly degenerate, and we don't care to support it.
        //
        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
        // But we do that check *after* having done the increment, so there is a chance here that
        // the worst already happened and we actually do overflow the `usize` counter. However, that
        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
        // above and the `abort` below, which seems exceedingly unlikely.
        if old_size > MAX_REFCOUNT {
            abort();
        }

        unsafe { Self::from_inner(self.ptr) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

#[unstable(feature = "receiver_trait", issue = "none")]
impl<T: ?Sized> Receiver for Arc<T> {}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
    /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
    /// referred to as clone-on-write.
    ///
    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
    /// be cloned.
    ///
    /// See also [`get_mut`], which will fail rather than cloning the inner value
    /// or dissociating [`Weak`] pointers.
    ///
    /// [`clone`]: Clone::clone
    /// [`get_mut`]: Arc::get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different allocations.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    ///
    /// [`Weak`] pointers will be dissociated:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(75);
    /// let weak = Arc::downgrade(&data);
    ///
    /// assert!(75 == *data);
    /// assert!(75 == *weak.upgrade().unwrap());
    ///
    /// *Arc::make_mut(&mut data) += 1;
    ///
    /// assert!(76 == *data);
    /// assert!(weak.upgrade().is_none());
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists, so we must clone.
            // Pre-allocate memory to allow writing the cloned value directly.
            let mut arc = Self::new_uninit();
            unsafe {
                let data = Arc::get_mut_unchecked(&mut arc);
                (**this).write_clone_into_raw(data.as_mut_ptr());
                *this = arc.assume_init();
            }
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let _weak = Weak { ptr: this.ptr };

            // Can just steal the data, all that's left is Weaks
            let mut arc = Self::new_uninit();
            unsafe {
                let data = Arc::get_mut_unchecked(&mut arc);
                data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
                ptr::write(this, arc.assume_init());
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe { Self::get_mut_unchecked(this) }
    }

    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
    /// clone.
    ///
    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_unwrap_or_clone)]
    /// # use std::{ptr, sync::Arc};
    /// let inner = String::from("test");
    /// let ptr = inner.as_ptr();
    ///
    /// let arc = Arc::new(inner);
    /// let inner = Arc::unwrap_or_clone(arc);
    /// // The inner value was not cloned
    /// assert!(ptr::eq(ptr, inner.as_ptr()));
    ///
    /// let arc = Arc::new(inner);
    /// let arc2 = arc.clone();
    /// let inner = Arc::unwrap_or_clone(arc);
    /// // Because there were 2 references, we had to clone the inner value.
    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
    /// // `arc2` is the last reference, so when we unwrap it we get back
    /// // the original `String`.
    /// let inner = Arc::unwrap_or_clone(arc2);
    /// assert!(ptr::eq(ptr, inner.as_ptr()));
    /// ```
    #[inline]
    #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
    pub fn unwrap_or_clone(this: Self) -> T {
        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference into the given `Arc`, if there are
    /// no other `Arc` or [`Weak`] pointers to the same allocation.
    ///
    /// Returns [`None`] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when there are other `Arc` pointers.
    ///
    /// [make_mut]: Arc::make_mut
    /// [clone]: Clone::clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe { Some(Arc::get_mut_unchecked(this)) }
        } else {
            None
        }
    }

    /// Returns a mutable reference into the given `Arc`,
    /// without any check.
    ///
    /// See also [`get_mut`], which is safe and does appropriate checks.
    ///
    /// [`get_mut`]: Arc::get_mut
    ///
    /// # Safety
    ///
    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
    /// they must not be dereferenced or have active borrows for the duration
    /// of the returned borrow, and their inner type must be exactly the same as the
    /// inner type of this Arc (including lifetimes). This is trivially the case if no
    /// such pointers exist, for example immediately after `Arc::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(String::new());
    /// unsafe {
    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
    /// }
    /// assert_eq!(*x, "foo");
    /// ```
    ///
    /// Other `Arc` pointers to the same allocation must be to the same type.
    /// ```no_run
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x: Arc<str> = Arc::from("Hello, world!");
    /// let mut y: Arc<[u8]> = x.clone().into();
    /// unsafe {
    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
    /// }
    /// println!("{}", &*x); // Invalid UTF-8 in a str
    /// ```
    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
    /// ```no_run
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x: Arc<&str> = Arc::new("Hello, world!");
    /// {
    ///     let s = String::from("Oh, no!");
    ///     let mut y: Arc<&str> = x.clone().into();
    ///     unsafe {
    ///         // this is Undefined Behavior, because x's inner type
    ///         // is &'long str, not &'short str
    ///         *Arc::get_mut_unchecked(&mut y) = &s;
    ///     }
    /// }
    /// println!("{}", &*x); // Use-after-free
    /// ```
    #[inline]
    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
        // We are careful to *not* create a reference covering the "count" fields, as
        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
        unsafe { &mut (*this.ptr.as_ptr()).data }
    }
1794 /// Determine whether this is the unique reference (including weak refs) to
1795 /// the underlying data.
1797 /// Note that this requires locking the weak ref count.
1798 fn is_unique(&mut self) -> bool {
1799 // lock the weak pointer count if we appear to be the sole weak pointer holder.
1802 // The acquire label here ensures a happens-before relationship with any
1803 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
1804 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
1805 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
1806 if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
1807 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
1808 // counter in `drop` -- the only access that happens when any but the last reference
1809 // is being dropped.
1810 let unique = self.inner().strong.load(Acquire) == 1;
1812 // The release write here synchronizes with a read in `downgrade`,
1813 // effectively preventing the above read of `strong` from happening after the write.
1815 self.inner().weak.store(1, Release); // release the lock
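// In other words, the weak count doubles as a tiny lock with two states:
//
//     weak == 1           unlocked: only the implicit weak reference exists
//     weak == usize::MAX  locked: `is_unique` is inspecting `strong`
//
// `downgrade` spins while it observes the locked value, so no new `Weak`
// can appear while the uniqueness check is in progress.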
1823 #[stable(feature = "rust1", since = "1.0.0")]
1824 unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
1825 /// Drops the `Arc`.
1827 /// This will decrement the strong reference count. If the strong reference
1828 /// count reaches zero then the only other references (if any) are
1829 /// [`Weak`], so we `drop` the inner value.
1834 /// use std::sync::Arc;
1838 /// impl Drop for Foo {
1839 /// fn drop(&mut self) {
1840 /// println!("dropped!");
1844 /// let foo = Arc::new(Foo);
1845 /// let foo2 = Arc::clone(&foo);
1847 /// drop(foo); // Doesn't print anything
1848 /// drop(foo2); // Prints "dropped!"
1851 fn drop(&mut self) {
1852 // Because `fetch_sub` is already atomic, we do not need to synchronize
1853 // with other threads unless we are going to delete the object. This
1854 // same logic applies to the below `fetch_sub` to the `weak` count.
1855 if self.inner().strong.fetch_sub(1, Release) != 1 {
1856 return;
1859 // This fence is needed to prevent reordering of use of the data and
1860 // deletion of the data. Because it is marked `Release`, the decreasing
1861 // of the reference count synchronizes with this `Acquire` fence. This
1862 // means that use of the data happens before decreasing the reference
1863 // count, which happens before this fence, which happens before the
1864 // deletion of the data.
1866 // As explained in the [Boost documentation][1],
1868 // > It is important to enforce any possible access to the object in one
1869 // > thread (through an existing reference) to *happen before* deleting
1870 // > the object in a different thread. This is achieved by a "release"
1871 // > operation after dropping a reference (any access to the object
1872 // > through this reference must obviously happened before), and an
1873 // > "acquire" operation before deleting the object.
1875 // In particular, while the contents of an Arc are usually immutable, it's
1876 // possible to have interior writes to something like a Mutex<T>. Since a
1877 // Mutex is not acquired when it is deleted, we can't rely on its
1878 // synchronization logic to make writes in thread A visible to a destructor
1879 // running in thread B.
1881 // Also note that the Acquire fence here could probably be replaced with an
1882 // Acquire load, which could improve performance in highly-contended
1883 // situations. See [2].
1885 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1886 // [2]: (https://github.com/rust-lang/rust/pull/41714)
1887 acquire!(self.inner().strong);
1889 unsafe {
1890 self.drop_slow();
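// Taken together, this is the classic atomic refcount teardown (a sketch
// with a hypothetical standalone `count: AtomicUsize`, not this module's API):
//
//     if count.fetch_sub(1, Release) != 1 {
//         return;              // other references remain
//     }
//     atomic::fence(Acquire);  // pairs with every Release decrement
//     // ...now it is provably exclusive and safe to drop the data...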
1895 impl Arc<dyn Any + Send + Sync> {
1896 /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
1901 /// use std::any::Any;
1902 /// use std::sync::Arc;
1904 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
1905 /// if let Ok(string) = value.downcast::<String>() {
1906 /// println!("String ({}): {}", string.len(), string);
1910 /// let my_string = "Hello World".to_string();
1911 /// print_if_string(Arc::new(my_string));
1912 /// print_if_string(Arc::new(0i8));
1915 #[stable(feature = "rc_downcast", since = "1.29.0")]
1916 pub fn downcast<T>(self) -> Result<Arc<T>, Self>
1918 T: Any + Send + Sync,
1920 if (*self).is::<T>() {
1922 let ptr = self.ptr.cast::<ArcInner<T>>();
1923 mem::forget(self);
1924 Ok(Arc::from_inner(ptr))
1931 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
1933 /// For a safe alternative see [`downcast`].
1938 /// #![feature(downcast_unchecked)]
1940 /// use std::any::Any;
1941 /// use std::sync::Arc;
1943 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
1946 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
1952 /// The contained value must be of type `T`. Calling this method
1953 /// with the incorrect type is *undefined behavior*.
1956 /// [`downcast`]: Self::downcast
1958 #[unstable(feature = "downcast_unchecked", issue = "90850")]
1959 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T>
1961 T: Any + Send + Sync,
1964 let ptr = self.ptr.cast::<ArcInner<T>>();
1965 mem::forget(self);
1966 Arc::from_inner(ptr)
1972 /// Constructs a new `Weak<T>`, without allocating any memory.
1973 /// Calling [`upgrade`] on the return value always gives [`None`].
1975 /// [`upgrade`]: Weak::upgrade
1980 /// use std::sync::Weak;
1982 /// let empty: Weak<i64> = Weak::new();
1983 /// assert!(empty.upgrade().is_none());
1985 #[stable(feature = "downgraded_weak", since = "1.10.0")]
1986 #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
1988 pub const fn new() -> Weak<T> {
1989 Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) } }
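// `usize::MAX` cannot be the address of a real `ArcInner` (it is not even
// aligned for one), which is what lets `is_dangling` recognize such a
// `Weak` later.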
1993 /// Helper type to allow accessing the reference counts without
1994 /// making any assertions about the data field.
1995 struct WeakInner<'a> {
1996 weak: &'a atomic::AtomicUsize,
1997 strong: &'a atomic::AtomicUsize,
2000 impl<T: ?Sized> Weak<T> {
2001 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2003 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2004 /// unaligned or even [`null`] otherwise.
2009 /// use std::sync::Arc;
2010 /// use std::ptr;
2012 /// let strong = Arc::new("hello".to_owned());
2013 /// let weak = Arc::downgrade(&strong);
2014 /// // Both point to the same object
2015 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2016 /// // The strong here keeps it alive, so we can still access the object.
2017 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2020 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2021 /// // undefined behaviour.
2022 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2025 /// [`null`]: core::ptr::null "ptr::null"
2027 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2028 pub fn as_ptr(&self) -> *const T {
2029 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2031 if is_dangling(ptr) {
2032 // If the pointer is dangling, we return the sentinel directly. This cannot be
2033 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2036 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2037 // The payload may be dropped at this point, and we have to maintain provenance,
2038 // so use raw pointer manipulation.
2039 unsafe { ptr::addr_of_mut!((*ptr).data) }
2043 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2045 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2046 /// one weak reference (the weak count is not modified by this operation). It can be turned
2047 /// back into the `Weak<T>` with [`from_raw`].
2049 /// The same restrictions of accessing the target of the pointer as with
2050 /// [`as_ptr`] apply.
2055 /// use std::sync::{Arc, Weak};
2057 /// let strong = Arc::new("hello".to_owned());
2058 /// let weak = Arc::downgrade(&strong);
2059 /// let raw = weak.into_raw();
2061 /// assert_eq!(1, Arc::weak_count(&strong));
2062 /// assert_eq!("hello", unsafe { &*raw });
2064 /// drop(unsafe { Weak::from_raw(raw) });
2065 /// assert_eq!(0, Arc::weak_count(&strong));
2068 /// [`from_raw`]: Weak::from_raw
2069 /// [`as_ptr`]: Weak::as_ptr
2070 #[must_use = "`self` will be dropped if the result is not used"]
2071 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2072 pub fn into_raw(self) -> *const T {
2073 let result = self.as_ptr();
2074 mem::forget(self);
2075 result
2078 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2080 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2081 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2083 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2084 /// as these don't own anything; the method still works on them).
2088 /// The pointer must have originated from [`into_raw`] and must still own its potential weak reference.
2091 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2092 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2093 /// count is not modified by this operation) and therefore it must be paired with a previous
2094 /// call to [`into_raw`].
2098 /// use std::sync::{Arc, Weak};
2100 /// let strong = Arc::new("hello".to_owned());
2102 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2103 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2105 /// assert_eq!(2, Arc::weak_count(&strong));
2107 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2108 /// assert_eq!(1, Arc::weak_count(&strong));
2112 /// // Decrement the last weak count.
2113 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2116 /// [`new`]: Weak::new
2117 /// [`into_raw`]: Weak::into_raw
2118 /// [`upgrade`]: Weak::upgrade
2119 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2120 pub unsafe fn from_raw(ptr: *const T) -> Self {
2121 // See Weak::as_ptr for context on how the input pointer is derived.
2123 let ptr = if is_dangling(ptr as *mut T) {
2124 // This is a dangling Weak.
2125 ptr as *mut ArcInner<T>
2127 // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
2128 // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
2129 let offset = unsafe { data_offset(ptr) };
2130 // Thus, we reverse the offset to get the whole ArcInner.
2131 // SAFETY: the pointer originated from a Weak, so this offset is safe.
2132 unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
2135 // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
2136 Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
2140 impl<T: ?Sized> Weak<T> {
2141 /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
2142 /// dropping of the inner value if successful.
2144 /// Returns [`None`] if the inner value has since been dropped.
2149 /// use std::sync::Arc;
2151 /// let five = Arc::new(5);
2153 /// let weak_five = Arc::downgrade(&five);
2155 /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
2156 /// assert!(strong_five.is_some());
2158 /// // Destroy all strong pointers.
2159 /// drop(strong_five);
2162 /// assert!(weak_five.upgrade().is_none());
2164 #[must_use = "this returns a new `Arc`, \
2165 without modifying the original weak pointer"]
2166 #[stable(feature = "arc_weak", since = "1.4.0")]
2167 pub fn upgrade(&self) -> Option<Arc<T>> {
2168 // We use a CAS loop to increment the strong count instead of a
2169 // fetch_add as this function should never take the reference count
2170 // from zero to one.
2171 self.inner()?
2172 .strong
2173 // Relaxed is fine for the failure case because we don't have any expectations about the new state.
2174 // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
2175 // value can be initialized after `Weak` references have already been created. In that case, we
2176 // expect to observe the fully initialized value.
2177 .fetch_update(Acquire, Relaxed, |n| {
2178 // Any write of 0 we can observe leaves the field in permanently zero state.
2179 if n == 0 {
2180 return None;
2182 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
2183 if n > MAX_REFCOUNT {
2184 abort();
2186 Some(n + 1)
2188 .ok()
2189 // null checked above
2190 .map(|_| unsafe { Arc::from_inner(self.ptr) })
2193 /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
2195 /// If `self` was created using [`Weak::new`], this will return 0.
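///
/// One way to observe this:
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(1, weak_five.strong_count());
///
/// drop(five);
/// assert_eq!(0, weak_five.strong_count());
///
/// let empty: Weak<i32> = Weak::new();
/// assert_eq!(0, empty.strong_count());
/// ```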
2197 #[stable(feature = "weak_counts", since = "1.41.0")]
2198 pub fn strong_count(&self) -> usize {
2199 if let Some(inner) = self.inner() { inner.strong.load(Acquire) } else { 0 }
2202 /// Gets an approximation of the number of `Weak` pointers pointing to this allocation.
2205 /// If `self` was created using [`Weak::new`], or if there are no remaining
2206 /// strong pointers, this will return 0.
2210 /// Due to implementation details, the returned value can be off by 1 in
2211 /// either direction when other threads are manipulating any `Arc`s or
2212 /// `Weak`s pointing to the same allocation.
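///
/// A small illustration:
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(1, weak_five.weak_count());
///
/// drop(five);
/// // No strong pointers remain, so this reads 0.
/// assert_eq!(0, weak_five.weak_count());
/// ```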
2214 #[stable(feature = "weak_counts", since = "1.41.0")]
2215 pub fn weak_count(&self) -> usize {
2216 self.inner()
2217 .map(|inner| {
2218 let weak = inner.weak.load(Acquire);
2219 let strong = inner.strong.load(Acquire);
2220 if strong == 0 {
2221 0
2223 // Since we observed that there was at least one strong pointer
2224 // after reading the weak count, we know that the implicit weak
2225 // reference (present whenever any strong references are alive)
2226 // was still around when we observed the weak count, and can
2227 // therefore safely subtract it.
2228 weak - 1
2231 .unwrap_or(0)
2234 /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
2235 /// (i.e., when this `Weak` was created by `Weak::new`).
2237 fn inner(&self) -> Option<WeakInner<'_>> {
2238 if is_dangling(self.ptr.as_ptr()) {
2241 // We are careful to *not* create a reference covering the "data" field, as
2242 // the field may be mutated concurrently (for example, if the last `Arc`
2243 // is dropped, the data field will be dropped in-place).
2245 let ptr = self.ptr.as_ptr();
2246 WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
2251 /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or if
2252 /// both don't point to any allocation (because they were created with `Weak::new()`). See [that
2253 /// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
2257 /// Since this compares pointers, two `Weak`s created by `Weak::new()` will
2258 /// compare equal to each other, even though they don't point to any allocation.
2263 /// use std::sync::Arc;
2265 /// let first_rc = Arc::new(5);
2266 /// let first = Arc::downgrade(&first_rc);
2267 /// let second = Arc::downgrade(&first_rc);
2269 /// assert!(first.ptr_eq(&second));
2271 /// let third_rc = Arc::new(5);
2272 /// let third = Arc::downgrade(&third_rc);
2274 /// assert!(!first.ptr_eq(&third));
2277 /// Comparing `Weak::new`.
2280 /// use std::sync::{Arc, Weak};
2282 /// let first = Weak::new();
2283 /// let second = Weak::new();
2284 /// assert!(first.ptr_eq(&second));
2286 /// let third_rc = Arc::new(());
2287 /// let third = Arc::downgrade(&third_rc);
2288 /// assert!(!first.ptr_eq(&third));
2291 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2294 #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
2295 pub fn ptr_eq(&self, other: &Self) -> bool {
2296 self.ptr.as_ptr() == other.ptr.as_ptr()
2300 #[stable(feature = "arc_weak", since = "1.4.0")]
2301 impl<T: ?Sized> Clone for Weak<T> {
2302 /// Makes a clone of the `Weak` pointer that points to the same allocation.
2307 /// use std::sync::{Arc, Weak};
2309 /// let weak_five = Arc::downgrade(&Arc::new(5));
2311 /// let _ = Weak::clone(&weak_five);
2314 fn clone(&self) -> Weak<T> {
2315 let inner = if let Some(inner) = self.inner() {
2318 return Weak { ptr: self.ptr };
2320 // See comments in Arc::clone() for why this is relaxed. This can use a
2321 // fetch_add (ignoring the lock) because the weak count is only locked
2322 // when there are *no other* weak pointers in existence. (So we can't be
2323 // running this code in that case).
2324 let old_size = inner.weak.fetch_add(1, Relaxed);
2326 // See comments in Arc::clone() for why we do this (for mem::forget).
2327 if old_size > MAX_REFCOUNT {
2328 abort();
2331 Weak { ptr: self.ptr }
2335 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2336 impl<T> Default for Weak<T> {
2337 /// Constructs a new `Weak<T>`, without allocating memory.
2338 /// Calling [`upgrade`] on the return value always gives [`None`].
2341 /// [`upgrade`]: Weak::upgrade
2346 /// use std::sync::Weak;
2348 /// let empty: Weak<i64> = Default::default();
2349 /// assert!(empty.upgrade().is_none());
2351 fn default() -> Weak<T> {
2352 Weak::new()
2356 #[stable(feature = "arc_weak", since = "1.4.0")]
2357 unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
2358 /// Drops the `Weak` pointer.
2363 /// use std::sync::{Arc, Weak};
2367 /// impl Drop for Foo {
2368 /// fn drop(&mut self) {
2369 /// println!("dropped!");
2373 /// let foo = Arc::new(Foo);
2374 /// let weak_foo = Arc::downgrade(&foo);
2375 /// let other_weak_foo = Weak::clone(&weak_foo);
2377 /// drop(weak_foo); // Doesn't print anything
2378 /// drop(foo); // Prints "dropped!"
2380 /// assert!(other_weak_foo.upgrade().is_none());
2382 fn drop(&mut self) {
2383 // If we find out that we were the last weak pointer, then it's time to
2384 // deallocate the data entirely. See the discussion in Arc::drop() about
2385 // the memory orderings.
2387 // It's not necessary to check for the locked state here, because the
2388 // weak count can only be locked if there was precisely one weak ref,
2389 // meaning that drop could only subsequently run ON that remaining weak
2390 // ref, which can only happen after the lock is released.
2391 let inner = if let Some(inner) = self.inner() { inner } else { return };
2393 if inner.weak.fetch_sub(1, Release) == 1 {
2394 acquire!(inner.weak);
2395 unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
2400 #[stable(feature = "rust1", since = "1.0.0")]
2401 trait ArcEqIdent<T: ?Sized + PartialEq> {
2402 fn eq(&self, other: &Arc<T>) -> bool;
2403 fn ne(&self, other: &Arc<T>) -> bool;
2406 #[stable(feature = "rust1", since = "1.0.0")]
2407 impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
2409 default fn eq(&self, other: &Arc<T>) -> bool {
2410 **self == **other
2413 default fn ne(&self, other: &Arc<T>) -> bool {
2414 **self != **other
2418 /// We're doing this specialization here, and not as a more general optimization on `&T`, because it
2419 /// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
2420 /// store large values that are slow to clone but are also expensive to compare for equality, causing this
2421 /// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
2422 /// the same value than two `&T`s.
2424 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
2425 #[stable(feature = "rust1", since = "1.0.0")]
2426 impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
2428 fn eq(&self, other: &Arc<T>) -> bool {
2429 Arc::ptr_eq(self, other) || **self == **other
2433 fn ne(&self, other: &Arc<T>) -> bool {
2434 !Arc::ptr_eq(self, other) && **self != **other
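// To see why the `Eq` bound matters, consider `f32`: its `PartialEq` is
// deliberately irreflexive for NaN, so the pointer shortcut would wrongly
// report two handles to the same NaN as equal. Because `f32` is not `Eq`,
// the default (non-shortcut) impl above is selected instead:
//
//     let x = Arc::new(f32::NAN);
//     assert!(x != x.clone()); // NaN != NaN must keep holding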
2438 #[stable(feature = "rust1", since = "1.0.0")]
2439 impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
2440 /// Equality for two `Arc`s.
2442 /// Two `Arc`s are equal if their inner values are equal, even if they are
2443 /// stored in different allocations.
2445 /// If `T` also implements `Eq` (implying reflexivity of equality),
2446 /// two `Arc`s that point to the same allocation are always equal.
2451 /// use std::sync::Arc;
2453 /// let five = Arc::new(5);
2455 /// assert!(five == Arc::new(5));
2458 fn eq(&self, other: &Arc<T>) -> bool {
2459 ArcEqIdent::eq(self, other)
2462 /// Inequality for two `Arc`s.
2464 /// Two `Arc`s are unequal if their inner values are unequal.
2466 /// If `T` also implements `Eq` (implying reflexivity of equality),
2467 /// two `Arc`s that point to the same value are never unequal.
2472 /// use std::sync::Arc;
2474 /// let five = Arc::new(5);
2476 /// assert!(five != Arc::new(6));
2479 fn ne(&self, other: &Arc<T>) -> bool {
2480 ArcEqIdent::ne(self, other)
2484 #[stable(feature = "rust1", since = "1.0.0")]
2485 impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
2486 /// Partial comparison for two `Arc`s.
2488 /// The two are compared by calling `partial_cmp()` on their inner values.
2493 /// use std::sync::Arc;
2494 /// use std::cmp::Ordering;
2496 /// let five = Arc::new(5);
2498 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
2500 fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
2501 (**self).partial_cmp(&**other)
2504 /// Less-than comparison for two `Arc`s.
2506 /// The two are compared by calling `<` on their inner values.
2511 /// use std::sync::Arc;
2513 /// let five = Arc::new(5);
2515 /// assert!(five < Arc::new(6));
2517 fn lt(&self, other: &Arc<T>) -> bool {
2518 *(*self) < *(*other)
2521 /// 'Less than or equal to' comparison for two `Arc`s.
2523 /// The two are compared by calling `<=` on their inner values.
2528 /// use std::sync::Arc;
2530 /// let five = Arc::new(5);
2532 /// assert!(five <= Arc::new(5));
2534 fn le(&self, other: &Arc<T>) -> bool {
2535 *(*self) <= *(*other)
2538 /// Greater-than comparison for two `Arc`s.
2540 /// The two are compared by calling `>` on their inner values.
2545 /// use std::sync::Arc;
2547 /// let five = Arc::new(5);
2549 /// assert!(five > Arc::new(4));
2551 fn gt(&self, other: &Arc<T>) -> bool {
2552 *(*self) > *(*other)
2555 /// 'Greater than or equal to' comparison for two `Arc`s.
2557 /// The two are compared by calling `>=` on their inner values.
2562 /// use std::sync::Arc;
2564 /// let five = Arc::new(5);
2566 /// assert!(five >= Arc::new(5));
2568 fn ge(&self, other: &Arc<T>) -> bool {
2569 *(*self) >= *(*other)
2572 #[stable(feature = "rust1", since = "1.0.0")]
2573 impl<T: ?Sized + Ord> Ord for Arc<T> {
2574 /// Comparison for two `Arc`s.
2576 /// The two are compared by calling `cmp()` on their inner values.
2581 /// use std::sync::Arc;
2582 /// use std::cmp::Ordering;
2584 /// let five = Arc::new(5);
2586 /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
2588 fn cmp(&self, other: &Arc<T>) -> Ordering {
2589 (**self).cmp(&**other)
2592 #[stable(feature = "rust1", since = "1.0.0")]
2593 impl<T: ?Sized + Eq> Eq for Arc<T> {}
2595 #[stable(feature = "rust1", since = "1.0.0")]
2596 impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
2597 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2598 fmt::Display::fmt(&**self, f)
2602 #[stable(feature = "rust1", since = "1.0.0")]
2603 impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
2604 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2605 fmt::Debug::fmt(&**self, f)
2609 #[stable(feature = "rust1", since = "1.0.0")]
2610 impl<T: ?Sized> fmt::Pointer for Arc<T> {
2611 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2612 fmt::Pointer::fmt(&(&**self as *const T), f)
2616 #[cfg(not(no_global_oom_handling))]
2617 #[stable(feature = "rust1", since = "1.0.0")]
2618 impl<T: Default> Default for Arc<T> {
2619 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
2624 /// use std::sync::Arc;
2626 /// let x: Arc<i32> = Default::default();
2627 /// assert_eq!(*x, 0);
2629 fn default() -> Arc<T> {
2630 Arc::new(Default::default())
2634 #[stable(feature = "rust1", since = "1.0.0")]
2635 impl<T: ?Sized + Hash> Hash for Arc<T> {
2636 fn hash<H: Hasher>(&self, state: &mut H) {
2637 (**self).hash(state)
2641 #[cfg(not(no_global_oom_handling))]
2642 #[stable(feature = "from_for_ptrs", since = "1.6.0")]
2643 impl<T> From<T> for Arc<T> {
2644 /// Converts a `T` into an `Arc<T>`.
2646 /// The conversion moves the value into a
2647 /// newly allocated `Arc`. It is equivalent to
2648 /// calling `Arc::new(t)`.
2652 /// # use std::sync::Arc;
2653 /// let x = 5;
2654 /// let arc = Arc::new(5);
2656 /// assert_eq!(Arc::from(x), arc);
2658 fn from(t: T) -> Self {
2659 Arc::new(t)
2663 #[cfg(not(no_global_oom_handling))]
2664 #[stable(feature = "shared_from_slice", since = "1.21.0")]
2665 impl<T: Clone> From<&[T]> for Arc<[T]> {
2666 /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
2671 /// # use std::sync::Arc;
2672 /// let original: &[i32] = &[1, 2, 3];
2673 /// let shared: Arc<[i32]> = Arc::from(original);
2674 /// assert_eq!(&[1, 2, 3], &shared[..]);
2677 fn from(v: &[T]) -> Arc<[T]> {
2678 <Self as ArcFromSlice<T>>::from_slice(v)
2682 #[cfg(not(no_global_oom_handling))]
2683 #[stable(feature = "shared_from_slice", since = "1.21.0")]
2684 impl From<&str> for Arc<str> {
2685 /// Allocate a reference-counted `str` and copy `v` into it.
2690 /// # use std::sync::Arc;
2691 /// let shared: Arc<str> = Arc::from("eggplant");
2692 /// assert_eq!("eggplant", &shared[..]);
2695 fn from(v: &str) -> Arc<str> {
2696 let arc = Arc::<[u8]>::from(v.as_bytes());
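// SAFETY: `str` has the same layout as `[u8]`, and these bytes came from a
// valid UTF-8 `&str`, so reinterpreting the allocation as `str` is sound.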
2697 unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
2701 #[cfg(not(no_global_oom_handling))]
2702 #[stable(feature = "shared_from_slice", since = "1.21.0")]
2703 impl From<String> for Arc<str> {
2704 /// Allocate a reference-counted `str` and copy `v` into it.
2709 /// # use std::sync::Arc;
2710 /// let unique: String = "eggplant".to_owned();
2711 /// let shared: Arc<str> = Arc::from(unique);
2712 /// assert_eq!("eggplant", &shared[..]);
2715 fn from(v: String) -> Arc<str> {
2716 Arc::from(&v[..])
2720 #[cfg(not(no_global_oom_handling))]
2721 #[stable(feature = "shared_from_slice", since = "1.21.0")]
2722 impl<T: ?Sized> From<Box<T>> for Arc<T> {
2723 /// Move a boxed object to a new, reference-counted allocation.
2728 /// # use std::sync::Arc;
2729 /// let unique: Box<str> = Box::from("eggplant");
2730 /// let shared: Arc<str> = Arc::from(unique);
2731 /// assert_eq!("eggplant", &shared[..]);
2734 fn from(v: Box<T>) -> Arc<T> {
2735 Arc::from_box(v)
2739 #[cfg(not(no_global_oom_handling))]
2740 #[stable(feature = "shared_from_slice", since = "1.21.0")]
2741 impl<T> From<Vec<T>> for Arc<[T]> {
2742 /// Allocate a reference-counted slice and move `v`'s items into it.
2747 /// # use std::sync::Arc;
2748 /// let unique: Vec<i32> = vec![1, 2, 3];
2749 /// let shared: Arc<[i32]> = Arc::from(unique);
2750 /// assert_eq!(&[1, 2, 3], &shared[..]);
2753 fn from(mut v: Vec<T>) -> Arc<[T]> {
2754 unsafe {
2755 let rc = Arc::copy_from_slice(&v);
2756 // Allow the Vec to free its memory, but not destroy its contents
2757 v.set_len(0);
2758 rc
2763 #[stable(feature = "shared_from_cow", since = "1.45.0")]
2764 impl<'a, B> From<Cow<'a, B>> for Arc<B>
2766 B: ToOwned + ?Sized,
2767 Arc<B>: From<&'a B> + From<B::Owned>,
2769 /// Create an atomically reference-counted pointer from
2770 /// a clone-on-write pointer by copying its content.
2775 /// # use std::sync::Arc;
2776 /// # use std::borrow::Cow;
2777 /// let cow: Cow<str> = Cow::Borrowed("eggplant");
2778 /// let shared: Arc<str> = Arc::from(cow);
2779 /// assert_eq!("eggplant", &shared[..]);
2782 fn from(cow: Cow<'a, B>) -> Arc<B> {
2783 match cow {
2784 Cow::Borrowed(s) => Arc::from(s),
2785 Cow::Owned(s) => Arc::from(s),
2790 #[stable(feature = "shared_from_str", since = "1.62.0")]
2791 impl From<Arc<str>> for Arc<[u8]> {
2792 /// Converts an atomically reference-counted string slice into a byte slice.
2797 /// # use std::sync::Arc;
2798 /// let string: Arc<str> = Arc::from("eggplant");
2799 /// let bytes: Arc<[u8]> = Arc::from(string);
2800 /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
2803 fn from(rc: Arc<str>) -> Self {
2804 // SAFETY: `str` has the same layout as `[u8]`.
2805 unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
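/// Converts a shared slice into a shared array of length `N`, handing the
/// slice back unchanged when the length does not match. A usage sketch:
///
/// ```
/// use std::sync::Arc;
/// use std::convert::TryFrom;
///
/// let slice: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
/// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
/// assert_eq!([1, 2, 3], *array);
/// ```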
2809 #[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
2810 impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
2811 type Error = Arc<[T]>;
2813 fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
2814 if boxed_slice.len() == N {
2815 Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
2822 #[cfg(not(no_global_oom_handling))]
2823 #[stable(feature = "shared_from_iter", since = "1.37.0")]
2824 impl<T> iter::FromIterator<T> for Arc<[T]> {
2825 /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
2827 /// # Performance characteristics
2829 /// ## The general case
2831 /// In the general case, collecting into `Arc<[T]>` is done by first
2832 /// collecting into a `Vec<T>`. That is, when writing the following:
2835 /// # use std::sync::Arc;
2836 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
2837 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2840 /// this behaves as if we wrote:
2843 /// # use std::sync::Arc;
2844 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
2845 /// .collect::<Vec<_>>() // The first set of allocations happens here.
2846 /// .into(); // A second allocation for `Arc<[T]>` happens here.
2847 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2850 /// This will allocate as many times as needed for constructing the `Vec<T>`
2851 /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
2853 /// ## Iterators of known length
2855 /// When your `Iterator` implements `TrustedLen` and is of an exact size,
2856 /// a single allocation will be made for the `Arc<[T]>`. For example:
2859 /// # use std::sync::Arc;
2860 /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
2861 /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
2863 fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
2864 ToArcSlice::to_arc_slice(iter.into_iter())
2868 /// Specialization trait used for collecting into `Arc<[T]>`.
2869 trait ToArcSlice<T>: Iterator<Item = T> + Sized {
2870 fn to_arc_slice(self) -> Arc<[T]>;
2873 #[cfg(not(no_global_oom_handling))]
2874 impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
2875 default fn to_arc_slice(self) -> Arc<[T]> {
2876 self.collect::<Vec<T>>().into()
2880 #[cfg(not(no_global_oom_handling))]
2881 impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
2882 fn to_arc_slice(self) -> Arc<[T]> {
2883 // This is the case for a `TrustedLen` iterator.
2884 let (low, high) = self.size_hint();
2885 if let Some(high) = high {
2886 debug_assert_eq!(
2887 low,
2888 high,
2889 "TrustedLen iterator's size hint is not exact: {:?}",
2890 (low, high)
2891 );
2893 unsafe {
2894 // SAFETY: We need to ensure that the iterator has an exact length and we have.
2895 Arc::from_iter_exact(self, low)
2898 // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
2899 // length exceeding `usize::MAX`.
2900 // The default implementation would collect into a vec which would panic.
2901 // Thus we panic here immediately without invoking `Vec` code.
2902 panic!("capacity overflow");
2907 #[stable(feature = "rust1", since = "1.0.0")]
2908 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
2909 fn borrow(&self) -> &T {
2910 &**self
2914 #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
2915 impl<T: ?Sized> AsRef<T> for Arc<T> {
2916 fn as_ref(&self) -> &T {
2917 &**self
2921 #[stable(feature = "pin", since = "1.33.0")]
2922 impl<T: ?Sized> Unpin for Arc<T> {}
2924 /// Get the offset within an `ArcInner` for the payload behind a pointer.
2928 /// The pointer must point to (and have valid metadata for) a previously
2929 /// valid instance of T, but the T is allowed to be dropped.
2930 unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
2931 // Align the unsized value to the end of the ArcInner.
2932 // Because ArcInner is repr(C), it will always be the last field in memory.
2933 // SAFETY: since the only unsized types possible are slices, trait objects,
2934 // and extern types, the input safety requirement is currently enough to
2935 // satisfy the requirements of align_of_val_raw; this is an implementation
2936 // detail of the language that must not be relied upon outside of std.
2937 unsafe { data_offset_align(align_of_val_raw(ptr)) }
2941 fn data_offset_align(align: usize) -> usize {
2942 let layout = Layout::new::<ArcInner<()>>();
2943 layout.size() + layout.padding_needed_for(align)
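// Worked example, assuming a 64-bit target: `ArcInner<()>` is just the two
// `AtomicUsize` counters, so `layout.size()` is 16. A payload with alignment
// at most 8 needs no padding (offset 16), while a 32-byte-aligned payload is
// padded up to offset 32.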
2946 #[stable(feature = "arc_error", since = "1.52.0")]
2947 impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
2948 #[allow(deprecated, deprecated_in_future)]
2949 fn description(&self) -> &str {
2950 core::error::Error::description(&**self)
2953 #[allow(deprecated)]
2954 fn cause(&self) -> Option<&dyn core::error::Error> {
2955 core::error::Error::cause(&**self)
2958 fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
2959 core::error::Error::source(&**self)
2962 fn provide<'a>(&'a self, req: &mut core::any::Demand<'a>) {
2963 core::error::Error::provide(&**self, req);