#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.

use core::any::Any;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
use core::iter;
use core::marker::{PhantomData, Unpin, Unsize};
use core::mem::{self, align_of_val, size_of_val};
use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};

use crate::alloc::{box_free, handle_alloc_error, AllocError, AllocRef, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
use crate::string::String;
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in the Arc / Weak implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}
/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` were always
/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
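///
/// For example, a counter shared between threads might pair the two like this
/// (a minimal sketch of that pairing, not part of the API):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     // Each thread owns its own `Arc` clone and locks the `Mutex` to mutate.
///     handles.push(thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(4, *counter.lock().unwrap());
/// ```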
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
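///
/// For example (a sketch of that layout; the `Node` type here is illustrative,
/// not part of the API):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The strong parent-to-child edge plus the weak child-to-parent edge
/// // do not form a strong cycle, so both nodes can eventually be freed.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```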
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
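///
/// To get at `T` through a [`Weak<T>`][Weak], it must first be upgraded back
/// into an `Arc`. A minimal sketch:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(5);
/// let my_weak = Arc::downgrade(&my_arc);
///
/// // There is no `*my_weak`; upgrade to a (temporary) `Arc` instead.
/// if let Some(strong) = my_weak.upgrade() {
///     assert_eq!(*strong, 5);
/// }
/// ```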
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [`Send`]: core::marker::Send
/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
impl<T: ?Sized> Arc<T> {
    fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        Self { ptr, phantom: PhantomData }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
    }
}
/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
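///
/// For example, a minimal sketch of that round trip:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new("hello".to_owned());
/// let weak = Arc::downgrade(&strong);
///
/// // The inner value is still alive, so `upgrade` succeeds.
/// assert_eq!("hello", *weak.upgrade().unwrap());
///
/// drop(strong);
/// // The inner value has been dropped; `upgrade` now returns `None`.
/// assert!(weak.upgrade().is_none());
/// ```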
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn't need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because `ArcInner` has alignment of at least 2.
    // This is only possible when `T: Sized`; unsized `T` never dangles.
    ptr: NonNull<ArcInner<T>>,
}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}
// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Self::from_inner(Box::leak(x).into())
    }
    /// Constructs a new `Arc<T>` using a weak reference to itself. Attempting
    /// to upgrade the weak reference before this function returns will result
    /// in a `None` value. However, the weak reference may be cloned freely and
    /// stored for use at a later time.
    ///
    /// # Examples
    /// ```
    /// #![feature(arc_new_cyclic)]
    /// #![allow(dead_code)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo {
    ///     me: Weak<Foo>,
    /// }
    ///
    /// let foo = Arc::new_cyclic(|me| Foo {
    ///     me: me.clone(),
    /// });
    /// ```
    #[unstable(feature = "arc_new_cyclic", issue = "75861")]
    pub fn new_cyclic(data_fn: impl FnOnce(&Weak<T>) -> T) -> Arc<T> {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let uninit_ptr: NonNull<_> = Box::leak(box ArcInner {
            strong: atomic::AtomicUsize::new(0),
            weak: atomic::AtomicUsize::new(1),
            data: mem::MaybeUninit::<T>::uninit(),
        })
        .into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviours
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
        }

        let strong = Arc::from_inner(init_ptr);

        // Strong references should collectively own a shared weak reference,
        // so don't run the destructor for our old weak reference.
        mem::forget(weak);

        strong
    }
    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.alloc(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }
    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.alloc_zeroed(layout),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }
    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
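    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    ///
    /// // The value can still be read through `Deref`; it just can't be
    /// // moved out of (or within) its heap allocation.
    /// assert_eq!(*pinned, 5);
    /// ```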
461 #[stable(feature = "pin", since = "1.33.0")]
462 pub fn pin(data: T) -> Pin<Arc<T>> {
463 unsafe { Pin::new_unchecked(Arc::new(data)) }
    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}
impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }
    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    ///
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.alloc_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }
}
impl<T> Arc<mem::MaybeUninit<T>> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast())
    }
}
impl<T> Arc<[mem::MaybeUninit<T>]> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr = Self::as_ptr(&this);
        mem::forget(this);
        ptr
    }
    /// Provides a raw pointer to the data.
    ///
    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as there are strong counts in the `Arc`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let y = Arc::clone(&x);
    /// let x_ptr = Arc::as_ptr(&x);
    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
    pub fn as_ptr(this: &Self) -> *const T {
        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);

        // SAFETY: This cannot go through Deref::deref or Arc::inner because
        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the Arc is recovered through `from_raw`.
        unsafe { &raw const (*ptr).data }
    }
    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
    /// alignment as `T`. This is trivially true if `U` is `T`.
    /// Note that if `U` is not `T` but has the same size and alignment, this is
    /// basically like transmuting references of different types. See
    /// [`mem::transmute`][transmute] for more information on what
    /// restrictions apply in this case.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe {
            let offset = data_offset(ptr);

            // Reverse the offset to find the original ArcInner.
            let fake_ptr = ptr as *mut ArcInner<T>;
            let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));

            Self::from_ptr(arc_ptr)
        }
    }
    /// Creates a new [`Weak`] pointer to this allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // itself.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                hint::spin_loop();
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }
    /// Gets the number of [`Weak`] pointers to this allocation.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }
    /// Gets the number of strong (`Arc`) pointers to this allocation.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }
    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_mutate_strong_count)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::incr_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
    pub unsafe fn incr_strong_count(ptr: *const T) {
        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
        let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
        // Now increase refcount, but don't drop new refcount either
        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
    }
    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(arc_mutate_strong_count)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::incr_strong_count(ptr);
    ///
    ///     // Those assertions are deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     Arc::decr_strong_count(ptr);
    ///     assert_eq!(1, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
    pub unsafe fn decr_strong_count(ptr: *const T) {
        unsafe { mem::drop(Arc::from_raw(ptr)) };
    }
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };

        // Drop the weak ref collectively held by all strong references
        drop(Weak { ptr: self.ptr });
    }
943 #[stable(feature = "ptr_eq", since = "1.17.0")]
944 /// Returns `true` if the two `Arc`s point to the same allocation
945 /// (in a vein similar to [`ptr::eq`]).
950 /// use std::sync::Arc;
952 /// let five = Arc::new(5);
953 /// let same_five = Arc::clone(&five);
954 /// let other_five = Arc::new(5);
956 /// assert!(Arc::ptr_eq(&five, &same_five));
957 /// assert!(!Arc::ptr_eq(&five, &other_five));
960 /// [`ptr::eq`]: core::ptr::eq
961 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
962 this.ptr.as_ptr() == other.ptr.as_ptr()
impl<T: ?Sized> Arc<T> {
    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized inner value where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
    unsafe fn allocate_for_layout(
        value_layout: Layout,
        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
    ) -> *mut ArcInner<T> {
        // Calculate layout using the given value layout.
        // Previously, layout was calculated on the expression
        // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
        // reference (see #54908).
        let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();

        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));

        // Initialize the ArcInner
        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);

        unsafe {
            ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
            ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
        }

        inner
    }
    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Allocate for the `ArcInner<T>` using the given value.
        unsafe {
            Self::allocate_for_layout(
                Layout::for_value(&*ptr),
                |layout| Global.alloc(layout),
                |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
            )
        }
    }
    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let box_unique = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size,
            );

            // Free the allocation without dropping its contents
            box_free(box_unique);

            Self::from_ptr(ptr)
        }
    }
}
impl<T> Arc<[T]> {
    /// Allocates an `ArcInner<[T]>` with the given length.
    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
        unsafe {
            Self::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.alloc(layout),
                |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
            )
        }
    }
}
/// Sets the data pointer of a `?Sized` raw pointer.
///
/// For a slice/trait object, this sets the `data` field and leaves the rest
/// unchanged. For a sized raw pointer, this simply sets the pointer.
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
    unsafe {
        ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    }
    ptr
}
impl<T> Arc<[T]> {
    /// Copy elements from slice into newly allocated `Arc<[T]>`
    ///
    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        unsafe {
            let ptr = Self::allocate_for_slice(v.len());

            ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());

            Self::from_ptr(ptr)
        }
    }
    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
    ///
    /// Behavior is undefined should the size be wrong.
    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.dealloc(self.mem, self.layout);
                }
            }
        }

        unsafe {
            let ptr = Self::allocate_for_slice(len);

            let mem = ptr as *mut _ as *mut u8;
            let layout = Layout::for_value(&*ptr);

            // Pointer to first element
            let elems = &mut (*ptr).data as *mut [T] as *mut T;

            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };

            for (i, item) in iter.enumerate() {
                ptr::write(elems.add(i), item);
                guard.n_elems += 1;
            }

            // All clear. Forget the guard so it doesn't free the new ArcInner.
            mem::forget(guard);

            Arc::from_ptr(ptr)
        }
    }
}
/// Specialization trait used for `From<&[T]>`.
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
    }
}

impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}
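
// Both specializations above are reached through `From<&[T]>`. For example
// (a sketch; the `Arc<[T]>: From<&[T]>` impl requires `T: Clone`):
//
//     let by_clone: Arc<[String]> = Arc::from(&["a".to_string()][..]);
//     let by_memcpy: Arc<[u32]> = Arc::from(&[1, 2, 3][..]);
//
// `String` takes the clone-per-element path; `u32: Copy` takes the
// single-memcpy path.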

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same allocation, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let _ = Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Self::from_inner(self.ptr)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

#[unstable(feature = "receiver_trait", issue = "none")]
impl<T: ?Sized> Receiver for Arc<T> {}
impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`] pointers to the same allocation,
    /// then `make_mut` will create a new allocation and invoke [`clone`][clone] on the inner value
    /// to ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// Note that this differs from the behavior of [`Rc::make_mut`] which disassociates
    /// any remaining `Weak` pointers.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [clone]: Clone::clone
    /// [get_mut]: Arc::get_mut
    /// [`Rc::make_mut`]: super::rc::Rc::make_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different allocations.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe { Self::get_mut_unchecked(this) }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference into the given `Arc`, if there are
    /// no other `Arc` or [`Weak`] pointers to the same allocation.
    ///
    /// Returns [`None`] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when there are other pointers.
    ///
    /// [make_mut]: Arc::make_mut
    /// [clone]: Clone::clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe { Some(Arc::get_mut_unchecked(this)) }
        } else {
            None
        }
    }
    /// Returns a mutable reference into the given `Arc`,
    /// without any check.
    ///
    /// See also [`get_mut`], which is safe and does appropriate checks.
    ///
    /// [`get_mut`]: Arc::get_mut
    ///
    /// # Safety
    ///
    /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
    /// for the duration of the returned borrow.
    /// This is trivially the case if no such pointers exist,
    /// for example immediately after `Arc::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(String::new());
    /// unsafe {
    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
    /// }
    /// assert_eq!(*x, "foo");
    /// ```
    #[inline]
    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
        // We are careful to *not* create a reference covering the "count" fields, as
        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
        unsafe { &mut (*this.ptr.as_ptr()).data }
    }
    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
            // counter in `drop` -- the only access that happens when any but the last reference
            // is being dropped.
            let unique = self.inner().strong.load(Acquire) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`], so we `drop` the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo  = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo);    // Doesn't print anything
    /// drop(foo2);   // Prints "dropped!"
    /// ```
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        acquire!(self.inner().strong);

        unsafe {
            self.drop_slow();
        }
    }
}
impl Arc<dyn Any + Send + Sync> {
    #[inline]
    #[stable(feature = "rc_downcast", since = "1.29.0")]
    /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    /// use std::sync::Arc;
    ///
    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// let my_string = "Hello World".to_string();
    /// print_if_string(Arc::new(my_string));
    /// print_if_string(Arc::new(0i8));
    /// ```
    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
    where
        T: Any + Send + Sync + 'static,
    {
        if (*self).is::<T>() {
            let ptr = self.ptr.cast::<ArcInner<T>>();
            mem::forget(self);
            Ok(Arc::from_inner(ptr))
        } else {
            Err(self)
        }
    }
}
impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating any memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: Weak::upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        Weak { ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0") }
    }
}
/// Helper type to allow accessing the reference counts without
/// making any assertions about the data field.
struct WeakInner<'a> {
    weak: &'a atomic::AtomicUsize,
    strong: &'a atomic::AtomicUsize,
}
impl<T: ?Sized> Weak<T> {
    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
    ///
    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
    /// unaligned or even [`null`] otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::ptr;
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// // Both point to the same object
    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
    /// // The strong here keeps it alive, so we can still access the object.
    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
    ///
    /// drop(strong);
    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
    /// // undefined behaviour.
    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
    /// ```
    ///
    /// [`null`]: core::ptr::null
    #[stable(feature = "weak_into_raw", since = "1.45.0")]
    pub fn as_ptr(&self) -> *const T {
        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);

        // SAFETY: we must offset the pointer manually, and said pointer may be
        // a dangling weak (usize::MAX) if T is sized. data_offset is safe to call,
        // because we know that a pointer to unsized T was derived from a real
        // unsized T, as dangling weaks are only created for sized T. wrapping_offset
        // is used so that we can use the same code path for the non-dangling
        // unsized case and the potentially dangling sized case.
        unsafe {
            let offset = data_offset(ptr as *mut T);
            set_data_ptr(ptr as *mut T, (ptr as *mut u8).wrapping_offset(offset))
        }
    }
    /// Consumes the `Weak<T>` and turns it into a raw pointer.
    ///
    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
    /// one weak reference (the weak count is not modified by this operation). It can be turned
    /// back into the `Weak<T>` with [`from_raw`].
    ///
    /// The same restrictions of accessing the target of the pointer as with
    /// [`as_ptr`] apply.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// let raw = weak.into_raw();
    ///
    /// assert_eq!(1, Arc::weak_count(&strong));
    /// assert_eq!("hello", unsafe { &*raw });
    ///
    /// drop(unsafe { Weak::from_raw(raw) });
    /// assert_eq!(0, Arc::weak_count(&strong));
    /// ```
    ///
    /// [`from_raw`]: Weak::from_raw
    /// [`as_ptr`]: Weak::as_ptr
    #[stable(feature = "weak_into_raw", since = "1.45.0")]
    pub fn into_raw(self) -> *const T {
        let result = self.as_ptr();
        mem::forget(self);
        result
    }
    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
    ///
    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
    ///
    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
    /// as these don't own anything; the method still works on them).
    ///
    /// # Safety
    ///
    /// The pointer must have originated from the [`into_raw`] and must still own its potential
    /// weak reference.
    ///
    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    ///
    /// let raw_1 = Arc::downgrade(&strong).into_raw();
    /// let raw_2 = Arc::downgrade(&strong).into_raw();
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
    /// ```
    ///
    /// [`new`]: Weak::new
    /// [`into_raw`]: Weak::into_raw
    /// [`upgrade`]: Weak::upgrade
    #[stable(feature = "weak_into_raw", since = "1.45.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // SAFETY: data_offset is safe to call, because this pointer originates from a Weak.
        // See Weak::as_ptr for context on how the input pointer is derived.
        let offset = unsafe { data_offset(ptr) };

        // Reverse the offset to find the original ArcInner.
        // SAFETY: we use wrapping_offset here because the pointer may be dangling (but only if T: Sized).
        let ptr = unsafe {
            set_data_ptr(ptr as *mut ArcInner<T>, (ptr as *mut u8).wrapping_offset(-offset))
        };

        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
        unsafe { Weak { ptr: NonNull::new_unchecked(ptr) } }
    }
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
    /// dropping of the inner value if successful.
    ///
    /// Returns [`None`] if the inner value has since been dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add as this function should never take the reference count
        // from zero to one.
        let inner = self.inner()?;

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                abort();
            }

            // Relaxed is fine for the failure case because we don't have any expectations about the new state.
            // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
            // value can be initialized after `Weak` references have already been created. In that case, we
            // expect to observe the fully initialized value.
            match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
                Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
                Err(old) => n = old,
            }
        }
    }
    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0.
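    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```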
    #[stable(feature = "weak_counts", since = "1.41.0")]
    pub fn strong_count(&self) -> usize {
        if let Some(inner) = self.inner() { inner.strong.load(SeqCst) } else { 0 }
    }
    /// Gets an approximation of the number of `Weak` pointers pointing to this
    /// allocation.
    ///
    /// If `self` was created using [`Weak::new`], or if there are no remaining
    /// strong pointers, this will return 0.
    ///
    /// # Accuracy
    ///
    /// Due to implementation details, the returned value can be off by 1 in
    /// either direction when other threads are manipulating any `Arc`s or
    /// `Weak`s pointing to the same allocation.
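    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// // Once the last strong pointer is gone, the count reads as 0.
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```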
    #[stable(feature = "weak_counts", since = "1.41.0")]
    pub fn weak_count(&self) -> usize {
        self.inner()
            .map(|inner| {
                let weak = inner.weak.load(SeqCst);
                let strong = inner.strong.load(SeqCst);
                if strong == 0 {
                    0
                } else {
                    // Since we observed that there was at least one strong pointer
                    // after reading the weak count, we know that the implicit weak
                    // reference (present whenever any strong references are alive)
                    // was still around when we observed the weak count, and can
                    // therefore safely subtract it.
                    weak - 1
                }
            })
            .unwrap_or(0)
    }
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
    #[inline]
    fn inner(&self) -> Option<WeakInner<'_>> {
        if is_dangling(self.ptr) {
            None
        } else {
            // We are careful to *not* create a reference covering the "data" field, as
            // the field may be mutated concurrently (for example, if the last `Arc`
            // is dropped, the data field will be dropped in-place).
            Some(unsafe {
                let ptr = self.ptr.as_ptr();
                WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
            })
        }
    }
    /// Returns `true` if the two `Weak`s point to the same allocation (similar to
    /// [`ptr::eq`]), or if both don't point to any allocation
    /// (because they were created with `Weak::new()`).
    ///
    /// # Notes
    ///
    /// Since this compares pointers, two `Weak`s created with `Weak::new()` will
    /// compare equal to each other, even though they don't point to any allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let first_rc = Arc::new(5);
    /// let first = Arc::downgrade(&first_rc);
    /// let second = Arc::downgrade(&first_rc);
    ///
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(5);
    /// let third = Arc::downgrade(&third_rc);
    ///
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// Comparing `Weak::new`.
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let first = Weak::new();
    /// let second = Weak::new();
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(());
    /// let third = Arc::downgrade(&third_rc);
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// [`ptr::eq`]: core::ptr::eq
    #[inline]
    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        self.ptr.as_ptr() == other.ptr.as_ptr()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// let _ = Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return Weak { ptr: self.ptr };
        };
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // where there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = inner.weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: self.ptr }
    }
}
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always
    /// gives [`None`].
    ///
    /// [`upgrade`]: Weak::upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo);   // Doesn't print anything
    /// drop(foo);        // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() { inner } else { return };

        if inner.weak.fetch_sub(1, Release) == 1 {
            acquire!(inner.weak);
            unsafe { Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq> {
    fn eq(&self, other: &Arc<T>) -> bool;
    fn ne(&self, other: &Arc<T>) -> bool;
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
    default fn eq(&self, other: &Arc<T>) -> bool {
        **self == **other
    }

    default fn ne(&self, other: &Arc<T>) -> bool {
        **self != **other
    }
}

/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone, but also heavy to check for equality, causing this
/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
/// the same value than two `&T`s.
///
/// We can only do this when `T: Eq`, as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        Arc::ptr_eq(self, other) || **self == **other
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        !Arc::ptr_eq(self, other) && **self != **other
    }
}

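// Concretely, the `Eq` bound matters because a `PartialEq`-only type may be
// irreflexive. A sketch of the observable difference:
//
//     use std::sync::Arc;
//
//     let nan = Arc::new(f64::NAN);
//     // `f64` is only `PartialEq`, so there is no pointer shortcut and
//     // `NaN != NaN` is honored even for clones of one allocation.
//     assert!(nan != nan.clone());
//
//     let five = Arc::new(5);
//     // `i32` is `Eq`, so two clones of one allocation compare equal
//     // without the values ever being read.
//     assert!(five == five.clone());
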
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are always equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::eq(self, other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same value are never unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::ne(self, other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// let five = Arc::new(5);
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// let five = Arc::new(5);
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// let five = Arc::new(5);
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// let five = Arc::new(5);
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}

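// `Display` and `Debug` defer to the pointee; `Pointer` prints the address of
// the inner allocation. A short sketch (the printed address is illustrative):
//
//     use std::sync::Arc;
//
//     let five = Arc::new(5);
//     assert_eq!(format!("{}", five), "5");   // Display
//     assert_eq!(format!("{:?}", five), "5"); // Debug
//     let _addr = format!("{:p}", five);      // Pointer, e.g. "0x55d0a3a2a010"
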
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

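// Because `hash` forwards to the inner value, an `Arc<T>` hashes identically
// to a bare `T`. A sketch using `DefaultHasher` (any `Hasher` would do):
//
//     use std::collections::hash_map::DefaultHasher;
//     use std::hash::{Hash, Hasher};
//     use std::sync::Arc;
//
//     fn hash_of<T: Hash>(value: &T) -> u64 {
//         let mut hasher = DefaultHasher::new();
//         value.hash(&mut hasher);
//         hasher.finish()
//     }
//
//     assert_eq!(hash_of(&Arc::new(5)), hash_of(&5));
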
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);
            // Allow the Vec to free its memory, but not destroy its contents
            v.set_len(0);
            arc
        }
    }
}

#[stable(feature = "shared_from_cow", since = "1.45.0")]
impl<'a, B> From<Cow<'a, B>> for Arc<B>
where
    B: ToOwned + ?Sized,
    Arc<B>: From<&'a B> + From<B::Owned>,
{
    fn from(cow: Cow<'a, B>) -> Arc<B> {
        match cow {
            Cow::Borrowed(s) => Arc::from(s),
            Cow::Owned(s) => Arc::from(s),
        }
    }
}

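// A usage sketch for the `From` conversions above:
//
//     use std::borrow::Cow;
//     use std::sync::Arc;
//
//     let from_vec: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
//     let from_str: Arc<str> = Arc::from("hello");
//     let from_string: Arc<str> = Arc::from(String::from("hello"));
//     let from_cow: Arc<str> = Arc::from(Cow::Borrowed("hello"));
//     assert_eq!(&*from_str, &*from_string);
//     assert_eq!(&*from_cow, "hello");
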
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
    type Error = Arc<[T]>;

    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
        } else {
            Err(boxed_slice)
        }
    }
}

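// A sketch of the conversion: the length check decides success, and the
// original `Arc<[T]>` is handed back as the error on a mismatch:
//
//     use std::convert::TryFrom;
//     use std::sync::Arc;
//
//     let slice: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
//     let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
//     assert_eq!(*array, [1, 2, 3]);
//
//     let slice: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
//     assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
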
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> iter::FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
        ToArcSlice::to_arc_slice(iter.into_iter())
    }
}

/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
    fn to_arc_slice(self) -> Arc<[T]>;
}

impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
    default fn to_arc_slice(self) -> Arc<[T]> {
        self.collect::<Vec<T>>().into()
    }
}

impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
    fn to_arc_slice(self) -> Arc<[T]> {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = self.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low,
                high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact length and we have.
                Arc::from_iter_exact(self, low)
            }
        } else {
            // Fall back to normal implementation.
            self.collect::<Vec<T>>().into()
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

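// Both impls hand out a plain `&T`, so an `Arc<T>` can be passed wherever a
// reference to the pointee is expected. A sketch:
//
//     use std::sync::Arc;
//
//     fn print_str(s: &str) {
//         println!("{}", s);
//     }
//
//     let owned: Arc<str> = Arc::from("hello");
//     print_str(owned.as_ref()); // or `&*owned`
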
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Arc<T> {}

/// Get the offset within an `ArcInner` for
/// a payload of type described by a pointer.
///
/// # Safety
///
/// This has the same safety requirements as `align_of_val_raw`. In effect:
///
/// - This function is safe for any argument if `T` is sized, and
/// - if `T` is unsized, the pointer must have appropriate pointer metadata
///   acquired from the real instance that you are getting this offset for.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
    // Align the unsized value to the end of the `ArcInner`.
    // Because it is `?Sized`, it will always be the last field in memory.
    // Note: This is a detail of the current implementation of the compiler,
    // and is not a guaranteed language detail. Do not rely on it outside of std.
    unsafe { data_offset_align(align_of_val(&*ptr)) }
}

#[inline]
fn data_offset_align(align: usize) -> isize {
    let layout = Layout::new::<ArcInner<()>>();
    (layout.size() + layout.padding_needed_for(align)) as isize
}
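
// Worked example (assuming a 64-bit target, where `AtomicUsize` is 8 bytes):
// `ArcInner<()>` is just the two reference counters, so its layout has size 16
// and align 8. A payload with align <= 8 therefore lives at offset 16, while a
// payload with align 32 needs 16 bytes of padding and lives at offset 32:
//
//     size 16 + padding_needed_for(8)  = 16 + 0  = 16
//     size 16 + padding_needed_for(32) = 16 + 16 = 32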