// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html

use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::Ordering;
use core::intrinsics::abort;
use core::mem::{self, size_of_val, uninitialized};
use core::ops::Deref;
use core::ops::CoerceUnsized;
use core::ptr::{self, Shared};
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::From;

use heap::{Heap, Alloc, Layout, box_free};
use boxed::Box;
use string::String;
use vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily exactly at `MAX_REFCOUNT + 1` references).
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
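// (On a typical 64-bit target `MAX_REFCOUNT` is 2^63 - 1; the overflow guards
// in `Arc::clone` and `Weak::clone` below abort the process once a reference
// count exceeds it.)
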
/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new pointer to the same value in the heap. When the last `Arc`
/// pointer to a given value is destroyed, the pointed-to value is
/// also destroyed.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception. If you need to mutate through an `Arc`, use [`Mutex`][mutex],
/// [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted values between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<RefCell<T>>`. `RefCell<T>` isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], `Arc<RefCell<T>>` would be as well. But then we'd have a problem:
/// `RefCell<T>` is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// `std::sync` type, usually `Mutex<T>`.
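///
/// For instance, a brief sketch of pairing `Arc<T>` with [`Mutex`][mutex] to
/// share a mutable counter between threads (illustrative only):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..4).map(|_| {
///     let counter = Arc::clone(&counter);
///     thread::spawn(move || {
///         // Each thread locks the mutex before mutating the shared value.
///         *counter.lock().unwrap() += 1;
///     })
/// }).collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```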
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value has already been
/// dropped.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// pointers from children back to their parents.
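///
/// A minimal sketch of such a parent link (the `Node` type here is
/// illustrative, not part of this module):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
/// }
///
/// let parent = Arc::new(Node { parent: Mutex::new(Weak::new()) });
/// let child = Arc::new(Node { parent: Mutex::new(Arc::downgrade(&parent)) });
///
/// // The child's back-pointer does not keep the parent alive.
/// drop(parent);
/// assert!(child.parent.lock().unwrap().upgrade().is_none());
/// ```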
///
/// # Cloning references
///
/// Creating a new reference from an existing reference counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a and b both point to the same memory location as foo.
/// ```
///
/// The `Arc::clone(&from)` syntax is the most idiomatic because it conveys more explicitly
/// the meaning of the code. In the example above, this syntax makes it easier to see that
/// this code is creating a new reference rather than copying the whole content of foo.
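///
/// The same two spellings work for [`Weak<T>`][weak] (a short sketch):
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// let foo = Arc::new(10);
/// let weak_foo = Arc::downgrade(&foo);
/// // The two syntaxes below are equivalent.
/// let a = weak_foo.clone();
/// let b = Weak::clone(&weak_foo);
/// // Both `a` and `b` point to the same allocation as `weak_foo`.
/// ```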
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are [associated
/// functions][assoc], called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the value may have
/// already been destroyed.
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [assoc]: ../../book/first-edition/method-syntax.html#associated-functions
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The Windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the inner value from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present and may return [`None`]
/// when [`upgrade`]d.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the value
/// within [`Arc`] without extending its lifetime. It is also used to prevent
/// circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
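///
/// # Examples
///
/// A brief sketch of the downgrade/upgrade round trip:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong pointer exists, `upgrade` succeeds...
/// assert!(weak.upgrade().is_some());
///
/// // ...and once the last strong pointer is dropped, the value is dropped
/// // and `upgrade` returns `None`.
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```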
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Arc { ptr: Shared::from(Box::into_unique(x)) }
    }

    /// Returns the contained value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// [result]: ../../std/result/enum.Result.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }

    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`][from_raw].
    ///
    /// [from_raw]: struct.Arc.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { *x_ptr }, 10);
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr: *const T = &*this;
        mem::forget(this);
        ptr
    }

    /// Constructs an `Arc` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc::into_raw`][into_raw].
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double-free may occur if the function is called twice on the same raw pointer.
    ///
    /// [into_raw]: struct.Arc.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(10);
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(*x, 10);
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need to subtract the offset of the
        // `data` field from the pointer.
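        // As an illustration: on a typical 64-bit layout `ArcInner<u64>` puts
        // the two `AtomicUsize` counters first, so the `data` offset would be
        // 16 and we would step the pointer back by 16 bytes.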
        let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
        Arc {
            ptr: Shared::new_unchecked(ptr as *mut u8 as *mut _),
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Creates a new [`Weak`][weak] pointer to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // below.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => return Weak { ptr: this.ptr },
                Err(old) => cur = old,
            }
        }
    }

    /// Gets the number of [`Weak`][weak] pointers to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }

    /// Gets the number of strong (`Arc`) pointers to this value.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let ptr = self.ptr.as_ptr();

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut self.ptr.as_mut().data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr))
        }
    }

    #[inline]
    #[stable(feature = "ptr_eq", since = "1.17.0")]
    /// Returns true if the two `Arc`s point to the same value (not
    /// just values that compare as equal).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}

impl<T: ?Sized> Arc<T> {
    // Allocates an `ArcInner<T>` with sufficient space for an unsized value
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Create a fake ArcInner to find allocation size and alignment
        let fake_ptr = ptr as *mut ArcInner<T>;

        let layout = Layout::for_value(&*fake_ptr);

        let mem = Heap.alloc(layout)
            .unwrap_or_else(|e| Heap.oom(e));

        // Initialize the real ArcInner
        let inner = set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>;

        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));

        inner
    }

    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let bptr = Box::into_raw(v);

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size);

            // Free the allocation without dropping its contents
            box_free(bptr);

            Arc { ptr: Shared::new_unchecked(ptr) }
        }
    }
}

// Sets the data pointer of a `?Sized` raw pointer.
//
// For a slice/trait object, this sets the `data` field and leaves the rest
// unchanged. For a sized raw pointer, this simply sets the pointer.
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
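    // For example, given `ptr: *mut [u8]` (a fat pointer carrying a length),
    // the write below replaces only the address half and leaves the length
    // metadata untouched. (Illustrative note; this relies on the fat-pointer
    // layout placing the data address first, which this function assumes.)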
    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    ptr
}

impl<T> Arc<[T]> {
    // Copy elements from slice into newly allocated Arc<[T]>
    //
    // Unsafe because the caller must either take ownership or bind `T: Copy`
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        let v_ptr = v as *const [T];
        let ptr = Self::allocate_for_ptr(v_ptr);

        ptr::copy_nonoverlapping(
            v.as_ptr(),
            &mut (*ptr).data as *mut [T] as *mut T,
            v.len());

        Arc { ptr: Shared::new_unchecked(ptr) }
    }
}

// Specialization trait used for From<&[T]>
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: *mut u8,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                use core::slice::from_raw_parts_mut;

                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Heap.dealloc(self.mem, self.layout.clone());
                }
            }
        }

        unsafe {
            let v_ptr = v as *const [T];
            let ptr = Self::allocate_for_ptr(v_ptr);

            let mem = ptr as *mut _ as *mut u8;
            let layout = Layout::for_value(&*ptr);

            // Pointer to first element
            let elems = &mut (*ptr).data as *mut [T] as *mut T;

            let mut guard = Guard {
                mem: mem,
                elems: elems,
                layout: layout,
                n_elems: 0,
            };

            for (i, item) in v.iter().enumerate() {
                ptr::write(elems.offset(i as isize), item.clone());
                guard.n_elems += 1;
            }

            // All clear. Forget the guard so it doesn't free the new ArcInner.
            mem::forget(guard);

            Arc { ptr: Shared::new_unchecked(ptr) }
        }
    }
}

impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same inner value, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Arc { ptr: self.ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`][weak] pointers to the same value,
    /// then `make_mut` will invoke [`clone`][clone] on the inner value to
    /// ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [weak]: struct.Weak.html
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    /// [get_mut]: struct.Arc.html#method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different values.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
            //
            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.
            //
            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.
            //
            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            &mut this.ptr.as_mut().data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the inner value, if there are
    /// no other `Arc` or [`Weak`][weak] pointers to the same value.
    ///
    /// Returns [`None`][option] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when it's shared.
    ///
    /// [weak]: struct.Weak.html
    /// [option]: ../../std/option/enum.Option.html
    /// [make_mut]: struct.Arc.html#method.make_mut
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                Some(&mut this.ptr.as_mut().data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` prior to decrements of the `weak` count (via drop,
        // which uses Release).
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // Due to the previous acquire read, this will observe any writes to
            // `strong` that were due to upgrading weak pointers; only strong
            // clones remain, which require that the strong count is > 1 anyway.
            let unique = self.inner().strong.load(Relaxed) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`][weak], so we `drop` the inner value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo);    // Doesn't print anything
    /// drop(foo2);   // Prints "dropped!"
    /// ```
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happen before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
    /// it. Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        unsafe {
            Weak {
                ptr: Shared::from(Box::into_unique(box ArcInner {
                    strong: atomic::AtomicUsize::new(0),
                    weak: atomic::AtomicUsize::new(1),
                    data: uninitialized(),
                })),
            }
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending
    /// the lifetime of the value if successful.
    ///
    /// Returns [`None`] if the value has since been dropped.
    ///
    /// [`Arc`]: struct.Arc.html
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc { ptr: self.ptr }),
                Err(old) => n = old,
            }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { self.ptr.as_ref() }
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't be
        // running this code in that case).
        let old_size = self.inner().weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { ptr: self.ptr };
    }
}

#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
    /// it. Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo);   // Doesn't print anything
    /// drop(foo);        // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        let ptr = self.ptr.as_ptr();

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe {
                Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr))
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> {
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<'a> From<&'a str> for Arc<str> {
    #[inline]
    fn from(v: &str) -> Arc<str> {
        // `str` has the same layout as `[u8]`, so reinterpreting the
        // allocation is sound here.
        unsafe { mem::transmute(<Arc<[u8]>>::from(v.as_bytes())) }
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
    #[inline]
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // Allow the Vec to free its memory, but not destroy its contents
            v.set_len(0);

            arc
        }
    }
}

#[cfg(test)]
mod tests {
    use std::boxed::Box;
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{None, Some};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::sync::Mutex;
    use std::convert::From;

    use super::{Arc, Weak};
    use vec::Vec;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    fn manually_share_arc() {
        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }

    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }

    #[test]
    fn into_from_raw() {
        let x = Arc::new(box "hello");
        let y = x.clone();

        let x_ptr = Arc::into_raw(x);
        drop(y);
        unsafe {
            assert_eq!(**x_ptr, "hello");

            let x = Arc::from_raw(x_ptr);
            assert_eq!(**x, "hello");

            assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
        }
    }

    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;
        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo {
        inner: Arc<i32>,
    }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn test_from_owned() {
        let foo = 123;
        let foo_arc = Arc::from(foo);
        assert!(123 == *foo_arc);
    }

    #[test]
    fn test_new_weak() {
        let foo: Weak<usize> = Weak::new();
        assert!(foo.upgrade().is_none());
    }

    #[test]
    fn test_ptr_eq() {
        let five = Arc::new(5);
        let same_five = five.clone();
        let other_five = Arc::new(5);

        assert!(Arc::ptr_eq(&five, &same_five));
        assert!(!Arc::ptr_eq(&five, &other_five));
    }

    #[test]
    #[cfg_attr(target_os = "emscripten", ignore)]
    fn test_weak_count_locked() {
        let mut a = Arc::new(atomic::AtomicBool::new(false));
        let a2 = a.clone();
        let t = thread::spawn(move || {
            for _i in 0..1000000 {
                Arc::get_mut(&mut a);
            }
            a.store(true, SeqCst);
        });

        while !a2.load(SeqCst) {
            let n = Arc::weak_count(&a2);
            assert!(n < 2, "bad weak count: {}", n);
        }
        t.join().unwrap();
    }

    #[test]
    fn test_from_str() {
        let r: Arc<str> = Arc::from("foo");

        assert_eq!(&r[..], "foo");
    }

    #[test]
    fn test_copy_from_slice() {
        let s: &[u32] = &[1, 2, 3];
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }

    #[test]
    fn test_clone_from_slice() {
        #[derive(Clone, Debug, Eq, PartialEq)]
        struct X(u32);

        let s: &[X] = &[X(1), X(2), X(3)];
        let r: Arc<[X]> = Arc::from(s);

        assert_eq!(&r[..], s);
    }

    #[test]
    #[should_panic]
    fn test_clone_from_slice_panic() {
        use std::string::{String, ToString};

        struct Fail(u32, String);

        impl Clone for Fail {
            fn clone(&self) -> Fail {
                if self.0 == 2 {
                    panic!();
                }
                Fail(self.0, self.1.clone())
            }
        }

        let s = [
            Fail(0, "foo".to_string()),
            Fail(1, "bar".to_string()),
            Fail(2, "baz".to_string()),
        ];

        // Should panic, but not cause memory corruption
        let _r: Arc<[Fail]> = Arc::from(&s[..]);
    }

    #[test]
    fn test_from_box() {
        let b: Box<u32> = box 123;
        let r: Arc<u32> = Arc::from(b);

        assert_eq!(*r, 123);
    }

    #[test]
    fn test_from_box_str() {
        use std::string::String;

        let s = String::from("foo").into_boxed_str();
        let r: Arc<str> = Arc::from(s);

        assert_eq!(&r[..], "foo");
    }

    #[test]
    fn test_from_box_slice() {
        let s = vec![1, 2, 3].into_boxed_slice();
        let r: Arc<[u32]> = Arc::from(s);

        assert_eq!(&r[..], [1, 2, 3]);
    }

    #[test]
    fn test_from_box_trait() {
        use std::fmt::Display;
        use std::string::ToString;

        let b: Box<Display> = box 123;
        let r: Arc<Display> = Arc::from(b);

        assert_eq!(r.to_string(), "123");
    }

    #[test]
    fn test_from_box_trait_zero_sized() {
        use std::fmt::Debug;

        let b: Box<Debug> = box ();
        let r: Arc<Debug> = Arc::from(b);

        assert_eq!(format!("{:?}", r), "()");
    }

    #[test]
    fn test_from_vec() {
        let v = vec![1, 2, 3];
        let r: Arc<[u32]> = Arc::from(v);

        assert_eq!(&r[..], [1, 2, 3]);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}