// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Concurrency-enabled mechanisms for sharing mutable and/or immutable state
//! between tasks.

use core::atomics;
use core::clone::Clone;
use core::kinds::{Share, Send};
use core::mem::{min_align_of, size_of, drop};
use core::mem;
use core::ops::{Drop, Deref};
use core::option::{Some, None, Option};
use core::ptr;
use core::ptr::RawPtr;

use heap::deallocate;

/// An atomically reference counted wrapper for shared state.
///
/// # Example
///
/// In this example, a large vector of floats is shared between several tasks.
/// With simple pipes, without `Arc`, a copy would have to be made for each
/// task.
///
/// ```rust
/// use std::sync::Arc;
///
/// let numbers = Vec::from_fn(100, |i| i as f32);
/// let shared_numbers = Arc::new(numbers);
///
/// for _ in range(0u, 10) {
///     let child_numbers = shared_numbers.clone();
///
///     spawn(proc() {
///         let local_numbers = child_numbers.as_slice();
///
///         // Work with the local numbers
///     });
/// }
/// ```
#[unsafe_no_drop_flag]
pub struct Arc<T> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: *mut ArcInner<T>,
}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
#[experimental = "Weak pointers may not belong in this module."]
pub struct Weak<T> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: *mut ArcInner<T>,
}

struct ArcInner<T> {
    strong: atomics::AtomicUint,
    weak: atomics::AtomicUint,
    data: T,
}

impl<T: Share + Send> Arc<T> {
    /// Create an atomically reference counted wrapper.
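    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming the `std::sync::Arc` re-export shown
    /// in the type-level example above):
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// // Wrap a value so it can be shared across tasks.
    /// let five = Arc::new(5i);
    /// assert_eq!(*five, 5);
    /// ```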
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = box ArcInner {
            strong: atomics::AtomicUint::new(1),
            weak: atomics::AtomicUint::new(1),
            data: data,
        };
        Arc { _ptr: unsafe { mem::transmute(x) } }
    }

    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Share` because the inner data is
        // `Share` as well, so we're ok loaning out an immutable pointer to
        // these contents.
        unsafe { &*self._ptr }
    }

    /// Downgrades a strong pointer to a weak pointer.
    ///
    /// Weak pointers will not keep the data alive. Once all strong references
    /// to the underlying data have been dropped, the data itself will be
    /// destroyed.
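    ///
    /// # Example
    ///
    /// A minimal sketch (again assuming the `std::sync::Arc` re-export):
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// let strong = Arc::new(5i);
    /// // The weak pointer observes the value without keeping it alive.
    /// let weak = strong.downgrade();
    /// assert!(weak.upgrade().is_some());
    /// ```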
    #[experimental = "Weak pointers may not belong in this module."]
    pub fn downgrade(&self) -> Weak<T> {
        // See the clone() impl for why this is relaxed
        self.inner().weak.fetch_add(1, atomics::Relaxed);
        Weak { _ptr: self._ptr }
    }
}

#[unstable = "waiting on stability of Clone"]
impl<T: Share + Send> Clone for Arc<T> {
    /// Duplicate an atomically reference counted wrapper.
    ///
    /// The resulting two `Arc` objects will point to the same underlying data
    /// object. However, one of the `Arc` objects can be sent to another task,
    /// allowing them to share the underlying data.
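    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the `std::sync::Arc` re-export):
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// let data = Arc::new(vec!(1i, 2, 3));
    /// // Cloning bumps the reference count; both handles see the same vector.
    /// let handle = data.clone();
    /// assert_eq!(*data, *handle);
    /// ```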
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        self.inner().strong.fetch_add(1, atomics::Relaxed);
        Arc { _ptr: self._ptr }
    }
}

#[experimental = "Deref is experimental."]
impl<T: Send + Share> Deref<T> for Arc<T> {
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Send + Share + Clone> Arc<T> {
    /// Acquires a mutable pointer to the inner contents by guaranteeing that
    /// the reference count is one (no sharing is possible).
    ///
    /// This is also referred to as a copy-on-write operation because the inner
    /// data is cloned if the reference count is greater than one.
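    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the `std::sync::Arc` re-export):
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// let mut shared = Arc::new(5u);
    /// let other = shared.clone();
    /// // Two handles exist, so the data is cloned before mutation.
    /// *shared.make_unique() += 1;
    /// assert!(*shared == 6);
    /// assert!(*other == 5);
    /// ```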
    pub fn make_unique(&mut self) -> &mut T {
        // Note that we hold a strong reference, which also counts as
        // a weak reference, so we only clone if there is an
        // additional reference of either kind.
        if self.inner().strong.load(atomics::SeqCst) != 1 ||
           self.inner().weak.load(atomics::SeqCst) != 1 {
            *self = Arc::new(self.deref().clone())
        }
        // This unsafety is ok because we're guaranteed that the pointer
        // returned is the *only* pointer that will ever be returned to T. Our
        // reference count is guaranteed to be 1 at this point, and we required
        // the Arc itself to be `mut`, so we're returning the only possible
        // reference to the inner data.
        let inner = unsafe { &mut *self._ptr };
        &mut inner.data
    }
}

#[unsafe_destructor]
#[experimental = "waiting on stability of Drop"]
impl<T: Share + Send> Drop for Arc<T> {
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        if self._ptr.is_null() { return }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the
        // decreasing of the reference count synchronizes with this `Acquire`
        // fence. This means that use of the data happens before decreasing
        // the reference count, which happens before this fence, which
        // happens before the deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in
        // > one thread (through an existing reference) to *happen before*
        // > deleting the object in a different thread. This is achieved by a
        // > "release" operation after dropping a reference (any access to the
        // > object through this reference must obviously happened before),
        // > and an "acquire" operation before deleting the object.
        //
        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        atomics::fence(atomics::Acquire);

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        unsafe { drop(ptr::read(&self.inner().data)); }

        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
            atomics::fence(atomics::Acquire);
            unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                min_align_of::<ArcInner<T>>()) }
        }
    }
}

#[experimental = "Weak pointers may not belong in this module."]
impl<T: Share + Send> Weak<T> {
    /// Attempts to upgrade this weak reference to a strong reference.
    ///
    /// This method will fail to upgrade this reference if the strong reference
    /// count has already reached 0, but if there are still other active strong
    /// references this function will return a new strong reference to the data.
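    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the `std::sync::Arc` re-export and the
    /// prelude's `drop`):
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// let strong = Arc::new(5i);
    /// let weak = strong.downgrade();
    /// // Upgrading succeeds while a strong reference remains...
    /// assert!(weak.upgrade().is_some());
    /// drop(strong);
    /// // ...and fails once the last strong reference is gone.
    /// assert!(weak.upgrade().is_none());
    /// ```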
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            let n = inner.strong.load(atomics::SeqCst);
            if n == 0 { return None }
            let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
            if old == n { return Some(Arc { _ptr: self._ptr }) }
        }
    }

    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &*self._ptr }
    }
}

#[experimental = "Weak pointers may not belong in this module."]
impl<T: Share + Send> Clone for Weak<T> {
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed
        self.inner().weak.fetch_add(1, atomics::Relaxed);
        Weak { _ptr: self._ptr }
    }
}

#[unsafe_destructor]
#[experimental = "Weak pointers may not belong in this module."]
impl<T: Share + Send> Drop for Weak<T> {
    fn drop(&mut self) {
        // see comments above for why this check is here
        if self._ptr.is_null() { return }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings
        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
            atomics::fence(atomics::Acquire);
            unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                min_align_of::<ArcInner<T>>()) }
        }
    }
}

#[cfg(test)]
#[allow(experimental)]
mod tests {
    use std::clone::Clone;
    use std::collections::MutableSeq;
    use std::comm::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::{Option, Some, None};
    use std::sync::atomics;
    use std::task;
    use std::vec::Vec;

    use super::{Arc, Weak};
    use std::sync::Mutex;

    struct Canary(*mut atomics::AtomicUint);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, atomics::SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        task::spawn(proc() {
            let arc_v: Arc<Vec<int>> = rx.recv();
            assert_eq!(*arc_v.get(3), 4);
        });

        tx.send(arc_v.clone());

        assert_eq!(*arc_v.get(2), 3);
        assert_eq!(*arc_v.get(4), 5);

        info!("{:?}", arc_v);
    }

    #[test]
    fn test_cowarc_clone_make_unique() {
        let mut cow0 = Arc::new(75u);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *cow0.make_unique());
        assert!(75 == *cow1.make_unique());
        assert!(75 == *cow2.make_unique());

        *cow0.make_unique() += 1;
        *cow1.make_unique() += 2;
        *cow2.make_unique() += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75u);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *cow0.make_unique() += 1;

        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75u);
        let cow1_weak = cow0.downgrade();

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *cow0.make_unique() += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5i);
        let y = x.downgrade();
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5i);
        let y = x.downgrade();
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = a.clone().downgrade();
        *a.x.lock() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomics::AtomicUint::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
        drop(x);
        assert!(canary.load(atomics::Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomics::AtomicUint::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
        let arc_weak = arc.downgrade();
        assert!(canary.load(atomics::Acquire) == 0);
        drop(arc);
        assert!(canary.load(atomics::Acquire) == 1);
        drop(arc_weak);
    }
}