1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
17 use either::{Either, Left, Right};
19 use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
20 use unstable::finally::Finally;
/// An atomically reference counted pointer.
///
/// Enforces no shared-memory safety.
// NOTE(review): this dump is elided; lines between the visible ones are missing.
#[unsafe_no_drop_flag]
pub struct UnsafeArc<T> {
    // Raw pointer to the shared, heap-allocated refcount/payload record.
    data: *mut ArcData<T>,

// (fragment of `struct ArcData<T>` — its header is elided from this view)
    // An unwrapper uses this protocol to communicate with the "other" task that
    // drops the last refcount on an arc. Unfortunately this can't be a proper
    // pipe protocol because the unwrapper has to access both stages at once.
    // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)?
    unwrapper: AtomicOption<(comm::ChanOne<()>, comm::PortOne<bool>)>,
    // FIXME(#3224) should be able to make this non-option to save memory
// Heap-allocates the shared ArcData with the given initial refcount and an
// empty unwrapper slot; the caller owns the returned raw pointer.
// NOTE(review): the tail of this function is elided from this view.
unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
    let data = ~ArcData { count: AtomicUint::new(refcount),
                          unwrapper: AtomicOption::empty(),
impl<T: Send> UnsafeArc<T> {
    /// Creates a fresh arc with a refcount of 1.
    pub fn new(data: T) -> UnsafeArc<T> {
        unsafe { UnsafeArc { data: new_inner(data, 1) } }

    /// As new(), but returns an extra pre-cloned handle.
    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
        // Start the count at 2 so both returned handles share one allocation
        // without a second atomic increment.
        let ptr = new_inner(data, 2);
        (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
    /// As new(), but returns a vector of as many pre-cloned handles as requested.
    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
            // Zero handles requested: return an empty vector.
            ~[] // need to free data here
            // (else-branch — the surrounding `if`/`else` lines are elided here)
            let ptr = new_inner(data, num_handles);
            vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
    /// As newN(), but from an already-existing handle. Uses one xadd.
    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc<T>] {
            ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
            // Minus one because we are recycling the given handle's refcount:
            // `self` already accounts for one reference, and its destructor is
            // suppressed below, so adding `num_handles` here would leak a count.
            let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire);
            // The handle being consumed was alive, so the count was >= 1.
            assert!(old_count >= 1);
            cast::forget(self); // Don't run the destructor on this handle.
            // NOTE(review): `ptr` is bound from `self.data` on a line elided here.
            vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
    /// Returns a raw mutable pointer to the shared payload.
    pub fn get(&self) -> *mut T {
        // Sanity check: a live handle implies a positive refcount.
        assert!((*self.data).count.load(Relaxed) > 0);
        let r: *mut T = (*self.data).data.get_mut_ref();

    /// As get(), but hands out an immutable raw pointer.
    pub fn get_immut(&self) -> *T {
        assert!((*self.data).count.load(Relaxed) > 0);
        let r: *T = (*self.data).data.get_ref();
    /// Wait until all other handles are dropped, then retrieve the enclosed
    /// data. See extra::arc::Arc for specific semantics documentation.
    /// If called when the task is already unkillable, unwrap will unkillably
    /// block; otherwise, an unwrapping task can be killed by linked failure.
    // NOTE(review): several control-flow lines (the `if old_count == 1` split,
    // `else` arms, and the `finally` cleanup) are elided from this view; the
    // comments below annotate only the visible statements.
    pub fn unwrap(self) -> T {
        let this = Cell::new(self); // argh
        do task::unkillable {
            let mut this = this.take();
            // The ~ dtor needs to run if this code succeeds.
            let mut data: ~ArcData<T> = cast::transmute(this.data);
            // Set up the unwrap protocol.
            let (p1,c1) = comm::oneshot(); // ()
            let (p2,c2) = comm::oneshot(); // bool
            // Try to put our server end in the unwrapper slot.
            // This needs no barrier -- it's protected by the release barrier on
            // the xadd, and the acquire+release barrier in the destructor's xadd.
            if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
                // Got in. Tell this handle's destructor not to run (we are now it).
                this.data = ptr::mut_null();
                // Drop our own reference.
                let old_count = data.count.fetch_sub(1, Release);
                assert!(old_count >= 1);
                    // We were the last owner. Can unwrap immediately.
                    // AtomicOption's destructor will free the server endpoint.
                    // FIXME(#3224): it should be like this
                    // let ~ArcData { data: user_data, _ } = data;
                    data.data.take_unwrap()
                    // The *next* person who sees the refcount hit 0 will wake us.
                    let p1 = Cell::new(p1); // argh
                    // Unlike the above one, this cell is necessary. It will get
                    // taken either in the do block or in the finally block.
                    let c2_and_data = Cell::new((c2,data));
                        // Wait, killably, for the last dropper's wakeup signal.
                        do task::rekillable { p1.take().recv(); }
                        // Got here. Back in the 'unkillable' without getting killed.
                        let (c2, data) = c2_and_data.take();
                        // FIXME(#3224): it should be like this
                        // let ~ArcData { data: user_data, _ } = data;
                        data.data.take_unwrap()
                        // Killed during wait. Because this might happen while
                        // someone else still holds a reference, we can't free
                        // the data now; the "other" last refcount will free it.
                        let (c2, data) = c2_and_data.take();
                    // On the success path the cell must already have been emptied.
                    assert!(c2_and_data.is_empty());
            // If 'put' returns the server end back to us, we were rejected;
            // someone else was trying to unwrap. Avoid guaranteed deadlock.
            fail!("Another task is already unwrapping this Arc!");
    /// As unwrap above, but without blocking. Returns 'Left(self)' if this is
    /// not the last reference; 'Right(unwrapped_data)' if so.
    // NOTE(review): the Left(...) arm and closing braces are elided from this view.
    pub fn try_unwrap(self) -> Either<UnsafeArc<T>, T> {
        let mut this = self; // FIXME(#4330) mutable self
        // The ~ dtor needs to run if this code succeeds.
        let mut data: ~ArcData<T> = cast::transmute(this.data);
        // This can of course race with anybody else who has a handle, but in
        // such a case, the returned count will always be at least 2. If we
        // see 1, no race was possible. All that matters is 1 or not-1.
        let count = data.count.load(Acquire);
        // The more interesting race is one with an unwrapper. They may have
        // already dropped their count -- but if so, the unwrapper pointer
        // will have been set first, which the barriers ensure we will see.
        // (Note: using is_empty(), not take(), to not free the unwrapper.)
        if count == 1 && data.unwrapper.is_empty(Acquire) {
            // Tell this handle's destructor not to run (we are now it).
            this.data = ptr::mut_null();
            // FIXME(#3224) as above
            Right(data.data.take_unwrap())
impl<T: Send> Clone for UnsafeArc<T> {
    /// Bumps the shared refcount and returns a second handle to the same data.
    fn clone(&self) -> UnsafeArc<T> {
        // This barrier might be unnecessary, but I'm not sure...
        let old_count = (*self.data).count.fetch_add(1, Acquire);
        // A handle can only be cloned while it is alive, so the count was >= 1.
        assert!(old_count >= 1);
        return UnsafeArc { data: self.data };
impl<T> Drop for UnsafeArc<T>{
    // NOTE(review): the drop fn header, the `if old_count == 1` split, the
    // send/recv lines, and the match arms' bodies are elided from this view.
        // Happens when destructing an unwrapper's handle and from `#[unsafe_no_drop_flag]`
        if self.data.is_null() {
        let mut data: ~ArcData<T> = cast::transmute(self.data);
        // Must be acquire+release, not just release, to make sure this
        // doesn't get reordered to after the unwrapper pointer load.
        let old_count = data.count.fetch_sub(1, SeqCst);
        assert!(old_count >= 1);
            // Were we really last, or should we hand off to an
            // unwrapper? It's safe to not xchg because the unwrapper
            // will set the unwrap lock *before* dropping his/her
            // reference. In effect, being here means we're the only
            // *awake* task with the data.
            match data.unwrapper.take(Acquire) {
                Some(~(message,response)) => {
                    let cell = Cell::new((message, response, data));
                    do task::unkillable {
                        let (message, response, data) = cell.take();
                        // Send 'ready' and wait for a response.
                        // Unkillable wait. Message guaranteed to come.
                        // Other task got the data.
                        // Other task was killed. drop glue takes over.
                        // drop glue takes over.
/****************************************************************************/

// Enables a runtime assertion that no operation in the argument closure shall
// use scheduler operations (deschedule, recv, spawn, etc). This is for use with
// pthread mutexes, which may block the entire scheduler thread, rather than
// just one task, and is hence prone to deadlocks if mixed with descheduling.
//
// NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
// synchronization whatsoever. It only makes sense to use for CPU-local issues.
// FIXME(#8140) should not be pub
// NOTE(review): the match arms and the closure invocation are elided here.
pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
    use rt::task::{Task, GreenTask, SchedTask};
    use rt::local::Local;

    // Borrow the current task, if any, to find out what kind it is.
    let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
        match (*t).task_type {
                // Green task: bracket the closure with deschedule inhibition so
                // the runtime can abort if `f` tries to sleep.
                (*t).death.inhibit_deschedule();
                (*t).death.allow_deschedule();
#[allow(non_camel_case_types)] // runtime type
// Opaque handle to a lock object owned by the C runtime.
type rust_little_lock = *libc::c_void;
// Thin RAII wrapper around the runtime's pthread-based "little lock".
pub struct LittleLock {

impl Drop for LittleLock {
        // Destroy the underlying runtime lock when the wrapper dies.
        rust_destroy_little_lock(self.l);

    /// Allocates a fresh runtime lock.
    pub fn new() -> LittleLock {
            l: rust_create_little_lock()

    /// Runs `f` with the lock held. Unsafe because `f` must not deschedule.
    // NOTE(review): the call to `f()` (guarded so unlock always runs) is elided
    // between the lock/unlock lines in this view.
    pub unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
        rust_lock_little_lock(self.l);
        rust_unlock_little_lock(self.l);
// An arc over mutable data that is protected by a lock. For library use only.
//
// This uses a pthread mutex, not one that's aware of the userspace scheduler.
// The user of an Exclusive must be careful not to invoke any functions that may
// reschedule the task while holding the lock, or deadlock may result. If you
// need to block or deschedule while accessing shared state, use extra::sync::RWArc.
pub struct Exclusive<T> {
    // Shared record holding the LittleLock, the poison flag, and the user data.
    x: UnsafeArc<ExData<T>>
impl<T:Send> Clone for Exclusive<T> {
    // Duplicate an Exclusive Arc, as std::arc::clone.
    fn clone(&self) -> Exclusive<T> {
        // Just clones the inner UnsafeArc; the lock and data stay shared.
        Exclusive { x: self.x.clone() }
impl<T:Send> Exclusive<T> {
    /// Wraps `user_data` behind a fresh lock with the poison flag cleared.
    pub fn new(user_data: T) -> Exclusive<T> {
            lock: LittleLock::new(),
            x: UnsafeArc::new(data)

    // Exactly like std::arc::MutexArc::access(), but with the LittleLock
    // instead of a proper mutex. Same reason for being unsafe.
    //
    // Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
    // accessing the provided condition variable) are prohibited while inside
    // the Exclusive. Supporting that is a work in progress.
    pub unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
        let rec = self.x.get();
        do (*rec).lock.lock {
                // A previous caller failed while holding the lock: poisoned.
                fail!("Poisoned Exclusive::new - another task failed inside!");
            // Mark poisoned for the duration of `f`; cleared again on success,
            // so a failure inside `f` leaves the flag set for later callers.
            (*rec).failed = true;
            let result = f(&mut (*rec).data);
            (*rec).failed = false;

    /// As with(), but hands `f` an immutable view of the data.
    pub unsafe fn with_imm<U>(&self, f: &fn(x: &T) -> U) -> U {
            f(cast::transmute_immut(x))

    /// Waits for all other handles to drop, then returns the enclosed data.
    pub fn unwrap(self) -> T {
        let Exclusive { x: x } = self;
        // Someday we might need to unkillably unwrap an Exclusive, but not today.
        let inner = x.unwrap();
        let ExData { data: user_data, _ } = inner; // will destroy the LittleLock
// FFI declarations for the C runtime's little-lock primitives.
externfn!(fn rust_create_little_lock() -> rust_little_lock)
externfn!(fn rust_destroy_little_lock(lock: rust_little_lock))
externfn!(fn rust_lock_little_lock(lock: rust_little_lock))
externfn!(fn rust_unlock_little_lock(lock: rust_little_lock))
425 use super::{Exclusive, UnsafeArc, atomically};
432 assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
    // Smoke test: an empty atomically block, then a deschedule outside it.
    fn test_atomically() {
        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
        // so we can't really test for the converse behaviour.
        unsafe { do atomically { } } task::deschedule(); // oughtn't fail
    // Hammers one Exclusive from many tasks and checks the final tally.
    // NOTE(review): task-spawn plumbing lines are elided from this view.
    fn exclusive_new_arc() {
            let mut futures = ~[];
            let total = Exclusive::new(~0);
            for _ in range(0u, num_tasks) {
                let total = total.clone();
                let (port, chan) = comm::stream();
                        for _ in range(0u, count) {
                            do total.with |count| {
            // Wait for every worker to report completion.
            for f in futures.iter() { f.recv() }
            do total.with |total| {
                assert!(**total == num_tasks * count)
    #[test] #[should_fail]
    fn exclusive_new_poison() {
        // Tests that if one task fails inside of an Exclusive::new, subsequent
        // accesses will also fail.
        let x = Exclusive::new(1);
495 // Tests that the many-refcounts-at-once constructors don't leak.
496 let _ = UnsafeArc::new2(~~"hello");
497 let x = UnsafeArc::newN(~~"hello", 0);
498 assert_eq!(x.len(), 0)
499 let x = UnsafeArc::newN(~~"hello", 1);
500 assert_eq!(x.len(), 1)
501 let x = UnsafeArc::newN(~~"hello", 10);
502 assert_eq!(x.len(), 10)
    fn arclike_cloneN() {
        // Tests that the many-refcounts-at-once special-clone doesn't leak.
        // NOTE(review): the cloneN(0) and cloneN(1) calls are elided here.
        let x = UnsafeArc::new(~~"hello");
        assert_eq!(x.len(), 0);
        let x = UnsafeArc::new(~~"hello");
        assert_eq!(x.len(), 1);
        let x = UnsafeArc::new(~~"hello");
        let x = x.cloneN(10);
        assert_eq!(x.len(), 10);
    // Unwrapping the sole handle returns the payload directly.
    fn arclike_unwrap_basic() {
        let x = UnsafeArc::new(~~"hello");
        assert!(x.unwrap() == ~~"hello");
    // try_unwrap on the sole handle must succeed with Right(payload).
    fn arclike_try_unwrap() {
        let x = UnsafeArc::new(~~"hello");
        assert!(x.try_unwrap().expect_right("try_unwrap failed") == ~~"hello");
    // try_unwrap must yield Left while a second handle is alive, and succeed
    // via that second handle afterwards.
    // NOTE(review): the `x2 = x.clone()` line is elided from this view.
    fn arclike_try_unwrap_fail() {
        let x = UnsafeArc::new(~~"hello");
        let left_x = x.try_unwrap();
        assert!(left_x.is_left());
        util::ignore(left_x);
        assert!(x2.try_unwrap().expect_right("try_unwrap none") == ~~"hello");
    fn arclike_try_unwrap_unwrap_race() {
        // When an unwrap and a try_unwrap race, the unwrapper should always win.
        // NOTE(review): the task-spawn and channel-signal lines are elided here.
        let x = UnsafeArc::new(~~"hello");
        let x2 = Cell::new(x.clone());
        let (p,c) = comm::stream();
            // Child: blocking unwrap should eventually obtain the payload.
            assert!(x2.take().unwrap() == ~~"hello");
        task::deschedule(); // Try to make the unwrapper get blocked first.
        // Parent's non-blocking attempt must lose the race.
        let left_x = x.try_unwrap();
        assert!(left_x.is_left());
        util::ignore(left_x);
    fn exclusive_new_unwrap_basic() {
        // Unlike the above, also tests no double-freeing of the LittleLock.
        let x = Exclusive::new(~~"hello");
        assert!(x.unwrap() == ~~"hello");
    // Unwrap while another task briefly holds a second handle, then again with
    // the child blocked; spawn/recv plumbing is elided from this view.
    fn exclusive_new_unwrap_contended() {
        let x = Exclusive::new(~~"hello");
        let x2 = Cell::new(x.clone());
            // Child touches the data, then drops its handle.
            unsafe { do x2.with |_hello| { } }
        assert!(x.unwrap() == ~~"hello");

        // Now try the same thing, but with the child task blocking.
        let x = Exclusive::new(~~"hello");
        let x2 = Cell::new(x.clone());
            let mut builder = task::task();
            builder.future_result(|r| res = Some(r));
                assert!(x2.unwrap() == ~~"hello");
        // Have to get rid of our reference before blocking.
    #[test] #[should_fail]
    // Two simultaneous unwraps: the loser must fail! rather than deadlock.
    fn exclusive_new_unwrap_conflict() {
        let x = Exclusive::new(~~"hello");
        let x2 = Cell::new(x.clone());
            let mut builder = task::task();
            builder.future_result(|r| res = Some(r));
                // Child's unwrap claims the unwrapper slot first.
                assert!(x2.unwrap() == ~~"hello");
        // Parent's unwrap then hits the "already unwrapping" failure path.
        assert!(x.unwrap() == ~~"hello");
        // See #4689 for why this can't be just "res.recv()".
        assert!(res.unwrap().recv() == task::Success);
    fn exclusive_new_unwrap_deadlock() {
        // This is not guaranteed to get to the deadlock before being killed,
        // but it will show up sometimes, and if the deadlock were not there,
        // the test would nondeterministically fail.
        let result = do task::try {
            // a task that has two references to the same Exclusive::new will
            // deadlock when it unwraps. nothing to be done about that.
            let x = Exclusive::new(~~"hello");
                do 10.times { task::deschedule(); } // try to let the unwrapper go
                fail!(); // punt it awake from its deadlock
            // NOTE(review): the child's use of the second handle is elided here.
            unsafe { do x2.with |_hello| { } }
        assert!(result.is_err());