################################################################################
TARGET_CRATES := libc std flate arena term \
- serialize sync getopts collections test time rand \
+ serialize getopts collections test time rand \
log regex graphviz core rbml alloc rustrt \
unicode
HOST_CRATES := syntax rustc rustc_trans rustdoc regex_macros fmt_macros \
DEPS_unicode := core
DEPS_alloc := core libc native:jemalloc
DEPS_rustrt := alloc core libc collections native:rustrt_native
-DEPS_std := core libc rand alloc collections rustrt sync unicode \
+DEPS_std := core libc rand alloc collections rustrt unicode \
native:rust_builtin native:backtrace
DEPS_graphviz := std
DEPS_syntax := std term serialize log fmt_macros arena libc
DEPS_serialize := std log
DEPS_rbml := std log serialize
DEPS_term := std log
-DEPS_sync := core alloc rustrt collections
DEPS_getopts := std
DEPS_collections := core alloc unicode
DEPS_num := std
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
- "libsync/mpsc_queue.rs", # BSD
- "libsync/spsc_queue.rs", # BSD
- "libsync/mpmc_bounded_queue.rs", # BSD
- "libsync/mpsc_intrusive.rs", # BSD
+ "libstd/sync/mpsc_queue.rs", # BSD
+ "libstd/sync/spsc_queue.rs", # BSD
+ "libstd/sync/mpmc_bounded_queue.rs", # BSD
"test/bench/shootout-binarytrees.rs", # BSD
"test/bench/shootout-chameneos-redux.rs", # BSD
"test/bench/shootout-fannkuch-redux.rs", # BSD
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Communication primitives for concurrent tasks
+//!
+//! Rust makes it very difficult to share data among tasks, in order to
+//! prevent race conditions and to improve parallelism, but there is often a
+//! need for communication between concurrent tasks. The primitives defined in
+//! this module are the building blocks for synchronization in Rust.
+//!
+//! This module provides message-based communication over channels, concretely
+//! defined by three types:
+//!
+//! * `Sender`
+//! * `SyncSender`
+//! * `Receiver`
+//!
+//! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
+//! senders are clone-able such that many tasks can send simultaneously to one
+//! receiver. These channels are *task blocking*, not *thread blocking*. This
+//! means that if one task is blocked on a channel, other tasks can continue to
+//! make progress.
+//!
+//! Rust channels come in one of two flavors:
+//!
+//! 1. An asynchronous, infinitely buffered channel. The `channel()` function
+//! will return a `(Sender, Receiver)` tuple where all sends will be
+//! **asynchronous** (they never block). The channel conceptually has an
+//! infinite buffer.
+//!
+//! 2. A synchronous, bounded channel. The `sync_channel()` function will return
+//! a `(SyncSender, Receiver)` tuple where the storage for pending messages
+//! is a pre-allocated buffer of a fixed size. All sends will be
+//! **synchronous** by blocking until there is buffer space available. Note
+//! that a bound of 0 is allowed, causing the channel to become a
+//! "rendezvous" channel where each sender atomically hands off a message to
+//! a receiver.
+//!
+//! ## Panic Propagation
+//!
+//! In addition to being a core primitive for communicating in Rust, channels
+//! are the points at which panics are propagated among tasks. Whenever one
+//! half of a channel is closed, the other half will have its next operation
+//! `panic!`. The purpose of this is to allow propagation of panics among tasks
+//! that are linked to one another via channels.
+//!
+//! There are methods on both senders and receivers to perform their
+//! respective operations without panicking, however.
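+//!
+//! For example, `send_opt` returns the message back instead of panicking when
+//! the receiving half has hung up (a small sketch; `send_opt` is documented in
+//! full on `Sender` below):
+//!
+//! ```
+//! let (tx, rx) = channel::<int>();
+//! drop(rx);
+//! // send() would panic!() here, but send_opt() reports the failure
+//! assert_eq!(tx.send_opt(1i), Err(1i));
+//! ```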
+//!
+//! ## Runtime Requirements
+//!
+//! The channel types defined in this module generally have very few runtime
+//! requirements in order to operate. The major requirement they have is for a
+//! local rust `Task` to be available if any *blocking* operation is performed.
+//!
+//! If a local `Task` is not available (for example, in an FFI callback), then
+//! the `send` and `send_opt` operations on a `Sender` are safe, as is the
+//! `try_send` method on a `SyncSender`, but no other operations are
+//! guaranteed to be safe.
+//!
+//! # Example
+//!
+//! Simple usage:
+//!
+//! ```
+//! // Create a simple streaming channel
+//! let (tx, rx) = channel();
+//! spawn(proc() {
+//!     tx.send(10i);
+//! });
+//! assert_eq!(rx.recv(), 10i);
+//! ```
+//!
+//! Shared usage:
+//!
+//! ```
+//! // Create a shared channel which can be sent along from many tasks
+//! // where tx is the sending half (tx for transmission), and rx is the receiving
+//! // half (rx for receiving).
+//! let (tx, rx) = channel();
+//! for i in range(0i, 10i) {
+//!     let tx = tx.clone();
+//!     spawn(proc() {
+//!         tx.send(i);
+//!     })
+//! }
+//!
+//! for _ in range(0i, 10i) {
+//!     let j = rx.recv();
+//!     assert!(0 <= j && j < 10);
+//! }
+//! ```
+//!
+//! Propagating panics:
+//!
+//! ```should_fail
+//! // The call to recv() will panic!() because the channel has already hung
+//! // up (or been deallocated)
+//! let (tx, rx) = channel::<int>();
+//! drop(tx);
+//! rx.recv();
+//! ```
+//!
+//! Synchronous channels:
+//!
+//! ```
+//! let (tx, rx) = sync_channel::<int>(0);
+//! spawn(proc() {
+//!     // This will wait for the parent task to start receiving
+//!     tx.send(53);
+//! });
+//! rx.recv();
+//! ```
+//!
+//! Reading from a channel with a timeout requires using a `Timer` together
+//! with the channel. You can use the `select!` macro to select on either and
+//! handle the timeout case. This first example will break out of the loop
+//! after 10 seconds no matter what:
+//!
+//! ```no_run
+//! use std::io::timer::Timer;
+//! use std::time::Duration;
+//!
+//! let (tx, rx) = channel::<int>();
+//! let mut timer = Timer::new().unwrap();
+//! let timeout = timer.oneshot(Duration::seconds(10));
+//!
+//! loop {
+//!     select! {
+//!         val = rx.recv() => println!("Received {}", val),
+//!         () = timeout.recv() => {
+//!             println!("timed out, total time was more than 10 seconds");
+//!             break;
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! This second example is more costly since it allocates a new timer every
+//! time a message is received, but it allows you to time out after the channel
+//! has been inactive for 5 seconds:
+//!
+//! ```no_run
+//! use std::io::timer::Timer;
+//! use std::time::Duration;
+//!
+//! let (tx, rx) = channel::<int>();
+//! let mut timer = Timer::new().unwrap();
+//!
+//! loop {
+//!     let timeout = timer.oneshot(Duration::seconds(5));
+//!
+//!     select! {
+//!         val = rx.recv() => println!("Received {}", val),
+//!         () = timeout.recv() => {
+//!             println!("timed out, no message received in 5 seconds");
+//!             break;
+//!         }
+//!     }
+//! }
+//! ```
+
+// A description of how Rust's channel implementation works
+//
+// Channels are supposed to be the basic building block for all other
+// concurrent primitives that are used in Rust. As a result, the channel type
+// needs to be highly optimized, flexible, and broad enough for use everywhere.
+//
+// The choice of implementation of all channels is to be built on lock-free data
+// structures. The channels themselves are then consequently also lock-free data
+// structures. As always with lock-free code, this is a very "here be dragons"
+// territory, especially because I'm unaware of any academic papers which have
+// gone into great length about channels of these flavors.
+//
+// ## Flavors of channels
+//
+// From the perspective of a consumer of this library, there is only one flavor
+// of channel. This channel can be used as a stream and cloned to allow multiple
+// senders. Under the hood, however, there are actually three flavors of
+// channels in play.
+//
+// * Oneshots - these channels are highly optimized for the one-send use case.
+// They contain as few atomics as possible and involve one and
+// exactly one allocation.
+// * Streams - these channels are optimized for the non-shared use case. They
+// use a different concurrent queue which is more tailored for this
+// use case. The initial allocation of this flavor of channel is not
+// optimized.
+// * Shared - this is the most general form of channel that this module offers,
+// a channel with multiple senders. This type is as optimized as it
+// can be, but the previous two types mentioned are much faster for
+// their use-cases.
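+//
+// A channel begins life as a Oneshot and is upgraded in place when usage
+// outgrows that flavor, roughly (an illustrative diagram, not code):
+//
+//     Oneshot --(second send)--> Stream --(clone)--> Shared
+//     Oneshot --(clone)--------> Shared
+//
+// The upgrade allocates the new flavor's packet and leaves a redirect in the
+// old packet for the receiver to follow.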
+//
+// ## Concurrent queues
+//
+// The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
+// recv() obviously blocks. This means that under the hood there must be some
+// shared and concurrent queue holding all of the actual data.
+//
+// With two flavors of channels, two flavors of queues are also used. We have
+// chosen to use queues from a well-known author which are abbreviated as SPSC
+// and MPSC (single producer, single consumer and multiple producer, single
+// consumer). SPSC queues are used for streams while MPSC queues are used for
+// shared channels.
+//
+// ### SPSC optimizations
+//
+// The SPSC queue found online is essentially a linked list of nodes where one
+// half of the nodes are the "queue of data" and the other half of nodes are a
+// cache of unused nodes. The unused nodes are used such that an allocation is
+// not required on every push() and a free doesn't need to happen on every
+// pop().
+//
+// As found online, however, the cache of nodes is of an infinite size. This
+// means that if a channel at one point in its life had 50k items in the queue,
+// then the queue will always have the capacity for 50k items. I believed that
+// this was an unnecessary limitation of the implementation, so I have altered
+// the queue to optionally have a bound on the cache size.
+//
+// By default, streams will have an unbounded SPSC queue with a small-ish cache
+// size. The hope is that the cache is still large enough to have very fast
+// send() operations while not too large such that millions of channels can
+// coexist at once.
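+//
+// A sketch of what a bounded node cache might do when the consumer pops a
+// node (illustrative pseudocode only, not the actual queue code):
+//
+//     // after taking a node's data off of the queue...
+//     if cache_size < cache_bound {
+//         cache.push(node);        // recycle the node for a later push()
+//         cache_size += 1;
+//     } else {
+//         free(node);              // cache is at capacity, release the memory
+//     }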
+//
+// ### MPSC optimizations
+//
+// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
+// a linked list under the hood to earn its unboundedness, but I have not put
+// forth much effort into having a cache of nodes similar to the SPSC queue.
+//
+// For now, I believe that this is "ok" because shared channels are not the most
+// common type, but soon we may wish to revisit this queue choice and determine
+// another candidate for backend storage of shared channels.
+//
+// ## Overview of the Implementation
+//
+// Now that there's a little background on the concurrent queues used, it's
+// worth going into much more detail about the channels themselves. The basic
+// pseudocode for a send/recv are:
+//
+//
+//          send(t)                             recv()
+//            queue.push(t)                       return if queue.pop()
+//            if increment() == -1                deschedule {
+//              wakeup()                            if decrement() > 0
+//                                                    cancel_deschedule()
+//                                                }
+//                                                queue.pop()
+//
+// As mentioned before, there are no locks in this implementation, only atomic
+// instructions are used.
+//
+// ### The internal atomic counter
+//
+// Every channel has a counter shared between both halves to keep track of the
+// size of the queue. This counter is used to abort descheduling by the
+// receiver and to know when to wake up on the sending side.
+//
+// As seen in the pseudocode, senders will increment this count and receivers
+// will decrement the count. The theory behind this is that if a sender sees a
+// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
+// then it doesn't need to block.
+//
+// The recv() method has a beginning call to pop(), and if successful, it needs
+// to decrement the count. It is a crucial implementation detail that this
+// decrement does *not* happen to the shared counter. If this were the case,
+// then it would be possible for the counter to be very negative when there were
+// no receivers waiting, in which case the senders would have to determine when
+// it was actually appropriate to wake up a receiver.
+//
+// Instead, the "steal count" is kept track of separately (not atomically
+// because it's only used by receivers), and then the decrement() call when
+// descheduling will lump in all of the recent steals into one large decrement.
+//
+// The implication of this is that if a sender sees a -1 count, then there's
+// guaranteed to be a waiter waiting!
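+//
+// Illustrative pseudocode for that descheduling decrement (a sketch only, not
+// the actual code; disconnection handling is elided):
+//
+//     let steals = self.steals;                  // private to the receiver
+//     self.steals = 0;
+//     if self.cnt.fetch_sub(1 + steals, SeqCst) - steals > 0 {
+//         cancel_deschedule();                   // data raced in, don't block
+//     }                                          // otherwise, go to sleep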
+//
+// ## Native Implementation
+//
+// A major goal of these channels is to work seamlessly on and off the runtime.
+// All of the previous race conditions have been worded in terms of
+// scheduler-isms (which are obviously not available without the runtime).
+//
+// For now, native usage of channels (off the runtime) will fall back onto
+// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
+// is still entirely lock-free; the "deschedule" blocks above are surrounded by
+// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
+// condition variable.
+//
+// ## Select
+//
+// Being able to support selection over channels has greatly influenced this
+// design, and not only does selection need to work inside the runtime, but also
+// outside the runtime.
+//
+// The implementation is fairly straightforward. The goal of select() is not to
+// return some data, but only to return which channel can receive data without
+// blocking. The implementation is essentially the entire blocking procedure
+// followed by an increment as soon as it's woken up. The cancellation procedure
+// involves an increment and swapping out of `to_wake` to acquire ownership of
+// the task to unblock.
+//
+// Sadly this current implementation requires multiple allocations, so I have
+// seen the throughput of select() be much worse than it should be. I do not
+// believe that there is anything fundamental which needs to change about these
+// channels, however, in order to support a more efficient select().
+//
+// # Conclusion
+//
+// And now that you've seen all the races that I found and attempted to fix,
+// here's the code for you to find some more!
+
+use core::prelude::*;
+
+pub use self::TryRecvError::*;
+pub use self::TrySendError::*;
+use self::Flavor::*;
+
+use alloc::arc::Arc;
+use core::kinds::marker;
+use core::mem;
+use core::cell::UnsafeCell;
+use rustrt::task::BlockedTask;
+
+pub use comm::select::{Select, Handle};
+
+macro_rules! test (
+ { fn $name:ident() $b:block $(#[$a:meta])*} => (
+ mod $name {
+ #![allow(unused_imports)]
+
+ extern crate rustrt;
+
+ use prelude::*;
+
+ use comm::*;
+ use super::*;
+ use task;
+
+ $(#[$a])* #[test] fn f() { $b }
+ }
+ )
+)
+
+mod oneshot;
+mod select;
+mod shared;
+mod stream;
+mod sync;
+
+/// The receiving-half of Rust's channel type. This half can only be owned by
+/// one task.
+#[unstable]
+pub struct Receiver<T> {
+ inner: UnsafeCell<Flavor<T>>,
+ // can't share in an arc
+ _marker: marker::NoSync,
+}
+
+/// An iterator over messages on a receiver. This iterator will block
+/// whenever `next` is called, waiting for a new message, and `None` will be
+/// returned when the corresponding channel has hung up.
+#[unstable]
+pub struct Messages<'a, T:'a> {
+ rx: &'a Receiver<T>
+}
+
+/// The sending-half of Rust's asynchronous channel type. This half can only be
+/// owned by one task, but it can be cloned to send to other tasks.
+#[unstable]
+pub struct Sender<T> {
+ inner: UnsafeCell<Flavor<T>>,
+ // can't share in an arc
+ _marker: marker::NoSync,
+}
+
+/// The sending-half of Rust's synchronous channel type. This half can only be
+/// owned by one task, but it can be cloned to send to other tasks.
+#[unstable = "this type may be renamed, but it will always exist"]
+pub struct SyncSender<T> {
+ inner: Arc<UnsafeCell<sync::Packet<T>>>,
+ // can't share in an arc
+ _marker: marker::NoSync,
+}
+
+/// This enumeration is the list of possible reasons that `try_recv` could not
+/// return data when called.
+#[deriving(PartialEq, Clone, Show)]
+#[experimental = "this is likely to be removed in changing try_recv()"]
+pub enum TryRecvError {
+ /// This channel is currently empty, but the sender(s) have not yet
+ /// disconnected, so data may yet become available.
+ Empty,
+    /// This channel's sending half has become disconnected, and there will
+    /// never be any more data received on this channel.
+ Disconnected,
+}
+
+/// This enumeration is the list of the possible error outcomes for the
+/// `SyncSender::try_send` method.
+#[deriving(PartialEq, Clone, Show)]
+#[experimental = "this is likely to be removed in changing try_send()"]
+pub enum TrySendError<T> {
+    /// The data could not be sent on the channel because it would require that
+    /// the caller block to send the data.
+ ///
+ /// If this is a buffered channel, then the buffer is full at this time. If
+ /// this is not a buffered channel, then there is no receiver available to
+ /// acquire the data.
+ Full(T),
+    /// This channel's receiving half has disconnected, so the data could not be
+    /// sent. The data is returned back to the caller in this case.
+ RecvDisconnected(T),
+}
+
+enum Flavor<T> {
+ Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
+ Stream(Arc<UnsafeCell<stream::Packet<T>>>),
+ Shared(Arc<UnsafeCell<shared::Packet<T>>>),
+ Sync(Arc<UnsafeCell<sync::Packet<T>>>),
+}
+
+#[doc(hidden)]
+trait UnsafeFlavor<T> {
+ fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>>;
+ unsafe fn inner_mut<'a>(&'a self) -> &'a mut Flavor<T> {
+ &mut *self.inner_unsafe().get()
+ }
+ unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
+ &*self.inner_unsafe().get()
+ }
+}
+impl<T> UnsafeFlavor<T> for Sender<T> {
+ fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
+ &self.inner
+ }
+}
+impl<T> UnsafeFlavor<T> for Receiver<T> {
+ fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
+ &self.inner
+ }
+}
+
+/// Creates a new asynchronous channel, returning the sender/receiver halves.
+///
+/// All data sent on the sender will become available on the receiver, and no
+/// send will block the calling task (this channel has an "infinite buffer").
+///
+/// # Example
+///
+/// ```
+/// // tx is the sending half (tx for transmission), and rx is the receiving
+/// // half (rx for receiving).
+/// let (tx, rx) = channel();
+///
+/// // Spawn off an expensive computation
+/// spawn(proc() {
+/// #   fn expensive_computation() {}
+///     tx.send(expensive_computation());
+/// });
+///
+/// // Do some useful work for awhile
+///
+/// // Let's see what that answer was
+/// println!("{}", rx.recv());
+/// ```
+#[unstable]
+pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
+ let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
+ (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
+}
+
+/// Creates a new synchronous, bounded channel.
+///
+/// Like asynchronous channels, the `Receiver` will block until a message
+/// becomes available. These channels differ greatly in the semantics of the
+/// sender from asynchronous channels, however.
+///
+/// This channel has an internal buffer on which messages will be queued. When
+/// the internal buffer becomes full, future sends will *block* waiting for the
+/// buffer to open up. Note that a buffer size of 0 is valid, in which case this
+/// becomes a "rendezvous channel" where each send will not return until a recv
+/// is paired with it.
+///
+/// As with asynchronous channels, all senders will panic in `send` if the
+/// `Receiver` has been destroyed.
+///
+/// # Example
+///
+/// ```
+/// let (tx, rx) = sync_channel(1);
+///
+/// // this returns immediately
+/// tx.send(1i);
+///
+/// spawn(proc() {
+///     // this will block until the previous message has been received
+///     tx.send(2i);
+/// });
+///
+/// assert_eq!(rx.recv(), 1i);
+/// assert_eq!(rx.recv(), 2i);
+/// ```
+#[unstable = "this function may be renamed to more accurately reflect the type \
+              of channel that it is creating"]
+pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
+ let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
+ (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Sender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> Sender<T> {
+ fn new(inner: Flavor<T>) -> Sender<T> {
+ Sender {
+ inner: UnsafeCell::new(inner),
+ _marker: marker::NoSync,
+ }
+ }
+
+ /// Sends a value along this channel to be received by the corresponding
+ /// receiver.
+ ///
+    /// Rust channels are infinitely buffered, so this method will never block.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the other end of the channel has hung up.
+ /// This means that if the corresponding receiver has fallen out of scope,
+ /// this function will trigger a panic message saying that a message is
+ /// being sent on a closed channel.
+ ///
+ /// Note that if this function does *not* panic, it does not mean that the
+ /// data will be successfully received. All sends are placed into a queue,
+ /// so it is possible for a send to succeed (the other end is alive), but
+ /// then the other end could immediately disconnect.
+ ///
+    /// The purpose of this functionality is to propagate panics among tasks.
+    /// If a panic is not desired, then consider using the `send_opt` method.
+    #[experimental = "this function is being considered a candidate for removal \
+                      to adhere to the general guidelines of Rust"]
+ pub fn send(&self, t: T) {
+ if self.send_opt(t).is_err() {
+ panic!("sending on a closed channel");
+ }
+ }
+
+ /// Attempts to send a value on this channel, returning it back if it could
+ /// not be sent.
+ ///
+ /// A successful send occurs when it is determined that the other end of
+ /// the channel has not hung up already. An unsuccessful send would be one
+ /// where the corresponding receiver has already been deallocated. Note
+ /// that a return value of `Err` means that the data will never be
+ /// received, but a return value of `Ok` does *not* mean that the data
+ /// will be received. It is possible for the corresponding receiver to
+ /// hang up immediately after this function returns `Ok`.
+ ///
+ /// Like `send`, this method will never block.
+ ///
+ /// # Panics
+ ///
+    /// This method will never panic; it will return the message back to the
+    /// caller if the other end is disconnected.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let (tx, rx) = channel();
+ ///
+ /// // This send is always successful
+ /// assert_eq!(tx.send_opt(1i), Ok(()));
+ ///
+ /// // This send will fail because the receiver is gone
+ /// drop(rx);
+ /// assert_eq!(tx.send_opt(1i), Err(1));
+ /// ```
+ #[unstable = "this function may be renamed to send() in the future"]
+ pub fn send_opt(&self, t: T) -> Result<(), T> {
+ let (new_inner, ret) = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ unsafe {
+ let p = p.get();
+ if !(*p).sent() {
+ return (*p).send(t);
+ } else {
+ let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
+ match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
+ oneshot::UpSuccess => {
+ let ret = (*a.get()).send(t);
+ (a, ret)
+ }
+ oneshot::UpDisconnected => (a, Err(t)),
+ oneshot::UpWoke(task) => {
+ // This send cannot panic because the task is
+ // asleep (we're looking at it), so the receiver
+ // can't go away.
+ (*a.get()).send(t).ok().unwrap();
+ task.wake().map(|t| t.reawaken());
+ (a, Ok(()))
+ }
+ }
+ }
+ }
+ }
+ Stream(ref p) => return unsafe { (*p.get()).send(t) },
+ Shared(ref p) => return unsafe { (*p.get()).send(t) },
+ Sync(..) => unreachable!(),
+ };
+
+ unsafe {
+ let tmp = Sender::new(Stream(new_inner));
+ mem::swap(self.inner_mut(), tmp.inner_mut());
+ }
+ return ret;
+ }
+}
+
+#[unstable]
+impl<T: Send> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ let (packet, sleeper) = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ unsafe {
+ (*a.get()).postinit_lock();
+ match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
+ oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
+ oneshot::UpWoke(task) => (a, Some(task))
+ }
+ }
+ }
+ Stream(ref p) => {
+ let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ unsafe {
+ (*a.get()).postinit_lock();
+ match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
+ stream::UpSuccess | stream::UpDisconnected => (a, None),
+ stream::UpWoke(task) => (a, Some(task)),
+ }
+ }
+ }
+ Shared(ref p) => {
+ unsafe { (*p.get()).clone_chan(); }
+ return Sender::new(Shared(p.clone()));
+ }
+ Sync(..) => unreachable!(),
+ };
+
+ unsafe {
+ (*packet.get()).inherit_blocker(sleeper);
+
+ let tmp = Sender::new(Shared(packet.clone()));
+ mem::swap(self.inner_mut(), tmp.inner_mut());
+ }
+ Sender::new(Shared(packet))
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Sender<T> {
+ fn drop(&mut self) {
+ match *unsafe { self.inner_mut() } {
+ Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+ Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+ Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+ Sync(..) => unreachable!(),
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SyncSender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> SyncSender<T> {
+ fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
+ SyncSender { inner: inner, _marker: marker::NoSync }
+ }
+
+ /// Sends a value on this synchronous channel.
+ ///
+ /// This function will *block* until space in the internal buffer becomes
+ /// available or a receiver is available to hand off the message to.
+ ///
+ /// Note that a successful send does *not* guarantee that the receiver will
+ /// ever see the data if there is a buffer on this channel. Messages may be
+ /// enqueued in the internal buffer for the receiver to receive at a later
+ /// time. If the buffer size is 0, however, it can be guaranteed that the
+ /// receiver has indeed received the data if this function returns success.
+ ///
+ /// # Panics
+ ///
+ /// Similarly to `Sender::send`, this function will panic if the
+ /// corresponding `Receiver` for this channel has disconnected. This
+ /// behavior is used to propagate panics among tasks.
+ ///
+ /// If a panic is not desired, you can achieve the same semantics with the
+ /// `SyncSender::send_opt` method which will not panic if the receiver
+ /// disconnects.
+    #[experimental = "this function is being considered a candidate for removal \
+                      to adhere to the general guidelines of Rust"]
+ pub fn send(&self, t: T) {
+ if self.send_opt(t).is_err() {
+ panic!("sending on a closed channel");
+ }
+ }
+
+    /// Sends a value on this channel, returning it back if the receiver has
+    /// disconnected.
+ ///
+ /// This method will *block* to send the value `t` on the channel, but if
+ /// the value could not be sent due to the receiver disconnecting, the value
+    /// is returned back to the caller. This function is similar to `try_send`,
+ /// except that it will block if the channel is currently full.
+ ///
+ /// # Panics
+ ///
+ /// This function cannot panic.
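+    ///
+    /// # Example
+    ///
+    /// A short example of both outcomes:
+    ///
+    /// ```
+    /// let (tx, rx) = sync_channel::<int>(1);
+    /// // there is room in the buffer, so this returns immediately
+    /// assert_eq!(tx.send_opt(1), Ok(()));
+    /// drop(rx);
+    /// // the receiver has disconnected, so the value comes back
+    /// assert_eq!(tx.send_opt(2), Err(2));
+    /// ```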
+ #[unstable = "this function may be renamed to send() in the future"]
+ pub fn send_opt(&self, t: T) -> Result<(), T> {
+ unsafe { (*self.inner.get()).send(t) }
+ }
+
+ /// Attempts to send a value on this channel without blocking.
+ ///
+ /// This method differs from `send_opt` by returning immediately if the
+ /// channel's buffer is full or no receiver is waiting to acquire some
+ /// data. Compared with `send_opt`, this function has two failure cases
+ /// instead of one (one for disconnection, one for a full buffer).
+ ///
+ /// See `SyncSender::send` for notes about guarantees of whether the
+ /// receiver has received the data or not if this function is successful.
+ ///
+ /// # Panics
+ ///
+ /// This function cannot panic
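+    ///
+    /// # Example
+    ///
+    /// A short example of the two failure cases (`Full` and
+    /// `RecvDisconnected` are re-exported from this module):
+    ///
+    /// ```
+    /// use std::comm::{Full, RecvDisconnected};
+    ///
+    /// let (tx, rx) = sync_channel::<int>(1);
+    /// // the buffer has room for exactly one message
+    /// assert_eq!(tx.try_send(1), Ok(()));
+    /// // the buffer is now full, so a second send would block
+    /// assert_eq!(tx.try_send(2), Err(Full(2)));
+    /// drop(rx);
+    /// // the receiver is gone entirely
+    /// assert_eq!(tx.try_send(3), Err(RecvDisconnected(3)));
+    /// ```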
+ #[unstable = "the return type of this function is candidate for \
+ modification"]
+ pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+ unsafe { (*self.inner.get()).try_send(t) }
+ }
+}
+
+#[unstable]
+impl<T: Send> Clone for SyncSender<T> {
+ fn clone(&self) -> SyncSender<T> {
+ unsafe { (*self.inner.get()).clone_chan(); }
+ return SyncSender::new(self.inner.clone());
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for SyncSender<T> {
+ fn drop(&mut self) {
+ unsafe { (*self.inner.get()).drop_chan(); }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Receiver
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> Receiver<T> {
+ fn new(inner: Flavor<T>) -> Receiver<T> {
+ Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
+ }
+
+ /// Blocks waiting for a value on this receiver
+ ///
+ /// This function will block if necessary to wait for a corresponding send
+ /// on the channel from its paired `Sender` structure. This receiver will
+ /// be woken up when data is ready, and the data will be returned.
+ ///
+ /// # Panics
+ ///
+ /// Similar to channels, this method will trigger a task panic if the
+ /// other end of the channel has hung up (been deallocated). The purpose of
+    /// this is to propagate panics among tasks.
+ ///
+ /// If a panic is not desired, then there are two options:
+ ///
+    /// * If blocking is still desired, the `recv_opt` method will return
+    ///   `Err(())` when the other end hangs up
+ ///
+ /// * If blocking is not desired, then the `try_recv` method will attempt to
+ /// peek at a value on this receiver.
+    #[experimental = "this function is being considered a candidate for removal \
+                      to adhere to the general guidelines of Rust"]
+ pub fn recv(&self) -> T {
+ match self.recv_opt() {
+ Ok(t) => t,
+ Err(()) => panic!("receiving on a closed channel"),
+ }
+ }
+
+ /// Attempts to return a pending value on this receiver without blocking
+ ///
+ /// This method will never block the caller in order to wait for data to
+ /// become available. Instead, this will always return immediately with a
+ /// possible option of pending data on the channel.
+ ///
+ /// This is useful for a flavor of "optimistic check" before deciding to
+ /// block on a receiver.
+ ///
+ /// # Panics
+ ///
+ /// This function cannot panic.
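+    ///
+    /// # Example
+    ///
+    /// A short example of each result (`Empty` and `Disconnected` are
+    /// re-exported from this module):
+    ///
+    /// ```
+    /// use std::comm::{Empty, Disconnected};
+    ///
+    /// let (tx, rx) = channel::<int>();
+    /// assert_eq!(rx.try_recv(), Err(Empty));
+    /// tx.send(1i);
+    /// assert_eq!(rx.try_recv(), Ok(1));
+    /// drop(tx);
+    /// assert_eq!(rx.try_recv(), Err(Disconnected));
+    /// ```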
+ #[unstable = "the return type of this function may be altered"]
+ pub fn try_recv(&self) -> Result<T, TryRecvError> {
+ loop {
+ let new_port = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ match unsafe { (*p.get()).try_recv() } {
+ Ok(t) => return Ok(t),
+ Err(oneshot::Empty) => return Err(Empty),
+ Err(oneshot::Disconnected) => return Err(Disconnected),
+ Err(oneshot::Upgraded(rx)) => rx,
+ }
+ }
+ Stream(ref p) => {
+ match unsafe { (*p.get()).try_recv() } {
+ Ok(t) => return Ok(t),
+ Err(stream::Empty) => return Err(Empty),
+ Err(stream::Disconnected) => return Err(Disconnected),
+ Err(stream::Upgraded(rx)) => rx,
+ }
+ }
+ Shared(ref p) => {
+ match unsafe { (*p.get()).try_recv() } {
+ Ok(t) => return Ok(t),
+ Err(shared::Empty) => return Err(Empty),
+ Err(shared::Disconnected) => return Err(Disconnected),
+ }
+ }
+ Sync(ref p) => {
+ match unsafe { (*p.get()).try_recv() } {
+ Ok(t) => return Ok(t),
+ Err(sync::Empty) => return Err(Empty),
+ Err(sync::Disconnected) => return Err(Disconnected),
+ }
+ }
+ };
+ unsafe {
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
+ }
+ }
+ }
+
+    /// Attempts to wait for a value on this receiver, but does not panic if
+    /// the corresponding channel has hung up.
+    ///
+    /// This method will always block if there is no data available on the
+    /// receiver, but it will not panic in the case that the channel has been
+    /// deallocated.
+ ///
+ /// In other words, this function has the same semantics as the `recv`
+ /// method except for the panic aspect.
+ ///
+ /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
+ /// the value found on the receiver is returned.
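+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let (tx, rx) = channel::<int>();
+    /// tx.send(1i);
+    /// drop(tx);
+    /// // the buffered value is still returned, then the hang-up is reported
+    /// assert_eq!(rx.recv_opt(), Ok(1));
+    /// assert_eq!(rx.recv_opt(), Err(()));
+    /// ```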
+ #[unstable = "this function may be renamed to recv()"]
+ pub fn recv_opt(&self) -> Result<T, ()> {
+ loop {
+ let new_port = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ match unsafe { (*p.get()).recv() } {
+ Ok(t) => return Ok(t),
+ Err(oneshot::Empty) => return unreachable!(),
+ Err(oneshot::Disconnected) => return Err(()),
+ Err(oneshot::Upgraded(rx)) => rx,
+ }
+ }
+ Stream(ref p) => {
+ match unsafe { (*p.get()).recv() } {
+ Ok(t) => return Ok(t),
+ Err(stream::Empty) => return unreachable!(),
+ Err(stream::Disconnected) => return Err(()),
+ Err(stream::Upgraded(rx)) => rx,
+ }
+ }
+ Shared(ref p) => {
+ match unsafe { (*p.get()).recv() } {
+ Ok(t) => return Ok(t),
+ Err(shared::Empty) => return unreachable!(),
+ Err(shared::Disconnected) => return Err(()),
+ }
+ }
+ Sync(ref p) => return unsafe { (*p.get()).recv() }
+ };
+ unsafe {
+ mem::swap(self.inner_mut(), new_port.inner_mut());
+ }
+ }
+ }
+
+ /// Returns an iterator which will block waiting for messages, but never
+ /// `panic!`. It will return `None` when the channel has hung up.
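+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let (tx, rx) = channel::<int>();
+    /// spawn(proc() {
+    ///     for i in range(0i, 5) { tx.send(i); }
+    /// });
+    /// // the iterator yields each message and ends once tx is dropped
+    /// let mut acc = 0;
+    /// for x in rx.iter() {
+    ///     acc += x;
+    /// }
+    /// assert_eq!(acc, 10);
+    /// ```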
+ #[unstable]
+ pub fn iter<'a>(&'a self) -> Messages<'a, T> {
+ Messages { rx: self }
+ }
+}
+
+impl<T: Send> select::Packet for Receiver<T> {
+ fn can_recv(&self) -> bool {
+ loop {
+ let new_port = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ match unsafe { (*p.get()).can_recv() } {
+ Ok(ret) => return ret,
+ Err(upgrade) => upgrade,
+ }
+ }
+ Stream(ref p) => {
+ match unsafe { (*p.get()).can_recv() } {
+ Ok(ret) => return ret,
+ Err(upgrade) => upgrade,
+ }
+ }
+ Shared(ref p) => {
+ return unsafe { (*p.get()).can_recv() };
+ }
+ Sync(ref p) => {
+ return unsafe { (*p.get()).can_recv() };
+ }
+ };
+ unsafe {
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
+ }
+ }
+ }
+
+ fn start_selection(&self, mut task: BlockedTask) -> Result<(), BlockedTask>{
+ loop {
+ let (t, new_port) = match *unsafe { self.inner() } {
+ Oneshot(ref p) => {
+ match unsafe { (*p.get()).start_selection(task) } {
+ oneshot::SelSuccess => return Ok(()),
+ oneshot::SelCanceled(task) => return Err(task),
+ oneshot::SelUpgraded(t, rx) => (t, rx),
+ }
+ }
+ Stream(ref p) => {
+ match unsafe { (*p.get()).start_selection(task) } {
+ stream::SelSuccess => return Ok(()),
+ stream::SelCanceled(task) => return Err(task),
+ stream::SelUpgraded(t, rx) => (t, rx),
+ }
+ }
+ Shared(ref p) => {
+ return unsafe { (*p.get()).start_selection(task) };
+ }
+ Sync(ref p) => {
+ return unsafe { (*p.get()).start_selection(task) };
+ }
+ };
+ task = t;
+ unsafe {
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
+ }
+ }
+ }
+
+ fn abort_selection(&self) -> bool {
+ let mut was_upgrade = false;
+ loop {
+ let result = match *unsafe { self.inner() } {
+ Oneshot(ref p) => unsafe { (*p.get()).abort_selection() },
+ Stream(ref p) => unsafe {
+ (*p.get()).abort_selection(was_upgrade)
+ },
+ Shared(ref p) => return unsafe {
+ (*p.get()).abort_selection(was_upgrade)
+ },
+ Sync(ref p) => return unsafe {
+ (*p.get()).abort_selection()
+ },
+ };
+ let new_port = match result { Ok(b) => return b, Err(p) => p };
+ was_upgrade = true;
+ unsafe {
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
+ }
+ }
+ }
+}
+
+#[unstable]
+impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
+ fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ match *unsafe { self.inner_mut() } {
+ Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
+ Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
+ Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
+ Sync(ref mut p) => unsafe { (*p.get()).drop_port(); },
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+
+ use os;
+ use super::*;
+
+ pub fn stress_factor() -> uint {
+ match os::getenv("RUST_TEST_STRESS") {
+ Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
+ None => 1,
+ }
+ }
+
+ test!(fn smoke() {
+ let (tx, rx) = channel::<int>();
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn drop_full() {
+ let (tx, _rx) = channel();
+ tx.send(box 1i);
+ })
+
+ test!(fn drop_full_shared() {
+ let (tx, _rx) = channel();
+ drop(tx.clone());
+ drop(tx.clone());
+ tx.send(box 1i);
+ })
+
+ test!(fn smoke_shared() {
+ let (tx, rx) = channel::<int>();
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ let tx = tx.clone();
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn smoke_threads() {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ tx.send(1);
+ });
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn smoke_port_gone() {
+ let (tx, rx) = channel::<int>();
+ drop(rx);
+ tx.send(1);
+ } #[should_fail])
+
+ test!(fn smoke_shared_port_gone() {
+ let (tx, rx) = channel::<int>();
+ drop(rx);
+ tx.send(1);
+ } #[should_fail])
+
+ test!(fn smoke_shared_port_gone2() {
+ let (tx, rx) = channel::<int>();
+ drop(rx);
+ let tx2 = tx.clone();
+ drop(tx);
+ tx2.send(1);
+ } #[should_fail])
+
+ test!(fn port_gone_concurrent() {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ rx.recv();
+ });
+ loop { tx.send(1) }
+ } #[should_fail])
+
+ test!(fn port_gone_concurrent_shared() {
+ let (tx, rx) = channel::<int>();
+ let tx2 = tx.clone();
+ spawn(proc() {
+ rx.recv();
+ });
+ loop {
+ tx.send(1);
+ tx2.send(1);
+ }
+ } #[should_fail])
+
+ test!(fn smoke_chan_gone() {
+ let (tx, rx) = channel::<int>();
+ drop(tx);
+ rx.recv();
+ } #[should_fail])
+
+ test!(fn smoke_chan_gone_shared() {
+ let (tx, rx) = channel::<()>();
+ let tx2 = tx.clone();
+ drop(tx);
+ drop(tx2);
+ rx.recv();
+ } #[should_fail])
+
+ test!(fn chan_gone_concurrent() {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ tx.send(1);
+ tx.send(1);
+ });
+ loop { rx.recv(); }
+ } #[should_fail])
+
+ test!(fn stress() {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ for _ in range(0u, 10000) { tx.send(1i); }
+ });
+ for _ in range(0u, 10000) {
+ assert_eq!(rx.recv(), 1);
+ }
+ })
+
+ test!(fn stress_shared() {
+ static AMT: uint = 10000;
+ static NTHREADS: uint = 8;
+ let (tx, rx) = channel::<int>();
+ let (dtx, drx) = channel::<()>();
+
+ spawn(proc() {
+ for _ in range(0, AMT * NTHREADS) {
+ assert_eq!(rx.recv(), 1);
+ }
+ match rx.try_recv() {
+ Ok(..) => panic!(),
+ _ => {}
+ }
+ dtx.send(());
+ });
+
+ for _ in range(0, NTHREADS) {
+ let tx = tx.clone();
+ spawn(proc() {
+ for _ in range(0, AMT) { tx.send(1); }
+ });
+ }
+ drop(tx);
+ drx.recv();
+ })
+
+ #[test]
+ fn send_from_outside_runtime() {
+ let (tx1, rx1) = channel::<()>();
+ let (tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+ let tx4 = tx3.clone();
+ spawn(proc() {
+ tx1.send(());
+ for _ in range(0i, 40) {
+ assert_eq!(rx2.recv(), 1);
+ }
+ tx3.send(());
+ });
+ rx1.recv();
+ spawn(proc() {
+ for _ in range(0i, 40) {
+ tx2.send(1);
+ }
+ tx4.send(());
+ });
+ rx3.recv();
+ rx3.recv();
+ }
+
+ #[test]
+ fn recv_from_outside_runtime() {
+ let (tx, rx) = channel::<int>();
+ let (dtx, drx) = channel();
+ spawn(proc() {
+ for _ in range(0i, 40) {
+ assert_eq!(rx.recv(), 1);
+ }
+ dtx.send(());
+ });
+ for _ in range(0u, 40) {
+ tx.send(1);
+ }
+ drx.recv();
+ }
+
+ #[test]
+ fn no_runtime() {
+ let (tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+ let tx4 = tx3.clone();
+ spawn(proc() {
+ assert_eq!(rx1.recv(), 1);
+ tx2.send(2);
+ tx4.send(());
+ });
+ spawn(proc() {
+ tx1.send(1);
+ assert_eq!(rx2.recv(), 2);
+ tx3.send(());
+ });
+ rx3.recv();
+ rx3.recv();
+ }
+
+ test!(fn oneshot_single_thread_close_port_first() {
+ // Simple test of closing without sending
+ let (_tx, rx) = channel::<int>();
+ drop(rx);
+ })
+
+ test!(fn oneshot_single_thread_close_chan_first() {
+ // Simple test of closing without sending
+ let (tx, _rx) = channel::<int>();
+ drop(tx);
+ })
+
+ test!(fn oneshot_single_thread_send_port_close() {
+ // Testing that the sender cleans up the payload if receiver is closed
+ let (tx, rx) = channel::<Box<int>>();
+ drop(rx);
+ tx.send(box 0);
+ } #[should_fail])
+
+ test!(fn oneshot_single_thread_recv_chan_close() {
+ // Receiving on a closed chan will panic
+ let res = task::try(proc() {
+ let (tx, rx) = channel::<int>();
+ drop(tx);
+ rx.recv();
+ });
+ // What is our res?
+ assert!(res.is_err());
+ })
+
+ test!(fn oneshot_single_thread_send_then_recv() {
+ let (tx, rx) = channel::<Box<int>>();
+ tx.send(box 10);
+ assert!(rx.recv() == box 10);
+ })
+
+ test!(fn oneshot_single_thread_try_send_open() {
+ let (tx, rx) = channel::<int>();
+ assert!(tx.send_opt(10).is_ok());
+ assert!(rx.recv() == 10);
+ })
+
+ test!(fn oneshot_single_thread_try_send_closed() {
+ let (tx, rx) = channel::<int>();
+ drop(rx);
+ assert!(tx.send_opt(10).is_err());
+ })
+
+ test!(fn oneshot_single_thread_try_recv_open() {
+ let (tx, rx) = channel::<int>();
+ tx.send(10);
+ assert!(rx.recv_opt() == Ok(10));
+ })
+
+ test!(fn oneshot_single_thread_try_recv_closed() {
+ let (tx, rx) = channel::<int>();
+ drop(tx);
+ assert!(rx.recv_opt() == Err(()));
+ })
+
+ test!(fn oneshot_single_thread_peek_data() {
+ let (tx, rx) = channel::<int>();
+        assert_eq!(rx.try_recv(), Err(Empty));
+ tx.send(10);
+ assert_eq!(rx.try_recv(), Ok(10));
+ })
+
+ test!(fn oneshot_single_thread_peek_close() {
+ let (tx, rx) = channel::<int>();
+ drop(tx);
+ assert_eq!(rx.try_recv(), Err(Disconnected));
+ assert_eq!(rx.try_recv(), Err(Disconnected));
+ })
+
+ test!(fn oneshot_single_thread_peek_open() {
+ let (_tx, rx) = channel::<int>();
+ assert_eq!(rx.try_recv(), Err(Empty));
+ })
+
+ test!(fn oneshot_multi_task_recv_then_send() {
+ let (tx, rx) = channel::<Box<int>>();
+ spawn(proc() {
+ assert!(rx.recv() == box 10);
+ });
+
+ tx.send(box 10);
+ })
+
+ test!(fn oneshot_multi_task_recv_then_close() {
+ let (tx, rx) = channel::<Box<int>>();
+ spawn(proc() {
+ drop(tx);
+ });
+ let res = task::try(proc() {
+ assert!(rx.recv() == box 10);
+ });
+ assert!(res.is_err());
+ })
+
+ test!(fn oneshot_multi_thread_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ drop(rx);
+ });
+ drop(tx);
+ }
+ })
+
+ test!(fn oneshot_multi_thread_send_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ drop(rx);
+ });
+ let _ = task::try(proc() {
+ tx.send(1);
+ });
+ }
+ })
+
+ test!(fn oneshot_multi_thread_recv_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = channel::<int>();
+ spawn(proc() {
+ let res = task::try(proc() {
+ rx.recv();
+ });
+ assert!(res.is_err());
+ });
+ spawn(proc() {
+ spawn(proc() {
+ drop(tx);
+ });
+ });
+ }
+ })
+
+ test!(fn oneshot_multi_thread_send_recv_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = channel();
+ spawn(proc() {
+ tx.send(box 10i);
+ });
+ spawn(proc() {
+ assert!(rx.recv() == box 10i);
+ });
+ }
+ })
+
+ test!(fn stream_send_recv_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = channel();
+
+ send(tx, 0);
+ recv(rx, 0);
+
+ fn send(tx: Sender<Box<int>>, i: int) {
+ if i == 10 { return }
+
+ spawn(proc() {
+ tx.send(box i);
+ send(tx, i + 1);
+ });
+ }
+
+ fn recv(rx: Receiver<Box<int>>, i: int) {
+ if i == 10 { return }
+
+ spawn(proc() {
+ assert!(rx.recv() == box i);
+ recv(rx, i + 1);
+ });
+ }
+ }
+ })
+
+ test!(fn recv_a_lot() {
+ // Regression test that we don't run out of stack in scheduler context
+ let (tx, rx) = channel();
+ for _ in range(0i, 10000) { tx.send(()); }
+ for _ in range(0i, 10000) { rx.recv(); }
+ })
+
+ test!(fn shared_chan_stress() {
+ let (tx, rx) = channel();
+ let total = stress_factor() + 100;
+ for _ in range(0, total) {
+ let tx = tx.clone();
+ spawn(proc() {
+ tx.send(());
+ });
+ }
+
+ for _ in range(0, total) {
+ rx.recv();
+ }
+ })
+
+ test!(fn test_nested_recv_iter() {
+ let (tx, rx) = channel::<int>();
+ let (total_tx, total_rx) = channel::<int>();
+
+ spawn(proc() {
+ let mut acc = 0;
+ for x in rx.iter() {
+ acc += x;
+ }
+ total_tx.send(acc);
+ });
+
+ tx.send(3);
+ tx.send(1);
+ tx.send(2);
+ drop(tx);
+ assert_eq!(total_rx.recv(), 6);
+ })
+
+ test!(fn test_recv_iter_break() {
+ let (tx, rx) = channel::<int>();
+ let (count_tx, count_rx) = channel();
+
+ spawn(proc() {
+ let mut count = 0;
+ for x in rx.iter() {
+ if count >= 3 {
+ break;
+ } else {
+ count += x;
+ }
+ }
+ count_tx.send(count);
+ });
+
+ tx.send(2);
+ tx.send(2);
+ tx.send(2);
+ let _ = tx.send_opt(2);
+ drop(tx);
+ assert_eq!(count_rx.recv(), 4);
+ })
+
+ test!(fn try_recv_states() {
+ let (tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<()>();
+ let (tx3, rx3) = channel::<()>();
+ spawn(proc() {
+ rx2.recv();
+ tx1.send(1);
+ tx3.send(());
+ rx2.recv();
+ drop(tx1);
+ tx3.send(());
+ });
+
+ assert_eq!(rx1.try_recv(), Err(Empty));
+ tx2.send(());
+ rx3.recv();
+ assert_eq!(rx1.try_recv(), Ok(1));
+ assert_eq!(rx1.try_recv(), Err(Empty));
+ tx2.send(());
+ rx3.recv();
+ assert_eq!(rx1.try_recv(), Err(Disconnected));
+ })
+
+ // This bug used to end up in a livelock inside of the Receiver destructor
+ // because the internal state of the Shared packet was corrupted
+ test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
+ let (tx, rx) = channel();
+ let (tx2, rx2) = channel();
+ spawn(proc() {
+ rx.recv(); // wait on a oneshot
+ drop(rx); // destroy a shared
+ tx2.send(());
+ });
+ // make sure the other task has gone to sleep
+ for _ in range(0u, 5000) { task::deschedule(); }
+
+ // upgrade to a shared chan and send a message
+ let t = tx.clone();
+ drop(tx);
+ t.send(());
+
+ // wait for the child task to exit before we exit
+ rx2.recv();
+ })
+
+ test!(fn sends_off_the_runtime() {
+ use rustrt::thread::Thread;
+
+ let (tx, rx) = channel();
+ let t = Thread::start(proc() {
+ for _ in range(0u, 1000) {
+ tx.send(());
+ }
+ });
+ for _ in range(0u, 1000) {
+ rx.recv();
+ }
+ t.join();
+ })
+
+ test!(fn try_recvs_off_the_runtime() {
+ use rustrt::thread::Thread;
+
+ let (tx, rx) = channel();
+ let (cdone, pdone) = channel();
+ let t = Thread::start(proc() {
+ let mut hits = 0u;
+ while hits < 10 {
+ match rx.try_recv() {
+ Ok(()) => { hits += 1; }
+ Err(Empty) => { Thread::yield_now(); }
+ Err(Disconnected) => return,
+ }
+ }
+ cdone.send(());
+ });
+ for _ in range(0u, 10) {
+ tx.send(());
+ }
+ t.join();
+ pdone.recv();
+ })
+}
+
+#[cfg(test)]
+mod sync_tests {
+ use prelude::*;
+ use os;
+
+ pub fn stress_factor() -> uint {
+ match os::getenv("RUST_TEST_STRESS") {
+ Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
+ None => 1,
+ }
+ }
+
+ test!(fn smoke() {
+ let (tx, rx) = sync_channel::<int>(1);
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn drop_full() {
+ let (tx, _rx) = sync_channel(1);
+ tx.send(box 1i);
+ })
+
+ test!(fn smoke_shared() {
+ let (tx, rx) = sync_channel::<int>(1);
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ let tx = tx.clone();
+ tx.send(1);
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn smoke_threads() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ tx.send(1);
+ });
+ assert_eq!(rx.recv(), 1);
+ })
+
+ test!(fn smoke_port_gone() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(rx);
+ tx.send(1);
+ } #[should_fail])
+
+ test!(fn smoke_shared_port_gone2() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(rx);
+ let tx2 = tx.clone();
+ drop(tx);
+ tx2.send(1);
+ } #[should_fail])
+
+ test!(fn port_gone_concurrent() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ rx.recv();
+ });
+ loop { tx.send(1) }
+ } #[should_fail])
+
+ test!(fn port_gone_concurrent_shared() {
+ let (tx, rx) = sync_channel::<int>(0);
+ let tx2 = tx.clone();
+ spawn(proc() {
+ rx.recv();
+ });
+ loop {
+ tx.send(1);
+ tx2.send(1);
+ }
+ } #[should_fail])
+
+ test!(fn smoke_chan_gone() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(tx);
+ rx.recv();
+ } #[should_fail])
+
+ test!(fn smoke_chan_gone_shared() {
+ let (tx, rx) = sync_channel::<()>(0);
+ let tx2 = tx.clone();
+ drop(tx);
+ drop(tx2);
+ rx.recv();
+ } #[should_fail])
+
+ test!(fn chan_gone_concurrent() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ tx.send(1);
+ tx.send(1);
+ });
+ loop { rx.recv(); }
+ } #[should_fail])
+
+ test!(fn stress() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ for _ in range(0u, 10000) { tx.send(1); }
+ });
+ for _ in range(0u, 10000) {
+ assert_eq!(rx.recv(), 1);
+ }
+ })
+
+ test!(fn stress_shared() {
+ static AMT: uint = 1000;
+ static NTHREADS: uint = 8;
+ let (tx, rx) = sync_channel::<int>(0);
+ let (dtx, drx) = sync_channel::<()>(0);
+
+ spawn(proc() {
+ for _ in range(0, AMT * NTHREADS) {
+ assert_eq!(rx.recv(), 1);
+ }
+ match rx.try_recv() {
+ Ok(..) => panic!(),
+ _ => {}
+ }
+ dtx.send(());
+ });
+
+ for _ in range(0, NTHREADS) {
+ let tx = tx.clone();
+ spawn(proc() {
+ for _ in range(0, AMT) { tx.send(1); }
+ });
+ }
+ drop(tx);
+ drx.recv();
+ })
+
+ test!(fn oneshot_single_thread_close_port_first() {
+ // Simple test of closing without sending
+ let (_tx, rx) = sync_channel::<int>(0);
+ drop(rx);
+ })
+
+ test!(fn oneshot_single_thread_close_chan_first() {
+ // Simple test of closing without sending
+ let (tx, _rx) = sync_channel::<int>(0);
+ drop(tx);
+ })
+
+ test!(fn oneshot_single_thread_send_port_close() {
+ // Testing that the sender cleans up the payload if receiver is closed
+ let (tx, rx) = sync_channel::<Box<int>>(0);
+ drop(rx);
+ tx.send(box 0);
+ } #[should_fail])
+
+ test!(fn oneshot_single_thread_recv_chan_close() {
+ // Receiving on a closed chan will panic
+ let res = task::try(proc() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(tx);
+ rx.recv();
+ });
+ // What is our res?
+ assert!(res.is_err());
+ })
+
+ test!(fn oneshot_single_thread_send_then_recv() {
+ let (tx, rx) = sync_channel::<Box<int>>(1);
+ tx.send(box 10);
+ assert!(rx.recv() == box 10);
+ })
+
+ test!(fn oneshot_single_thread_try_send_open() {
+ let (tx, rx) = sync_channel::<int>(1);
+ assert_eq!(tx.try_send(10), Ok(()));
+ assert!(rx.recv() == 10);
+ })
+
+ test!(fn oneshot_single_thread_try_send_closed() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(rx);
+ assert_eq!(tx.try_send(10), Err(RecvDisconnected(10)));
+ })
+
+ test!(fn oneshot_single_thread_try_send_closed2() {
+ let (tx, _rx) = sync_channel::<int>(0);
+ assert_eq!(tx.try_send(10), Err(Full(10)));
+ })
+
+ test!(fn oneshot_single_thread_try_recv_open() {
+ let (tx, rx) = sync_channel::<int>(1);
+ tx.send(10);
+ assert!(rx.recv_opt() == Ok(10));
+ })
+
+ test!(fn oneshot_single_thread_try_recv_closed() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(tx);
+ assert!(rx.recv_opt() == Err(()));
+ })
+
+ test!(fn oneshot_single_thread_peek_data() {
+ let (tx, rx) = sync_channel::<int>(1);
+        assert_eq!(rx.try_recv(), Err(Empty));
+ tx.send(10);
+ assert_eq!(rx.try_recv(), Ok(10));
+ })
+
+ test!(fn oneshot_single_thread_peek_close() {
+ let (tx, rx) = sync_channel::<int>(0);
+ drop(tx);
+ assert_eq!(rx.try_recv(), Err(Disconnected));
+ assert_eq!(rx.try_recv(), Err(Disconnected));
+ })
+
+ test!(fn oneshot_single_thread_peek_open() {
+ let (_tx, rx) = sync_channel::<int>(0);
+ assert_eq!(rx.try_recv(), Err(Empty));
+ })
+
+ test!(fn oneshot_multi_task_recv_then_send() {
+ let (tx, rx) = sync_channel::<Box<int>>(0);
+ spawn(proc() {
+ assert!(rx.recv() == box 10);
+ });
+
+ tx.send(box 10);
+ })
+
+ test!(fn oneshot_multi_task_recv_then_close() {
+ let (tx, rx) = sync_channel::<Box<int>>(0);
+ spawn(proc() {
+ drop(tx);
+ });
+ let res = task::try(proc() {
+ assert!(rx.recv() == box 10);
+ });
+ assert!(res.is_err());
+ })
+
+ test!(fn oneshot_multi_thread_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ drop(rx);
+ });
+ drop(tx);
+ }
+ })
+
+ test!(fn oneshot_multi_thread_send_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ drop(rx);
+ });
+ let _ = task::try(proc() {
+ tx.send(1);
+ });
+ }
+ })
+
+ test!(fn oneshot_multi_thread_recv_close_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ let res = task::try(proc() {
+ rx.recv();
+ });
+ assert!(res.is_err());
+ });
+ spawn(proc() {
+ spawn(proc() {
+ drop(tx);
+ });
+ });
+ }
+ })
+
+ test!(fn oneshot_multi_thread_send_recv_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = sync_channel::<Box<int>>(0);
+ spawn(proc() {
+ tx.send(box 10i);
+ });
+ spawn(proc() {
+ assert!(rx.recv() == box 10i);
+ });
+ }
+ })
+
+ test!(fn stream_send_recv_stress() {
+ for _ in range(0, stress_factor()) {
+ let (tx, rx) = sync_channel::<Box<int>>(0);
+
+ send(tx, 0);
+ recv(rx, 0);
+
+ fn send(tx: SyncSender<Box<int>>, i: int) {
+ if i == 10 { return }
+
+ spawn(proc() {
+ tx.send(box i);
+ send(tx, i + 1);
+ });
+ }
+
+ fn recv(rx: Receiver<Box<int>>, i: int) {
+ if i == 10 { return }
+
+ spawn(proc() {
+ assert!(rx.recv() == box i);
+ recv(rx, i + 1);
+ });
+ }
+ }
+ })
+
+ test!(fn recv_a_lot() {
+ // Regression test that we don't run out of stack in scheduler context
+ let (tx, rx) = sync_channel(10000);
+ for _ in range(0u, 10000) { tx.send(()); }
+ for _ in range(0u, 10000) { rx.recv(); }
+ })
+
+ test!(fn shared_chan_stress() {
+ let (tx, rx) = sync_channel(0);
+ let total = stress_factor() + 100;
+ for _ in range(0, total) {
+ let tx = tx.clone();
+ spawn(proc() {
+ tx.send(());
+ });
+ }
+
+ for _ in range(0, total) {
+ rx.recv();
+ }
+ })
+
+ test!(fn test_nested_recv_iter() {
+ let (tx, rx) = sync_channel::<int>(0);
+ let (total_tx, total_rx) = sync_channel::<int>(0);
+
+ spawn(proc() {
+ let mut acc = 0;
+ for x in rx.iter() {
+ acc += x;
+ }
+ total_tx.send(acc);
+ });
+
+ tx.send(3);
+ tx.send(1);
+ tx.send(2);
+ drop(tx);
+ assert_eq!(total_rx.recv(), 6);
+ })
+
+ test!(fn test_recv_iter_break() {
+ let (tx, rx) = sync_channel::<int>(0);
+ let (count_tx, count_rx) = sync_channel(0);
+
+ spawn(proc() {
+ let mut count = 0;
+ for x in rx.iter() {
+ if count >= 3 {
+ break;
+ } else {
+ count += x;
+ }
+ }
+ count_tx.send(count);
+ });
+
+ tx.send(2);
+ tx.send(2);
+ tx.send(2);
+ let _ = tx.try_send(2);
+ drop(tx);
+ assert_eq!(count_rx.recv(), 4);
+ })
+
+ test!(fn try_recv_states() {
+ let (tx1, rx1) = sync_channel::<int>(1);
+ let (tx2, rx2) = sync_channel::<()>(1);
+ let (tx3, rx3) = sync_channel::<()>(1);
+ spawn(proc() {
+ rx2.recv();
+ tx1.send(1);
+ tx3.send(());
+ rx2.recv();
+ drop(tx1);
+ tx3.send(());
+ });
+
+ assert_eq!(rx1.try_recv(), Err(Empty));
+ tx2.send(());
+ rx3.recv();
+ assert_eq!(rx1.try_recv(), Ok(1));
+ assert_eq!(rx1.try_recv(), Err(Empty));
+ tx2.send(());
+ rx3.recv();
+ assert_eq!(rx1.try_recv(), Err(Disconnected));
+ })
+
+ // This bug used to end up in a livelock inside of the Receiver destructor
+ // because the internal state of the Shared packet was corrupted
+ test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
+ let (tx, rx) = sync_channel::<()>(0);
+ let (tx2, rx2) = sync_channel::<()>(0);
+ spawn(proc() {
+ rx.recv(); // wait on a oneshot
+ drop(rx); // destroy a shared
+ tx2.send(());
+ });
+ // make sure the other task has gone to sleep
+ for _ in range(0u, 5000) { task::deschedule(); }
+
+ // upgrade to a shared chan and send a message
+ let t = tx.clone();
+ drop(tx);
+ t.send(());
+
+ // wait for the child task to exit before we exit
+ rx2.recv();
+ })
+
+ test!(fn try_recvs_off_the_runtime() {
+ use rustrt::thread::Thread;
+
+ let (tx, rx) = sync_channel::<()>(0);
+ let (cdone, pdone) = channel();
+ let t = Thread::start(proc() {
+ let mut hits = 0u;
+ while hits < 10 {
+ match rx.try_recv() {
+ Ok(()) => { hits += 1; }
+ Err(Empty) => { Thread::yield_now(); }
+ Err(Disconnected) => return,
+ }
+ }
+ cdone.send(());
+ });
+ for _ in range(0u, 10) {
+ tx.send(());
+ }
+ t.join();
+ pdone.recv();
+ })
+
+ test!(fn send_opt1() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() { rx.recv(); });
+ assert_eq!(tx.send_opt(1), Ok(()));
+ })
+
+ test!(fn send_opt2() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() { drop(rx); });
+ assert_eq!(tx.send_opt(1), Err(1));
+ })
+
+ test!(fn send_opt3() {
+ let (tx, rx) = sync_channel::<int>(1);
+ assert_eq!(tx.send_opt(1), Ok(()));
+ spawn(proc() { drop(rx); });
+ assert_eq!(tx.send_opt(1), Err(1));
+ })
+
+ test!(fn send_opt4() {
+ let (tx, rx) = sync_channel::<int>(0);
+ let tx2 = tx.clone();
+ let (done, donerx) = channel();
+ let done2 = done.clone();
+ spawn(proc() {
+ assert_eq!(tx.send_opt(1), Err(1));
+ done.send(());
+ });
+ spawn(proc() {
+ assert_eq!(tx2.send_opt(2), Err(2));
+ done2.send(());
+ });
+ drop(rx);
+ donerx.recv();
+ donerx.recv();
+ })
+
+ test!(fn try_send1() {
+ let (tx, _rx) = sync_channel::<int>(0);
+ assert_eq!(tx.try_send(1), Err(Full(1)));
+ })
+
+ test!(fn try_send2() {
+ let (tx, _rx) = sync_channel::<int>(1);
+ assert_eq!(tx.try_send(1), Ok(()));
+ assert_eq!(tx.try_send(1), Err(Full(1)));
+ })
+
+ test!(fn try_send3() {
+ let (tx, rx) = sync_channel::<int>(1);
+ assert_eq!(tx.try_send(1), Ok(()));
+ drop(rx);
+ assert_eq!(tx.try_send(1), Err(RecvDisconnected(1)));
+ })
+
+ test!(fn try_send4() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ for _ in range(0u, 1000) { task::deschedule(); }
+ assert_eq!(tx.try_send(1), Ok(()));
+ });
+ assert_eq!(rx.recv(), 1);
+ } #[ignore(reason = "flaky on libnative")])
+
+ test!(fn issue_15761() {
+ fn repro() {
+ let (tx1, rx1) = sync_channel::<()>(3);
+ let (tx2, rx2) = sync_channel::<()>(3);
+
+ spawn(proc() {
+ rx1.recv();
+ tx2.try_send(()).unwrap();
+ });
+
+ tx1.try_send(()).unwrap();
+ rx2.recv();
+ }
+
+ for _ in range(0u, 100) {
+ repro()
+ }
+ })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Oneshot channels/ports
+///
+/// This is the initial flavor of channels/ports used for the comm module. It
+/// is an optimization for the one-use case of a channel. The major
+/// optimization of this type is to have exactly one allocation when the
+/// chan/port pair is created.
+///
+/// Another possible optimization would be to not use an Arc box, because in
+/// theory we know when the shared packet can be deallocated (there is no real
+/// need for the atomic reference counting), but I was having trouble figuring
+/// out how to destroy the data early in a drop of a Port.
+///
+/// # Implementation
+///
+/// Oneshots are implemented around one atomic uint variable. This variable
+/// indicates the state of the port/chan, and also contains any task
+/// blocked on the port. All atomic operations happen on this one word.
+///
+/// When a oneshot channel is upgraded, the upgrade is considered a disconnect
+/// on the channel side of things (it can be mentally thought of as consuming
+/// the port). This upgrade is then also stored in the shared packet.
+/// The one caveat to consider is that when a port sees a disconnected channel
+/// it must check for data because there is no "data plus upgrade" state.
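+///
+/// # Example
+///
+/// A minimal sketch of the user-visible behavior built on this flavor (going
+/// through the public `channel` constructor of the parent module rather than
+/// using `Packet` directly):
+///
+/// ```rust
+/// let (tx, rx) = channel::<int>();
+/// tx.send(1); // the first send on a fresh channel uses the oneshot flavor
+/// assert_eq!(rx.recv(), 1);
+/// ```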
+
+pub use self::Failure::*;
+pub use self::UpgradeResult::*;
+pub use self::SelectionResult::*;
+use self::MyUpgrade::*;
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::mem;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+
+use sync::atomic;
+use comm::Receiver;
+
+// Various states you can find a port in.
+const EMPTY: uint = 0;
+const DATA: uint = 1;
+const DISCONNECTED: uint = 2;
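+// Any other value is the `cast_to_uint` representation of a `BlockedTask`
+// that is currently sleeping on this port.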
+
+pub struct Packet<T> {
+ // Internal state of the chan/port pair (stores the blocked task as well)
+ state: atomic::AtomicUint,
+ // One-shot data slot location
+ data: Option<T>,
+ // when used for the second time, a oneshot channel must be upgraded, and
+ // this contains the slot for the upgrade
+ upgrade: MyUpgrade<T>,
+}
+
+pub enum Failure<T> {
+ Empty,
+ Disconnected,
+ Upgraded(Receiver<T>),
+}
+
+pub enum UpgradeResult {
+ UpSuccess,
+ UpDisconnected,
+ UpWoke(BlockedTask),
+}
+
+pub enum SelectionResult<T> {
+ SelCanceled(BlockedTask),
+ SelUpgraded(BlockedTask, Receiver<T>),
+ SelSuccess,
+}
+
+enum MyUpgrade<T> {
+ NothingSent,
+ SendUsed,
+ GoUp(Receiver<T>),
+}
+
+impl<T: Send> Packet<T> {
+ pub fn new() -> Packet<T> {
+ Packet {
+ data: None,
+ upgrade: NothingSent,
+ state: atomic::AtomicUint::new(EMPTY),
+ }
+ }
+
+ pub fn send(&mut self, t: T) -> Result<(), T> {
+ // Sanity check
+ match self.upgrade {
+ NothingSent => {}
+            _ => panic!("sending on a oneshot that's already been sent on"),
+ }
+ assert!(self.data.is_none());
+ self.data = Some(t);
+ self.upgrade = SendUsed;
+
+ match self.state.swap(DATA, atomic::SeqCst) {
+ // Sent the data, no one was waiting
+ EMPTY => Ok(()),
+
+ // Couldn't send the data, the port hung up first. Return the data
+ // back up the stack.
+ DISCONNECTED => {
+ Err(self.data.take().unwrap())
+ }
+
+ // Not possible, these are one-use channels
+ DATA => unreachable!(),
+
+ // Anything else means that there was a task waiting on the other
+ // end. We leave the 'DATA' state inside so it'll pick it up on the
+ // other end.
+ n => unsafe {
+ let t = BlockedTask::cast_from_uint(n);
+ t.wake().map(|t| t.reawaken());
+ Ok(())
+ }
+ }
+ }
+
+    // Just tests whether this channel has been sent on or not. This is only
+    // safe to use from the sender.
+ pub fn sent(&self) -> bool {
+ match self.upgrade {
+ NothingSent => false,
+ _ => true,
+ }
+ }
+
+ pub fn recv(&mut self) -> Result<T, Failure<T>> {
+ // Attempt to not block the task (it's a little expensive). If it looks
+ // like we're not empty, then immediately go through to `try_recv`.
+ if self.state.load(atomic::SeqCst) == EMPTY {
+ let t: Box<Task> = Local::take();
+ t.deschedule(1, |task| {
+ let n = unsafe { task.cast_to_uint() };
+ match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
+ // Nothing on the channel, we legitimately block
+ EMPTY => Ok(()),
+
+ // If there's data or it's a disconnected channel, then we
+ // failed the cmpxchg, so we just wake ourselves back up
+ DATA | DISCONNECTED => {
+ unsafe { Err(BlockedTask::cast_from_uint(n)) }
+ }
+
+ // Only one thread is allowed to sleep on this port
+ _ => unreachable!()
+ }
+ });
+ }
+
+ self.try_recv()
+ }
+
+ pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
+ match self.state.load(atomic::SeqCst) {
+ EMPTY => Err(Empty),
+
+            // We saw some data on the channel, but the channel can be used
+            // again to send us an upgrade. As a result, we need to reset the
+            // channel state to record that no data is available (otherwise
+            // we'll just see DATA next time). This is done as a cmpxchg because
+            // if the state changes under our feet we'd rather just see that
+            // state change.
+ DATA => {
+ self.state.compare_and_swap(DATA, EMPTY, atomic::SeqCst);
+ match self.data.take() {
+ Some(data) => Ok(data),
+ None => unreachable!(),
+ }
+ }
+
+ // There's no guarantee that we receive before an upgrade happens,
+ // and an upgrade flags the channel as disconnected, so when we see
+ // this we first need to check if there's data available and *then*
+ // we go through and process the upgrade.
+ DISCONNECTED => {
+ match self.data.take() {
+ Some(data) => Ok(data),
+ None => {
+ match mem::replace(&mut self.upgrade, SendUsed) {
+ SendUsed | NothingSent => Err(Disconnected),
+ GoUp(upgrade) => Err(Upgraded(upgrade))
+ }
+ }
+ }
+ }
+ _ => unreachable!()
+ }
+ }
+
+ // Returns whether the upgrade was completed. If the upgrade wasn't
+ // completed, then the port couldn't get sent to the other half (it will
+ // never receive it).
+ pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
+ let prev = match self.upgrade {
+ NothingSent => NothingSent,
+ SendUsed => SendUsed,
+ _ => panic!("upgrading again"),
+ };
+ self.upgrade = GoUp(up);
+
+ match self.state.swap(DISCONNECTED, atomic::SeqCst) {
+ // If the channel is empty or has data on it, then we're good to go.
+ // Senders will check the data before the upgrade (in case we
+ // plastered over the DATA state).
+ DATA | EMPTY => UpSuccess,
+
+ // If the other end is already disconnected, then we failed the
+ // upgrade. Be sure to trash the port we were given.
+ DISCONNECTED => { self.upgrade = prev; UpDisconnected }
+
+ // If someone's waiting, we gotta wake them up
+ n => UpWoke(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+ }
+
+ pub fn drop_chan(&mut self) {
+ match self.state.swap(DISCONNECTED, atomic::SeqCst) {
+ DATA | DISCONNECTED | EMPTY => {}
+
+ // If someone's waiting, we gotta wake them up
+ n => unsafe {
+ let t = BlockedTask::cast_from_uint(n);
+ t.wake().map(|t| t.reawaken());
+ }
+ }
+ }
+
+ pub fn drop_port(&mut self) {
+ match self.state.swap(DISCONNECTED, atomic::SeqCst) {
+ // An empty channel has nothing to do, and a remotely disconnected
+ // channel also has nothing to do b/c we're about to run the drop
+ // glue
+ DISCONNECTED | EMPTY => {}
+
+ // There's data on the channel, so make sure we destroy it promptly.
+ // This is why not using an arc is a little difficult (need the box
+ // to stay valid while we take the data).
+ DATA => { self.data.take().unwrap(); }
+
+ // We're the only ones that can block on this port
+ _ => unreachable!()
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+    // If Ok, the value is whether this port has data; if Err, then the
+    // upgraded port needs to be checked instead of this one.
+ pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
+ match self.state.load(atomic::SeqCst) {
+ EMPTY => Ok(false), // Welp, we tried
+ DATA => Ok(true), // we have some un-acquired data
+ DISCONNECTED if self.data.is_some() => Ok(true), // we have data
+ DISCONNECTED => {
+ match mem::replace(&mut self.upgrade, SendUsed) {
+ // The other end sent us an upgrade, so we need to
+ // propagate upwards whether the upgrade can receive
+ // data
+ GoUp(upgrade) => Err(upgrade),
+
+ // If the other end disconnected without sending an
+ // upgrade, then we have data to receive (the channel is
+ // disconnected).
+ up => { self.upgrade = up; Ok(true) }
+ }
+ }
+ _ => unreachable!(), // we're the "one blocker"
+ }
+ }
+
+ // Attempts to start selection on this port. This can either succeed, fail
+ // because there is data, or fail because there is an upgrade pending.
+ pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
+ let n = unsafe { task.cast_to_uint() };
+ match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
+ EMPTY => SelSuccess,
+ DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
+ DISCONNECTED if self.data.is_some() => {
+ SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+ DISCONNECTED => {
+ match mem::replace(&mut self.upgrade, SendUsed) {
+ // The other end sent us an upgrade, so we need to
+ // propagate upwards whether the upgrade can receive
+ // data
+ GoUp(upgrade) => {
+ SelUpgraded(unsafe { BlockedTask::cast_from_uint(n) },
+ upgrade)
+ }
+
+ // If the other end disconnected without sending an
+ // upgrade, then we have data to receive (the channel is
+ // disconnected).
+ up => {
+ self.upgrade = up;
+ SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+ }
+ }
+ _ => unreachable!(), // we're the "one blocker"
+ }
+ }
+
+ // Remove a previous selecting task from this port. This ensures that the
+ // blocked task will no longer be visible to any other threads.
+ //
+ // The return value indicates whether there's data on this port.
+ pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
+ let state = match self.state.load(atomic::SeqCst) {
+ // Each of these states means that no further activity will happen
+            // with regard to aborting selection
+ s @ EMPTY |
+ s @ DATA |
+ s @ DISCONNECTED => s,
+
+ // If we've got a blocked task, then use an atomic to gain ownership
+ // of it (may fail)
+ n => self.state.compare_and_swap(n, EMPTY, atomic::SeqCst)
+ };
+
+ // Now that we've got ownership of our state, figure out what to do
+ // about it.
+ match state {
+ EMPTY => unreachable!(),
+ // our task used for select was stolen
+ DATA => Ok(true),
+
+ // If the other end has hung up, then we have complete ownership
+ // of the port. First, check if there was data waiting for us. This
+ // is possible if the other end sent something and then hung up.
+ //
+ // We then need to check to see if there was an upgrade requested,
+ // and if so, the upgraded port needs to have its selection aborted.
+ DISCONNECTED => {
+ if self.data.is_some() {
+ Ok(true)
+ } else {
+ match mem::replace(&mut self.upgrade, SendUsed) {
+ GoUp(port) => Err(port),
+ _ => Ok(true),
+ }
+ }
+ }
+
+            // We woke ourselves up from select. Trash the blocked task and
+            // return that we don't have any data.
+ n => {
+ let t = unsafe { BlockedTask::cast_from_uint(n) };
+ t.trash();
+ Ok(false)
+ }
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.state.load(atomic::SeqCst), DISCONNECTED);
+ }
+}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Selection over an array of receivers
+//!
+//! This module contains the implementation machinery necessary for selecting
+//! over a number of receivers. One large goal of this module is to provide an
+//! efficient interface to selecting over any receiver of any type.
+//!
+//! This is achieved through an architecture of a "receiver set" in which
+//! receivers are added to a set and then the entire set is waited on at once.
+//! The set can be waited on multiple times to prevent re-adding each receiver
+//! to the set.
+//!
+//! Usage of this module is currently encouraged to go through the `select!`
+//! macro. This macro allows natural binding of variables to the received
+//! values of receivers, in a much more natural syntax than using the `Select`
+//! structure directly.
+//!
+//! # Example
+//!
+//! ```rust
+//! let (tx1, rx1) = channel();
+//! let (tx2, rx2) = channel();
+//!
+//! tx1.send(1i);
+//! tx2.send(2i);
+//!
+//! select! {
+//! val = rx1.recv() => {
+//! assert_eq!(val, 1i);
+//! },
+//! val = rx2.recv() => {
+//! assert_eq!(val, 2i);
+//! }
+//! }
+//! ```
+
+#![allow(dead_code)]
+#![experimental = "This implementation, while likely sufficient, is unsafe and \
+ likely to be error prone. At some point in the future this \
+ module will likely be replaced, and it is currently \
+ unknown how much API breakage that will cause. The ability \
+ to select over a number of channels will remain forever, \
+ but no guarantees beyond this are being made"]
+
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::cell::Cell;
+use core::kinds::marker;
+use core::mem;
+use core::uint;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+
+use comm::Receiver;
+
+/// The "receiver set" of the select interface. This structure is used to manage
+/// a set of receivers which are being selected over.
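+///
+/// # Example
+///
+/// A rough sketch of direct usage (the `select!` macro shown in the module
+/// documentation is the preferred interface):
+///
+/// ```rust
+/// use std::comm::Select;
+///
+/// let (tx1, rx1) = channel::<int>();
+/// let (_tx2, rx2) = channel::<int>();
+/// tx1.send(1);
+///
+/// let select = Select::new();
+/// let mut h1 = select.handle(&rx1);
+/// let mut h2 = select.handle(&rx2);
+/// unsafe { h1.add(); h2.add(); }
+///
+/// // rx1 already has data, so the preflight check reports it immediately
+/// let id = select.wait();
+/// assert_eq!(id, h1.id());
+/// assert_eq!(h1.recv(), 1);
+/// ```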
+pub struct Select {
+ head: *mut Handle<'static, ()>,
+ tail: *mut Handle<'static, ()>,
+ next_id: Cell<uint>,
+ marker1: marker::NoSend,
+}
+
+/// A handle to a receiver which is currently a member of a `Select` set of
+/// receivers. This handle is used to keep the receiver in the set as well as
+/// interact with the underlying receiver.
+pub struct Handle<'rx, T:'rx> {
+ /// The ID of this handle, used to compare against the return value of
+ /// `Select::wait()`
+ id: uint,
+ selector: &'rx Select,
+ next: *mut Handle<'static, ()>,
+ prev: *mut Handle<'static, ()>,
+ added: bool,
+ packet: &'rx Packet+'rx,
+
+    // due to our fun transmutes, we must be sure to place this at the end
+    // (nothing previous relies on T)
+ rx: &'rx Receiver<T>,
+}
+
+struct Packets { cur: *mut Handle<'static, ()> }
+
+#[doc(hidden)]
+pub trait Packet {
+ fn can_recv(&self) -> bool;
+ fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>;
+ fn abort_selection(&self) -> bool;
+}
+
+impl Select {
+    /// Creates a new selection structure. This set is initially empty, and
+    /// `wait` will panic!() if called while the set is empty.
+ ///
+    /// Using this struct directly can sometimes be burdensome; usage is much
+    /// easier through the `select!` macro.
+ pub fn new() -> Select {
+ Select {
+ marker1: marker::NoSend,
+ head: 0 as *mut Handle<'static, ()>,
+ tail: 0 as *mut Handle<'static, ()>,
+ next_id: Cell::new(1),
+ }
+ }
+
+ /// Creates a new handle into this receiver set for a new receiver. Note
+ /// that this does *not* add the receiver to the receiver set, for that you
+ /// must call the `add` method on the handle itself.
+ pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> {
+ let id = self.next_id.get();
+ self.next_id.set(id + 1);
+ Handle {
+ id: id,
+ selector: self,
+ next: 0 as *mut Handle<'static, ()>,
+ prev: 0 as *mut Handle<'static, ()>,
+ added: false,
+ rx: rx,
+ packet: rx,
+ }
+ }
+
+ /// Waits for an event on this receiver set. The returned value is *not* an
+ /// index, but rather an id. This id can be queried against any active
+ /// `Handle` structures (each one has an `id` method). The handle with
+ /// the matching `id` will have some sort of event available on it. The
+ /// event could either be that data is available or the corresponding
+ /// channel has been closed.
+ pub fn wait(&self) -> uint {
+ self.wait2(true)
+ }
+
+ /// Helper method for skipping the preflight checks during testing
+ fn wait2(&self, do_preflight_checks: bool) -> uint {
+ // Note that this is currently an inefficient implementation. We in
+ // theory have knowledge about all receivers in the set ahead of time,
+ // so this method shouldn't really have to iterate over all of them yet
+ // again. The idea with this "receiver set" interface is to get the
+ // interface right this time around, and later this implementation can
+ // be optimized.
+ //
+ // This implementation can be summarized by:
+ //
+ // fn select(receivers) {
+ // if any receiver ready { return ready index }
+ // deschedule {
+ // block on all receivers
+ // }
+ // unblock on all receivers
+ // return ready index
+ // }
+ //
+ // Most notably, the iterations over all of the receivers shouldn't be
+ // necessary.
+ unsafe {
+ let mut amt = 0;
+ for p in self.iter() {
+ amt += 1;
+ if do_preflight_checks && (*p).packet.can_recv() {
+ return (*p).id;
+ }
+ }
+ assert!(amt > 0);
+
+ let mut ready_index = amt;
+ let mut ready_id = uint::MAX;
+ let mut iter = self.iter().enumerate();
+
+ // Acquire a number of blocking contexts, and block on each one
+ // sequentially until one fails. If one fails, then abort
+ // immediately so we can go unblock on all the other receivers.
+ let task: Box<Task> = Local::take();
+ task.deschedule(amt, |task| {
+ // Prepare for the block
+ let (i, handle) = iter.next().unwrap();
+ match (*handle).packet.start_selection(task) {
+ Ok(()) => Ok(()),
+ Err(task) => {
+ ready_index = i;
+ ready_id = (*handle).id;
+ Err(task)
+ }
+ }
+ });
+
+ // Abort the selection process on each receiver. If the abort
+ // process returns `true`, then that means that the receiver is
+            // ready to receive some data. Note that this also means that the
+            // sender may not yet have fully read the `to_wake` field and
+            // woken us up (although the wakeup is guaranteed to fail).
+ //
+            // This situation happens in the window where a sender invokes
+ // increment(), sees -1, and then decides to wake up the task. After
+ // all this is done, the sending thread will set `selecting` to
+ // `false`. Until this is done, we cannot return. If we were to
+ // return, then a sender could wake up a receiver which has gone
+ // back to sleep after this call to `select`.
+ //
+            // Note that there is a "fairly small window" between an
+            // increment() deciding that it should wake a thread up and the
+            // `selecting` bit being set to false. For now, the implementation
+            // just spins in a yield loop. This is very distasteful, but this
+            // implementation is already nowhere near what it should ideally be.
+            // A rewrite should focus on avoiding the yield loop; for now this
+            // implementation is tiding us over until a more efficient "don't
+            // iterate over everything every time" implementation arrives.
+ for handle in self.iter().take(ready_index) {
+ if (*handle).packet.abort_selection() {
+ ready_id = (*handle).id;
+ }
+ }
+
+ assert!(ready_id != uint::MAX);
+ return ready_id;
+ }
+ }
+
+ fn iter(&self) -> Packets { Packets { cur: self.head } }
+}
+
+impl<'rx, T: Send> Handle<'rx, T> {
+ /// Retrieve the id of this handle.
+ #[inline]
+ pub fn id(&self) -> uint { self.id }
+
+ /// Receive a value on the underlying receiver. Has the same semantics as
+ /// `Receiver.recv`
+ pub fn recv(&mut self) -> T { self.rx.recv() }
+ /// Block to receive a value on the underlying receiver, returning `Some` on
+ /// success or `None` if the channel disconnects. This function has the same
+ /// semantics as `Receiver.recv_opt`
+ pub fn recv_opt(&mut self) -> Result<T, ()> { self.rx.recv_opt() }
+
+ /// Adds this handle to the receiver set that the handle was created from. This
+ /// method can be called multiple times, but it has no effect if `add` was
+ /// called previously.
+ ///
+ /// This method is unsafe because it requires that the `Handle` is not moved
+ /// while it is added to the `Select` set.
+ pub unsafe fn add(&mut self) {
+ if self.added { return }
+ let selector: &mut Select = mem::transmute(&*self.selector);
+ let me: *mut Handle<'static, ()> = mem::transmute(&*self);
+
+ if selector.head.is_null() {
+ selector.head = me;
+ selector.tail = me;
+ } else {
+ (*me).prev = selector.tail;
+ assert!((*me).next.is_null());
+ (*selector.tail).next = me;
+ selector.tail = me;
+ }
+ self.added = true;
+ }
+
+ /// Removes this handle from the `Select` set. This method is unsafe because
+ /// it has no guarantee that the `Handle` was not moved since `add` was
+ /// called.
+ pub unsafe fn remove(&mut self) {
+ if !self.added { return }
+
+ let selector: &mut Select = mem::transmute(&*self.selector);
+ let me: *mut Handle<'static, ()> = mem::transmute(&*self);
+
+ if self.prev.is_null() {
+ assert_eq!(selector.head, me);
+ selector.head = self.next;
+ } else {
+ (*self.prev).next = self.next;
+ }
+ if self.next.is_null() {
+ assert_eq!(selector.tail, me);
+ selector.tail = self.prev;
+ } else {
+ (*self.next).prev = self.prev;
+ }
+
+ self.next = 0 as *mut Handle<'static, ()>;
+ self.prev = 0 as *mut Handle<'static, ()>;
+
+ self.added = false;
+ }
+}
+
+#[unsafe_destructor]
+impl Drop for Select {
+ fn drop(&mut self) {
+ assert!(self.head.is_null());
+ assert!(self.tail.is_null());
+ }
+}
+
+#[unsafe_destructor]
+impl<'rx, T: Send> Drop for Handle<'rx, T> {
+ fn drop(&mut self) {
+ unsafe { self.remove() }
+ }
+}
+
+impl Iterator<*mut Handle<'static, ()>> for Packets {
+ fn next(&mut self) -> Option<*mut Handle<'static, ()>> {
+ if self.cur.is_null() {
+ None
+ } else {
+ let ret = Some(self.cur);
+ unsafe { self.cur = (*self.cur).next; }
+ ret
+ }
+ }
+}
+
+#[cfg(test)]
+#[allow(unused_imports)]
+mod test {
+ use prelude::*;
+
+ use super::*;
+
+ // Don't use the libstd version so we can pull in the right Select structure
+ // (std::comm points at the wrong one)
+ macro_rules! select {
+ (
+ $($name:pat = $rx:ident.$meth:ident() => $code:expr),+
+ ) => ({
+ use comm::Select;
+ let sel = Select::new();
+ $( let mut $rx = sel.handle(&$rx); )+
+ unsafe {
+ $( $rx.add(); )+
+ }
+ let ret = sel.wait();
+ $( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
+ { unreachable!() }
+ })
+ }
+
+ test!(fn smoke() {
+ let (tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<int>();
+ tx1.send(1);
+ select! (
+ foo = rx1.recv() => { assert_eq!(foo, 1); },
+ _bar = rx2.recv() => { panic!() }
+ )
+ tx2.send(2);
+ select! (
+ _foo = rx1.recv() => { panic!() },
+ bar = rx2.recv() => { assert_eq!(bar, 2) }
+ )
+ drop(tx1);
+ select! (
+ foo = rx1.recv_opt() => { assert_eq!(foo, Err(())); },
+ _bar = rx2.recv() => { panic!() }
+ )
+ drop(tx2);
+ select! (
+ bar = rx2.recv_opt() => { assert_eq!(bar, Err(())); }
+ )
+ })
+
+ test!(fn smoke2() {
+ let (_tx1, rx1) = channel::<int>();
+ let (_tx2, rx2) = channel::<int>();
+ let (_tx3, rx3) = channel::<int>();
+ let (_tx4, rx4) = channel::<int>();
+ let (tx5, rx5) = channel::<int>();
+ tx5.send(4);
+ select! (
+ _foo = rx1.recv() => { panic!("1") },
+ _foo = rx2.recv() => { panic!("2") },
+ _foo = rx3.recv() => { panic!("3") },
+ _foo = rx4.recv() => { panic!("4") },
+ foo = rx5.recv() => { assert_eq!(foo, 4); }
+ )
+ })
+
+ test!(fn closed() {
+ let (_tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<int>();
+ drop(tx2);
+
+ select! (
+ _a1 = rx1.recv_opt() => { panic!() },
+ a2 = rx2.recv_opt() => { assert_eq!(a2, Err(())); }
+ )
+ })
+
+ test!(fn unblocks() {
+ let (tx1, rx1) = channel::<int>();
+ let (_tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<int>();
+
+ spawn(proc() {
+ for _ in range(0u, 20) { task::deschedule(); }
+ tx1.send(1);
+ rx3.recv();
+ for _ in range(0u, 20) { task::deschedule(); }
+ });
+
+ select! (
+ a = rx1.recv() => { assert_eq!(a, 1); },
+ _b = rx2.recv() => { panic!() }
+ )
+ tx3.send(1);
+ select! (
+ a = rx1.recv_opt() => { assert_eq!(a, Err(())); },
+ _b = rx2.recv() => { panic!() }
+ )
+ })
+
+ test!(fn both_ready() {
+ let (tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+
+ spawn(proc() {
+ for _ in range(0u, 20) { task::deschedule(); }
+ tx1.send(1);
+ tx2.send(2);
+ rx3.recv();
+ });
+
+ select! (
+ a = rx1.recv() => { assert_eq!(a, 1); },
+ a = rx2.recv() => { assert_eq!(a, 2); }
+ )
+ select! (
+ a = rx1.recv() => { assert_eq!(a, 1); },
+ a = rx2.recv() => { assert_eq!(a, 2); }
+ )
+ assert_eq!(rx1.try_recv(), Err(Empty));
+ assert_eq!(rx2.try_recv(), Err(Empty));
+ tx3.send(());
+ })
+
+ test!(fn stress() {
+ static AMT: int = 10000;
+ let (tx1, rx1) = channel::<int>();
+ let (tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+
+ spawn(proc() {
+ for i in range(0, AMT) {
+ if i % 2 == 0 {
+ tx1.send(i);
+ } else {
+ tx2.send(i);
+ }
+ rx3.recv();
+ }
+ });
+
+ for i in range(0, AMT) {
+ select! (
+ i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1); },
+ i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2); }
+ )
+ tx3.send(());
+ }
+ })
+
+ test!(fn cloning() {
+ let (tx1, rx1) = channel::<int>();
+ let (_tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+
+ spawn(proc() {
+ rx3.recv();
+ tx1.clone();
+ assert_eq!(rx3.try_recv(), Err(Empty));
+ tx1.send(2);
+ rx3.recv();
+ });
+
+ tx3.send(());
+ select!(
+ _i1 = rx1.recv() => {},
+ _i2 = rx2.recv() => panic!()
+ )
+ tx3.send(());
+ })
+
+ test!(fn cloning2() {
+ let (tx1, rx1) = channel::<int>();
+ let (_tx2, rx2) = channel::<int>();
+ let (tx3, rx3) = channel::<()>();
+
+ spawn(proc() {
+ rx3.recv();
+ tx1.clone();
+ assert_eq!(rx3.try_recv(), Err(Empty));
+ tx1.send(2);
+ rx3.recv();
+ });
+
+ tx3.send(());
+ select!(
+ _i1 = rx1.recv() => {},
+ _i2 = rx2.recv() => panic!()
+ )
+ tx3.send(());
+ })
+
+ test!(fn cloning3() {
+ let (tx1, rx1) = channel::<()>();
+ let (tx2, rx2) = channel::<()>();
+ let (tx3, rx3) = channel::<()>();
+ spawn(proc() {
+ let s = Select::new();
+ let mut h1 = s.handle(&rx1);
+ let mut h2 = s.handle(&rx2);
+ unsafe { h2.add(); }
+ unsafe { h1.add(); }
+ assert_eq!(s.wait(), h2.id);
+ tx3.send(());
+ });
+
+ for _ in range(0u, 1000) { task::deschedule(); }
+ drop(tx1.clone());
+ tx2.send(());
+ rx3.recv();
+ })
+
+ test!(fn preflight1() {
+ let (tx, rx) = channel();
+ tx.send(());
+ select!(
+ () = rx.recv() => {}
+ )
+ })
+
+ test!(fn preflight2() {
+ let (tx, rx) = channel();
+ tx.send(());
+ tx.send(());
+ select!(
+ () = rx.recv() => {}
+ )
+ })
+
+ test!(fn preflight3() {
+ let (tx, rx) = channel();
+ drop(tx.clone());
+ tx.send(());
+ select!(
+ () = rx.recv() => {}
+ )
+ })
+
+ test!(fn preflight4() {
+ let (tx, rx) = channel();
+ tx.send(());
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn preflight5() {
+ let (tx, rx) = channel();
+ tx.send(());
+ tx.send(());
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn preflight6() {
+ let (tx, rx) = channel();
+ drop(tx.clone());
+ tx.send(());
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn preflight7() {
+ let (tx, rx) = channel::<()>();
+ drop(tx);
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn preflight8() {
+ let (tx, rx) = channel();
+ tx.send(());
+ drop(tx);
+ rx.recv();
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn preflight9() {
+ let (tx, rx) = channel();
+ drop(tx.clone());
+ tx.send(());
+ drop(tx);
+ rx.recv();
+ let s = Select::new();
+ let mut h = s.handle(&rx);
+ unsafe { h.add(); }
+ assert_eq!(s.wait2(false), h.id);
+ })
+
+ test!(fn oneshot_data_waiting() {
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ spawn(proc() {
+ select! {
+ () = rx1.recv() => {}
+ }
+ tx2.send(());
+ });
+
+ for _ in range(0u, 100) { task::deschedule() }
+ tx1.send(());
+ rx2.recv();
+ })
+
+ test!(fn stream_data_waiting() {
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ tx1.send(());
+ tx1.send(());
+ rx1.recv();
+ rx1.recv();
+ spawn(proc() {
+ select! {
+ () = rx1.recv() => {}
+ }
+ tx2.send(());
+ });
+
+ for _ in range(0u, 100) { task::deschedule() }
+ tx1.send(());
+ rx2.recv();
+ })
+
+ test!(fn shared_data_waiting() {
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ drop(tx1.clone());
+ tx1.send(());
+ rx1.recv();
+ spawn(proc() {
+ select! {
+ () = rx1.recv() => {}
+ }
+ tx2.send(());
+ });
+
+ for _ in range(0u, 100) { task::deschedule() }
+ tx1.send(());
+ rx2.recv();
+ })
+
+ test!(fn sync1() {
+ let (tx, rx) = sync_channel::<int>(1);
+ tx.send(1);
+ select! {
+ n = rx.recv() => { assert_eq!(n, 1); }
+ }
+ })
+
+ test!(fn sync2() {
+ let (tx, rx) = sync_channel::<int>(0);
+ spawn(proc() {
+ for _ in range(0u, 100) { task::deschedule() }
+ tx.send(1);
+ });
+ select! {
+ n = rx.recv() => { assert_eq!(n, 1); }
+ }
+ })
+
+ test!(fn sync3() {
+ let (tx1, rx1) = sync_channel::<int>(0);
+ let (tx2, rx2): (Sender<int>, Receiver<int>) = channel();
+ spawn(proc() { tx1.send(1); });
+ spawn(proc() { tx2.send(2); });
+ select! {
+ n = rx1.recv() => {
+ assert_eq!(n, 1);
+ assert_eq!(rx2.recv(), 2);
+ },
+ n = rx2.recv() => {
+ assert_eq!(n, 2);
+ assert_eq!(rx1.recv(), 1);
+ }
+ }
+ })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Shared channels
+///
+/// This is the flavor of channels which are not necessarily optimized for any
+/// particular use case, but are the most general in how they are used. Shared
+/// channels are cloneable allowing for multiple senders.
+///
+/// High level implementation details can be found in the comment of the parent
+/// module. You'll also note that the implementation of the shared and stream
+/// channels are quite similar, and this is no coincidence!
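+///
+/// As a rough sketch, the public API reaches this flavor once a sender has
+/// been cloned (using names from the parent comm module):
+///
+/// ```rust
+/// let (tx, rx) = channel::<int>();
+/// let tx2 = tx.clone(); // cloning upgrades the channel to the shared flavor
+/// spawn(proc() { tx.send(1); });
+/// spawn(proc() { tx2.send(2); });
+/// assert_eq!(rx.recv() + rx.recv(), 3);
+/// ```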
+
+pub use self::Failure::*;
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::cmp;
+use core::int;
+use rustrt::local::Local;
+use rustrt::mutex::NativeMutex;
+use rustrt::task::{Task, BlockedTask};
+use rustrt::thread::Thread;
+
+use sync::atomic;
+use sync::mpsc_queue as mpsc;
+
+const DISCONNECTED: int = int::MIN;
+const FUDGE: int = 1024;
+#[cfg(test)]
+const MAX_STEALS: int = 5;
+#[cfg(not(test))]
+const MAX_STEALS: int = 1 << 20;
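+// (The tiny test-only value forces the steal-rebalancing path in try_recv()
+// to be exercised frequently during testing.)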
+
+pub struct Packet<T> {
+ queue: mpsc::Queue<T>,
+ cnt: atomic::AtomicInt, // How many items are on this channel
+ steals: int, // How many times has a port received without blocking?
+ to_wake: atomic::AtomicUint, // Task to wake up
+
+ // The number of channels which are currently using this packet.
+ channels: atomic::AtomicInt,
+
+ // See the discussion in Port::drop and the channel send methods for what
+ // these are used for
+ port_dropped: atomic::AtomicBool,
+ sender_drain: atomic::AtomicInt,
+
+ // this lock protects various portions of this implementation during
+ // select()
+ select_lock: NativeMutex,
+}
+
+pub enum Failure {
+ Empty,
+ Disconnected,
+}
+
+impl<T: Send> Packet<T> {
+ // Creation of a packet *must* be followed by a call to postinit_lock
+ // and later by inherit_blocker
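+    //
+    // (Roughly: create with `Packet::new()`, wrap the packet in an Arc, call
+    // `postinit_lock()`, and finally hand any previously blocked task to
+    // `inherit_blocker()` once the channel is done being set up.)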
+ pub fn new() -> Packet<T> {
+ let p = Packet {
+ queue: mpsc::Queue::new(),
+ cnt: atomic::AtomicInt::new(0),
+ steals: 0,
+ to_wake: atomic::AtomicUint::new(0),
+ channels: atomic::AtomicInt::new(2),
+ port_dropped: atomic::AtomicBool::new(false),
+ sender_drain: atomic::AtomicInt::new(0),
+ select_lock: unsafe { NativeMutex::new() },
+ };
+ return p;
+ }
+
+    // This function should be used after the newly created Packet has been
+    // wrapped with an Arc. Otherwise the mutex data will be duplicated while
+    // cloning, and that could cause problems on platforms where a mutex is
+    // represented by an opaque data structure.
+ pub fn postinit_lock(&mut self) {
+ unsafe { self.select_lock.lock_noguard() }
+ }
+
+ // This function is used at the creation of a shared packet to inherit a
+ // previously blocked task. This is done to prevent spurious wakeups of
+ // tasks in select().
+ //
+ // This can only be called at channel-creation time
+ pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
+ match task {
+ Some(task) => {
+ assert_eq!(self.cnt.load(atomic::SeqCst), 0);
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ self.to_wake.store(unsafe { task.cast_to_uint() },
+ atomic::SeqCst);
+ self.cnt.store(-1, atomic::SeqCst);
+
+ // This store is a little sketchy. What's happening here is
+ // that we're transferring a blocker from a oneshot or stream
+ // channel to this shared channel. In doing so, we never
+ // spuriously wake them up and rather only wake them up at the
+ // appropriate time. This implementation of shared channels
+ // assumes that any blocking recv() will undo the increment of
+ // steals performed in try_recv() once the recv is complete.
+ // This thread that we're inheriting, however, is not in the
+ // middle of recv. Hence, the first time we wake them up,
+ // they're going to wake up from their old port, move on to the
+ // upgraded port, and then call the block recv() function.
+ //
+ // When calling this function, they'll find there's data
+ // immediately available, counting it as a steal. This in fact
+ // wasn't a steal because we appropriately blocked them waiting
+ // for data.
+ //
+ // To offset this bad increment, we initially set the steal
+ // count to -1. You'll find some special code in
+ // abort_selection() as well to ensure that this -1 steal count
+ // doesn't escape too far.
+ self.steals = -1;
+ }
+ None => {}
+ }
+
+ // When the shared packet is constructed, we grabbed this lock. The
+ // purpose of this lock is to ensure that abort_selection() doesn't
+ // interfere with this method. After we unlock this lock, we're
+ // signifying that we're done modifying self.cnt and self.to_wake and
+ // the port is ready for the world to continue using it.
+ unsafe { self.select_lock.unlock_noguard() }
+ }
+
+ pub fn send(&mut self, t: T) -> Result<(), T> {
+ // See Port::drop for what's going on
+ if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
+
+        // Note that the multiple-sender case is a little trickier
+        // semantically than the single-sender case. The logic for
+        // incrementing is "add, and if disconnected, store disconnected".
+        // This could lead some senders to believe that there wasn't a
+        // disconnect when in fact there was one. This means that while one
+        // thread is attempting to re-store the disconnected state, other
+        // threads could walk through merrily incrementing this very-negative
+        // disconnected count. To prevent senders from spuriously attempting
+        // to send when the channel is actually disconnected, the count has a
+        // ranged check here.
+ //
+        // This is also done for another reason. Remember that the return
+        // value of this function is:
+        //
+        //   `Ok(())` == the data *may* be received, this essentially has no
+        //               meaning
+        //   `Err(t)` == the data will *never* be received, this has a lot of
+        //               meaning
+ //
+ // In the SPSC case, we have a check of 'queue.is_empty()' to see
+ // whether the data was actually received, but this same condition
+ // means nothing in a multi-producer context. As a result, this
+ // preflight check serves as the definitive "this will never be
+ // received". Once we get beyond this check, we have permanently
+ // entered the realm of "this may be received"
+ if self.cnt.load(atomic::SeqCst) < DISCONNECTED + FUDGE {
+ return Err(t)
+ }
+
+ self.queue.push(t);
+ match self.cnt.fetch_add(1, atomic::SeqCst) {
+ -1 => {
+ self.take_to_wake().wake().map(|t| t.reawaken());
+ }
+
+ // In this case, we have possibly failed to send our data, and
+ // we need to consider re-popping the data in order to fully
+ // destroy it. We must arbitrate among the multiple senders,
+ // however, because the queues that we're using are
+ // single-consumer queues. In order to do this, all exiting
+ // pushers will use an atomic count in order to count those
+ // flowing through. Pushers who see 0 are required to drain as
+ // much as possible, and then can only exit when they are the
+ // only pusher (otherwise they must try again).
+ n if n < DISCONNECTED + FUDGE => {
+ // see the comment in 'try' for a shared channel for why this
+ // window of "not disconnected" is ok.
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+
+ if self.sender_drain.fetch_add(1, atomic::SeqCst) == 0 {
+ loop {
+ // drain the queue, for info on the thread yield see the
+ // discussion in try_recv
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => {}
+ mpsc::Empty => break,
+ mpsc::Inconsistent => Thread::yield_now(),
+ }
+ }
+                    // maybe we're done; if we're not the last ones
+                    // here, then we need to go try again.
+ if self.sender_drain.fetch_sub(1, atomic::SeqCst) == 1 {
+ break
+ }
+ }
+
+ // At this point, there may still be data on the queue,
+ // but only if the count hasn't been incremented and
+ // some other sender hasn't finished pushing data just
+ // yet. That sender in question will drain its own data.
+ }
+ }
+
+ // Can't make any assumptions about this case like in the SPSC case.
+ _ => {}
+ }
+
+ Ok(())
+ }
+
+ pub fn recv(&mut self) -> Result<T, Failure> {
+ // This code is essentially the exact same as that found in the stream
+ // case (see stream.rs)
+ match self.try_recv() {
+ Err(Empty) => {}
+ data => return data,
+ }
+
+ let task: Box<Task> = Local::take();
+ task.deschedule(1, |task| {
+ self.decrement(task)
+ });
+
+ match self.try_recv() {
+ data @ Ok(..) => { self.steals -= 1; data }
+ data => data,
+ }
+ }
+
+ // Essentially the exact same thing as the stream decrement function.
+ fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ let n = unsafe { task.cast_to_uint() };
+ self.to_wake.store(n, atomic::SeqCst);
+
+ let steals = self.steals;
+ self.steals = 0;
+
+ match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+ DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
+ // If we factor in our steals and notice that the channel has no
+ // data, we successfully sleep
+ n => {
+ assert!(n >= 0);
+ if n - steals <= 0 { return Ok(()) }
+ }
+ }
+
+ self.to_wake.store(0, atomic::SeqCst);
+ Err(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+
+ pub fn try_recv(&mut self) -> Result<T, Failure> {
+ let ret = match self.queue.pop() {
+ mpsc::Data(t) => Some(t),
+ mpsc::Empty => None,
+
+ // This is a bit of an interesting case. The channel is
+ // reported as having data available, but our pop() has
+ // failed due to the queue being in an inconsistent state.
+ // This means that there is some pusher somewhere which has
+ // yet to complete, but we are guaranteed that a pop will
+ // eventually succeed. In this case, we spin in a yield loop
+ // because the remote sender should finish their enqueue
+ // operation "very quickly".
+ //
+ // Avoiding this yield loop would require a different queue
+ // abstraction which provides the guarantee that after M
+ // pushes have succeeded, at least M pops will succeed. The
+ // current queues guarantee that if there are N active
+ // pushes, you can pop N times once all N have finished.
+ mpsc::Inconsistent => {
+ let data;
+ loop {
+ Thread::yield_now();
+ match self.queue.pop() {
+ mpsc::Data(t) => { data = t; break }
+ mpsc::Empty => panic!("inconsistent => empty"),
+ mpsc::Inconsistent => {}
+ }
+ }
+ Some(data)
+ }
+ };
+ match ret {
+ // See the discussion in the stream implementation for why we
+ // might decrement steals.
+ Some(data) => {
+ if self.steals > MAX_STEALS {
+ match self.cnt.swap(0, atomic::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+ }
+ n => {
+ let m = cmp::min(n, self.steals);
+ self.steals -= m;
+ self.bump(n - m);
+ }
+ }
+ assert!(self.steals >= 0);
+ }
+ self.steals += 1;
+ Ok(data)
+ }
+
+ // See the discussion in the stream implementation for why we try
+ // again.
+ None => {
+ match self.cnt.load(atomic::SeqCst) {
+ n if n != DISCONNECTED => Err(Empty),
+ _ => {
+ match self.queue.pop() {
+ mpsc::Data(t) => Ok(t),
+ mpsc::Empty => Err(Disconnected),
+ // with no senders, an inconsistency is impossible.
+ mpsc::Inconsistent => unreachable!(),
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Prepares this shared packet for a channel clone, essentially just bumping
+ // a refcount.
+ pub fn clone_chan(&mut self) {
+ self.channels.fetch_add(1, atomic::SeqCst);
+ }
+
+ // Decrement the reference count on a channel. This is called whenever a
+ // Chan is dropped and may end up waking up a receiver. It's the receiver's
+ // responsibility on the other end to figure out that we've disconnected.
+ pub fn drop_chan(&mut self) {
+ match self.channels.fetch_sub(1, atomic::SeqCst) {
+ 1 => {}
+ n if n > 1 => return,
+ n => panic!("bad number of channels left {}", n),
+ }
+
+ match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
+ -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
+ DISCONNECTED => {}
+ n => { assert!(n >= 0); }
+ }
+ }
+
+ // See the long discussion inside of stream.rs for why the queue is drained,
+ // and why it is done in this fashion.
+ pub fn drop_port(&mut self) {
+ self.port_dropped.store(true, atomic::SeqCst);
+ let mut steals = self.steals;
+ while {
+ let cnt = self.cnt.compare_and_swap(
+ steals, DISCONNECTED, atomic::SeqCst);
+ cnt != DISCONNECTED && cnt != steals
+ } {
+ // See the discussion in 'try_recv' for why we yield
+ // control of this thread.
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => { steals += 1; }
+ mpsc::Empty | mpsc::Inconsistent => break,
+ }
+ }
+ }
+ }
+
+ // Consumes ownership of the 'to_wake' field.
+ fn take_to_wake(&mut self) -> BlockedTask {
+ let task = self.to_wake.load(atomic::SeqCst);
+ self.to_wake.store(0, atomic::SeqCst);
+ assert!(task != 0);
+ unsafe { BlockedTask::cast_from_uint(task) }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // Helper function for select, tests whether this port can receive without
+ // blocking (obviously not an atomic decision).
+ //
+    // This is different from the stream version because there's no need to
+    // peek at the queue; we can just look at the local count.
+ pub fn can_recv(&mut self) -> bool {
+ let cnt = self.cnt.load(atomic::SeqCst);
+ cnt == DISCONNECTED || cnt - self.steals > 0
+ }
+
+ // increment the count on the channel (used for selection)
+ fn bump(&mut self, amt: int) -> int {
+ match self.cnt.fetch_add(amt, atomic::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+ DISCONNECTED
+ }
+ n => n
+ }
+ }
+
+ // Inserts the blocked task for selection on this port, returning it back if
+ // the port already has data on it.
+ //
+ // The code here is the same as in stream.rs, except that it doesn't need to
+ // peek at the channel to see if an upgrade is pending.
+ pub fn start_selection(&mut self,
+ task: BlockedTask) -> Result<(), BlockedTask> {
+ match self.decrement(task) {
+ Ok(()) => Ok(()),
+ Err(task) => {
+ let prev = self.bump(1);
+ assert!(prev == DISCONNECTED || prev >= 0);
+ return Err(task);
+ }
+ }
+ }
+
+ // Cancels a previous task waiting on this port, returning whether there's
+ // data on the port.
+ //
+ // This is similar to the stream implementation (hence fewer comments), but
+ // uses a different value for the "steals" variable.
+ pub fn abort_selection(&mut self, _was_upgrade: bool) -> bool {
+ // Before we do anything else, we bounce on this lock. The reason for
+ // doing this is to ensure that any upgrade-in-progress is gone and
+ // done with. Without this bounce, we can race with inherit_blocker
+ // about looking at and dealing with to_wake. Once we have acquired the
+ // lock, we are guaranteed that inherit_blocker is done.
+ unsafe {
+ let _guard = self.select_lock.lock();
+ }
+
+ // Like the stream implementation, we want to make sure that the count
+        // on the channel goes non-negative. We don't know how negative the
+        // count currently is, so instead of using a steal value of 1, we load
+ // the channel count and figure out what we should do to make it
+ // positive.
+ let steals = {
+ let cnt = self.cnt.load(atomic::SeqCst);
+ if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
+ };
+ let prev = self.bump(steals + 1);
+
+ if prev == DISCONNECTED {
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ true
+ } else {
+ let cur = prev + steals + 1;
+ assert!(cur >= 0);
+ if prev < 0 {
+ self.take_to_wake().trash();
+ } else {
+ while self.to_wake.load(atomic::SeqCst) != 0 {
+ Thread::yield_now();
+ }
+ }
+ // if the number of steals is -1, it was the pre-emptive -1 steal
+ // count from when we inherited a blocker. This is fine because
+ // we're just going to overwrite it with a real value.
+ assert!(self.steals == 0 || self.steals == -1);
+ self.steals = steals;
+ prev >= 0
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+ fn drop(&mut self) {
+ // Note that this load is not only an assert for correctness about
+ // disconnection, but also a proper fence before the read of
+        // `to_wake`, so this assert cannot be removed without also removing
+        // the `to_wake` assert.
+ assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ assert_eq!(self.channels.load(atomic::SeqCst), 0);
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Stream channels
+///
+/// This is the flavor of channels which are optimized for one sender and one
+/// receiver. The sender will be upgraded to a shared channel if the channel is
+/// cloned.
+///
+/// High level implementation details can be found in the comment of the parent
+/// module.
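+///
+/// A minimal sketch of the behavior this flavor backs (again via the public
+/// `channel` constructor of the parent module):
+///
+/// ```rust
+/// let (tx, rx) = channel::<int>();
+/// spawn(proc() {
+///     for i in range(0i, 3) { tx.send(i); }
+/// });
+/// // with a single un-cloned sender, the channel stays on the stream flavor
+/// assert_eq!(rx.iter().take(3).fold(0, |a, b| a + b), 3);
+/// ```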
+
+pub use self::Failure::*;
+pub use self::UpgradeResult::*;
+pub use self::SelectionResult::*;
+use self::Message::*;
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::cmp;
+use core::int;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+use rustrt::thread::Thread;
+
+use sync::atomic;
+use sync::spsc_queue as spsc;
+use comm::Receiver;
+
+const DISCONNECTED: int = int::MIN;
+#[cfg(test)]
+const MAX_STEALS: int = 5;
+#[cfg(not(test))]
+const MAX_STEALS: int = 1 << 20;
+
+pub struct Packet<T> {
+    queue: spsc::Queue<Message<T>>, // internal queue for all messages
+
+ cnt: atomic::AtomicInt, // How many items are on this channel
+ steals: int, // How many times has a port received without blocking?
+ to_wake: atomic::AtomicUint, // Task to wake up
+
+ port_dropped: atomic::AtomicBool, // flag if the channel has been destroyed.
+}
+
+pub enum Failure<T> {
+ Empty,
+ Disconnected,
+ Upgraded(Receiver<T>),
+}
+
+pub enum UpgradeResult {
+ UpSuccess,
+ UpDisconnected,
+ UpWoke(BlockedTask),
+}
+
+pub enum SelectionResult<T> {
+ SelSuccess,
+ SelCanceled(BlockedTask),
+ SelUpgraded(BlockedTask, Receiver<T>),
+}
+
+// Any message could contain an "upgrade request" to a new shared port, so the
+// internal queue is not a queue of T, but rather of Message<T>
+enum Message<T> {
+ Data(T),
+ GoUp(Receiver<T>),
+}
+
+impl<T: Send> Packet<T> {
+ pub fn new() -> Packet<T> {
+ Packet {
+ queue: unsafe { spsc::Queue::new(128) },
+
+ cnt: atomic::AtomicInt::new(0),
+ steals: 0,
+ to_wake: atomic::AtomicUint::new(0),
+
+ port_dropped: atomic::AtomicBool::new(false),
+ }
+ }
+
+
+ pub fn send(&mut self, t: T) -> Result<(), T> {
+        // If the other port has deterministically gone away, then we
+        // definitely must return the data back up the stack. Otherwise, the
+ // considered as being sent.
+ if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
+
+ match self.do_send(Data(t)) {
+ UpSuccess | UpDisconnected => {},
+ UpWoke(task) => { task.wake().map(|t| t.reawaken()); }
+ }
+ Ok(())
+ }
+ pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
+ // If the port has gone away, then there's no need to proceed any
+ // further.
+ if self.port_dropped.load(atomic::SeqCst) { return UpDisconnected }
+
+ self.do_send(GoUp(up))
+ }
+
+ fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
+ self.queue.push(t);
+ match self.cnt.fetch_add(1, atomic::SeqCst) {
+ // As described in the mod's doc comment, -1 == wakeup
+ -1 => UpWoke(self.take_to_wake()),
+            // As described before, SPSC queues must be >= -2
+ -2 => UpSuccess,
+
+            // Be sure to preserve the disconnected state, and the return value
+            // in this case is going to be whether our data was received or not.
+            // This manifests itself in whether we have an empty queue or not.
+            //
+            // Primarily, we are required to drain the queue here because the
+            // port will never remove this data. We can only have at most one
+            // item to drain (the port drains the rest).
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+ let first = self.queue.pop();
+ let second = self.queue.pop();
+ assert!(second.is_none());
+
+ match first {
+ Some(..) => UpSuccess, // we failed to send the data
+ None => UpDisconnected, // we successfully sent data
+ }
+ }
+
+ // Otherwise we just sent some data on a non-waiting queue, so just
+ // make sure the world is sane and carry on!
+ n => { assert!(n >= 0); UpSuccess }
+ }
+ }
+
+ // Consumes ownership of the 'to_wake' field.
+ fn take_to_wake(&mut self) -> BlockedTask {
+ let task = self.to_wake.load(atomic::SeqCst);
+ self.to_wake.store(0, atomic::SeqCst);
+ assert!(task != 0);
+ unsafe { BlockedTask::cast_from_uint(task) }
+ }
+
+ // Decrements the count on the channel for a sleeper, returning the sleeper
+ // back if it shouldn't sleep. Note that this is the location where we take
+ // steals into account.
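+    //
+    // (Worked example: with cnt == 2 and steals == 2 the logical count is
+    // zero, so fetch_sub(1 + 2) observes 2, n - steals == 0 means we sleep,
+    // and cnt is left at -1, which is exactly what a sender's fetch_add(1)
+    // later reads as "wake up the receiver".)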
+ fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ let n = unsafe { task.cast_to_uint() };
+ self.to_wake.store(n, atomic::SeqCst);
+
+ let steals = self.steals;
+ self.steals = 0;
+
+ match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+ DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
+ // If we factor in our steals and notice that the channel has no
+ // data, we successfully sleep
+ n => {
+ assert!(n >= 0);
+ if n - steals <= 0 { return Ok(()) }
+ }
+ }
+
+ self.to_wake.store(0, atomic::SeqCst);
+ Err(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+
+ pub fn recv(&mut self) -> Result<T, Failure<T>> {
+ // Optimistic preflight check (scheduling is expensive).
+ match self.try_recv() {
+ Err(Empty) => {}
+ data => return data,
+ }
+
+ // Welp, our channel has no data. Deschedule the current task and
+ // initiate the blocking protocol.
+ let task: Box<Task> = Local::take();
+ task.deschedule(1, |task| {
+ self.decrement(task)
+ });
+
+ match self.try_recv() {
+            // Messages which were actually popped from the queue shouldn't count as
+ // a steal, so offset the decrement here (we already have our
+ // "steal" factored into the channel count above).
+ data @ Ok(..) |
+ data @ Err(Upgraded(..)) => {
+ self.steals -= 1;
+ data
+ }
+
+ data => data,
+ }
+ }
+
+ pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
+ match self.queue.pop() {
+ // If we stole some data, record to that effect (this will be
+ // factored into cnt later on).
+ //
+ // Note that we don't allow steals to grow without bound in order to
+ // prevent eventual overflow of either steals or cnt as an overflow
+ // would have catastrophic results. Sometimes, steals > cnt, but
+ // other times cnt > steals, so we don't know the relation between
+ // steals and cnt. This code path is executed only rarely, so we do
+            // a pretty slow operation of swapping 0 into cnt, taking steals
+ // down as much as possible (without going negative), and then
+ // adding back in whatever we couldn't factor into steals.
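+            //
+            // (Illustrative numbers: if steals has grown past MAX_STEALS and
+            // the swap of cnt returns 3, then m = min(3, steals) == 3, steals
+            // drops by 3, and bump(3 - 3) adds nothing back, leaving the
+            // logical count cnt - steals unchanged.)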
+ Some(data) => {
+ if self.steals > MAX_STEALS {
+ match self.cnt.swap(0, atomic::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+ }
+ n => {
+ let m = cmp::min(n, self.steals);
+ self.steals -= m;
+ self.bump(n - m);
+ }
+ }
+ assert!(self.steals >= 0);
+ }
+ self.steals += 1;
+ match data {
+ Data(t) => Ok(t),
+ GoUp(up) => Err(Upgraded(up)),
+ }
+ }
+
+ None => {
+ match self.cnt.load(atomic::SeqCst) {
+ n if n != DISCONNECTED => Err(Empty),
+
+ // This is a little bit of a tricky case. We failed to pop
+ // data above, and then we have viewed that the channel is
+ // disconnected. In this window more data could have been
+ // sent on the channel. It doesn't really make sense to
+ // return that the channel is disconnected when there's
+ // actually data on it, so be extra sure there's no data by
+ // popping one more time.
+ //
+ // We can ignore steals because the other end is
+ // disconnected and we'll never need to really factor in our
+ // steals again.
+ _ => {
+ match self.queue.pop() {
+ Some(Data(t)) => Ok(t),
+ Some(GoUp(up)) => Err(Upgraded(up)),
+ None => Err(Disconnected),
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pub fn drop_chan(&mut self) {
+ // Dropping a channel is pretty simple, we just flag it as disconnected
+ // and then wakeup a blocker if there is one.
+ match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
+ -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
+ DISCONNECTED => {}
+ n => { assert!(n >= 0); }
+ }
+ }
+
+ pub fn drop_port(&mut self) {
+ // Dropping a port seems like a fairly trivial thing. In theory all we
+ // need to do is flag that we're disconnected and then everything else
+ // can take over (we don't have anyone to wake up).
+ //
+        // The catch for Ports is that we want to drop the entire contents of
+        // the queue. There are multiple reasons for having this property, the
+        // largest of which is that if another chan is sitting in this channel's
+        // queue (sent but not yet received), then waiting on that chan's port
+        // would cause a deadlock.
+        //
+        // So if we accept that we must now destroy the entire contents of the
+        // queue, this code may make a bit more sense. The tricky part is that
+        // we can't let any in-flight sends go un-dropped; we have to make sure
+        // *everything* is dropped and nothing new will come onto the channel.
+
+ // The first thing we do is set a flag saying that we're done for. All
+ // sends are gated on this flag, so we're immediately guaranteed that
+ // there are a bounded number of active sends that we'll have to deal
+ // with.
+ self.port_dropped.store(true, atomic::SeqCst);
+
+ // Now that we're guaranteed to deal with a bounded number of senders,
+ // we need to drain the queue. This draining process happens atomically
+ // with respect to the "count" of the channel. If the count is nonzero
+ // (with steals taken into account), then there must be data on the
+ // channel. In this case we drain everything and then try again. We will
+ // continue to fail while active senders send data while we're dropping
+ // data, but eventually we're guaranteed to break out of this loop
+ // (because there is a bounded number of senders).
+ let mut steals = self.steals;
+ while {
+ let cnt = self.cnt.compare_and_swap(
+ steals, DISCONNECTED, atomic::SeqCst);
+ cnt != DISCONNECTED && cnt != steals
+ } {
+ loop {
+ match self.queue.pop() {
+ Some(..) => { steals += 1; }
+ None => break
+ }
+ }
+ }
+
+ // At this point in time, we have gated all future senders from sending,
+ // and we have flagged the channel as being disconnected. The senders
+ // still have some responsibility, however, because some sends may not
+ // complete until after we flag the disconnection. There are more
+        // details in the sending methods that see DISCONNECTED.
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // Tests to see whether this port can receive without blocking. If Ok is
+ // returned, then that's the answer. If Err is returned, then the returned
+ // port needs to be queried instead (an upgrade happened)
+ pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
+ // We peek at the queue to see if there's anything on it, and we use
+ // this return value to determine if we should pop from the queue and
+ // upgrade this channel immediately. If it looks like we've got an
+ // upgrade pending, then go through the whole recv rigamarole to update
+ // the internal state.
+ match self.queue.peek() {
+ Some(&GoUp(..)) => {
+ match self.recv() {
+ Err(Upgraded(port)) => Err(port),
+ _ => unreachable!(),
+ }
+ }
+ Some(..) => Ok(true),
+ None => Ok(false)
+ }
+ }
+
+ // increment the count on the channel (used for selection)
+ fn bump(&mut self, amt: int) -> int {
+ match self.cnt.fetch_add(amt, atomic::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomic::SeqCst);
+ DISCONNECTED
+ }
+ n => n
+ }
+ }
+
+ // Attempts to start selecting on this port. Like a oneshot, this can fail
+ // immediately because of an upgrade.
+ pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
+ match self.decrement(task) {
+ Ok(()) => SelSuccess,
+ Err(task) => {
+ let ret = match self.queue.peek() {
+ Some(&GoUp(..)) => {
+ match self.queue.pop() {
+ Some(GoUp(port)) => SelUpgraded(task, port),
+ _ => unreachable!(),
+ }
+ }
+ Some(..) => SelCanceled(task),
+ None => SelCanceled(task),
+ };
+ // Undo our decrement above, and we should be guaranteed that the
+ // previous value is positive because we're not going to sleep
+ let prev = self.bump(1);
+ assert!(prev == DISCONNECTED || prev >= 0);
+ return ret;
+ }
+ }
+ }
+
+ // Removes a previous task from being blocked in this port
+ pub fn abort_selection(&mut self,
+ was_upgrade: bool) -> Result<bool, Receiver<T>> {
+ // If we're aborting selection after upgrading from a oneshot, then
+        // we're guaranteed that no one is waiting. The only way that we could
+ // have seen the upgrade is if data was actually sent on the channel
+ // half again. For us, this means that there is guaranteed to be data on
+ // this channel. Furthermore, we're guaranteed that there was no
+ // start_selection previously, so there's no need to modify `self.cnt`
+ // at all.
+ //
+ // Hence, because of these invariants, we immediately return `Ok(true)`.
+ // Note that the data may not actually be sent on the channel just yet.
+ // The other end could have flagged the upgrade but not sent data to
+        // this end. This is fine because we know it's a small bounded window
+        // of time until the data is actually sent.
+ if was_upgrade {
+ assert_eq!(self.steals, 0);
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ return Ok(true)
+ }
+
+ // We want to make sure that the count on the channel goes non-negative,
+ // and in the stream case we can have at most one steal, so just assume
+ // that we had one steal.
+ let steals = 1;
+ let prev = self.bump(steals + 1);
+
+ // If we were previously disconnected, then we know for sure that there
+ // is no task in to_wake, so just keep going
+ let has_data = if prev == DISCONNECTED {
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ true // there is data, that data is that we're disconnected
+ } else {
+ let cur = prev + steals + 1;
+ assert!(cur >= 0);
+
+ // If the previous count was negative, then we just made things go
+ // positive, hence we passed the -1 boundary and we're responsible
+ // for removing the to_wake() field and trashing it.
+ //
+ // If the previous count was positive then we're in a tougher
+ // situation. A possible race is that a sender just incremented
+ // through -1 (meaning it's going to try to wake a task up), but it
+ // hasn't yet read the to_wake. In order to prevent a future recv()
+ // from waking up too early (this sender picking up the plastered
+ // over to_wake), we spin loop here waiting for to_wake to be 0.
+ // Note that this entire select() implementation needs an overhaul,
+ // and this is *not* the worst part of it, so this is not done as a
+ // final solution but rather out of necessity for now to get
+ // something working.
+ if prev < 0 {
+ self.take_to_wake().trash();
+ } else {
+ while self.to_wake.load(atomic::SeqCst) != 0 {
+ Thread::yield_now();
+ }
+ }
+ assert_eq!(self.steals, 0);
+ self.steals = steals;
+
+ // if we were previously positive, then there's surely data to
+ // receive
+ prev >= 0
+ };
+
+ // Now that we've determined that this queue "has data", we peek at the
+ // queue to see if the data is an upgrade or not. If it's an upgrade,
+ // then we need to destroy this port and abort selection on the
+ // upgraded port.
+ if has_data {
+ match self.queue.peek() {
+ Some(&GoUp(..)) => {
+ match self.queue.pop() {
+ Some(GoUp(port)) => Err(port),
+ _ => unreachable!(),
+ }
+ }
+ _ => Ok(true),
+ }
+ } else {
+ Ok(false)
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+ fn drop(&mut self) {
+ // Note that this load is not only an assert for correctness about
+ // disconnection, but also a proper fence before the read of
+ // `to_wake`, so this assert cannot be removed with also removing
+ // the `to_wake` assert.
+ assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+ assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Synchronous channels/ports
+///
+/// This channel implementation differs significantly from the asynchronous
+/// implementations found next to it (oneshot/stream/share). This is an
+/// implementation of a synchronous, bounded buffer channel.
+///
+/// Each channel is created with some amount of backing buffer, and sends will
+/// *block* until buffer space becomes available. A buffer size of 0 is valid,
+/// which means that every successful send is paired with a successful recv.
+///
+/// This flavor of channels defines a new `send_opt` method, which sends a
+/// message without panicking if it cannot be delivered.
+///
+/// Another major difference is that send() will *always* return the data if
+/// it couldn't be sent. This is because it is deterministically known whether
+/// or not the data was received.
+///
+/// Implementation-wise, it can all be summed up with "use a mutex plus some
+/// logic". The mutex used here is an OS native mutex, meaning that no user code
+/// is run inside of the mutex (to prevent context switching). This
+/// implementation shares almost all code for the buffered and unbuffered cases
+/// of a synchronous channel. There are a few branches for the unbuffered case,
+/// but they're mostly just relevant to blocking senders.
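+///
+/// # Example
+///
+/// A minimal sketch of the behavior described above, written against the
+/// public `sync_channel` constructor (assumed here to be backed by this
+/// implementation):
+///
+/// ```
+/// use std::comm::sync_channel;
+///
+/// let (tx, rx) = sync_channel(1);
+///
+/// // this returns immediately: there is buffer space for one message
+/// tx.send(1i);
+///
+/// spawn(proc() {
+///     // this blocks until the first message has been received
+///     tx.send(2i);
+/// });
+///
+/// assert_eq!(rx.recv(), 1i);
+/// assert_eq!(rx.recv(), 2i);
+/// ```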
+
+use core::prelude::*;
+
+pub use self::Failure::*;
+use self::Blocker::*;
+
+use alloc::boxed::Box;
+use vec::Vec;
+use core::mem;
+use core::cell::UnsafeCell;
+use rustrt::local::Local;
+use rustrt::mutex::{NativeMutex, LockGuard};
+use rustrt::task::{Task, BlockedTask};
+
+use sync::atomic;
+
+pub struct Packet<T> {
+    /// The only field outside of the mutex, kept here mainly because the
+    /// shared channel already had this refcounting code implemented.
+ channels: atomic::AtomicUint,
+
+ /// The state field is protected by this mutex
+ lock: NativeMutex,
+ state: UnsafeCell<State<T>>,
+}
+
+struct State<T> {
+ disconnected: bool, // Is the channel disconnected yet?
+ queue: Queue, // queue of senders waiting to send data
+ blocker: Blocker, // currently blocked task on this channel
+ buf: Buffer<T>, // storage for buffered messages
+ cap: uint, // capacity of this channel
+
+ /// A curious flag used to indicate whether a sender failed or succeeded in
+ /// blocking. This is used to transmit information back to the task that it
+ /// must dequeue its message from the buffer because it was not received.
+ /// This is only relevant in the 0-buffer case. This obviously cannot be
+ /// safely constructed, but it's guaranteed to always have a valid pointer
+ /// value.
+ canceled: Option<&'static mut bool>,
+}
+
+/// Possible flavors of tasks who can be blocked on this channel.
+enum Blocker {
+ BlockedSender(BlockedTask),
+ BlockedReceiver(BlockedTask),
+ NoneBlocked
+}
+
+/// Simple queue for threading tasks together. Nodes are stack-allocated, so
+/// this structure is not safe at all
+struct Queue {
+ head: *mut Node,
+ tail: *mut Node,
+}
+
+struct Node {
+ task: Option<BlockedTask>,
+ next: *mut Node,
+}
+
+/// A simple ring-buffer
+struct Buffer<T> {
+ buf: Vec<Option<T>>,
+ start: uint,
+ size: uint,
+}
+
+#[deriving(Show)]
+pub enum Failure {
+ Empty,
+ Disconnected,
+}
+
+/// Atomically blocks the current task, placing it into `slot`, unlocking `lock`
+/// in the meantime. This re-locks the mutex upon returning.
+fn wait(slot: &mut Blocker, f: fn(BlockedTask) -> Blocker,
+ lock: &NativeMutex) {
+ let me: Box<Task> = Local::take();
+ me.deschedule(1, |task| {
+ match mem::replace(slot, f(task)) {
+ NoneBlocked => {}
+ _ => unreachable!(),
+ }
+ unsafe { lock.unlock_noguard(); }
+ Ok(())
+ });
+ unsafe { lock.lock_noguard(); }
+}
+
+/// Wakes up a task, dropping the lock at the correct time
+fn wakeup(task: BlockedTask, guard: LockGuard) {
+ // We need to be careful to wake up the waiting task *outside* of the mutex
+ // in case it incurs a context switch.
+ mem::drop(guard);
+ task.wake().map(|t| t.reawaken());
+}
+
+impl<T: Send> Packet<T> {
+ pub fn new(cap: uint) -> Packet<T> {
+ Packet {
+ channels: atomic::AtomicUint::new(1),
+ lock: unsafe { NativeMutex::new() },
+ state: UnsafeCell::new(State {
+ disconnected: false,
+ blocker: NoneBlocked,
+ cap: cap,
+ canceled: None,
+ queue: Queue {
+ head: 0 as *mut Node,
+ tail: 0 as *mut Node,
+ },
+ buf: Buffer {
+ buf: Vec::from_fn(cap + if cap == 0 {1} else {0}, |_| None),
+ start: 0,
+ size: 0,
+ },
+ }),
+ }
+ }
+
+ // Locks this channel, returning a guard for the state and the mutable state
+ // itself. Care should be taken to ensure that the state does not escape the
+ // guard!
+ //
+ // Note that we're ok promoting an & reference to an &mut reference because
+ // the lock ensures that we're the only ones in the world with a pointer to
+ // the state.
+ fn lock<'a>(&'a self) -> (LockGuard<'a>, &'a mut State<T>) {
+ unsafe {
+ let guard = self.lock.lock();
+ (guard, &mut *self.state.get())
+ }
+ }
+
+ pub fn send(&self, t: T) -> Result<(), T> {
+ let (guard, state) = self.lock();
+
+ // wait for a slot to become available, and enqueue the data
+ while !state.disconnected && state.buf.size() == state.buf.cap() {
+ state.queue.enqueue(&self.lock);
+ }
+ if state.disconnected { return Err(t) }
+ state.buf.enqueue(t);
+
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ // if our capacity is 0, then we need to wait for a receiver to be
+ // available to take our data. After waiting, we check again to make
+ // sure the port didn't go away in the meantime. If it did, we need
+ // to hand back our data.
+ NoneBlocked if state.cap == 0 => {
+ let mut canceled = false;
+ assert!(state.canceled.is_none());
+ state.canceled = Some(unsafe { mem::transmute(&mut canceled) });
+ wait(&mut state.blocker, BlockedSender, &self.lock);
+ if canceled {Err(state.buf.dequeue())} else {Ok(())}
+ }
+
+ // success, we buffered some data
+ NoneBlocked => Ok(()),
+
+ // success, someone's about to receive our buffered data.
+ BlockedReceiver(task) => { wakeup(task, guard); Ok(()) }
+
+ BlockedSender(..) => panic!("lolwut"),
+ }
+ }
+
+ pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
+ let (guard, state) = self.lock();
+ if state.disconnected {
+ Err(super::RecvDisconnected(t))
+ } else if state.buf.size() == state.buf.cap() {
+ Err(super::Full(t))
+ } else if state.cap == 0 {
+ // With capacity 0, even though we have buffer space we can't
+ // transfer the data unless there's a receiver waiting.
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ NoneBlocked => Err(super::Full(t)),
+ BlockedSender(..) => unreachable!(),
+ BlockedReceiver(task) => {
+ state.buf.enqueue(t);
+ wakeup(task, guard);
+ Ok(())
+ }
+ }
+ } else {
+ // If the buffer has some space and the capacity isn't 0, then we
+ // just enqueue the data for later retrieval, ensuring to wake up
+ // any blocked receiver if there is one.
+ assert!(state.buf.size() < state.buf.cap());
+ state.buf.enqueue(t);
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ BlockedReceiver(task) => wakeup(task, guard),
+ NoneBlocked => {}
+ BlockedSender(..) => unreachable!(),
+ }
+ Ok(())
+ }
+ }
+
+ // Receives a message from this channel
+ //
+    // When reading this, remember that there can only ever be one receiver at
+    // a time.
+ pub fn recv(&self) -> Result<T, ()> {
+ let (guard, state) = self.lock();
+
+ // Wait for the buffer to have something in it. No need for a while loop
+ // because we're the only receiver.
+ let mut waited = false;
+ if !state.disconnected && state.buf.size() == 0 {
+ wait(&mut state.blocker, BlockedReceiver, &self.lock);
+ waited = true;
+ }
+ if state.disconnected && state.buf.size() == 0 { return Err(()) }
+
+ // Pick up the data, wake up our neighbors, and carry on
+ assert!(state.buf.size() > 0);
+ let ret = state.buf.dequeue();
+ self.wakeup_senders(waited, guard, state);
+ return Ok(ret);
+ }
+
+ pub fn try_recv(&self) -> Result<T, Failure> {
+ let (guard, state) = self.lock();
+
+ // Easy cases first
+ if state.disconnected { return Err(Disconnected) }
+ if state.buf.size() == 0 { return Err(Empty) }
+
+ // Be sure to wake up neighbors
+ let ret = Ok(state.buf.dequeue());
+ self.wakeup_senders(false, guard, state);
+
+ return ret;
+ }
+
+ // Wake up pending senders after some data has been received
+ //
+ // * `waited` - flag if the receiver blocked to receive some data, or if it
+ // just picked up some data on the way out
+ // * `guard` - the lock guard that is held over this channel's lock
+ fn wakeup_senders(&self, waited: bool,
+ guard: LockGuard,
+ state: &mut State<T>) {
+ let pending_sender1: Option<BlockedTask> = state.queue.dequeue();
+
+ // If this is a no-buffer channel (cap == 0), then if we didn't wait we
+ // need to ACK the sender. If we waited, then the sender waking us up
+ // was already the ACK.
+ let pending_sender2 = if state.cap == 0 && !waited {
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ NoneBlocked => None,
+ BlockedReceiver(..) => unreachable!(),
+ BlockedSender(task) => {
+ state.canceled.take();
+ Some(task)
+ }
+ }
+ } else {
+ None
+ };
+ mem::drop((state, guard));
+
+ // only outside of the lock do we wake up the pending tasks
+ pending_sender1.map(|t| t.wake().map(|t| t.reawaken()));
+ pending_sender2.map(|t| t.wake().map(|t| t.reawaken()));
+ }
+
+ // Prepares this shared packet for a channel clone, essentially just bumping
+ // a refcount.
+ pub fn clone_chan(&self) {
+ self.channels.fetch_add(1, atomic::SeqCst);
+ }
+
+ pub fn drop_chan(&self) {
+ // Only flag the channel as disconnected if we're the last channel
+ match self.channels.fetch_sub(1, atomic::SeqCst) {
+ 1 => {}
+ _ => return
+ }
+
+ // Not much to do other than wake up a receiver if one's there
+ let (guard, state) = self.lock();
+ if state.disconnected { return }
+ state.disconnected = true;
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ NoneBlocked => {}
+ BlockedSender(..) => unreachable!(),
+ BlockedReceiver(task) => wakeup(task, guard),
+ }
+ }
+
+ pub fn drop_port(&self) {
+ let (guard, state) = self.lock();
+
+ if state.disconnected { return }
+ state.disconnected = true;
+
+ // If the capacity is 0, then the sender may want its data back after
+ // we're disconnected. Otherwise it's now our responsibility to destroy
+ // the buffered data. As with many other portions of this code, this
+ // needs to be careful to destroy the data *outside* of the lock to
+ // prevent deadlock.
+ let _data = if state.cap != 0 {
+ mem::replace(&mut state.buf.buf, Vec::new())
+ } else {
+ Vec::new()
+ };
+ let mut queue = mem::replace(&mut state.queue, Queue {
+ head: 0 as *mut Node,
+ tail: 0 as *mut Node,
+ });
+
+ let waiter = match mem::replace(&mut state.blocker, NoneBlocked) {
+ NoneBlocked => None,
+ BlockedSender(task) => {
+ *state.canceled.take().unwrap() = true;
+ Some(task)
+ }
+ BlockedReceiver(..) => unreachable!(),
+ };
+ mem::drop((state, guard));
+
+ loop {
+ match queue.dequeue() {
+ Some(task) => { task.wake().map(|t| t.reawaken()); }
+ None => break,
+ }
+ }
+ waiter.map(|t| t.wake().map(|t| t.reawaken()));
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+    // Tests whether this port can receive without blocking: true if there is
+    // buffered data or if the channel has been disconnected.
+ pub fn can_recv(&self) -> bool {
+ let (_g, state) = self.lock();
+ state.disconnected || state.buf.size() > 0
+ }
+
+ // Attempts to start selection on this port. This can either succeed or fail
+ // because there is data waiting.
+ pub fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>{
+ let (_g, state) = self.lock();
+ if state.disconnected || state.buf.size() > 0 {
+ Err(task)
+ } else {
+ match mem::replace(&mut state.blocker, BlockedReceiver(task)) {
+ NoneBlocked => {}
+ BlockedSender(..) => unreachable!(),
+ BlockedReceiver(..) => unreachable!(),
+ }
+ Ok(())
+ }
+ }
+
+ // Remove a previous selecting task from this port. This ensures that the
+ // blocked task will no longer be visible to any other threads.
+ //
+ // The return value indicates whether there's data on this port.
+ pub fn abort_selection(&self) -> bool {
+ let (_g, state) = self.lock();
+ match mem::replace(&mut state.blocker, NoneBlocked) {
+ NoneBlocked => true,
+ BlockedSender(task) => {
+ state.blocker = BlockedSender(task);
+ true
+ }
+ BlockedReceiver(task) => { task.trash(); false }
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.channels.load(atomic::SeqCst), 0);
+ let (_g, state) = self.lock();
+ assert!(state.queue.dequeue().is_none());
+ assert!(state.canceled.is_none());
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Buffer, a simple ring buffer backed by Vec<T>
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Buffer<T> {
+ fn enqueue(&mut self, t: T) {
+ let pos = (self.start + self.size) % self.buf.len();
+ self.size += 1;
+ let prev = mem::replace(&mut self.buf[pos], Some(t));
+ assert!(prev.is_none());
+ }
+
+ fn dequeue(&mut self) -> T {
+ let start = self.start;
+ self.size -= 1;
+ self.start = (self.start + 1) % self.buf.len();
+ self.buf[start].take().unwrap()
+ }
+
+ fn size(&self) -> uint { self.size }
+ fn cap(&self) -> uint { self.buf.len() }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
+////////////////////////////////////////////////////////////////////////////////
+
+impl Queue {
+ fn enqueue(&mut self, lock: &NativeMutex) {
+ let task: Box<Task> = Local::take();
+ let mut node = Node {
+ task: None,
+ next: 0 as *mut Node,
+ };
+ task.deschedule(1, |task| {
+ node.task = Some(task);
+ if self.tail.is_null() {
+ self.head = &mut node as *mut Node;
+ self.tail = &mut node as *mut Node;
+ } else {
+ unsafe {
+ (*self.tail).next = &mut node as *mut Node;
+ self.tail = &mut node as *mut Node;
+ }
+ }
+ unsafe { lock.unlock_noguard(); }
+ Ok(())
+ });
+ unsafe { lock.lock_noguard(); }
+ assert!(node.next.is_null());
+ }
+
+ fn dequeue(&mut self) -> Option<BlockedTask> {
+ if self.head.is_null() {
+ return None
+ }
+ let node = self.head;
+ self.head = unsafe { (*node).next };
+ if self.head.is_null() {
+ self.tail = 0 as *mut Node;
+ }
+ unsafe {
+ (*node).next = 0 as *mut Node;
+ Some((*node).task.take().unwrap())
+ }
+ }
+}
extern crate core;
extern crate "collections" as core_collections;
extern crate "rand" as core_rand;
-extern crate "sync" as core_sync;
extern crate libc;
extern crate rustrt;
pub use unicode::char;
-pub use core_sync::comm;
-
/* Exported macros */
pub mod macros;
pub mod task;
pub mod sync;
+pub mod comm;
#[cfg(unix)]
#[path = "sys/unix/mod.rs"] mod sys;
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Atomic types
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent
+//! types.
+//!
+//! This module defines atomic versions of a select number of primitive
+//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [C++11 atomic orderings][1].
+//!
+//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
+//!
+//! Atomic variables are safe to share between threads (they implement `Sync`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
+//! are often used for lazy global initialization.
+//!
+//!
+//! # Examples
+//!
+//! A simple spinlock:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomic::{AtomicUint, SeqCst};
+//!
+//! fn main() {
+//! let spinlock = Arc::new(AtomicUint::new(1));
+//!
+//! let spinlock_clone = spinlock.clone();
+//! spawn(proc() {
+//! spinlock_clone.store(0, SeqCst);
+//! });
+//!
+//! // Wait for the other task to release the lock
+//! while spinlock.load(SeqCst) != 0 {}
+//! }
+//! ```
+//!
+//! Transferring a heap object with `AtomicOption`:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomic::{AtomicOption, SeqCst};
+//!
+//! fn main() {
+//! struct BigObject;
+//!
+//! let shared_big_object = Arc::new(AtomicOption::empty());
+//!
+//! let shared_big_object_clone = shared_big_object.clone();
+//! spawn(proc() {
+//! let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
+//! if unwrapped_big_object.is_some() {
+//! println!("got a big object from another task");
+//! } else {
+//! println!("other task hasn't sent big object yet");
+//! }
+//! });
+//!
+//! shared_big_object.swap(box BigObject, SeqCst);
+//! }
+//! ```
+//!
+//! Keep a global count of live tasks:
+//!
+//! ```
+//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//!
+//! static GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
+//!
+//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
+//! println!("live tasks: {}", old_task_count + 1);
+//! ```
+
+#![allow(deprecated)]
+
+use alloc::boxed::Box;
+use core::mem;
+use core::prelude::{Send, Drop, None, Option, Some};
+
+pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
+pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
+pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomic::fence;
+
+/// An atomic, nullable unique pointer
+///
+/// This can be used as the concurrency primitive for operations that transfer
+/// owned heap objects across tasks.
+#[unsafe_no_drop_flag]
+#[deprecated = "no longer used; will eventually be replaced by a higher-level \
+                concept like MVar"]
+pub struct AtomicOption<T> {
+ p: AtomicUint,
+}
+
+impl<T: Send> AtomicOption<T> {
+ /// Create a new `AtomicOption`
+ pub fn new(p: Box<T>) -> AtomicOption<T> {
+ unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } }
+ }
+
+ /// Create a new `AtomicOption` that doesn't contain a value
+ pub fn empty() -> AtomicOption<T> { AtomicOption { p: AtomicUint::new(0) } }
+
+ /// Store a value, returning the old value
+ #[inline]
+ pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
+ let val = unsafe { mem::transmute(val) };
+
+ match self.p.swap(val, order) {
+ 0 => None,
+ n => Some(unsafe { mem::transmute(n) }),
+ }
+ }
+
+ /// Remove the value, leaving the `AtomicOption` empty.
+ #[inline]
+ pub fn take(&self, order: Ordering) -> Option<Box<T>> {
+ unsafe { self.swap(mem::transmute(0u), order) }
+ }
+
+ /// Replace an empty value with a non-empty value.
+ ///
+ /// Succeeds if the option is `None` and returns `None` if so. If
+ /// the option was already `Some`, returns `Some` of the rejected
+ /// value.
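+    ///
+    /// For example, a sketch of the intended semantics:
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicOption, SeqCst};
+    ///
+    /// let opt = AtomicOption::empty();
+    /// assert!(opt.fill(box 1i, SeqCst).is_none()); // filled the empty slot
+    /// assert!(opt.fill(box 2i, SeqCst).is_some()); // rejected: already full
+    /// ```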
+ #[inline]
+ pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
+ unsafe {
+ let val = mem::transmute(val);
+ let expected = mem::transmute(0u);
+ let oldval = self.p.compare_and_swap(expected, val, order);
+ if oldval == expected {
+ None
+ } else {
+ Some(mem::transmute(val))
+ }
+ }
+ }
+
+ /// Returns `true` if the `AtomicOption` is empty.
+ ///
+ /// Be careful: The caller must have some external method of ensuring the
+ /// result does not get invalidated by another task after this returns.
+ #[inline]
+ pub fn is_empty(&self, order: Ordering) -> bool {
+ self.p.load(order) as uint == 0
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for AtomicOption<T> {
+ fn drop(&mut self) {
+ let _ = self.take(SeqCst);
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+ use super::*;
+
+ #[test]
+ fn option_empty() {
+ let option: AtomicOption<()> = AtomicOption::empty();
+ assert!(option.is_empty(SeqCst));
+ }
+
+ #[test]
+ fn option_swap() {
+ let p = AtomicOption::new(box 1i);
+ let a = box 2i;
+
+ let b = p.swap(a, SeqCst);
+
+ assert!(b == Some(box 1));
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+
+ #[test]
+ fn option_take() {
+ let p = AtomicOption::new(box 1i);
+
+ assert!(p.take(SeqCst) == Some(box 1));
+ assert!(p.take(SeqCst) == None);
+
+ let p2 = box 2i;
+ p.swap(p2, SeqCst);
+
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+
+ #[test]
+ fn option_fill() {
+ let p = AtomicOption::new(box 1i);
+ assert!(p.fill(box 2i, SeqCst).is_some()); // should fail; shouldn't leak!
+ assert!(p.take(SeqCst) == Some(box 1));
+
+ assert!(p.fill(box 2i, SeqCst).is_none()); // shouldn't fail
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A (mostly) lock-free concurrent work-stealing deque
+//!
+//! This module contains an implementation of the Chase-Lev work stealing deque
+//! described in "Dynamic Circular Work-Stealing Deque". The implementation is
+//! heavily based on the pseudocode found in the paper.
+//!
+//! This implementation does not want to have the restriction of a garbage
+//! collector for reclamation of buffers, and instead it uses a shared pool of
+//! buffers. This shared pool is required for correctness in this
+//! implementation.
+//!
+//! The only lock-synchronized portions of this deque are the buffer allocation
+//! and deallocation portions. Otherwise all operations are lock-free.
+//!
+//! # Example
+//!
+//! ```
+//! use std::sync::deque::BufferPool;
+//!
+//! let mut pool = BufferPool::new();
+//! let (mut worker, mut stealer) = pool.deque();
+//!
+//! // Only the worker may push/pop
+//! worker.push(1i);
+//! worker.pop();
+//!
+//! // Stealers take data from the other end of the deque
+//! worker.push(1i);
+//! stealer.steal();
+//!
+//! // Stealers can be cloned to have many stealers stealing in parallel
+//! worker.push(1i);
+//! let mut stealer2 = stealer.clone();
+//! stealer2.steal();
+//! ```
+
+#![experimental]
+
+// NB: the "buffer pool" strategy is not done for speed, but rather for
+// correctness. For more info, see the comment on `swap_buffer`
+
+// FIXME: all atomic operations in this module use a SeqCst ordering. That is
+// probably overkill
+
+pub use self::Stolen::*;
+
+use core::prelude::*;
+
+use alloc::arc::Arc;
+use alloc::heap::{allocate, deallocate};
+use alloc::boxed::Box;
+use vec::Vec;
+use core::kinds::marker;
+use core::mem::{forget, min_align_of, size_of, transmute};
+use core::ptr;
+use rustrt::exclusive::Exclusive;
+
+use sync::atomic::{AtomicInt, AtomicPtr, SeqCst};
+
+// Once the queue is less than 1/K full, it will be downsized. Note that the
+// deque requires this fraction (1/K) to be less than 1/2.
+static K: int = 4;
+
+// Minimum number of bits that a buffer size should be. No buffer will resize to
+// under this value, and all deques will initially contain a buffer of this
+// size.
+//
+// The size in question is 1 << MIN_BITS
+static MIN_BITS: uint = 7;
+
+struct Deque<T> {
+ bottom: AtomicInt,
+ top: AtomicInt,
+ array: AtomicPtr<Buffer<T>>,
+ pool: BufferPool<T>,
+}
+
+/// Worker half of the work-stealing deque. This worker has exclusive access to
+/// one side of the deque, and uses the `push` and `pop` methods to manipulate
+/// it.
+///
+/// There may only be one worker per deque.
+pub struct Worker<T> {
+ deque: Arc<Deque<T>>,
+ _noshare: marker::NoSync,
+}
+
+/// The stealing half of the work-stealing deque. Stealers have access to the
+/// opposite end of the deque from the worker, and they only have access to the
+/// `steal` method.
+pub struct Stealer<T> {
+ deque: Arc<Deque<T>>,
+ _noshare: marker::NoSync,
+}
+
+/// When stealing some data, this is an enumeration of the possible outcomes.
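+///
+/// A typical stealer loop might look like the following sketch (`stealer`
+/// here stands for a `Stealer` obtained from `BufferPool::deque`): retry on
+/// `Abort`, stop on `Empty`.
+///
+/// ```ignore
+/// loop {
+///     match stealer.steal() {
+///         Data(work) => { /* run the stolen work */ }
+///         Empty => break,  // nothing left in the deque
+///         Abort => {}      // lost a race with another stealer; retry
+///     }
+/// }
+/// ```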
+#[deriving(PartialEq, Show)]
+pub enum Stolen<T> {
+ /// The deque was empty at the time of stealing
+ Empty,
+ /// The stealer lost the race for stealing data, and a retry may return more
+ /// data.
+ Abort,
+ /// The stealer has successfully stolen some data.
+ Data(T),
+}
+
+/// The allocation pool for buffers used by work-stealing deques. Right now this
+/// structure is used for reclamation of memory after it is no longer in use by
+/// deques.
+///
+/// This data structure is protected by a mutex, but it is rarely used. Deques
+/// will only use this structure when allocating a new buffer or deallocating a
+/// previous one.
+pub struct BufferPool<T> {
+ pool: Arc<Exclusive<Vec<Box<Buffer<T>>>>>,
+}
+
+/// An internal buffer used by the chase-lev deque. This structure is actually
+/// implemented as a circular buffer, and is used as the intermediate storage of
+/// the data in the deque.
+///
+/// This type is implemented with *T instead of Vec<T> for two reasons:
+///
+/// 1. There is nothing safe about using this buffer. This easily allows the
+///    same value to be read twice into Rust, and there is nothing to
+/// prevent this. The usage by the deque must ensure that one of the
+/// values is forgotten. Furthermore, we only ever want to manually run
+/// destructors for values in this buffer (on drop) because the bounds
+/// are defined by the deque it's owned by.
+///
+/// 2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
+/// LLVM is probably pretty good at doing this already.
+struct Buffer<T> {
+ storage: *const T,
+ log_size: uint,
+}
+
+impl<T: Send> BufferPool<T> {
+ /// Allocates a new buffer pool which in turn can be used to allocate new
+ /// deques.
+ pub fn new() -> BufferPool<T> {
+ BufferPool { pool: Arc::new(Exclusive::new(Vec::new())) }
+ }
+
+ /// Allocates a new work-stealing deque which will send/receiving memory to
+ /// and from this buffer pool.
+ pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
+ let a = Arc::new(Deque::new(self.clone()));
+ let b = a.clone();
+ (Worker { deque: a, _noshare: marker::NoSync },
+ Stealer { deque: b, _noshare: marker::NoSync })
+ }
+
+ fn alloc(&mut self, bits: uint) -> Box<Buffer<T>> {
+ unsafe {
+ let mut pool = self.pool.lock();
+ match pool.iter().position(|x| x.size() >= (1 << bits)) {
+ Some(i) => pool.remove(i).unwrap(),
+ None => box Buffer::new(bits)
+ }
+ }
+ }
+
+ fn free(&self, buf: Box<Buffer<T>>) {
+ unsafe {
+ let mut pool = self.pool.lock();
+ match pool.iter().position(|v| v.size() > buf.size()) {
+ Some(i) => pool.insert(i, buf),
+ None => pool.push(buf),
+ }
+ }
+ }
+}
+
+impl<T: Send> Clone for BufferPool<T> {
+ fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
+}
+
+impl<T: Send> Worker<T> {
+ /// Pushes data onto the front of this work queue.
+ pub fn push(&self, t: T) {
+ unsafe { self.deque.push(t) }
+ }
+ /// Pops data off the front of the work queue, returning `None` on an empty
+ /// queue.
+ pub fn pop(&self) -> Option<T> {
+ unsafe { self.deque.pop() }
+ }
+
+ /// Gets access to the buffer pool that this worker is attached to. This can
+ /// be used to create more deques which share the same buffer pool as this
+ /// deque.
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
+ }
+}
+
+impl<T: Send> Stealer<T> {
+ /// Steals work off the end of the queue (opposite of the worker's end)
+ pub fn steal(&self) -> Stolen<T> {
+ unsafe { self.deque.steal() }
+ }
+
+ /// Gets access to the buffer pool that this stealer is attached to. This
+ /// can be used to create more deques which share the same buffer pool as
+ /// this deque.
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
+ }
+}
+
+impl<T: Send> Clone for Stealer<T> {
+ fn clone(&self) -> Stealer<T> {
+ Stealer { deque: self.deque.clone(), _noshare: marker::NoSync }
+ }
+}
+
+// Almost all of this code can be found directly in the paper so I'm not
+// personally going to heavily comment what's going on here.
+
+impl<T: Send> Deque<T> {
+ fn new(mut pool: BufferPool<T>) -> Deque<T> {
+ let buf = pool.alloc(MIN_BITS);
+ Deque {
+ bottom: AtomicInt::new(0),
+ top: AtomicInt::new(0),
+ array: AtomicPtr::new(unsafe { transmute(buf) }),
+ pool: pool,
+ }
+ }
+
+ unsafe fn push(&self, data: T) {
+ let mut b = self.bottom.load(SeqCst);
+ let t = self.top.load(SeqCst);
+ let mut a = self.array.load(SeqCst);
+ let size = b - t;
+ if size >= (*a).size() - 1 {
+ // You won't find this code in the chase-lev deque paper. This is
+ // alluded to in a small footnote, however. We always free a buffer
+ // when growing in order to prevent leaks.
+ a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
+ b = self.bottom.load(SeqCst);
+ }
+ (*a).put(b, data);
+ self.bottom.store(b + 1, SeqCst);
+ }
+
+ unsafe fn pop(&self) -> Option<T> {
+ let b = self.bottom.load(SeqCst);
+ let a = self.array.load(SeqCst);
+ let b = b - 1;
+ self.bottom.store(b, SeqCst);
+ let t = self.top.load(SeqCst);
+ let size = b - t;
+ if size < 0 {
+ self.bottom.store(t, SeqCst);
+ return None;
+ }
+ let data = (*a).get(b);
+ if size > 0 {
+ self.maybe_shrink(b, t);
+ return Some(data);
+ }
+ if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
+ self.bottom.store(t + 1, SeqCst);
+ return Some(data);
+ } else {
+ self.bottom.store(t + 1, SeqCst);
+ forget(data); // someone else stole this value
+ return None;
+ }
+ }
+
+ unsafe fn steal(&self) -> Stolen<T> {
+ let t = self.top.load(SeqCst);
+ let old = self.array.load(SeqCst);
+ let b = self.bottom.load(SeqCst);
+ let a = self.array.load(SeqCst);
+ let size = b - t;
+ if size <= 0 { return Empty }
+ if size % (*a).size() == 0 {
+ if a == old && t == self.top.load(SeqCst) {
+ return Empty
+ }
+ return Abort
+ }
+ let data = (*a).get(t);
+ if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
+ Data(data)
+ } else {
+ forget(data); // someone else stole this value
+ Abort
+ }
+ }
+
+ unsafe fn maybe_shrink(&self, b: int, t: int) {
+ let a = self.array.load(SeqCst);
+ if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
+ self.swap_buffer(b, a, (*a).resize(b, t, -1));
+ }
+ }
+
+ // Helper routine not mentioned in the paper which is used in growing and
+ // shrinking buffers to swap in a new buffer into place. As a bit of a
+ // recap, the whole point that we need a buffer pool rather than just
+ // calling malloc/free directly is that stealers can continue using buffers
+ // after this method has called 'free' on it. The continued usage is simply
+ // a read followed by a forget, but we must make sure that the memory can
+ // continue to be read after we flag this buffer for reclamation.
+ unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
+ buf: Buffer<T>) -> *mut Buffer<T> {
+ let newbuf: *mut Buffer<T> = transmute(box buf);
+ self.array.store(newbuf, SeqCst);
+ let ss = (*newbuf).size();
+ self.bottom.store(b + ss, SeqCst);
+ let t = self.top.load(SeqCst);
+ if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
+ self.bottom.store(b, SeqCst);
+ }
+ self.pool.free(transmute(old));
+ return newbuf;
+ }
+}
+
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Deque<T> {
+ fn drop(&mut self) {
+ let t = self.top.load(SeqCst);
+ let b = self.bottom.load(SeqCst);
+ let a = self.array.load(SeqCst);
+        // Free whatever is leftover in the deque, and then move the buffer
+ // back into the pool.
+ for i in range(t, b) {
+ let _: T = unsafe { (*a).get(i) };
+ }
+ self.pool.free(unsafe { transmute(a) });
+ }
+}
+
+#[inline]
+fn buffer_alloc_size<T>(log_size: uint) -> uint {
+ (1 << log_size) * size_of::<T>()
+}
+
+impl<T: Send> Buffer<T> {
+ unsafe fn new(log_size: uint) -> Buffer<T> {
+ let size = buffer_alloc_size::<T>(log_size);
+ let buffer = allocate(size, min_align_of::<T>());
+ if buffer.is_null() { ::alloc::oom() }
+ Buffer {
+ storage: buffer as *const T,
+ log_size: log_size,
+ }
+ }
+
+ fn size(&self) -> int { 1 << self.log_size }
+
+ // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
+ fn mask(&self) -> int { (1 << self.log_size) - 1 }
+
+ unsafe fn elem(&self, i: int) -> *const T {
+ self.storage.offset(i & self.mask())
+ }
+
+ // This does not protect against loading duplicate values of the same cell,
+ // nor does this clear out the contents contained within. Hence, this is a
+ // very unsafe method which the caller needs to treat specially in case a
+ // race is lost.
+ unsafe fn get(&self, i: int) -> T {
+ ptr::read(self.elem(i))
+ }
+
+ // Unsafe because this unsafely overwrites possibly uninitialized or
+ // initialized data.
+ unsafe fn put(&self, i: int, t: T) {
+ ptr::write(self.elem(i) as *mut T, t);
+ }
+
+ // Again, unsafe because this has incredibly dubious ownership violations.
+ // It is assumed that this buffer is immediately dropped.
+ unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
+ // NB: not entirely obvious, but thanks to 2's complement,
+ // casting delta to uint and then adding gives the desired
+ // effect.
+ let buf = Buffer::new(self.log_size + delta as uint);
+ for i in range(t, b) {
+ buf.put(i, self.get(i));
+ }
+ return buf;
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Buffer<T> {
+ fn drop(&mut self) {
+ // It is assumed that all buffers are empty on drop.
+ let size = buffer_alloc_size::<T>(self.log_size);
+ unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use prelude::*;
+ use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
+
+ use mem;
+ use rustrt::thread::Thread;
+ use rand;
+ use rand::Rng;
+ use sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+ AtomicUint, INIT_ATOMIC_UINT};
+ use vec;
+
+ #[test]
+ fn smoke() {
+ let pool = BufferPool::new();
+ let (w, s) = pool.deque();
+ assert_eq!(w.pop(), None);
+ assert_eq!(s.steal(), Empty);
+ w.push(1i);
+ assert_eq!(w.pop(), Some(1));
+ w.push(1);
+ assert_eq!(s.steal(), Data(1));
+ w.push(1);
+ assert_eq!(s.clone().steal(), Data(1));
+ }
+
+ #[test]
+ fn stealpush() {
+ static AMT: int = 100000;
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
+ let t = Thread::start(proc() {
+ let mut left = AMT;
+ while left > 0 {
+ match s.steal() {
+ Data(i) => {
+ assert_eq!(i, 1);
+ left -= 1;
+ }
+ Abort | Empty => {}
+ }
+ }
+ });
+
+ for _ in range(0, AMT) {
+ w.push(1);
+ }
+
+ t.join();
+ }
+
+ #[test]
+ fn stealpush_large() {
+ static AMT: int = 100000;
+ let pool = BufferPool::<(int, int)>::new();
+ let (w, s) = pool.deque();
+ let t = Thread::start(proc() {
+ let mut left = AMT;
+ while left > 0 {
+ match s.steal() {
+ Data((1, 10)) => { left -= 1; }
+ Data(..) => panic!(),
+ Abort | Empty => {}
+ }
+ }
+ });
+
+ for _ in range(0, AMT) {
+ w.push((1, 10));
+ }
+
+ t.join();
+ }
+
+ fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
+ nthreads: int, amt: uint) {
+ for _ in range(0, amt) {
+ w.push(box 20);
+ }
+ let mut remaining = AtomicUint::new(amt);
+ let unsafe_remaining: *mut AtomicUint = &mut remaining;
+
+ let threads = range(0, nthreads).map(|_| {
+ let s = s.clone();
+ Thread::start(proc() {
+ unsafe {
+ while (*unsafe_remaining).load(SeqCst) > 0 {
+ match s.steal() {
+ Data(box 20) => {
+ (*unsafe_remaining).fetch_sub(1, SeqCst);
+ }
+ Data(..) => panic!(),
+ Abort | Empty => {}
+ }
+ }
+ }
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ while remaining.load(SeqCst) > 0 {
+ match w.pop() {
+ Some(box 20) => { remaining.fetch_sub(1, SeqCst); }
+ Some(..) => panic!(),
+ None => {}
+ }
+ }
+
+ for thread in threads.into_iter() {
+ thread.join();
+ }
+ }
+
+ #[test]
+ fn run_stampede() {
+ let pool = BufferPool::<Box<int>>::new();
+ let (w, s) = pool.deque();
+ stampede(w, s, 8, 10000);
+ }
+
+ #[test]
+ fn many_stampede() {
+ static AMT: uint = 4;
+ let pool = BufferPool::<Box<int>>::new();
+ let threads = range(0, AMT).map(|_| {
+ let (w, s) = pool.deque();
+ Thread::start(proc() {
+ stampede(w, s, 4, 10000);
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ for thread in threads.into_iter() {
+ thread.join();
+ }
+ }
+
+ #[test]
+ fn stress() {
+ static AMT: int = 100000;
+ static NTHREADS: int = 8;
+ static DONE: AtomicBool = INIT_ATOMIC_BOOL;
+ static HITS: AtomicUint = INIT_ATOMIC_UINT;
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
+
+ let threads = range(0, NTHREADS).map(|_| {
+ let s = s.clone();
+ Thread::start(proc() {
+ loop {
+ match s.steal() {
+ Data(2) => { HITS.fetch_add(1, SeqCst); }
+ Data(..) => panic!(),
+ _ if DONE.load(SeqCst) => break,
+ _ => {}
+ }
+ }
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ let mut rng = rand::task_rng();
+ let mut expected = 0;
+ while expected < AMT {
+ if rng.gen_range(0i, 3) == 2 {
+ match w.pop() {
+ None => {}
+ Some(2) => { HITS.fetch_add(1, SeqCst); },
+ Some(_) => panic!(),
+ }
+ } else {
+ expected += 1;
+ w.push(2);
+ }
+ }
+
+ while HITS.load(SeqCst) < AMT as uint {
+ match w.pop() {
+ None => {}
+ Some(2) => { HITS.fetch_add(1, SeqCst); },
+ Some(_) => panic!(),
+ }
+ }
+ DONE.store(true, SeqCst);
+
+ for thread in threads.into_iter() {
+ thread.join();
+ }
+
+ assert_eq!(HITS.load(SeqCst), expected as uint);
+ }
+
+ #[test]
+ #[cfg_attr(windows, ignore)] // apparently windows scheduling is weird?
+ fn no_starvation() {
+ static AMT: int = 10000;
+ static NTHREADS: int = 4;
+ static DONE: AtomicBool = INIT_ATOMIC_BOOL;
+ let pool = BufferPool::<(int, uint)>::new();
+ let (w, s) = pool.deque();
+
+ let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
+ let s = s.clone();
+ let unique_box = box AtomicUint::new(0);
+ let thread_box = unsafe {
+ *mem::transmute::<&Box<AtomicUint>,
+ *const *mut AtomicUint>(&unique_box)
+ };
+ (Thread::start(proc() {
+ unsafe {
+ loop {
+ match s.steal() {
+ Data((1, 2)) => {
+ (*thread_box).fetch_add(1, SeqCst);
+ }
+ Data(..) => panic!(),
+ _ if DONE.load(SeqCst) => break,
+ _ => {}
+ }
+ }
+ }
+ }), unique_box)
+ }));
+
+ let mut rng = rand::task_rng();
+ let mut myhit = false;
+ 'outer: loop {
+ for _ in range(0, rng.gen_range(0, AMT)) {
+ if !myhit && rng.gen_range(0i, 3) == 2 {
+ match w.pop() {
+ None => {}
+ Some((1, 2)) => myhit = true,
+ Some(_) => panic!(),
+ }
+ } else {
+ w.push((1, 2));
+ }
+ }
+
+ for slot in hits.iter() {
+ let amt = slot.load(SeqCst);
+ if amt == 0 { continue 'outer; }
+ }
+ if myhit {
+ break
+ }
+ }
+
+ DONE.store(true, SeqCst);
+
+ for thread in threads.into_iter() {
+ thread.join();
+ }
+ }
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Wrappers for safe, shared, mutable memory between tasks
+//!
+//! The wrappers in this module build on the primitives from `sync::raw` to
+//! provide safe interfaces around using the primitive locks. These primitives
+//! implement a technique called "poisoning": if a task panics while holding a
+//! lock, all future attempts to use the lock will panic.
+//!
+//! For example, if two tasks are contending on a mutex and one of them panics
+//! after grabbing the lock, the second task will immediately panic because the
+//! lock is now poisoned.
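+//!
+//! A minimal sketch of that behavior (using `std::task::try` to reap the
+//! panicking task before observing the poisoned lock):
+//!
+//! ```should_fail
+//! use std::sync::{Arc, Mutex};
+//! use std::task;
+//!
+//! let lock = Arc::new(Mutex::new(0i));
+//! let lock2 = lock.clone();
+//!
+//! // This task panics while holding the lock, poisoning it.
+//! let _ = task::try(proc() {
+//!     let _guard = lock2.lock();
+//!     panic!("poisoning the mutex");
+//! });
+//!
+//! // Any subsequent attempt to lock now panics immediately.
+//! let _guard = lock.lock();
+//! ```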
+
+use core::prelude::*;
+
+use self::Inner::*;
+
+use core::cell::UnsafeCell;
+use rustrt::local::Local;
+use rustrt::task::Task;
+
+use super::raw;
+
+/****************************************************************************
+ * Poisoning helpers
+ ****************************************************************************/
+
+struct PoisonOnFail<'a> {
+ flag: &'a mut bool,
+ failed: bool,
+}
+
+fn failing() -> bool {
+ Local::borrow(None::<Task>).unwinder.unwinding()
+}
+
+impl<'a> PoisonOnFail<'a> {
+ fn check(flag: bool, name: &str) {
+ if flag {
+ panic!("Poisoned {} - another task failed inside!", name);
+ }
+ }
+
+ fn new<'a>(flag: &'a mut bool, name: &str) -> PoisonOnFail<'a> {
+ PoisonOnFail::check(*flag, name);
+ PoisonOnFail {
+ flag: flag,
+ failed: failing()
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for PoisonOnFail<'a> {
+ fn drop(&mut self) {
+ if !self.failed && failing() {
+ *self.flag = true;
+ }
+ }
+}
+
+/****************************************************************************
+ * Condvar
+ ****************************************************************************/
+
+enum Inner<'a> {
+ InnerMutex(raw::MutexGuard<'a>),
+ InnerRWLock(raw::RWLockWriteGuard<'a>),
+}
+
+impl<'b> Inner<'b> {
+ fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> {
+ match *self {
+ InnerMutex(ref m) => &m.cond,
+ InnerRWLock(ref m) => &m.cond,
+ }
+ }
+}
+
+/// A condition variable, a mechanism for unlock-and-descheduling and
+/// signaling, for use with the lock types.
+pub struct Condvar<'a> {
+ name: &'static str,
+ // n.b. Inner must be after PoisonOnFail because we must set the poison flag
+ // *inside* the mutex, and struct fields are destroyed top-to-bottom
+ // (destroy the lock guard last).
+ poison: PoisonOnFail<'a>,
+ inner: Inner<'a>,
+}
+
+impl<'a> Condvar<'a> {
+ /// Atomically exit the associated lock and block until a signal is sent.
+ ///
+ /// wait() is equivalent to wait_on(0).
+ ///
+ /// # Panics
+ ///
+ /// A task which is killed while waiting on a condition variable will wake
+ /// up, panic, and unlock the associated lock as it unwinds.
+ #[inline]
+ pub fn wait(&self) { self.wait_on(0) }
+
+ /// Atomically exit the associated lock and block on a specified condvar
+ /// until a signal is sent on that same condvar.
+ ///
+ /// The associated lock must have been initialised with an appropriate
+ /// number of condvars. The condvar_id must be between 0 and num_condvars-1
+ /// or else this call will fail.
+ #[inline]
+ pub fn wait_on(&self, condvar_id: uint) {
+ assert!(!*self.poison.flag);
+ self.inner.cond().wait_on(condvar_id);
+ // This is why we need to wrap sync::condvar.
+ PoisonOnFail::check(*self.poison.flag, self.name);
+ }
+
+ /// Wake up a blocked task. Returns false if there was no blocked task.
+ #[inline]
+ pub fn signal(&self) -> bool { self.signal_on(0) }
+
+ /// Wake up a blocked task on a specified condvar (as
+ /// sync::cond.signal_on). Returns false if there was no blocked task.
+ #[inline]
+ pub fn signal_on(&self, condvar_id: uint) -> bool {
+ assert!(!*self.poison.flag);
+ self.inner.cond().signal_on(condvar_id)
+ }
+
+ /// Wake up all blocked tasks. Returns the number of tasks woken.
+ #[inline]
+ pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
+
+ /// Wake up all blocked tasks on a specified condvar (as
+ /// sync::cond.broadcast_on). Returns the number of tasks woken.
+ #[inline]
+ pub fn broadcast_on(&self, condvar_id: uint) -> uint {
+ assert!(!*self.poison.flag);
+ self.inner.cond().broadcast_on(condvar_id)
+ }
+}
+
+/****************************************************************************
+ * Mutex
+ ****************************************************************************/
+
+/// A wrapper type which provides synchronized access to the underlying data, of
+/// type `T`. A mutex always provides exclusive access, and concurrent requests
+/// will block while the mutex is already locked.
+///
+/// # Example
+///
+/// ```
+/// use std::sync::{Mutex, Arc};
+///
+/// let mutex = Arc::new(Mutex::new(1i));
+/// let mutex2 = mutex.clone();
+///
+/// spawn(proc() {
+/// let mut val = mutex2.lock();
+/// *val += 1;
+/// val.cond.signal();
+/// });
+///
+/// let value = mutex.lock();
+/// while *value != 2 {
+/// value.cond.wait();
+/// }
+/// ```
+pub struct Mutex<T> {
+ lock: raw::Mutex,
+ failed: UnsafeCell<bool>,
+ data: UnsafeCell<T>,
+}
+
+/// A guard which is created by locking a mutex. Through this guard the
+/// underlying data can be accessed.
+pub struct MutexGuard<'a, T:'a> {
+ // FIXME #12808: strange name to try to avoid interfering with
+ // field accesses of the contained type via Deref
+ _data: &'a mut T,
+ /// Inner condition variable connected to the locked mutex that this guard
+ /// was created from. This can be used for atomic-unlock-and-deschedule.
+ pub cond: Condvar<'a>,
+}
+
+impl<T: Send> Mutex<T> {
+ /// Creates a new mutex to protect the user-supplied data.
+ pub fn new(user_data: T) -> Mutex<T> {
+ Mutex::new_with_condvars(user_data, 1)
+ }
+
+ /// Create a new mutex, with a specified number of associated condvars.
+ ///
+ /// This will allow calling wait_on/signal_on/broadcast_on with condvar IDs
+ /// between 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be
+ /// allowed but any operations on the condvar will fail.)
+ pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex<T> {
+ Mutex {
+ lock: raw::Mutex::new_with_condvars(num_condvars),
+ failed: UnsafeCell::new(false),
+ data: UnsafeCell::new(user_data),
+ }
+ }
+
+ /// Access the underlying mutable data with mutual exclusion from other
+ /// tasks. The returned value is an RAII guard which will unlock the mutex
+ /// when dropped. All concurrent tasks attempting to lock the mutex will
+ /// block while the returned value is still alive.
+ ///
+ /// # Panics
+ ///
+ /// Panicking while inside the Mutex will unlock the Mutex while unwinding, so
+ /// that other tasks won't block forever. It will also poison the Mutex:
+ /// any tasks that subsequently try to access it (including those already
+ /// blocked on the mutex) will also panic immediately.
+ #[inline]
+ pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
+ let guard = self.lock.lock();
+
+ // These two accesses are safe because we're guaranteed at this point
+ // that we have exclusive access to this mutex. We are indeed able to
+ // promote ourselves from &Mutex to `&mut T`
+ let poison = unsafe { &mut *self.failed.get() };
+ let data = unsafe { &mut *self.data.get() };
+
+ MutexGuard {
+ _data: data,
+ cond: Condvar {
+ name: "Mutex",
+ poison: PoisonOnFail::new(poison, "Mutex"),
+ inner: InnerMutex(guard),
+ },
+ }
+ }
+}
+
+impl<'a, T: Send> Deref<T> for MutexGuard<'a, T> {
+ fn deref<'a>(&'a self) -> &'a T { &*self._data }
+}
+impl<'a, T: Send> DerefMut<T> for MutexGuard<'a, T> {
+ fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
+}
+
+/****************************************************************************
+ * R/W lock protected lock
+ ****************************************************************************/
+
+/// A dual-mode reader-writer lock. The data can be accessed mutably or
+/// immutably, and immutably-accessing tasks may run concurrently.
+///
+/// # Example
+///
+/// ```
+/// use std::sync::{RWLock, Arc};
+///
+/// let lock1 = Arc::new(RWLock::new(1i));
+/// let lock2 = lock1.clone();
+///
+/// spawn(proc() {
+/// let mut val = lock2.write();
+/// *val = 3;
+/// let val = val.downgrade();
+/// println!("{}", *val);
+/// });
+///
+/// let val = lock1.read();
+/// println!("{}", *val);
+/// ```
+pub struct RWLock<T> {
+ lock: raw::RWLock,
+ failed: UnsafeCell<bool>,
+ data: UnsafeCell<T>,
+}
+
+/// A guard which is created by locking an rwlock in write mode. Through this
+/// guard the underlying data can be accessed.
+pub struct RWLockWriteGuard<'a, T:'a> {
+ // FIXME #12808: strange name to try to avoid interfering with
+ // field accesses of the contained type via Deref
+ _data: &'a mut T,
+ /// Inner condition variable that can be used to sleep on the write mode of
+ /// this rwlock.
+ pub cond: Condvar<'a>,
+}
+
+/// A guard which is created by locking an rwlock in read mode. Through this
+/// guard the underlying data can be accessed.
+pub struct RWLockReadGuard<'a, T:'a> {
+ // FIXME #12808: strange names to try to avoid interfering with
+ // field accesses of the contained type via Deref
+ _data: &'a T,
+ _guard: raw::RWLockReadGuard<'a>,
+}
+
+impl<T: Send + Sync> RWLock<T> {
+ /// Create a reader/writer lock with the supplied data.
+ pub fn new(user_data: T) -> RWLock<T> {
+ RWLock::new_with_condvars(user_data, 1)
+ }
+
+ /// Create a reader/writer lock with the supplied data and a specified number
+ /// of condvars (as sync::RWLock::new_with_condvars).
+ pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock<T> {
+ RWLock {
+ lock: raw::RWLock::new_with_condvars(num_condvars),
+ failed: UnsafeCell::new(false),
+ data: UnsafeCell::new(user_data),
+ }
+ }
+
+ /// Access the underlying data mutably. Locks the rwlock in write mode;
+ /// other readers and writers will block.
+ ///
+ /// # Panics
+ ///
+ /// Panicking while inside the lock will unlock the lock while unwinding, so
+    /// that other tasks won't block forever. As with `Mutex::lock`, it will
+    /// also poison the lock, so subsequent readers and writers will both also
+    /// panic.
+ #[inline]
+ pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a, T> {
+ let guard = self.lock.write();
+
+ // These two accesses are safe because we're guaranteed at this point
+ // that we have exclusive access to this rwlock. We are indeed able to
+ // promote ourselves from &RWLock to `&mut T`
+ let poison = unsafe { &mut *self.failed.get() };
+ let data = unsafe { &mut *self.data.get() };
+
+ RWLockWriteGuard {
+ _data: data,
+ cond: Condvar {
+ name: "RWLock",
+ poison: PoisonOnFail::new(poison, "RWLock"),
+ inner: InnerRWLock(guard),
+ },
+ }
+ }
+
+ /// Access the underlying data immutably. May run concurrently with other
+ /// reading tasks.
+ ///
+ /// # Panics
+ ///
+ /// Panicking will unlock the lock while unwinding. However, unlike all other
+ /// access modes, this will not poison the lock.
+ pub fn read<'a>(&'a self) -> RWLockReadGuard<'a, T> {
+ let guard = self.lock.read();
+ PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
+ RWLockReadGuard {
+ _guard: guard,
+ _data: unsafe { &*self.data.get() },
+ }
+ }
+}
+
+impl<'a, T: Send + Sync> RWLockWriteGuard<'a, T> {
+ /// Consumes this write lock token, returning a new read lock token.
+ ///
+ /// This will allow pending readers to come into the lock.
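+    ///
+    /// A sketch of the intended flow (mirroring the `RWLock` example above):
+    ///
+    /// ```rust,ignore
+    /// let mut guard = lock.write();
+    /// *guard = 3;
+    /// let guard = guard.downgrade(); // readers may now enter
+    /// assert_eq!(*guard, 3);
+    /// ```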
+ pub fn downgrade(self) -> RWLockReadGuard<'a, T> {
+ let RWLockWriteGuard { _data, cond } = self;
+ // convert the data to read-only explicitly
+ let data = &*_data;
+ let guard = match cond.inner {
+ InnerMutex(..) => unreachable!(),
+ InnerRWLock(guard) => guard.downgrade()
+ };
+ RWLockReadGuard { _guard: guard, _data: data }
+ }
+}
+
+impl<'a, T: Send + Sync> Deref<T> for RWLockReadGuard<'a, T> {
+ fn deref<'a>(&'a self) -> &'a T { self._data }
+}
+impl<'a, T: Send + Sync> Deref<T> for RWLockWriteGuard<'a, T> {
+ fn deref<'a>(&'a self) -> &'a T { &*self._data }
+}
+impl<'a, T: Send + Sync> DerefMut<T> for RWLockWriteGuard<'a, T> {
+ fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
+}
+
+/****************************************************************************
+ * Barrier
+ ****************************************************************************/
+
+/// A barrier enables multiple tasks to synchronize the beginning
+/// of some computation.
+///
+/// ```rust
+/// use std::sync::{Arc, Barrier};
+///
+/// let barrier = Arc::new(Barrier::new(10));
+/// for _ in range(0u, 10) {
+/// let c = barrier.clone();
+/// // The same messages will be printed together.
+/// // You will NOT see any interleaving.
+/// spawn(proc() {
+/// println!("before wait");
+/// c.wait();
+/// println!("after wait");
+/// });
+/// }
+/// ```
+pub struct Barrier {
+ lock: Mutex<BarrierState>,
+ num_tasks: uint,
+}
+
+// The inner state of a double barrier: the generation counter lets the
+// barrier be reused safely, so a task cannot race through two wait cycles
+// and observe a stale count.
+struct BarrierState {
+ count: uint,
+ generation_id: uint,
+}
+
+impl Barrier {
+ /// Create a new barrier that can block a given number of tasks.
+ pub fn new(num_tasks: uint) -> Barrier {
+ Barrier {
+ lock: Mutex::new(BarrierState {
+ count: 0,
+ generation_id: 0,
+ }),
+ num_tasks: num_tasks,
+ }
+ }
+
+ /// Block the current task until a certain number of tasks is waiting.
+ pub fn wait(&self) {
+ let mut lock = self.lock.lock();
+ let local_gen = lock.generation_id;
+ lock.count += 1;
+ if lock.count < self.num_tasks {
+ // We need a while loop to guard against spurious wakeups.
+ // http://en.wikipedia.org/wiki/Spurious_wakeup
+ while local_gen == lock.generation_id &&
+ lock.count < self.num_tasks {
+ lock.cond.wait();
+ }
+ } else {
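+            // We're the last task to arrive: reset the count for the next
+            // use of the barrier, start a new generation, and wake everyone.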
+ lock.count = 0;
+ lock.generation_id += 1;
+ lock.cond.broadcast();
+ }
+ }
+}
+
+/****************************************************************************
+ * Tests
+ ****************************************************************************/
+
+#[cfg(test)]
+mod tests {
+ use prelude::*;
+ use comm::Empty;
+ use task;
+ use task::try_future;
+ use sync::Arc;
+
+ use super::{Mutex, Barrier, RWLock};
+
+ #[test]
+ fn test_mutex_arc_condvar() {
+ let arc = Arc::new(Mutex::new(false));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+ task::spawn(proc() {
+ // wait until parent gets in
+ rx.recv();
+ let mut lock = arc2.lock();
+ *lock = true;
+ lock.cond.signal();
+ });
+
+ let lock = arc.lock();
+ tx.send(());
+ assert!(!*lock);
+ while !*lock {
+ lock.cond.wait();
+ }
+ }
+
+ #[test] #[should_fail]
+ fn test_arc_condvar_poison() {
+ let arc = Arc::new(Mutex::new(1i));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+
+ spawn(proc() {
+ rx.recv();
+ let lock = arc2.lock();
+ lock.cond.signal();
+ // Parent should fail when it wakes up.
+ panic!();
+ });
+
+ let lock = arc.lock();
+ tx.send(());
+ while *lock == 1 {
+ lock.cond.wait();
+ }
+ }
+
+ #[test] #[should_fail]
+ fn test_mutex_arc_poison() {
+ let arc = Arc::new(Mutex::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.lock();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.lock();
+ assert_eq!(*lock, 1);
+ }
+
+ #[test]
+ fn test_mutex_arc_nested() {
+ // Tests nested mutexes and access
+ // to underlying data.
+ let arc = Arc::new(Mutex::new(1i));
+ let arc2 = Arc::new(Mutex::new(arc));
+ task::spawn(proc() {
+ let lock = arc2.lock();
+ let lock2 = lock.deref().lock();
+ assert_eq!(*lock2, 1);
+ });
+ }
+
+ #[test]
+ fn test_mutex_arc_access_in_unwind() {
+ let arc = Arc::new(Mutex::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try::<()>(proc() {
+ struct Unwinder {
+ i: Arc<Mutex<int>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ let mut lock = self.i.lock();
+ *lock += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ });
+ let lock = arc.lock();
+ assert_eq!(*lock, 2);
+ }
+
+ #[test] #[should_fail]
+ fn test_rw_arc_poison_wr() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.write();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.read();
+ assert_eq!(*lock, 1);
+ }
+ #[test] #[should_fail]
+ fn test_rw_arc_poison_ww() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.write();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.write();
+ assert_eq!(*lock, 1);
+ }
+ #[test]
+ fn test_rw_arc_no_poison_rr() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.read();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.read();
+ assert_eq!(*lock, 1);
+ }
+ #[test]
+ fn test_rw_arc_no_poison_rw() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.read();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.write();
+ assert_eq!(*lock, 1);
+ }
+ #[test]
+ fn test_rw_arc_no_poison_dr() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try(proc() {
+ let lock = arc2.write().downgrade();
+ assert_eq!(*lock, 2);
+ });
+ let lock = arc.write();
+ assert_eq!(*lock, 1);
+ }
+
+ #[test]
+ fn test_rw_arc() {
+ let arc = Arc::new(RWLock::new(0i));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+
+ task::spawn(proc() {
+ let mut lock = arc2.write();
+ for _ in range(0u, 10) {
+ let tmp = *lock;
+ *lock = -1;
+ task::deschedule();
+ *lock = tmp + 1;
+ }
+ tx.send(());
+ });
+
+ // Readers try to catch the writer in the act
+ let mut children = Vec::new();
+ for _ in range(0u, 5) {
+ let arc3 = arc.clone();
+ children.push(try_future(proc() {
+ let lock = arc3.read();
+ assert!(*lock >= 0);
+ }));
+ }
+
+ // Wait for children to pass their asserts
+ for r in children.iter_mut() {
+ assert!(r.get_ref().is_ok());
+ }
+
+ // Wait for writer to finish
+ rx.recv();
+ let lock = arc.read();
+ assert_eq!(*lock, 10);
+ }
+
+ #[test]
+ fn test_rw_arc_access_in_unwind() {
+ let arc = Arc::new(RWLock::new(1i));
+ let arc2 = arc.clone();
+ let _ = task::try::<()>(proc() {
+ struct Unwinder {
+ i: Arc<RWLock<int>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ let mut lock = self.i.write();
+ *lock += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ });
+ let lock = arc.read();
+ assert_eq!(*lock, 2);
+ }
+
+ #[test]
+ fn test_rw_downgrade() {
+ // (1) A downgrader gets in write mode and does cond.wait.
+ // (2) A writer gets in write mode, sets state to 42, and does signal.
+ // (3) Downgrader wakes, sets state to 31337.
+ // (4) tells writer and all other readers to contend as it downgrades.
+ // (5) Writer attempts to set state back to 42, while downgraded task
+ // and all reader tasks assert that it's 31337.
+ let arc = Arc::new(RWLock::new(0i));
+
+ // Reader tasks
+ let mut reader_convos = Vec::new();
+ for _ in range(0u, 10) {
+ let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
+ reader_convos.push((tx1, rx2));
+ let arcn = arc.clone();
+ task::spawn(proc() {
+ rx1.recv(); // wait for downgrader to give go-ahead
+ let lock = arcn.read();
+ assert_eq!(*lock, 31337);
+ tx2.send(());
+ });
+ }
+
+ // Writer task
+ let arc2 = arc.clone();
+ let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
+ task::spawn(proc() {
+ rx1.recv();
+ {
+ let mut lock = arc2.write();
+ assert_eq!(*lock, 0);
+ *lock = 42;
+ lock.cond.signal();
+ }
+ rx1.recv();
+ {
+ let mut lock = arc2.write();
+ // This shouldn't happen until after the downgrade read
+ // section, and all other readers, finish.
+ assert_eq!(*lock, 31337);
+ *lock = 42;
+ }
+ tx2.send(());
+ });
+
+ // Downgrader (us)
+ let mut lock = arc.write();
+ tx1.send(()); // send to another writer who will wake us up
+ while *lock == 0 {
+ lock.cond.wait();
+ }
+ assert_eq!(*lock, 42);
+ *lock = 31337;
+ // send to other readers
+ for &(ref mut rc, _) in reader_convos.iter_mut() {
+ rc.send(())
+ }
+ let lock = lock.downgrade();
+ // complete handshake with other readers
+ for &(_, ref mut rp) in reader_convos.iter_mut() {
+ rp.recv()
+ }
+ tx1.send(()); // tell writer to try again
+ assert_eq!(*lock, 31337);
+ drop(lock);
+
+ rx2.recv(); // complete handshake with writer
+ }
+
+ #[cfg(test)]
+ fn test_rw_write_cond_downgrade_read_race_helper() {
+ // Tests that when a downgrader hands off the "reader cloud" lock
+ // because of a contending reader, a writer can't race to get it
+ // instead, which would result in readers_and_writers. This tests
+ // the raw module rather than this one, but it's here because an
+ // rwarc gives us extra shared state to help check for the race.
+ let x = Arc::new(RWLock::new(true));
+ let (tx, rx) = channel();
+
+ // writer task
+ let xw = x.clone();
+ task::spawn(proc() {
+ let mut lock = xw.write();
+ tx.send(()); // tell downgrader it's ok to go
+ lock.cond.wait();
+ // The core of the test is here: the condvar reacquire path
+ // must involve order_lock, so that it cannot race with a reader
+ // trying to receive the "reader cloud lock hand-off".
+ *lock = false;
+ });
+
+ rx.recv(); // wait for writer to get in
+
+ let lock = x.write();
+ assert!(*lock);
+ // make writer contend in the cond-reacquire path
+ lock.cond.signal();
+ // make a reader task to trigger the "reader cloud lock" handoff
+ let xr = x.clone();
+ let (tx, rx) = channel();
+ task::spawn(proc() {
+ tx.send(());
+ drop(xr.read());
+ });
+ rx.recv(); // wait for reader task to exist
+
+ let lock = lock.downgrade();
+ // if writer mistakenly got in, make sure it mutates state
+ // before we assert on it
+ for _ in range(0u, 5) { task::deschedule(); }
+ // make sure writer didn't get in.
+ assert!(*lock);
+ }
+ #[test]
+ fn test_rw_write_cond_downgrade_read_race() {
+ // Ideally the above test case would have deschedule statements in it
+ // that helped to expose the race nearly 100% of the time... but adding
+ // deschedules in the intuitively-right locations made it even less
+ // likely, and I wasn't sure why :( . This is a mediocre "next best"
+ // option.
+ for _ in range(0u, 8) {
+ test_rw_write_cond_downgrade_read_race_helper();
+ }
+ }
+
+ /************************************************************************
+ * Barrier tests
+ ************************************************************************/
+ #[test]
+ fn test_barrier() {
+ let barrier = Arc::new(Barrier::new(10));
+ let (tx, rx) = channel();
+
+ for _ in range(0u, 9) {
+ let c = barrier.clone();
+ let tx = tx.clone();
+ spawn(proc() {
+ c.wait();
+ tx.send(true);
+ });
+ }
+
+ // At this point, all spawned tasks should be blocked,
+ // so we shouldn't get anything from the port
+ assert!(match rx.try_recv() {
+ Err(Empty) => true,
+ _ => false,
+ });
+
+ barrier.wait();
+ // Now, the barrier is cleared and we should get data.
+ for _ in range(0u, 9) {
+ rx.recv();
+ }
+ }
+}
#![experimental]
-#[stable]
-pub use core_sync::atomic;
+pub use self::one::{Once, ONCE_INIT};
+
+pub use alloc::arc::{Arc, Weak};
+pub use self::lock::{Mutex, MutexGuard, Condvar, Barrier,
+ RWLock, RWLockReadGuard, RWLockWriteGuard};
-pub use core_sync::{deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
-pub use core_sync::{Arc, Weak, Mutex, MutexGuard, Condvar, Barrier};
-pub use core_sync::{RWLock, RWLockReadGuard, RWLockWriteGuard};
-pub use core_sync::{Semaphore, SemaphoreGuard};
-pub use core_sync::one::{Once, ONCE_INIT};
+// The mutex/rwlock in this module are not meant for reexport
+pub use self::raw::{Semaphore, SemaphoreGuard};
pub use self::future::Future;
pub use self::task_pool::TaskPool;
+// Core building blocks for all primitives in this crate
+
+#[stable]
+pub mod atomic;
+
+// Concurrent data structures
+
+pub mod spsc_queue;
+pub mod mpsc_queue;
+pub mod mpmc_bounded_queue;
+pub mod deque;
+
+// Low-level concurrency primitives
+
+mod raw;
+mod mutex;
+mod one;
+
+// Higher level primitives based on those above
+
+mod lock;
+
+// Task management
+
mod future;
mod task_pool;
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+#![experimental]
+#![allow(missing_docs, dead_code)]
+
+// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+
+use core::prelude::*;
+
+use alloc::arc::Arc;
+use vec::Vec;
+use core::num::UnsignedInt;
+use core::cell::UnsafeCell;
+
+use sync::atomic::{AtomicUint, Relaxed, Release, Acquire};
+
+struct Node<T> {
+ sequence: AtomicUint,
+ value: Option<T>,
+}
+
+struct State<T> {
+ pad0: [u8, ..64],
+ buffer: Vec<UnsafeCell<Node<T>>>,
+ mask: uint,
+ pad1: [u8, ..64],
+ enqueue_pos: AtomicUint,
+ pad2: [u8, ..64],
+ dequeue_pos: AtomicUint,
+ pad3: [u8, ..64],
+}
+
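+/// A handle to a bounded MPMC queue; cloning the handle shares the same
+/// underlying state. A sketch of usage (illustrative only; this module is
+/// marked `#![experimental]` and allows missing docs):
+///
+/// ```rust,ignore
+/// let q = Queue::with_capacity(4);
+/// assert!(q.push(1i));
+/// let q2 = q.clone();            // same underlying buffer
+/// assert_eq!(q2.pop(), Some(1i));
+/// assert_eq!(q.pop(), None);     // empty again
+/// ```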
+pub struct Queue<T> {
+ state: Arc<State<T>>,
+}
+
+impl<T: Send> State<T> {
+ fn with_capacity(capacity: uint) -> State<T> {
+ let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
+ if capacity < 2 {
+ 2u
+ } else {
+ // use next power of 2 as capacity
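+                // (e.g. a requested capacity of 5 is rounded up to 8)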
+ capacity.next_power_of_two()
+ }
+ } else {
+ capacity
+ };
+ let buffer = Vec::from_fn(capacity, |i| {
+            UnsafeCell::new(Node { sequence: AtomicUint::new(i), value: None })
+ });
+ State{
+ pad0: [0, ..64],
+ buffer: buffer,
+ mask: capacity-1,
+ pad1: [0, ..64],
+ enqueue_pos: AtomicUint::new(0),
+ pad2: [0, ..64],
+ dequeue_pos: AtomicUint::new(0),
+ pad3: [0, ..64],
+ }
+ }
+
+ fn push(&self, value: T) -> bool {
+ let mask = self.mask;
+ let mut pos = self.enqueue_pos.load(Relaxed);
+ loop {
+ let node = &self.buffer[pos & mask];
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
+ let diff: int = seq as int - pos as int;
+
+ if diff == 0 {
+ let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
+ if enqueue_pos == pos {
+ unsafe {
+ (*node.get()).value = Some(value);
+ (*node.get()).sequence.store(pos+1, Release);
+ }
+ break
+ } else {
+ pos = enqueue_pos;
+ }
+ } else if diff < 0 {
+ return false
+ } else {
+ pos = self.enqueue_pos.load(Relaxed);
+ }
+ }
+ true
+ }
+
+ fn pop(&self) -> Option<T> {
+ let mask = self.mask;
+ let mut pos = self.dequeue_pos.load(Relaxed);
+ loop {
+ let node = &self.buffer[pos & mask];
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
+ let diff: int = seq as int - (pos + 1) as int;
+ if diff == 0 {
+ let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
+ if dequeue_pos == pos {
+ unsafe {
+ let value = (*node.get()).value.take();
+ (*node.get()).sequence.store(pos + mask + 1, Release);
+ return value
+ }
+ } else {
+ pos = dequeue_pos;
+ }
+ } else if diff < 0 {
+ return None
+ } else {
+ pos = self.dequeue_pos.load(Relaxed);
+ }
+ }
+ }
+}
+
+impl<T: Send> Queue<T> {
+ pub fn with_capacity(capacity: uint) -> Queue<T> {
+ Queue{
+ state: Arc::new(State::with_capacity(capacity))
+ }
+ }
+
+ pub fn push(&self, value: T) -> bool {
+ self.state.push(value)
+ }
+
+ pub fn pop(&self) -> Option<T> {
+ self.state.pop()
+ }
+}
+
+impl<T: Send> Clone for Queue<T> {
+ fn clone(&self) -> Queue<T> {
+ Queue { state: self.state.clone() }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use prelude::*;
+ use super::Queue;
+
+ #[test]
+ fn test() {
+ let nthreads = 8u;
+ let nmsgs = 1000u;
+ let q = Queue::with_capacity(nthreads*nmsgs);
+ assert_eq!(None, q.pop());
+ let (tx, rx) = channel();
+
+ for _ in range(0, nthreads) {
+ let q = q.clone();
+ let tx = tx.clone();
+ spawn(proc() {
+ let q = q;
+ for i in range(0, nmsgs) {
+ assert!(q.push(i));
+ }
+ tx.send(());
+ });
+ }
+
+ let mut completion_rxs = vec![];
+ for _ in range(0, nthreads) {
+ let (tx, rx) = channel();
+ completion_rxs.push(rx);
+ let q = q.clone();
+ spawn(proc() {
+ let q = q;
+ let mut i = 0u;
+ loop {
+ match q.pop() {
+ None => {},
+ Some(_) => {
+ i += 1;
+ if i == nmsgs { break }
+ }
+ }
+ }
+ tx.send(i);
+ });
+ }
+
+ for rx in completion_rxs.iter_mut() {
+ assert_eq!(nmsgs, rx.recv());
+ }
+ for _ in range(0, nthreads) {
+ rx.recv();
+ }
+ }
+}
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+//! A mostly lock-free multi-producer, single consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between tasks, and is also used as the
+//! building block of channels in rust.
+//!
+//! Note that the current implementation of this queue has a caveat in its
+//! `pop` method; see that method's documentation for more information. Due to
+//! this caveat, this queue may not be appropriate for all use-cases.
+
+#![experimental]
+
+// http://www.1024cores.net/home/lock-free-algorithms
+// /queues/non-intrusive-mpsc-node-based-queue
+
+pub use self::PopResult::*;
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::mem;
+use core::cell::UnsafeCell;
+
+use sync::atomic::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+
+/// A result of the `pop` function.
+pub enum PopResult<T> {
+ /// Some data has been popped
+ Data(T),
+ /// The queue is empty
+ Empty,
+    /// The queue is in an inconsistent state. Popping data should succeed, but
+    /// some pushers have yet to make enough progress in order to allow a pop
+    /// to succeed. It is recommended that a `pop()` occur "in the near future"
+    /// in order to see if the sender has made progress or not.
+ Inconsistent,
+}
+
+struct Node<T> {
+ next: AtomicPtr<Node<T>>,
+ value: Option<T>,
+}
+
+/// The multi-producer single-consumer structure. This is not cloneable, but it
+/// may be safely shared so long as it is guaranteed that there is only one
+/// popper at a time (many pushers are allowed).
+pub struct Queue<T> {
+ head: AtomicPtr<Node<T>>,
+ tail: UnsafeCell<*mut Node<T>>,
+}
+
+impl<T> Node<T> {
+ unsafe fn new(v: Option<T>) -> *mut Node<T> {
+ mem::transmute(box Node {
+ next: AtomicPtr::new(0 as *mut Node<T>),
+ value: v,
+ })
+ }
+}
+
+impl<T: Send> Queue<T> {
+ /// Creates a new queue that is safe to share among multiple producers and
+ /// one consumer.
+ pub fn new() -> Queue<T> {
+ let stub = unsafe { Node::new(None) };
+ Queue {
+ head: AtomicPtr::new(stub),
+ tail: UnsafeCell::new(stub),
+ }
+ }
+
+ /// Pushes a new value onto this queue.
+ pub fn push(&self, t: T) {
+ unsafe {
+ let n = Node::new(Some(t));
+ let prev = self.head.swap(n, AcqRel);
+ (*prev).next.store(n, Release);
+ }
+ }
+
+ /// Pops some data from this queue.
+ ///
+ /// Note that the current implementation means that this function cannot
+ /// return `Option<T>`. It is possible for this queue to be in an
+ /// inconsistent state where many pushes have succeeded and completely
+ /// finished, but pops cannot return `Some(t)`. This inconsistent state
+ /// happens when a pusher is pre-empted at an inopportune moment.
+ ///
+    /// This inconsistent state means that this queue does indeed have data, but
+    /// the popper does not currently have access to it.
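+    ///
+    /// A sketch of how a consumer might drive `pop` (an illustrative loop,
+    /// with a hypothetical `handle` function, not part of the original API):
+    ///
+    /// ```rust,ignore
+    /// loop {
+    ///     match q.pop() {
+    ///         Data(t) => handle(t),     // got a value
+    ///         Empty => break,           // nothing queued
+    ///         Inconsistent => continue, // a pusher was pre-empted; retry soon
+    ///     }
+    /// }
+    /// ```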
+ pub fn pop(&self) -> PopResult<T> {
+ unsafe {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ assert!((*tail).value.is_none());
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take().unwrap();
+ let _: Box<Node<T>> = mem::transmute(tail);
+ return Data(ret);
+ }
+
+ if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
+ }
+ }
+
+ /// Attempts to pop data from this queue, but doesn't attempt too hard. This
+ /// will canonicalize inconsistent states to a `None` value.
+ pub fn casual_pop(&self) -> Option<T> {
+ match self.pop() {
+ Data(t) => Some(t),
+ Empty | Inconsistent => None,
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.tail.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Relaxed);
+ let _: Box<Node<T>> = mem::transmute(cur);
+ cur = next;
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use prelude::*;
+
+ use alloc::arc::Arc;
+
+ use super::{Queue, Data, Empty, Inconsistent};
+
+ #[test]
+ fn test_full() {
+ let q = Queue::new();
+ q.push(box 1i);
+ q.push(box 2i);
+ }
+
+ #[test]
+ fn test() {
+ let nthreads = 8u;
+ let nmsgs = 1000u;
+ let q = Queue::new();
+ match q.pop() {
+ Empty => {}
+ Inconsistent | Data(..) => panic!()
+ }
+ let (tx, rx) = channel();
+ let q = Arc::new(q);
+
+ for _ in range(0, nthreads) {
+ let tx = tx.clone();
+ let q = q.clone();
+ spawn(proc() {
+ for i in range(0, nmsgs) {
+ q.push(i);
+ }
+ tx.send(());
+ });
+ }
+
+ let mut i = 0u;
+ while i < nthreads * nmsgs {
+ match q.pop() {
+ Empty | Inconsistent => {},
+ Data(_) => { i += 1 }
+ }
+ }
+ drop(tx);
+ for _ in range(0, nthreads) {
+ rx.recv();
+ }
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A simple native mutex implementation. Warning: this API is likely
+//! to change soon.
+
+#![allow(dead_code)]
+
+use core::prelude::*;
+use alloc::boxed::Box;
+use rustrt::mutex;
+
+pub const LOCKED: uint = 1 << 0;
+pub const BLOCKED: uint = 1 << 1;
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will properly block tasks waiting for the lock to become
+/// available. The mutex can also be statically initialized or created via a
+/// `new` constructor.
+///
+/// # Example
+///
+/// ```rust,ignore
+/// use std::sync::mutex::Mutex;
+///
+/// let m = Mutex::new();
+/// let guard = m.lock();
+/// // do some work
+/// drop(guard); // unlock the lock
+/// ```
+pub struct Mutex {
+ // Note that this static mutex is in a *box*, not inlined into the struct
+ // itself. This is done for memory safety reasons with the usage of a
+ // StaticNativeMutex inside the static mutex above. Once a native mutex has
+ // been used once, its address can never change (it can't be moved). This
+ // mutex type can be safely moved at any time, so to ensure that the native
+ // mutex is used correctly we box the inner lock to give it a constant
+ // address.
+ lock: Box<StaticMutex>,
+}
+
+/// The static mutex type is provided to allow for static allocation of mutexes.
+///
+/// Note that this is a separate type because using a Mutex correctly means that
+/// it needs to have a destructor run. In Rust, statics are not allowed to have
+/// destructors. As a result, a `StaticMutex` has one extra method when compared
+/// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
+/// documentation can be found directly on the method.
+///
+/// # Example
+///
+/// ```rust,ignore
+/// use std::sync::mutex::{StaticMutex, MUTEX_INIT};
+///
+/// static LOCK: StaticMutex = MUTEX_INIT;
+///
+/// {
+/// let _g = LOCK.lock();
+/// // do some productive work
+/// }
+/// // lock is unlocked here.
+/// ```
+pub struct StaticMutex {
+ lock: mutex::StaticNativeMutex,
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+#[must_use]
+pub struct Guard<'a> {
+ guard: mutex::LockGuard<'a>,
+}
+
+fn lift_guard(guard: mutex::LockGuard) -> Guard {
+ Guard { guard: guard }
+}
+
+/// Static initialization of a mutex. This constant can be used to initialize
+/// other mutex constants.
+pub const MUTEX_INIT: StaticMutex = StaticMutex {
+ lock: mutex::NATIVE_MUTEX_INIT
+};
+
+impl StaticMutex {
+ /// Attempts to grab this lock, see `Mutex::try_lock`
+ pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
+ unsafe { self.lock.trylock().map(lift_guard) }
+ }
+
+ /// Acquires this lock, see `Mutex::lock`
+ pub fn lock<'a>(&'a self) -> Guard<'a> {
+ lift_guard(unsafe { self.lock.lock() })
+ }
+
+ /// Deallocates resources associated with this static mutex.
+ ///
+ /// This method is unsafe because it provides no guarantees that there are
+ /// no active users of this mutex, and safety is not guaranteed if there are
+ /// active users of this mutex.
+ ///
+ /// This method is required to ensure that there are no memory leaks on
+ /// *all* platforms. It may be the case that some platforms do not leak
+ /// memory if this method is not called, but this is not guaranteed to be
+ /// true on all platforms.
+ pub unsafe fn destroy(&self) {
+ self.lock.destroy()
+ }
+}
+
+impl Mutex {
+ /// Creates a new mutex in an unlocked state ready for use.
+ pub fn new() -> Mutex {
+ Mutex {
+ lock: box StaticMutex {
+ lock: unsafe { mutex::StaticNativeMutex::new() },
+ }
+ }
+ }
+
+ /// Attempts to acquire this lock.
+ ///
+ /// If the lock could not be acquired at this time, then `None` is returned.
+ /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+ /// guard is dropped.
+ ///
+ /// This function does not block.
+ pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
+ self.lock.try_lock()
+ }
+
+ /// Acquires a mutex, blocking the current task until it is able to do so.
+ ///
+ /// This function will block the local task until it is available to acquire
+ /// the mutex. Upon returning, the task is the only task with the mutex
+ /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+ /// the guard goes out of scope, the mutex will be unlocked.
+ pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
+}
+
+impl Drop for Mutex {
+ fn drop(&mut self) {
+ // This is actually safe b/c we know that there is no further usage of
+ // this mutex (it's up to the user to arrange for a mutex to get
+ // dropped, that's not our job)
+ unsafe { self.lock.destroy() }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+ use super::{Mutex, StaticMutex, MUTEX_INIT};
+
+ #[test]
+ fn smoke() {
+ let m = Mutex::new();
+ drop(m.lock());
+ drop(m.lock());
+ }
+
+ #[test]
+ fn smoke_static() {
+ static M: StaticMutex = MUTEX_INIT;
+ unsafe {
+ drop(M.lock());
+ drop(M.lock());
+ M.destroy();
+ }
+ }
+
+ #[test]
+ fn lots_and_lots() {
+ static M: StaticMutex = MUTEX_INIT;
+ static mut CNT: uint = 0;
+ static J: uint = 1000;
+ static K: uint = 3;
+
+ fn inc() {
+ for _ in range(0, J) {
+ unsafe {
+ let _g = M.lock();
+ CNT += 1;
+ }
+ }
+ }
+
+ let (tx, rx) = channel();
+ for _ in range(0, K) {
+ let tx2 = tx.clone();
+ spawn(proc() { inc(); tx2.send(()); });
+ let tx2 = tx.clone();
+ spawn(proc() { inc(); tx2.send(()); });
+ }
+
+ drop(tx);
+ for _ in range(0, 2 * K) {
+ rx.recv();
+ }
+ assert_eq!(unsafe {CNT}, J * K * 2);
+ unsafe {
+ M.destroy();
+ }
+ }
+
+ #[test]
+ fn trylock() {
+ let m = Mutex::new();
+ assert!(m.try_lock().is_some());
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A "once initialization" primitive
+//!
+//! This primitive is meant to be used to run one-time initialization. An
+//! example use case would be for initializing an FFI library.
+
+use core::prelude::*;
+
+use core::int;
+use core::atomic;
+
+use super::mutex::{StaticMutex, MUTEX_INIT};
+
+/// A synchronization primitive which can be used to run a one-time global
+/// initialization. Useful for one-time initialization for FFI or related
+/// functionality. This type can only be constructed with the `ONCE_INIT`
+/// value.
+///
+/// # Example
+///
+/// ```rust,ignore
+/// use std::sync::one::{Once, ONCE_INIT};
+///
+/// static START: Once = ONCE_INIT;
+///
+/// START.doit(|| {
+/// // run initialization here
+/// });
+/// ```
+pub struct Once {
+ mutex: StaticMutex,
+ cnt: atomic::AtomicInt,
+ lock_cnt: atomic::AtomicInt,
+}
+
+/// Initialization value for static `Once` values.
+pub const ONCE_INIT: Once = Once {
+ mutex: MUTEX_INIT,
+ cnt: atomic::INIT_ATOMIC_INT,
+ lock_cnt: atomic::INIT_ATOMIC_INT,
+};
+
+impl Once {
+ /// Perform an initialization routine once and only once. The given closure
+ /// will be executed if this is the first time `doit` has been called, and
+ /// otherwise the routine will *not* be invoked.
+ ///
+ /// This method will block the calling task if another initialization
+ /// routine is currently running.
+ ///
+ /// When this function returns, it is guaranteed that some initialization
+ /// has run and completed (it may not be the closure specified).
+ pub fn doit(&self, f: ||) {
+ // Optimize common path: load is much cheaper than fetch_add.
+ if self.cnt.load(atomic::SeqCst) < 0 {
+ return
+ }
+
+ // Implementation-wise, this would seem like a fairly trivial primitive.
+ // The stickler part is where our mutexes currently require an
+ // allocation, and usage of a `Once` shouldn't leak this allocation.
+ //
+ // This means that there must be a deterministic destroyer of the mutex
+ // contained within (because it's not needed after the initialization
+ // has run).
+ //
+ // The general scheme here is to gate all future threads once
+ // initialization has completed with a "very negative" count, and to
+        // allow threads through to lock the mutex if they see a non-negative
+ // count. For all threads grabbing the mutex, exactly one of them should
+ // be responsible for unlocking the mutex, and this should only be done
+ // once everyone else is done with the mutex.
+ //
+ // This atomicity is achieved by swapping a very negative value into the
+        // shared count when the initialization routine has completed. The swap
+        // returns the number of threads which will at some point attempt to
+        // acquire the mutex. This count is then squirreled away in a separate
+ // variable, and the last person on the way out of the mutex is then
+ // responsible for destroying the mutex.
+ //
+ // It is crucial that the negative value is swapped in *after* the
+ // initialization routine has completed because otherwise new threads
+ // calling `doit` will return immediately before the initialization has
+ // completed.
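+        //
+        // As an illustrative walk-through (the numbers are hypothetical): if
+        // three threads race past the fast path, `cnt` counts up to 3. The
+        // thread that wins the mutex runs `f`, swaps `int::MIN` into `cnt`
+        // (the swap returning 3), and stores that 3 into `lock_cnt`. Each of
+        // the three threads then does `lock_cnt.fetch_add(-1)` on the way
+        // out; the one whose fetch_add returns 1 knows it is last and
+        // destroys the mutex.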
+
+ let prev = self.cnt.fetch_add(1, atomic::SeqCst);
+ if prev < 0 {
+ // Make sure we never overflow, we'll never have int::MIN
+ // simultaneous calls to `doit` to make this value go back to 0
+ self.cnt.store(int::MIN, atomic::SeqCst);
+ return
+ }
+
+ // If the count is negative, then someone else finished the job,
+ // otherwise we run the job and record how many people will try to grab
+ // this lock
+ let guard = self.mutex.lock();
+ if self.cnt.load(atomic::SeqCst) > 0 {
+ f();
+ let prev = self.cnt.swap(int::MIN, atomic::SeqCst);
+ self.lock_cnt.store(prev, atomic::SeqCst);
+ }
+ drop(guard);
+
+ // Last one out cleans up after everyone else, no leaks!
+ if self.lock_cnt.fetch_add(-1, atomic::SeqCst) == 1 {
+ unsafe { self.mutex.destroy() }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+ use task;
+ use super::{ONCE_INIT, Once};
+
+ #[test]
+ fn smoke_once() {
+ static O: Once = ONCE_INIT;
+ let mut a = 0i;
+ O.doit(|| a += 1);
+ assert_eq!(a, 1);
+ O.doit(|| a += 1);
+ assert_eq!(a, 1);
+ }
+
+ #[test]
+ fn stampede_once() {
+ static O: Once = ONCE_INIT;
+ static mut run: bool = false;
+
+ let (tx, rx) = channel();
+ for _ in range(0u, 10) {
+ let tx = tx.clone();
+ spawn(proc() {
+ for _ in range(0u, 4) { task::deschedule() }
+ unsafe {
+ O.doit(|| {
+ assert!(!run);
+ run = true;
+ });
+ assert!(run);
+ }
+ tx.send(());
+ });
+ }
+
+ unsafe {
+ O.doit(|| {
+ assert!(!run);
+ run = true;
+ });
+ assert!(run);
+ }
+
+ for _ in range(0u, 10) {
+ rx.recv();
+ }
+ }
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Raw concurrency primitives you know and love.
+//!
+//! These primitives are not recommended for general use, but are provided for
+//! flavorful use-cases. It is recommended to use the types at the top of the
+//! `sync` module, which wrap values directly and provide safer abstractions
+//! for containing data.
+//! containing data.
+
+// A side-effect of merging libsync into libstd; will go away once
+// libsync rewrite lands
+#![allow(dead_code)]
+
+use core::prelude::*;
+use self::ReacquireOrderLock::*;
+
+use core::atomic;
+use core::finally::Finally;
+use core::kinds::marker;
+use core::mem;
+use core::cell::UnsafeCell;
+use vec::Vec;
+
+use super::mutex;
+use comm::{Receiver, Sender, channel};
+
+/****************************************************************************
+ * Internals
+ ****************************************************************************/
+
+// Each waiting task receives on one of these.
+type WaitEnd = Receiver<()>;
+type SignalEnd = Sender<()>;
+// A doubly-ended queue of waiting tasks.
+struct WaitQueue {
+ head: Receiver<SignalEnd>,
+ tail: Sender<SignalEnd>,
+}
+
+impl WaitQueue {
+ fn new() -> WaitQueue {
+ let (block_tail, block_head) = channel();
+ WaitQueue { head: block_head, tail: block_tail }
+ }
+
+ // Signals one live task from the queue.
+ fn signal(&self) -> bool {
+ match self.head.try_recv() {
+ Ok(ch) => {
+ // Send a wakeup signal. If the waiter was killed, its port will
+ // have closed. Keep trying until we get a live task.
+ if ch.send_opt(()).is_ok() {
+ true
+ } else {
+ self.signal()
+ }
+ }
+ _ => false
+ }
+ }
+
+ fn broadcast(&self) -> uint {
+ let mut count = 0;
+ loop {
+ match self.head.try_recv() {
+ Ok(ch) => {
+ if ch.send_opt(()).is_ok() {
+ count += 1;
+ }
+ }
+ _ => break
+ }
+ }
+ count
+ }
+
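+    // Enqueue a fresh signalling channel on the queue and hand back the
+    // receiving end for the blocked task to wait on.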
+ fn wait_end(&self) -> WaitEnd {
+ let (signal_end, wait_end) = channel();
+ self.tail.send(signal_end);
+ wait_end
+ }
+}
+
+// The building-block used to make semaphores, mutexes, and rwlocks.
+struct Sem<Q> {
+ lock: mutex::Mutex,
+    // N.B., we need Sem to be `Sync`, but the WaitQueue type is not send/share
+ // (for good reason). We have an internal invariant on this semaphore,
+ // however, that the queue is never accessed outside of a locked
+ // context.
+ inner: UnsafeCell<SemInner<Q>>
+}
+
+struct SemInner<Q> {
+ count: int,
+ waiters: WaitQueue,
+ // Can be either unit or another waitqueue. Some sems shouldn't come with
+ // a condition variable attached, others should.
+ blocked: Q,
+}
+
+#[must_use]
+struct SemGuard<'a, Q:'a> {
+ sem: &'a Sem<Q>,
+}
+
+impl<Q: Send> Sem<Q> {
+ fn new(count: int, q: Q) -> Sem<Q> {
+ assert!(count >= 0,
+ "semaphores cannot be initialized with negative values");
+ Sem {
+ lock: mutex::Mutex::new(),
+ inner: UnsafeCell::new(SemInner {
+ waiters: WaitQueue::new(),
+ count: count,
+ blocked: q,
+ })
+ }
+ }
+
+ unsafe fn with(&self, f: |&mut SemInner<Q>|) {
+ let _g = self.lock.lock();
+ // This &mut is safe because, due to the lock, we are the only one who can touch the data
+ f(&mut *self.inner.get())
+ }
+
+ pub fn acquire(&self) {
+ unsafe {
+ let mut waiter_nobe = None;
+ self.with(|state| {
+ state.count -= 1;
+ if state.count < 0 {
+ // Create waiter nobe, enqueue ourself, and tell
+ // outer scope we need to block.
+ waiter_nobe = Some(state.waiters.wait_end());
+ }
+ });
+ // Uncomment if you wish to test for sem races. Not
+ // valgrind-friendly.
+ /* for _ in range(0u, 1000) { task::deschedule(); } */
+ // Need to wait outside the exclusive.
+ if waiter_nobe.is_some() {
+ let _ = waiter_nobe.unwrap().recv();
+ }
+ }
+ }
+
+ pub fn release(&self) {
+ unsafe {
+ self.with(|state| {
+ state.count += 1;
+ if state.count <= 0 {
+ state.waiters.signal();
+ }
+ })
+ }
+ }
+
+ pub fn access<'a>(&'a self) -> SemGuard<'a, Q> {
+ self.acquire();
+ SemGuard { sem: self }
+ }
+}
+
+#[unsafe_destructor]
+impl<'a, Q: Send> Drop for SemGuard<'a, Q> {
+ fn drop(&mut self) {
+ self.sem.release();
+ }
+}
+
+impl Sem<Vec<WaitQueue>> {
+ fn new_and_signal(count: int, num_condvars: uint) -> Sem<Vec<WaitQueue>> {
+ let mut queues = Vec::new();
+ for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); }
+ Sem::new(count, queues)
+ }
+
+ // The only other places that condvars get built are rwlock.write_cond()
+ // and rwlock_write_mode.
+ pub fn access_cond<'a>(&'a self) -> SemCondGuard<'a> {
+ SemCondGuard {
+ guard: self.access(),
+ cvar: Condvar { sem: self, order: Nothing, nocopy: marker::NoCopy },
+ }
+ }
+}
+
+// FIXME(#3598): Want to use an Option down below, but we need a custom enum
+// that's not polymorphic to get around the fact that lifetimes are invariant
+// inside of type parameters.
+enum ReacquireOrderLock<'a> {
+ Nothing, // c.c
+ Just(&'a Semaphore),
+}
+
+/// A mechanism for atomic-unlock-and-deschedule blocking and signalling.
+pub struct Condvar<'a> {
+ // The 'Sem' object associated with this condvar. This is the one that's
+ // atomically-unlocked-and-descheduled upon and reacquired during wakeup.
+ sem: &'a Sem<Vec<WaitQueue> >,
+ // This is (can be) an extra semaphore which is held around the reacquire
+ // operation on the first one. This is only used in cvars associated with
+ // rwlocks, and is needed to ensure that, when a downgrader is trying to
+ // hand off the access lock (which would be the first field, here), a 2nd
+ // writer waking up from a cvar wait can't race with a reader to steal it,
+ // See the comment in write_cond for more detail.
+ order: ReacquireOrderLock<'a>,
+ // Make sure condvars are non-copyable.
+ nocopy: marker::NoCopy,
+}
+
+impl<'a> Condvar<'a> {
+ /// Atomically drop the associated lock, and block until a signal is sent.
+ ///
+ /// # Panics
+ ///
+ /// A task which is killed while waiting on a condition variable will wake
+ /// up, panic, and unlock the associated lock as it unwinds.
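+    ///
+    /// A sketch of the usual pattern, via the wrapped `Mutex` in the parent
+    /// module (mirroring the tests in `lock.rs`):
+    ///
+    /// ```rust,ignore
+    /// let guard = mutex.lock();
+    /// while !*guard {
+    ///     guard.cond.wait(); // unlocks, blocks, then reacquires the lock
+    /// }
+    /// ```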
+ pub fn wait(&self) { self.wait_on(0) }
+
+ /// As wait(), but can specify which of multiple condition variables to
+ /// wait on. Only a signal_on() or broadcast_on() with the same condvar_id
+ /// will wake this thread.
+ ///
+ /// The associated lock must have been initialised with an appropriate
+ /// number of condvars. The condvar_id must be between 0 and num_condvars-1
+ /// or else this call will panic.
+ ///
+ /// wait() is equivalent to wait_on(0).
+ pub fn wait_on(&self, condvar_id: uint) {
+ let mut wait_end = None;
+ let mut out_of_bounds = None;
+ // Release lock, 'atomically' enqueuing ourselves in so doing.
+ unsafe {
+ self.sem.with(|state| {
+ if condvar_id < state.blocked.len() {
+ // Drop the lock.
+ state.count += 1;
+ if state.count <= 0 {
+ state.waiters.signal();
+ }
+ // Create waiter nobe, and enqueue ourself to
+ // be woken up by a signaller.
+ wait_end = Some(state.blocked[condvar_id].wait_end());
+ } else {
+ out_of_bounds = Some(state.blocked.len());
+ }
+ })
+ }
+
+ // If deschedule checks start getting inserted anywhere, we can be
+ // killed before or after enqueueing.
+ check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || {
+ // Unconditionally "block". (Might not actually block if a
+ // signaller already sent -- I mean 'unconditionally' in contrast
+ // with acquire().)
+ (|| {
+ let _ = wait_end.take().unwrap().recv();
+ }).finally(|| {
+ // Reacquire the condvar.
+ match self.order {
+ Just(lock) => {
+ let _g = lock.access();
+ self.sem.acquire();
+ }
+ Nothing => self.sem.acquire(),
+ }
+ })
+ })
+ }
+
+ /// Wake up a blocked task. Returns false if there was no blocked task.
+ pub fn signal(&self) -> bool { self.signal_on(0) }
+
+ /// As signal, but with a specified condvar_id. See wait_on.
+ pub fn signal_on(&self, condvar_id: uint) -> bool {
+ unsafe {
+ let mut out_of_bounds = None;
+ let mut result = false;
+ self.sem.with(|state| {
+ if condvar_id < state.blocked.len() {
+ result = state.blocked[condvar_id].signal();
+ } else {
+ out_of_bounds = Some(state.blocked.len());
+ }
+ });
+ check_cvar_bounds(out_of_bounds,
+ condvar_id,
+ "cond.signal_on()",
+ || result)
+ }
+ }
+
+ /// Wake up all blocked tasks. Returns the number of tasks woken.
+ pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
+
+ /// As broadcast, but with a specified condvar_id. See wait_on.
+ pub fn broadcast_on(&self, condvar_id: uint) -> uint {
+ let mut out_of_bounds = None;
+ let mut queue = None;
+ unsafe {
+ self.sem.with(|state| {
+ if condvar_id < state.blocked.len() {
+ // To avoid :broadcast_heavy, we make a new waitqueue,
+ // swap it out with the old one, and broadcast on the
+ // old one outside of the little-lock.
+ queue = Some(mem::replace(&mut state.blocked[condvar_id],
+ WaitQueue::new()));
+ } else {
+ out_of_bounds = Some(state.blocked.len());
+ }
+ });
+ check_cvar_bounds(out_of_bounds,
+ condvar_id,
+                              "cond.broadcast_on()",
+ || {
+ queue.take().unwrap().broadcast()
+ })
+ }
+ }
+}
+
+// Checks whether a condvar ID was out of bounds, and panics if so, or does
+// something else next on success.
+#[inline]
+fn check_cvar_bounds<U>(
+ out_of_bounds: Option<uint>,
+ id: uint,
+ act: &str,
+ blk: || -> U)
+ -> U {
+ match out_of_bounds {
+ Some(0) =>
+ panic!("{} with illegal ID {} - this lock has no condvars!", act, id),
+ Some(length) =>
+ panic!("{} with illegal ID {} - ID must be less than {}", act, id, length),
+ None => blk()
+ }
+}
+
+#[must_use]
+struct SemCondGuard<'a> {
+ guard: SemGuard<'a, Vec<WaitQueue>>,
+ cvar: Condvar<'a>,
+}
+
+/****************************************************************************
+ * Semaphores
+ ****************************************************************************/
+
+/// A counting, blocking, bounded-waiting semaphore.
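+///
+/// # Example
+///
+/// A sketch using the `Semaphore` reexported at `std::sync` (illustrative
+/// rather than normative):
+///
+/// ```rust,ignore
+/// use std::sync::Semaphore;
+///
+/// let sem = Semaphore::new(2);
+/// {
+///     let _g = sem.access(); // take one of the two resources
+///     // at most one other task can hold a resource concurrently
+/// } // resource released when the guard drops
+/// ```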
+pub struct Semaphore {
+ sem: Sem<()>,
+}
+
+/// An RAII guard used to represent an acquired resource to a semaphore. When
+/// dropped, this value will release the resource back to the semaphore.
+#[must_use]
+pub struct SemaphoreGuard<'a> {
+ _guard: SemGuard<'a, ()>,
+}
+
+impl Semaphore {
+ /// Create a new semaphore with the specified count.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `count` is negative.
+ pub fn new(count: int) -> Semaphore {
+ Semaphore { sem: Sem::new(count, ()) }
+ }
+
+ /// Acquire a resource represented by the semaphore. Blocks if necessary
+ /// until resource(s) become available.
+ pub fn acquire(&self) { self.sem.acquire() }
+
+ /// Release a held resource represented by the semaphore. Wakes a blocked
+ /// contending task, if any exist. Won't block the caller.
+ pub fn release(&self) { self.sem.release() }
+
+ /// Acquire a resource of this semaphore, returning an RAII guard which will
+ /// release the resource when dropped.
+ pub fn access<'a>(&'a self) -> SemaphoreGuard<'a> {
+ SemaphoreGuard { _guard: self.sem.access() }
+ }
+}
+
+/****************************************************************************
+ * Mutexes
+ ****************************************************************************/
+
+/// A blocking, bounded-waiting, mutual exclusion lock with an associated
+/// FIFO condition variable.
+///
+/// # Panics
+///
+/// A task which panics while holding a mutex will unlock the mutex as it
+/// unwinds.
+pub struct Mutex {
+ sem: Sem<Vec<WaitQueue>>,
+}
+
+/// An RAII structure which is used to gain access to a mutex's condition
+/// variable. Additionally, when a value of this type is dropped, the
+/// corresponding mutex is also unlocked.
+#[must_use]
+pub struct MutexGuard<'a> {
+ _guard: SemGuard<'a, Vec<WaitQueue>>,
+ /// Inner condition variable which is connected to the outer mutex, and can
+ /// be used for atomic-unlock-and-deschedule.
+ pub cond: Condvar<'a>,
+}
+
+impl Mutex {
+ /// Create a new mutex, with one associated condvar.
+ pub fn new() -> Mutex { Mutex::new_with_condvars(1) }
+
+ /// Create a new mutex, with a specified number of associated condvars. This
+ /// will allow calling wait_on/signal_on/broadcast_on with condvar IDs
+    /// between 0 and num_condvars-1. (If num_condvars is 0, locking will still
+    /// be allowed, but any operations on the condvar will panic.)
+ pub fn new_with_condvars(num_condvars: uint) -> Mutex {
+ Mutex { sem: Sem::new_and_signal(1, num_condvars) }
+ }
+
+ /// Acquires ownership of this mutex, returning an RAII guard which will
+ /// unlock the mutex when dropped. The associated condition variable can
+ /// also be accessed through the returned guard.
+ pub fn lock<'a>(&'a self) -> MutexGuard<'a> {
+ let SemCondGuard { guard, cvar } = self.sem.access_cond();
+ MutexGuard { _guard: guard, cond: cvar }
+ }
+}
+
+/****************************************************************************
+ * Reader-writer locks
+ ****************************************************************************/
+
+// NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
+
+/// A blocking, no-starvation, reader-writer lock with an associated condvar.
+///
+/// # Panics
+///
+/// A task which panics while holding an rwlock will unlock the rwlock as it
+/// unwinds.
+pub struct RWLock {
+ order_lock: Semaphore,
+ access_lock: Sem<Vec<WaitQueue>>,
+
+ // The only way the count flag is ever accessed is with xadd. Since it is
+ // a read-modify-write operation, multiple xadds on different cores will
+ // always be consistent with respect to each other, so a monotonic/relaxed
+ // consistency ordering suffices (i.e., no extra barriers are needed).
+ //
+ // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
+ // acquire/release orderings superfluously. Change these someday.
+ read_count: atomic::AtomicUint,
+}
+
+/// An RAII helper which is created by acquiring a read lock on an RWLock. When
+/// dropped, this will unlock the RWLock.
+#[must_use]
+pub struct RWLockReadGuard<'a> {
+ lock: &'a RWLock,
+}
+
+/// An RAII helper which is created by acquiring a write lock on an RWLock. When
+/// dropped, this will unlock the RWLock.
+///
+/// A value of this type can also be consumed to downgrade to a read-only lock.
+#[must_use]
+pub struct RWLockWriteGuard<'a> {
+ lock: &'a RWLock,
+ /// Inner condition variable that is connected to the write-mode of the
+ /// outer rwlock.
+ pub cond: Condvar<'a>,
+}
+
+impl RWLock {
+ /// Create a new rwlock, with one associated condvar.
+ pub fn new() -> RWLock { RWLock::new_with_condvars(1) }
+
+ /// Create a new rwlock, with a specified number of associated condvars.
+    /// Similar to `Mutex::new_with_condvars`.
+ pub fn new_with_condvars(num_condvars: uint) -> RWLock {
+ RWLock {
+ order_lock: Semaphore::new(1),
+ access_lock: Sem::new_and_signal(1, num_condvars),
+ read_count: atomic::AtomicUint::new(0),
+ }
+ }
+
+ /// Acquires a read-lock, returning an RAII guard that will unlock the lock
+ /// when dropped. Calls to 'read' from other tasks may run concurrently with
+ /// this one.
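+    ///
+    /// A sketch matching the `write` example below:
+    ///
+    /// ```{rust,ignore}
+    /// use std::sync::raw::RWLock;
+    ///
+    /// let lock = RWLock::new();
+    /// let read = lock.read();
+    /// // ... shared access ...
+    /// drop(read);
+    /// ```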
+ pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
+ let _guard = self.order_lock.access();
+ let old_count = self.read_count.fetch_add(1, atomic::Acquire);
+ if old_count == 0 {
+ self.access_lock.acquire();
+ }
+ RWLockReadGuard { lock: self }
+ }
+
+ /// Acquire a write-lock, returning an RAII guard that will unlock the lock
+ /// when dropped. No calls to 'read' or 'write' from other tasks will run
+ /// concurrently with this one.
+ ///
+ /// You can also downgrade a write to a read by calling the `downgrade`
+ /// method on the returned guard. Additionally, the guard will contain a
+ /// `Condvar` attached to this lock.
+ ///
+ /// # Example
+ ///
+ /// ```{rust,ignore}
+ /// use std::sync::raw::RWLock;
+ ///
+ /// let lock = RWLock::new();
+ /// let write = lock.write();
+ /// // ... exclusive access ...
+ /// let read = write.downgrade();
+ /// // ... shared access ...
+ /// drop(read);
+ /// ```
+ pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a> {
+ let _g = self.order_lock.access();
+ self.access_lock.acquire();
+
+ // It's important to thread our order lock into the condvar, so that
+ // when a cond.wait() wakes up, it uses it while reacquiring the
+ // access lock. If we permitted a waking-up writer to "cut in line",
+ // there could arise a subtle race when a downgrader attempts to hand
+ // off the reader cloud lock to a waiting reader. This race is tested
+ // in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like:
+ // T1 (writer) T2 (downgrader) T3 (reader)
+ // [in cond.wait()]
+ // [locks for writing]
+ // [holds access_lock]
+ // [is signalled, perhaps by
+ // downgrader or a 4th thread]
+ // tries to lock access(!)
+ // lock order_lock
+ // xadd read_count[0->1]
+ // tries to lock access
+ // [downgrade]
+ // xadd read_count[1->2]
+ // unlock access
+ // Since T1 contended on the access lock before T3 did, it will steal
+ // the lock handoff. Adding order_lock in the condvar reacquire path
+ // solves this because T1 will hold order_lock while waiting on access,
+ // which will cause T3 to have to wait until T1 finishes its write,
+ // which can't happen until T2 finishes the downgrade-read entirely.
+ // The astute reader will also note that making waking writers use the
+ // order_lock is better for not starving readers.
+ RWLockWriteGuard {
+ lock: self,
+ cond: Condvar {
+ sem: &self.access_lock,
+ order: Just(&self.order_lock),
+ nocopy: marker::NoCopy,
+ }
+ }
+ }
+}
+
+impl<'a> RWLockWriteGuard<'a> {
+ /// Consumes this write lock and converts it into a read lock.
+ pub fn downgrade(self) -> RWLockReadGuard<'a> {
+ let lock = self.lock;
+ // Don't run the destructor of the write guard, we're in charge of
+ // things from now on
+ unsafe { mem::forget(self) }
+
+ let old_count = lock.read_count.fetch_add(1, atomic::Release);
+ // If another reader was already blocking, we need to hand-off
+ // the "reader cloud" access lock to them.
+ if old_count != 0 {
+ // Guaranteed not to let another writer in, because
+ // another reader was holding the order_lock. Hence they
+ // must be the one to get the access_lock (because all
+ // access_locks are acquired with order_lock held). See
+ // the comment in write_cond for more justification.
+ lock.access_lock.release();
+ }
+ RWLockReadGuard { lock: lock }
+ }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for RWLockWriteGuard<'a> {
+ fn drop(&mut self) {
+ self.lock.access_lock.release();
+ }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for RWLockReadGuard<'a> {
+ fn drop(&mut self) {
+ let old_count = self.lock.read_count.fetch_sub(1, atomic::Release);
+ assert!(old_count > 0);
+ if old_count == 1 {
+ // Note: this release used to be outside of a locked access
+ // to exclusive-protected state. If this code is ever
+ // converted back to such (instead of using atomic ops),
+ // this access MUST NOT go inside the exclusive access.
+ self.lock.access_lock.release();
+ }
+ }
+}
+
+/****************************************************************************
+ * Tests
+ ****************************************************************************/
+
+#[cfg(test)]
+mod tests {
+ pub use self::RWLockMode::*;
+
+ use sync::Arc;
+ use prelude::*;
+ use super::{Semaphore, Mutex, RWLock, Condvar};
+
+ use mem;
+ use result;
+ use task;
+
+ /************************************************************************
+ * Semaphore tests
+ ************************************************************************/
+ #[test]
+ fn test_sem_acquire_release() {
+ let s = Semaphore::new(1);
+ s.acquire();
+ s.release();
+ s.acquire();
+ }
+ #[test]
+ fn test_sem_basic() {
+ let s = Semaphore::new(1);
+ let _g = s.access();
+ }
+ #[test]
+ #[should_fail]
+ fn test_sem_basic2() {
+ Semaphore::new(-1);
+ }
+ #[test]
+ fn test_sem_as_mutex() {
+ let s = Arc::new(Semaphore::new(1));
+ let s2 = s.clone();
+ task::spawn(proc() {
+ let _g = s2.access();
+ for _ in range(0u, 5) { task::deschedule(); }
+ });
+ let _g = s.access();
+ for _ in range(0u, 5) { task::deschedule(); }
+ }
+ #[test]
+ fn test_sem_as_cvar() {
+ /* Child waits and parent signals */
+ let (tx, rx) = channel();
+ let s = Arc::new(Semaphore::new(0));
+ let s2 = s.clone();
+ task::spawn(proc() {
+ s2.acquire();
+ tx.send(());
+ });
+ for _ in range(0u, 5) { task::deschedule(); }
+ s.release();
+ let _ = rx.recv();
+
+ /* Parent waits and child signals */
+ let (tx, rx) = channel();
+ let s = Arc::new(Semaphore::new(0));
+ let s2 = s.clone();
+ task::spawn(proc() {
+ for _ in range(0u, 5) { task::deschedule(); }
+ s2.release();
+ let _ = rx.recv();
+ });
+ s.acquire();
+ tx.send(());
+ }
+ #[test]
+ fn test_sem_multi_resource() {
+ // Parent and child both get in the critical section at the same
+ // time, and shake hands.
+ let s = Arc::new(Semaphore::new(2));
+ let s2 = s.clone();
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ task::spawn(proc() {
+ let _g = s2.access();
+ let _ = rx2.recv();
+ tx1.send(());
+ });
+ let _g = s.access();
+ tx2.send(());
+ let _ = rx1.recv();
+ }
+ #[test]
+ fn test_sem_runtime_friendly_blocking() {
+ // Force the runtime to schedule two threads on the same sched_loop.
+ // When one blocks, it should schedule the other one.
+ let s = Arc::new(Semaphore::new(1));
+ let s2 = s.clone();
+ let (tx, rx) = channel();
+ {
+ let _g = s.access();
+ task::spawn(proc() {
+ tx.send(());
+ drop(s2.access());
+ tx.send(());
+ });
+ rx.recv(); // wait for child to come alive
+ for _ in range(0u, 5) { task::deschedule(); } // let the child contend
+ }
+ rx.recv(); // wait for child to be done
+ }
+ /************************************************************************
+ * Mutex tests
+ ************************************************************************/
+ #[test]
+ fn test_mutex_lock() {
+ // Unsafely achieve shared state, and do the textbook
+ // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance.
+ let (tx, rx) = channel();
+ let m = Arc::new(Mutex::new());
+ let m2 = m.clone();
+ let mut sharedstate = box 0;
+ {
+ let ptr: *mut int = &mut *sharedstate;
+ task::spawn(proc() {
+ access_shared(ptr, &m2, 10);
+ tx.send(());
+ });
+ }
+ {
+ access_shared(&mut *sharedstate, &m, 10);
+ let _ = rx.recv();
+
+ assert_eq!(*sharedstate, 20);
+ }
+
+ fn access_shared(sharedstate: *mut int, m: &Arc<Mutex>, n: uint) {
+ for _ in range(0u, n) {
+ let _g = m.lock();
+ let oldval = unsafe { *sharedstate };
+ task::deschedule();
+ unsafe { *sharedstate = oldval + 1; }
+ }
+ }
+ }
+ #[test]
+ fn test_mutex_cond_wait() {
+ let m = Arc::new(Mutex::new());
+
+ // Child wakes up parent
+ {
+ let lock = m.lock();
+ let m2 = m.clone();
+ task::spawn(proc() {
+ let lock = m2.lock();
+ let woken = lock.cond.signal();
+ assert!(woken);
+ });
+ lock.cond.wait();
+ }
+ // Parent wakes up child
+ let (tx, rx) = channel();
+ let m3 = m.clone();
+ task::spawn(proc() {
+ let lock = m3.lock();
+ tx.send(());
+ lock.cond.wait();
+ tx.send(());
+ });
+ rx.recv(); // Wait until child gets in the mutex
+ {
+ let lock = m.lock();
+ let woken = lock.cond.signal();
+ assert!(woken);
+ }
+ rx.recv(); // Wait until child wakes up
+ }
+
+ fn test_mutex_cond_broadcast_helper(num_waiters: uint) {
+ let m = Arc::new(Mutex::new());
+ let mut rxs = Vec::new();
+
+ for _ in range(0u, num_waiters) {
+ let mi = m.clone();
+ let (tx, rx) = channel();
+ rxs.push(rx);
+ task::spawn(proc() {
+ let lock = mi.lock();
+ tx.send(());
+ lock.cond.wait();
+ tx.send(());
+ });
+ }
+
+ // wait until all children get in the mutex
+ for rx in rxs.iter_mut() { rx.recv(); }
+ {
+ let lock = m.lock();
+ let num_woken = lock.cond.broadcast();
+ assert_eq!(num_woken, num_waiters);
+ }
+ // wait until all children wake up
+ for rx in rxs.iter_mut() { rx.recv(); }
+ }
+ #[test]
+ fn test_mutex_cond_broadcast() {
+ test_mutex_cond_broadcast_helper(12);
+ }
+ #[test]
+ fn test_mutex_cond_broadcast_none() {
+ test_mutex_cond_broadcast_helper(0);
+ }
+ #[test]
+ fn test_mutex_cond_no_waiter() {
+ let m = Arc::new(Mutex::new());
+ let m2 = m.clone();
+ let _ = task::try(proc() {
+ drop(m.lock());
+ });
+ let lock = m2.lock();
+ assert!(!lock.cond.signal());
+ }
+ #[test]
+ fn test_mutex_killed_simple() {
+ use any::Any;
+
+ // Mutex must get automatically unlocked if panicked/killed within.
+ let m = Arc::new(Mutex::new());
+ let m2 = m.clone();
+
+ let result: result::Result<(), Box<Any + Send>> = task::try(proc() {
+ let _lock = m2.lock();
+ panic!();
+ });
+ assert!(result.is_err());
+ // child task must have finished by the time try returns
+ drop(m.lock());
+ }
+ #[test]
+ fn test_mutex_cond_signal_on_0() {
+ // Tests that signal_on(0) is equivalent to signal().
+ let m = Arc::new(Mutex::new());
+ let lock = m.lock();
+ let m2 = m.clone();
+ task::spawn(proc() {
+ let lock = m2.lock();
+ lock.cond.signal_on(0);
+ });
+ lock.cond.wait();
+ }
+ #[test]
+ fn test_mutex_no_condvars() {
+ let result = task::try(proc() {
+ let m = Mutex::new_with_condvars(0);
+ m.lock().cond.wait();
+ });
+ assert!(result.is_err());
+ let result = task::try(proc() {
+ let m = Mutex::new_with_condvars(0);
+ m.lock().cond.signal();
+ });
+ assert!(result.is_err());
+ let result = task::try(proc() {
+ let m = Mutex::new_with_condvars(0);
+ m.lock().cond.broadcast();
+ });
+ assert!(result.is_err());
+ }
+ /************************************************************************
+ * Reader/writer lock tests
+ ************************************************************************/
+ #[cfg(test)]
+ pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead }
+ #[cfg(test)]
+ fn lock_rwlock_in_mode(x: &Arc<RWLock>, mode: RWLockMode, blk: ||) {
+ match mode {
+ Read => { let _g = x.read(); blk() }
+ Write => { let _g = x.write(); blk() }
+ Downgrade => { let _g = x.write(); blk() }
+ DowngradeRead => { let _g = x.write().downgrade(); blk() }
+ }
+ }
+ #[cfg(test)]
+ fn test_rwlock_exclusion(x: Arc<RWLock>,
+ mode1: RWLockMode,
+ mode2: RWLockMode) {
+ // Test mutual exclusion between readers and writers. Just like the
+ // mutex mutual exclusion test, a ways above.
+ let (tx, rx) = channel();
+ let x2 = x.clone();
+ let mut sharedstate = box 0;
+ {
+ let ptr: *const int = &*sharedstate;
+ task::spawn(proc() {
+ let sharedstate: &mut int =
+ unsafe { mem::transmute(ptr) };
+ access_shared(sharedstate, &x2, mode1, 10);
+ tx.send(());
+ });
+ }
+ {
+ access_shared(&mut *sharedstate, &x, mode2, 10);
+ let _ = rx.recv();
+
+ assert_eq!(*sharedstate, 20);
+ }
+
+ fn access_shared(sharedstate: &mut int, x: &Arc<RWLock>,
+ mode: RWLockMode, n: uint) {
+ for _ in range(0u, n) {
+ lock_rwlock_in_mode(x, mode, || {
+ let oldval = *sharedstate;
+ task::deschedule();
+ *sharedstate = oldval + 1;
+ })
+ }
+ }
+ }
+ #[test]
+ fn test_rwlock_readers_wont_modify_the_data() {
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Read);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Downgrade);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Read);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write);
+ }
+ #[test]
+ fn test_rwlock_writers_and_writers() {
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Downgrade);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write);
+ test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade);
+ }
+ #[cfg(test)]
+ fn test_rwlock_handshake(x: Arc<RWLock>,
+ mode1: RWLockMode,
+ mode2: RWLockMode,
+ make_mode2_go_first: bool) {
+ // Much like sem_multi_resource.
+ let x2 = x.clone();
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ task::spawn(proc() {
+ if !make_mode2_go_first {
+ rx2.recv(); // parent sends to us once it locks, or ...
+ }
+ lock_rwlock_in_mode(&x2, mode2, || {
+ if make_mode2_go_first {
+ tx1.send(()); // ... we send to it once we lock
+ }
+ rx2.recv();
+ tx1.send(());
+ })
+ });
+ if make_mode2_go_first {
+ rx1.recv(); // child sends to us once it locks, or ...
+ }
+ lock_rwlock_in_mode(&x, mode1, || {
+ if !make_mode2_go_first {
+ tx2.send(()); // ... we send to it once we lock
+ }
+ tx2.send(());
+ rx1.recv();
+ })
+ }
+ #[test]
+ fn test_rwlock_readers_and_readers() {
+ test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false);
+ // The downgrader needs to get in before the reader gets in, otherwise
+ // they cannot end up reading at the same time.
+ test_rwlock_handshake(Arc::new(RWLock::new()), DowngradeRead, Read, false);
+ test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true);
+ // Two downgrade_reads can never both end up reading at the same time.
+ }
+ #[test]
+ fn test_rwlock_downgrade_unlock() {
+ // Tests that downgrade can unlock the lock in both modes
+ let x = Arc::new(RWLock::new());
+ lock_rwlock_in_mode(&x, Downgrade, || { });
+ test_rwlock_handshake(x, Read, Read, false);
+ let y = Arc::new(RWLock::new());
+ lock_rwlock_in_mode(&y, DowngradeRead, || { });
+ test_rwlock_exclusion(y, Write, Write);
+ }
+ #[test]
+ fn test_rwlock_read_recursive() {
+ let x = RWLock::new();
+ let _g1 = x.read();
+ let _g2 = x.read();
+ }
+ #[test]
+ fn test_rwlock_cond_wait() {
+ // As test_mutex_cond_wait above.
+ let x = Arc::new(RWLock::new());
+
+ // Child wakes up parent
+ {
+ let lock = x.write();
+ let x2 = x.clone();
+ task::spawn(proc() {
+ let lock = x2.write();
+ assert!(lock.cond.signal());
+ });
+ lock.cond.wait();
+ }
+ // Parent wakes up child
+ let (tx, rx) = channel();
+ let x3 = x.clone();
+ task::spawn(proc() {
+ let lock = x3.write();
+ tx.send(());
+ lock.cond.wait();
+ tx.send(());
+ });
+ rx.recv(); // Wait until child gets in the rwlock
+ drop(x.read()); // Must be able to get in as a reader
+ {
+ let x = x.write();
+ assert!(x.cond.signal());
+ }
+ rx.recv(); // Wait until child wakes up
+ drop(x.read()); // Just for good measure
+ }
+ #[cfg(test)]
+ fn test_rwlock_cond_broadcast_helper(num_waiters: uint) {
+ // Much like the mutex broadcast test. Downgrade-enabled.
+ fn lock_cond(x: &Arc<RWLock>, blk: |c: &Condvar|) {
+ let lock = x.write();
+ blk(&lock.cond);
+ }
+
+ let x = Arc::new(RWLock::new());
+ let mut rxs = Vec::new();
+
+ for _ in range(0u, num_waiters) {
+ let xi = x.clone();
+ let (tx, rx) = channel();
+ rxs.push(rx);
+ task::spawn(proc() {
+ lock_cond(&xi, |cond| {
+ tx.send(());
+ cond.wait();
+ tx.send(());
+ })
+ });
+ }
+
+ // wait until all children get in the mutex
+ for rx in rxs.iter_mut() { let _ = rx.recv(); }
+ lock_cond(&x, |cond| {
+ let num_woken = cond.broadcast();
+ assert_eq!(num_woken, num_waiters);
+ });
+ // wait until all children wake up
+ for rx in rxs.iter_mut() { let _ = rx.recv(); }
+ }
+ #[test]
+ fn test_rwlock_cond_broadcast() {
+ test_rwlock_cond_broadcast_helper(0);
+ test_rwlock_cond_broadcast_helper(12);
+ }
+ #[cfg(test)]
+ fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) {
+ use any::Any;
+
+ // Mutex must get automatically unlocked if panicked/killed within.
+ let x = Arc::new(RWLock::new());
+ let x2 = x.clone();
+
+ let result: result::Result<(), Box<Any + Send>> = task::try(proc() {
+ lock_rwlock_in_mode(&x2, mode1, || {
+ panic!();
+ })
+ });
+ assert!(result.is_err());
+ // child task must have finished by the time try returns
+ lock_rwlock_in_mode(&x, mode2, || { })
+ }
+ #[test]
+ fn test_rwlock_reader_killed_writer() {
+ rwlock_kill_helper(Read, Write);
+ }
+ #[test]
+ fn test_rwlock_writer_killed_reader() {
+ rwlock_kill_helper(Write, Read);
+ }
+ #[test]
+ fn test_rwlock_reader_killed_reader() {
+ rwlock_kill_helper(Read, Read);
+ }
+ #[test]
+ fn test_rwlock_writer_killed_writer() {
+ rwlock_kill_helper(Write, Write);
+ }
+ #[test]
+ fn test_rwlock_kill_downgrader() {
+ rwlock_kill_helper(Downgrade, Read);
+ rwlock_kill_helper(Read, Downgrade);
+ rwlock_kill_helper(Downgrade, Write);
+ rwlock_kill_helper(Write, Downgrade);
+ rwlock_kill_helper(DowngradeRead, Read);
+ rwlock_kill_helper(Read, DowngradeRead);
+ rwlock_kill_helper(DowngradeRead, Write);
+ rwlock_kill_helper(Write, DowngradeRead);
+ rwlock_kill_helper(DowngradeRead, Downgrade);
+ rwlock_kill_helper(DowngradeRead, Downgrade);
+ rwlock_kill_helper(Downgrade, DowngradeRead);
+ rwlock_kill_helper(Downgrade, DowngradeRead);
+ }
+}
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
+
+//! A single-producer single-consumer concurrent queue
+//!
+//! This module contains the implementation of an SPSC queue which can be used
+//! concurrently between two tasks. This data structure is safe to use and
+//! enforces the semantics that there is one pusher and one popper.
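+//!
+//! # Example
+//!
+//! A minimal usage sketch, mirroring the `smoke` test at the bottom of this
+//! file (a bound of 0 gives the internal node cache no size limit):
+//!
+//! ```ignore
+//! let (mut consumer, mut producer) = queue(0);
+//! producer.push(1i);
+//! producer.push(2i);
+//! assert_eq!(consumer.pop(), Some(1i));
+//! assert_eq!(consumer.pop(), Some(2i));
+//! assert_eq!(consumer.pop(), None);
+//! ```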
+
+#![experimental]
+
+use core::prelude::*;
+
+use alloc::boxed::Box;
+use core::mem;
+use core::cell::UnsafeCell;
+use alloc::arc::Arc;
+
+use sync::atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+
+// Node within the linked list queue of messages to send
+struct Node<T> {
+ // FIXME: this could be an uninitialized T if we're careful enough, and
+ // that would reduce memory usage (and be a bit faster).
+ // is it worth it?
+ value: Option<T>, // nullable for re-use of nodes
+ next: AtomicPtr<Node<T>>, // next node in the queue
+}
+
+/// The single-producer single-consumer queue. This structure is not cloneable,
+/// but it can be safely shared in an Arc if it is guaranteed that there
+/// is only one popper and one pusher touching the queue at any one point in
+/// time.
+pub struct Queue<T> {
+ // consumer fields
+ tail: UnsafeCell<*mut Node<T>>, // where to pop from
+    tail_prev: AtomicPtr<Node<T>>,       // last node recycled by pop(); producer reclaims up to here
+
+ // producer fields
+ head: UnsafeCell<*mut Node<T>>, // where to push to
+ first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
+    tail_copy: UnsafeCell<*mut Node<T>>, // producer's cached copy of tail_prev (between first and tail)
+
+ // Cache maintenance fields. Additions and subtractions are stored
+ // separately in order to allow them to use nonatomic addition/subtraction.
+ cache_bound: uint,
+ cache_additions: AtomicUint,
+ cache_subtractions: AtomicUint,
+}
+
+/// A safe abstraction for the consumer in a single-producer single-consumer
+/// queue.
+pub struct Consumer<T> {
+ inner: Arc<Queue<T>>
+}
+
+impl<T: Send> Consumer<T> {
+ /// Attempts to pop the value from the head of the queue, returning `None`
+ /// if the queue is empty.
+ pub fn pop(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ /// Attempts to peek at the head of the queue, returning `None` if the queue
+ /// is empty.
+ pub fn peek<'a>(&'a mut self) -> Option<&'a mut T> {
+ self.inner.peek()
+ }
+}
+
+/// A safe abstraction for the producer in a single-producer single-consumer
+/// queue.
+pub struct Producer<T> {
+ inner: Arc<Queue<T>>
+}
+
+impl<T: Send> Producer<T> {
+ /// Pushes a new value onto the queue.
+ pub fn push(&mut self, t: T) {
+ self.inner.push(t)
+ }
+}
+
+impl<T: Send> Node<T> {
+ fn new() -> *mut Node<T> {
+ unsafe {
+ mem::transmute(box Node {
+ value: None,
+ next: AtomicPtr::new(0 as *mut Node<T>),
+ })
+ }
+ }
+}
+
+/// Creates a new queue with a consumer-producer pair.
+///
+/// The returned producer is connected to the returned consumer: all data
+/// pushed onto the producer becomes available to the consumer.
+///
+/// # Arguments
+///
+/// * `bound` - This queue implementation is implemented with a linked
+/// list, and this means that a push is always a malloc. In
+/// order to amortize this cost, an internal cache of nodes is
+/// maintained to prevent a malloc from always being
+/// necessary. This bound is the limit on the size of the
+/// cache (if desired). If the value is 0, then the cache has
+/// no bound. Otherwise, the cache will never grow larger than
+///              `bound` (although the queue itself could be much larger).
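+///
+/// # Example
+///
+/// A short sketch mirroring the `smoke_bound` test below, using a node
+/// cache bounded to a single entry:
+///
+/// ```ignore
+/// let (mut consumer, mut producer) = queue(1);
+/// producer.push(10i);
+/// assert_eq!(consumer.pop(), Some(10i));
+/// assert_eq!(consumer.pop(), None);
+/// ```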
+pub fn queue<T: Send>(bound: uint) -> (Consumer<T>, Producer<T>) {
+ let q = unsafe { Queue::new(bound) };
+ let arc = Arc::new(q);
+ let consumer = Consumer { inner: arc.clone() };
+ let producer = Producer { inner: arc };
+
+ (consumer, producer)
+}
+
+impl<T: Send> Queue<T> {
+ /// Creates a new queue.
+ ///
+ /// This is unsafe as the type system doesn't enforce a single
+ /// consumer-producer relationship. It also allows the consumer to `pop`
+ /// items while there is a `peek` active due to all methods having a
+ /// non-mutable receiver.
+ ///
+ /// # Arguments
+ ///
+ /// * `bound` - This queue implementation is implemented with a linked
+ /// list, and this means that a push is always a malloc. In
+ /// order to amortize this cost, an internal cache of nodes is
+ /// maintained to prevent a malloc from always being
+ /// necessary. This bound is the limit on the size of the
+ /// cache (if desired). If the value is 0, then the cache has
+ /// no bound. Otherwise, the cache will never grow larger than
+    ///              `bound` (although the queue itself could be much larger).
+ pub unsafe fn new(bound: uint) -> Queue<T> {
+ let n1 = Node::new();
+ let n2 = Node::new();
+ (*n1).next.store(n2, Relaxed);
+ Queue {
+ tail: UnsafeCell::new(n2),
+ tail_prev: AtomicPtr::new(n1),
+ head: UnsafeCell::new(n2),
+ first: UnsafeCell::new(n1),
+ tail_copy: UnsafeCell::new(n1),
+ cache_bound: bound,
+ cache_additions: AtomicUint::new(0),
+ cache_subtractions: AtomicUint::new(0),
+ }
+ }
+
+ /// Pushes a new value onto this queue. Note that to use this function
+ /// safely, it must be externally guaranteed that there is only one pusher.
+ pub fn push(&self, t: T) {
+ unsafe {
+ // Acquire a node (which either uses a cached one or allocates a new
+ // one), and then append this to the 'head' node.
+ let n = self.alloc();
+ assert!((*n).value.is_none());
+ (*n).value = Some(t);
+ (*n).next.store(0 as *mut Node<T>, Relaxed);
+ (**self.head.get()).next.store(n, Release);
+ *self.head.get() = n;
+ }
+ }
+
+ unsafe fn alloc(&self) -> *mut Node<T> {
+ // First try to see if we can consume the 'first' node for our uses.
+ // We try to avoid as many atomic instructions as possible here, so
+ // the addition to cache_subtractions is not atomic (plus we're the
+ // only one subtracting from the cache).
+ if *self.first.get() != *self.tail_copy.get() {
+ if self.cache_bound > 0 {
+ let b = self.cache_subtractions.load(Relaxed);
+ self.cache_subtractions.store(b + 1, Relaxed);
+ }
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
+ return ret;
+ }
+ // If the above fails, then update our copy of the tail and try
+ // again.
+ *self.tail_copy.get() = self.tail_prev.load(Acquire);
+ if *self.first.get() != *self.tail_copy.get() {
+ if self.cache_bound > 0 {
+ let b = self.cache_subtractions.load(Relaxed);
+ self.cache_subtractions.store(b + 1, Relaxed);
+ }
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
+ return ret;
+ }
+ // If all of that fails, then we have to allocate a new node
+ // (there's nothing in the node cache).
+ Node::new()
+ }
+
+ /// Attempts to pop a value from this queue. Remember that to use this type
+ /// safely you must ensure that there is only one popper at a time.
+ pub fn pop(&self) -> Option<T> {
+ unsafe {
+ // The `tail` node is not actually a used node, but rather a
+            // sentinel from which we should start popping. Hence, look at
+ // tail's next field and see if we can use it. If we do a pop, then
+ // the current tail node is a candidate for going into the cache.
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+ if next.is_null() { return None }
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take();
+
+ *self.tail.get() = next;
+ if self.cache_bound == 0 {
+ self.tail_prev.store(tail, Release);
+ } else {
+ // FIXME: this is dubious with overflow.
+ let additions = self.cache_additions.load(Relaxed);
+ let subtractions = self.cache_subtractions.load(Relaxed);
+ let size = additions - subtractions;
+
+ if size < self.cache_bound {
+ self.tail_prev.store(tail, Release);
+ self.cache_additions.store(additions + 1, Relaxed);
+ } else {
+ (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed);
+ // We have successfully erased all references to 'tail', so
+ // now we can safely drop it.
+ let _: Box<Node<T>> = mem::transmute(tail);
+ }
+ }
+ return ret;
+ }
+ }
+
+ /// Attempts to peek at the head of the queue, returning `None` if the queue
+    /// has no data currently.
+ ///
+ /// # Warning
+ /// The reference returned is invalid if it is not used before the consumer
+ /// pops the value off the queue. If the producer then pushes another value
+ /// onto the queue, it will overwrite the value pointed to by the reference.
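+    ///
+    /// A minimal sketch of careful use (`q` here is a hypothetical `Queue`
+    /// with the single-consumer guarantee upheld, as in the `peek` test
+    /// below):
+    ///
+    /// ```ignore
+    /// match q.peek() {
+    ///     // valid only until this consumer pops, and a later push may
+    ///     // overwrite it; see the warning above
+    ///     Some(value) => { /* inspect or mutate *value in place */ }
+    ///     None => { /* the queue is currently empty */ }
+    /// }
+    /// ```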
+ pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
+ // This is essentially the same as above with all the popping bits
+ // stripped out.
+ unsafe {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+ if next.is_null() { return None }
+ return (*next).value.as_mut();
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.first.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Relaxed);
+ let _n: Box<Node<T>> = mem::transmute(cur);
+ cur = next;
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+
+ use super::{queue};
+
+ #[test]
+ fn smoke() {
+ let (mut consumer, mut producer) = queue(0);
+ producer.push(1i);
+ producer.push(2);
+ assert_eq!(consumer.pop(), Some(1i));
+ assert_eq!(consumer.pop(), Some(2));
+ assert_eq!(consumer.pop(), None);
+ producer.push(3);
+ producer.push(4);
+ assert_eq!(consumer.pop(), Some(3));
+ assert_eq!(consumer.pop(), Some(4));
+ assert_eq!(consumer.pop(), None);
+ }
+
+ #[test]
+ fn peek() {
+ let (mut consumer, mut producer) = queue(0);
+ producer.push(vec![1i]);
+
+ // Ensure the borrowchecker works
+ match consumer.peek() {
+ Some(vec) => match vec.as_slice() {
+ // Note that `pop` is not allowed here due to borrow
+ [1] => {}
+ _ => return
+ },
+ None => unreachable!()
+ }
+
+ consumer.pop();
+ }
+
+ #[test]
+ fn drop_full() {
+ let (_, mut producer) = queue(0);
+ producer.push(box 1i);
+ producer.push(box 2i);
+ }
+
+ #[test]
+ fn smoke_bound() {
+ let (mut consumer, mut producer) = queue(1);
+ producer.push(1i);
+ producer.push(2);
+ assert_eq!(consumer.pop(), Some(1));
+ assert_eq!(consumer.pop(), Some(2));
+ assert_eq!(consumer.pop(), None);
+ producer.push(3);
+ producer.push(4);
+ assert_eq!(consumer.pop(), Some(3));
+ assert_eq!(consumer.pop(), Some(4));
+ assert_eq!(consumer.pop(), None);
+ }
+
+ #[test]
+ fn stress() {
+ stress_bound(0);
+ stress_bound(1);
+
+ fn stress_bound(bound: uint) {
+ let (consumer, mut producer) = queue(bound);
+
+ let (tx, rx) = channel();
+ spawn(proc() {
+ // Move the consumer to a local mutable slot
+ let mut consumer = consumer;
+ for _ in range(0u, 100000) {
+ loop {
+ match consumer.pop() {
+ Some(1i) => break,
+ Some(_) => panic!(),
+ None => {}
+ }
+ }
+ }
+ tx.send(());
+ });
+ for _ in range(0i, 100000) {
+ producer.push(1);
+ }
+ rx.recv();
+ }
+ }
+}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Atomic types
-//!
-//! Atomic types provide primitive shared-memory communication between
-//! threads, and are the building blocks of other concurrent
-//! types.
-//!
-//! This module defines atomic versions of a select number of primitive
-//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
-//! Atomic types present operations that, when used correctly, synchronize
-//! updates between threads.
-//!
-//! Each method takes an `Ordering` which represents the strength of
-//! the memory barrier for that operation. These orderings are the
-//! same as [C++11 atomic orderings][1].
-//!
-//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
-//!
-//! Atomic variables are safe to share between threads (they implement `Sync`)
-//! but they do not themselves provide the mechanism for sharing. The most
-//! common way to share an atomic variable is to put it into an `Arc` (an
-//! atomically-reference-counted shared pointer).
-//!
-//! Most atomic types may be stored in static variables, initialized using
-//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
-//! are often used for lazy global initialization.
-//!
-//!
-//! # Examples
-//!
-//! A simple spinlock:
-//!
-//! ```
-//! use std::sync::Arc;
-//! use std::sync::atomic::{AtomicUint, SeqCst};
-//!
-//! fn main() {
-//! let spinlock = Arc::new(AtomicUint::new(1));
-//!
-//! let spinlock_clone = spinlock.clone();
-//! spawn(proc() {
-//! spinlock_clone.store(0, SeqCst);
-//! });
-//!
-//! // Wait for the other task to release the lock
-//! while spinlock.load(SeqCst) != 0 {}
-//! }
-//! ```
-//!
-//! Transferring a heap object with `AtomicOption`:
-//!
-//! ```
-//! use std::sync::Arc;
-//! use std::sync::atomic::{AtomicOption, SeqCst};
-//!
-//! fn main() {
-//! struct BigObject;
-//!
-//! let shared_big_object = Arc::new(AtomicOption::empty());
-//!
-//! let shared_big_object_clone = shared_big_object.clone();
-//! spawn(proc() {
-//! let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
-//! if unwrapped_big_object.is_some() {
-//! println!("got a big object from another task");
-//! } else {
-//! println!("other task hasn't sent big object yet");
-//! }
-//! });
-//!
-//! shared_big_object.swap(box BigObject, SeqCst);
-//! }
-//! ```
-//!
-//! Keep a global count of live tasks:
-//!
-//! ```
-//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
-//!
-//! static GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
-//!
-//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
-//! println!("live tasks: {}", old_task_count + 1);
-//! ```
-
-#![allow(deprecated)]
-
-use alloc::boxed::Box;
-use core::mem;
-use core::prelude::{Send, Drop, None, Option, Some};
-
-pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
-pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
-pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
-pub use core::atomic::fence;
-
-/// An atomic, nullable unique pointer
-///
-/// This can be used as the concurrency primitive for operations that transfer
-/// owned heap objects across tasks.
-#[unsafe_no_drop_flag]
-#[deprecated = "no longer used; will eventually be replaced by a higher-level \
- concept like MVar"]
-pub struct AtomicOption<T> {
- p: AtomicUint,
-}
-
-impl<T: Send> AtomicOption<T> {
- /// Create a new `AtomicOption`
- pub fn new(p: Box<T>) -> AtomicOption<T> {
- unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } }
- }
-
- /// Create a new `AtomicOption` that doesn't contain a value
- pub fn empty() -> AtomicOption<T> { AtomicOption { p: AtomicUint::new(0) } }
-
- /// Store a value, returning the old value
- #[inline]
- pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
- let val = unsafe { mem::transmute(val) };
-
- match self.p.swap(val, order) {
- 0 => None,
- n => Some(unsafe { mem::transmute(n) }),
- }
- }
-
- /// Remove the value, leaving the `AtomicOption` empty.
- #[inline]
- pub fn take(&self, order: Ordering) -> Option<Box<T>> {
- unsafe { self.swap(mem::transmute(0u), order) }
- }
-
- /// Replace an empty value with a non-empty value.
- ///
- /// Succeeds if the option is `None` and returns `None` if so. If
- /// the option was already `Some`, returns `Some` of the rejected
- /// value.
- #[inline]
- pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
- unsafe {
- let val = mem::transmute(val);
- let expected = mem::transmute(0u);
- let oldval = self.p.compare_and_swap(expected, val, order);
- if oldval == expected {
- None
- } else {
- Some(mem::transmute(val))
- }
- }
- }
-
- /// Returns `true` if the `AtomicOption` is empty.
- ///
- /// Be careful: The caller must have some external method of ensuring the
- /// result does not get invalidated by another task after this returns.
- #[inline]
- pub fn is_empty(&self, order: Ordering) -> bool {
- self.p.load(order) as uint == 0
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for AtomicOption<T> {
- fn drop(&mut self) {
- let _ = self.take(SeqCst);
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::prelude::*;
- use super::*;
-
- #[test]
- fn option_empty() {
- let option: AtomicOption<()> = AtomicOption::empty();
- assert!(option.is_empty(SeqCst));
- }
-
- #[test]
- fn option_swap() {
- let p = AtomicOption::new(box 1i);
- let a = box 2i;
-
- let b = p.swap(a, SeqCst);
-
- assert!(b == Some(box 1));
- assert!(p.take(SeqCst) == Some(box 2));
- }
-
- #[test]
- fn option_take() {
- let p = AtomicOption::new(box 1i);
-
- assert!(p.take(SeqCst) == Some(box 1));
- assert!(p.take(SeqCst) == None);
-
- let p2 = box 2i;
- p.swap(p2, SeqCst);
-
- assert!(p.take(SeqCst) == Some(box 2));
- }
-
- #[test]
- fn option_fill() {
- let p = AtomicOption::new(box 1i);
- assert!(p.fill(box 2i, SeqCst).is_some()); // should fail; shouldn't leak!
- assert!(p.take(SeqCst) == Some(box 1));
-
- assert!(p.fill(box 2i, SeqCst).is_none()); // shouldn't fail
- assert!(p.take(SeqCst) == Some(box 2));
- }
-}
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Communication primitives for concurrent tasks
-//!
-//! Rust makes it very difficult to share data among tasks to prevent race
-//! conditions and to improve parallelism, but there is often a need for
-//! communication between concurrent tasks. The primitives defined in this
-//! module are the building blocks for synchronization in rust.
-//!
-//! This module provides message-based communication over channels, concretely
-//! defined among three types:
-//!
-//! * `Sender`
-//! * `SyncSender`
-//! * `Receiver`
-//!
-//! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
-//! senders are clone-able such that many tasks can send simultaneously to one
-//! receiver. These channels are *task blocking*, not *thread blocking*. This
-//! means that if one task is blocked on a channel, other tasks can continue to
-//! make progress.
-//!
-//! Rust channels come in one of two flavors:
-//!
-//! 1. An asynchronous, infinitely buffered channel. The `channel()` function
-//! will return a `(Sender, Receiver)` tuple where all sends will be
-//! **asynchronous** (they never block). The channel conceptually has an
-//! infinite buffer.
-//!
-//! 2. A synchronous, bounded channel. The `sync_channel()` function will return
-//! a `(SyncSender, Receiver)` tuple where the storage for pending messages
-//! is a pre-allocated buffer of a fixed size. All sends will be
-//! **synchronous** by blocking until there is buffer space available. Note
-//! that a bound of 0 is allowed, causing the channel to become a
-//! "rendezvous" channel where each sender atomically hands off a message to
-//! a receiver.
-//!
-//! ## Panic Propagation
-//!
-//! In addition to being a core primitive for communicating in rust, channels
-//! are the points at which panics are propagated among tasks. Whenever one
-//! half of a channel is closed, the next operation on the other half will
-//! `panic!`. The purpose of this is to allow propagation of panics among tasks
-//! that are linked to one another via channels.
-//!
-//! Both senders and receivers have methods to perform their respective
-//! operations without panicking, however.
-//!
-//! ## Runtime Requirements
-//!
-//! The channel types defined in this module generally have very few runtime
-//! requirements in order to operate. The major requirement they have is for a
-//! local rust `Task` to be available if any *blocking* operation is performed.
-//!
-//! If a local `Task` is not available (for example, in an FFI callback), then
-//! the `send` operation on a `Sender` (as well as `send_opt`) is safe, as is
-//! the `try_send` method on a `SyncSender`, but no other operations are
-//! guaranteed to be safe.
-//!
-//! # Example
-//!
-//! Simple usage:
-//!
-//! ```
-//! // Create a simple streaming channel
-//! let (tx, rx) = channel();
-//! spawn(proc() {
-//! tx.send(10i);
-//! });
-//! assert_eq!(rx.recv(), 10i);
-//! ```
-//!
-//! Shared usage:
-//!
-//! ```
-//! // Create a shared channel which can be sent along from many tasks
-//! // where tx is the sending half (tx for transmission), and rx is the receiving
-//! // half (rx for receiving).
-//! let (tx, rx) = channel();
-//! for i in range(0i, 10i) {
-//! let tx = tx.clone();
-//! spawn(proc() {
-//! tx.send(i);
-//! })
-//! }
-//!
-//! for _ in range(0i, 10i) {
-//! let j = rx.recv();
-//! assert!(0 <= j && j < 10);
-//! }
-//! ```
-//!
-//! Propagating panics:
-//!
-//! ```should_fail
-//! // The call to recv() will panic!() because the channel has already hung
-//! // up (or been deallocated)
-//! let (tx, rx) = channel::<int>();
-//! drop(tx);
-//! rx.recv();
-//! ```
-//!
-//! Synchronous channels:
-//!
-//! ```
-//! let (tx, rx) = sync_channel::<int>(0);
-//! spawn(proc() {
-//! // This will wait for the parent task to start receiving
-//! tx.send(53);
-//! });
-//! rx.recv();
-//! ```
-//!
-//! Reading from a channel with a timeout requires using a Timer together
-//! with the channel. You can use the select! macro to select on either and
-//! handle the timeout case. This first example will break out of the loop
-//! after 10 seconds no matter what:
-//!
-//! ```no_run
-//! use std::io::timer::Timer;
-//! use std::time::Duration;
-//!
-//! let (tx, rx) = channel::<int>();
-//! let mut timer = Timer::new().unwrap();
-//! let timeout = timer.oneshot(Duration::seconds(10));
-//!
-//! loop {
-//! select! {
-//! val = rx.recv() => println!("Received {}", val),
-//! () = timeout.recv() => {
-//!             println!("timed out, total time was more than 10 seconds");
-//! break;
-//! }
-//! }
-//! }
-//! ```
-//!
-//! This second example is more costly since it allocates a new timer every
-//! time a message is received, but it allows you to timeout after the channel
-//! has been inactive for 5 seconds:
-//!
-//! ```no_run
-//! use std::io::timer::Timer;
-//! use std::time::Duration;
-//!
-//! let (tx, rx) = channel::<int>();
-//! let mut timer = Timer::new().unwrap();
-//!
-//! loop {
-//! let timeout = timer.oneshot(Duration::seconds(5));
-//!
-//! select! {
-//! val = rx.recv() => println!("Received {}", val),
-//! () = timeout.recv() => {
-//!             println!("timed out, no message received in 5 seconds");
-//! break;
-//! }
-//! }
-//! }
-//! ```
-
-// A description of how Rust's channel implementation works
-//
-// Channels are supposed to be the basic building block for all other
-// concurrent primitives that are used in Rust. As a result, the channel type
-// needs to be highly optimized, flexible, and broad enough for use everywhere.
-//
-// The choice of implementation of all channels is to be built on lock-free data
-// structures. The channels themselves are then consequently also lock-free data
-// structures. As always with lock-free code, this is a very "here be dragons"
-// territory, especially because I'm unaware of any academic papers which have
-// gone into great length about channels of these flavors.
-//
-// ## Flavors of channels
-//
-// From the perspective of a consumer of this library, there is only one flavor
-// of channel. This channel can be used as a stream and cloned to allow multiple
-// senders. Under the hood, however, there are actually three flavors of
-// channels in play.
-//
-// * Oneshots - these channels are highly optimized for the one-send use case.
-// They contain as few atomics as possible and involve one and
-// exactly one allocation.
-// * Streams - these channels are optimized for the non-shared use case. They
-// use a different concurrent queue which is more tailored for this
-// use case. The initial allocation of this flavor of channel is not
-// optimized.
-// * Shared - this is the most general form of channel that this module offers,
-// a channel with multiple senders. This type is as optimized as it
-// can be, but the previous two types mentioned are much faster for
-// their use-cases.
-//
-// ## Concurrent queues
-//
-// The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
-// recv() obviously blocks. This means that under the hood there must be some
-// shared and concurrent queue holding all of the actual data.
-//
-// With two flavors of channels, two flavors of queues are also used. We have
-// chosen to use queues from a well-known author which are abbreviated as SPSC
-// and MPSC (single producer, single consumer and multiple producer, single
-// consumer). SPSC queues are used for streams while MPSC queues are used for
-// shared channels.
-//
-// ### SPSC optimizations
-//
-// The SPSC queue found online is essentially a linked list of nodes where one
-// half of the nodes are the "queue of data" and the other half of nodes are a
-// cache of unused nodes. The unused nodes are used such that an allocation is
-// not required on every push() and a free doesn't need to happen on every
-// pop().
-//
-// As found online, however, the cache of nodes is of an infinite size. This
-// means that if a channel at one point in its life had 50k items in the queue,
-// then the queue will always have the capacity for 50k items. I believed that
-// this was an unnecessary limitation of the implementation, so I have altered
-// the queue to optionally have a bound on the cache size.
-//
-// By default, streams will have an unbounded SPSC queue with a small-ish cache
-// size. The hope is that the cache is still large enough to have very fast
-// send() operations while not too large such that millions of channels can
-// coexist at once.
-//
-// ### MPSC optimizations
-//
-// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
-// a linked list under the hood to earn its unboundedness, but I have not put
-// forth much effort into having a cache of nodes similar to the SPSC queue.
-//
-// For now, I believe that this is "ok" because shared channels are not the most
-// common type, but soon we may wish to revisit this queue choice and determine
-// another candidate for backend storage of shared channels.
-//
-// ## Overview of the Implementation
-//
-// Now that there's a little background on the concurrent queues used, it's
-// worth going into much more detail about the channels themselves. The basic
-// pseudocode for a send/recv are:
-//
-//
-// send(t) recv()
-// queue.push(t) return if queue.pop()
-// if increment() == -1 deschedule {
-// wakeup() if decrement() > 0
-// cancel_deschedule()
-// }
-// queue.pop()
-//
-// As mentioned before, there are no locks in this implementation, only atomic
-// instructions are used.
-//
-// ### The internal atomic counter
-//
-// Every channel has a shared counter with each half to keep track of the size
-// of the queue. This counter is used to abort descheduling by the receiver and
-// to know when to wake up on the sending side.
-//
-// As seen in the pseudocode, senders will increment this count and receivers
-// will decrement the count. The theory behind this is that if a sender sees a
-// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
-// then it doesn't need to block.
-//
-// The recv() method has a beginning call to pop(), and if successful, it needs
-// to decrement the count. It is a crucial implementation detail that this
-// decrement does *not* happen to the shared counter. If this were the case,
-// then it would be possible for the counter to be very negative when there were
-// no receivers waiting, in which case the senders would have to determine when
-// it was actually appropriate to wake up a receiver.
-//
-// Instead, the "steal count" is kept track of separately (not atomically
-// because it's only used by receivers), and then the decrement() call when
-// descheduling will lump in all of the recent steals into one large decrement.
-//
-// The implication of this is that if a sender sees a -1 count, then there's
-// guaranteed to be a waiter waiting!
-//
-// ## Native Implementation
-//
-// A major goal of these channels is to work seamlessly on and off the runtime.
-// All of the previous race conditions have been worded in terms of
-// scheduler-isms (which are obviously not available without the runtime).
-//
-// For now, native usage of channels (off the runtime) will fall back onto
-// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
-// is still entirely lock-free, the "deschedule" blocks above are surrounded by
-// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
-// condition variable.
-//
-// ## Select
-//
-// Being able to support selection over channels has greatly influenced this
-// design, and not only does selection need to work inside the runtime, but also
-// outside the runtime.
-//
-// The implementation is fairly straightforward. The goal of select() is not to
-// return some data, but only to return which channel can receive data without
-// blocking. The implementation is essentially the entire blocking procedure
-// followed by an increment as soon as its woken up. The cancellation procedure
-// involves an increment and swapping out of to_wake to acquire ownership of the
-// task to unblock.
-//
-// Sadly this current implementation requires multiple allocations, so I have
-// seen the throughput of select() be much worse than it should be. I do not
-// believe that there is anything fundamental which needs to change about these
-// channels, however, in order to support a more efficient select().
-//
-// # Conclusion
-//
-// And now that you've seen all the races that I found and attempted to fix,
-// here's the code for you to find some more!
-
-use core::prelude::*;
-
-pub use self::TryRecvError::*;
-pub use self::TrySendError::*;
-use self::Flavor::*;
-
-use alloc::arc::Arc;
-use core::kinds::marker;
-use core::mem;
-use core::cell::UnsafeCell;
-use rustrt::task::BlockedTask;
-
-pub use comm::select::{Select, Handle};
-
-macro_rules! test (
- { fn $name:ident() $b:block $(#[$a:meta])*} => (
- mod $name {
- #![allow(unused_imports)]
-
- extern crate rustrt;
-
- use std::prelude::*;
-
- use comm::*;
- use super::*;
- use super::super::*;
- use std::task;
-
- $(#[$a])* #[test] fn f() { $b }
- }
- )
-)
-
-mod oneshot;
-mod select;
-mod shared;
-mod stream;
-mod sync;
-
-/// The receiving-half of Rust's channel type. This half can only be owned by
-/// one task
-#[unstable]
-pub struct Receiver<T> {
- inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
-}
-
-/// An iterator over messages on a receiver, this iterator will block
-/// whenever `next` is called, waiting for a new message, and `None` will be
-/// returned when the corresponding channel has hung up.
-#[unstable]
-pub struct Messages<'a, T:'a> {
- rx: &'a Receiver<T>
-}
-
-/// The sending-half of Rust's asynchronous channel type. This half can only be
-/// owned by one task, but it can be cloned to send to other tasks.
-#[unstable]
-pub struct Sender<T> {
- inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
-}
-
-/// The sending-half of Rust's synchronous channel type. This half can only be
-/// owned by one task, but it can be cloned to send to other tasks.
-#[unstable = "this type may be renamed, but it will always exist"]
-pub struct SyncSender<T> {
- inner: Arc<UnsafeCell<sync::Packet<T>>>,
- // can't share in an arc
- _marker: marker::NoSync,
-}
-
-/// This enumeration is the list of the possible reasons that try_recv could not
-/// return data when called.
-#[deriving(PartialEq, Clone, Show)]
-#[experimental = "this is likely to be removed in changing try_recv()"]
-pub enum TryRecvError {
- /// This channel is currently empty, but the sender(s) have not yet
- /// disconnected, so data may yet become available.
- Empty,
- /// This channel's sending half has become disconnected, and there will
- /// never be any more data received on this channel
- Disconnected,
-}
-
-/// This enumeration is the list of the possible error outcomes for the
-/// `SyncSender::try_send` method.
-#[deriving(PartialEq, Clone, Show)]
-#[experimental = "this is likely to be removed in changing try_send()"]
-pub enum TrySendError<T> {
- /// The data could not be sent on the channel because it would require that
- /// the callee block to send the data.
- ///
- /// If this is a buffered channel, then the buffer is full at this time. If
- /// this is not a buffered channel, then there is no receiver available to
- /// acquire the data.
- Full(T),
- /// This channel's receiving half has disconnected, so the data could not be
- /// sent. The data is returned back to the callee in this case.
- RecvDisconnected(T),
-}
-
-enum Flavor<T> {
- Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
- Stream(Arc<UnsafeCell<stream::Packet<T>>>),
- Shared(Arc<UnsafeCell<shared::Packet<T>>>),
- Sync(Arc<UnsafeCell<sync::Packet<T>>>),
-}
-
-#[doc(hidden)]
-trait UnsafeFlavor<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>>;
- unsafe fn inner_mut<'a>(&'a self) -> &'a mut Flavor<T> {
- &mut *self.inner_unsafe().get()
- }
- unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
- &*self.inner_unsafe().get()
- }
-}
-impl<T> UnsafeFlavor<T> for Sender<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
- &self.inner
- }
-}
-impl<T> UnsafeFlavor<T> for Receiver<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
- &self.inner
- }
-}
-
-/// Creates a new asynchronous channel, returning the sender/receiver halves.
-///
-/// All data sent on the sender will become available on the receiver, and no
-/// send will block the calling task (this channel has an "infinite buffer").
-///
-/// # Example
-///
-/// ```
-/// // tx is the sending half (tx for transmission), and rx is the receiving
-/// // half (rx for receiving).
-/// let (tx, rx) = channel();
-///
-/// // Spawn off an expensive computation
-/// spawn(proc() {
-/// # fn expensive_computation() {}
-/// tx.send(expensive_computation());
-/// });
-///
-/// // Do some useful work for awhile
-///
-/// // Let's see what that answer was
-/// println!("{}", rx.recv());
-/// ```
-#[unstable]
-pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
- (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
-}
-
-/// Creates a new synchronous, bounded channel.
-///
-/// Like asynchronous channels, the `Receiver` will block until a message
-/// becomes available. These channels differ greatly in the semantics of the
-/// sender from asynchronous channels, however.
-///
-/// This channel has an internal buffer on which messages will be queued. When
-/// the internal buffer becomes full, future sends will *block* waiting for the
-/// buffer to open up. Note that a buffer size of 0 is valid, in which case this
-/// becomes a "rendezvous channel" where each send will not return until a recv
-/// is paired with it.
-///
-/// As with asynchronous channels, all senders will panic in `send` if the
-/// `Receiver` has been destroyed.
-///
-/// # Example
-///
-/// ```
-/// let (tx, rx) = sync_channel(1);
-///
-/// // this returns immediately
-/// tx.send(1i);
-///
-/// spawn(proc() {
-/// // this will block until the previous message has been received
-/// tx.send(2i);
-/// });
-///
-/// assert_eq!(rx.recv(), 1i);
-/// assert_eq!(rx.recv(), 2i);
-/// ```
-#[unstable = "this function may be renamed to more accurately reflect the type \
-              of channel that it is creating"]
-pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
- (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Sender
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> Sender<T> {
- fn new(inner: Flavor<T>) -> Sender<T> {
- Sender {
- inner: UnsafeCell::new(inner),
- _marker: marker::NoSync,
- }
- }
-
- /// Sends a value along this channel to be received by the corresponding
- /// receiver.
- ///
- /// Rust channels are infinitely buffered so this method will never block.
- ///
- /// # Panics
- ///
- /// This function will panic if the other end of the channel has hung up.
- /// This means that if the corresponding receiver has fallen out of scope,
- /// this function will trigger a panic message saying that a message is
- /// being sent on a closed channel.
- ///
- /// Note that if this function does *not* panic, it does not mean that the
- /// data will be successfully received. All sends are placed into a queue,
- /// so it is possible for a send to succeed (the other end is alive), but
- /// then the other end could immediately disconnect.
- ///
-    /// The purpose of this functionality is to propagate panics among tasks.
-    /// If a panic is not desired, then consider using the `send_opt` method.
- #[experimental = "this function is being considered candidate for removal \
- to adhere to the general guidelines of rust"]
- pub fn send(&self, t: T) {
- if self.send_opt(t).is_err() {
- panic!("sending on a closed channel");
- }
- }
-
- /// Attempts to send a value on this channel, returning it back if it could
- /// not be sent.
- ///
- /// A successful send occurs when it is determined that the other end of
- /// the channel has not hung up already. An unsuccessful send would be one
- /// where the corresponding receiver has already been deallocated. Note
- /// that a return value of `Err` means that the data will never be
- /// received, but a return value of `Ok` does *not* mean that the data
- /// will be received. It is possible for the corresponding receiver to
- /// hang up immediately after this function returns `Ok`.
- ///
- /// Like `send`, this method will never block.
- ///
- /// # Panics
- ///
-    /// This method will never panic; it will return the message back to the
-    /// caller if the other end is disconnected.
- ///
- /// # Example
- ///
- /// ```
- /// let (tx, rx) = channel();
- ///
- /// // This send is always successful
- /// assert_eq!(tx.send_opt(1i), Ok(()));
- ///
- /// // This send will fail because the receiver is gone
- /// drop(rx);
- /// assert_eq!(tx.send_opt(1i), Err(1));
- /// ```
- #[unstable = "this function may be renamed to send() in the future"]
- pub fn send_opt(&self, t: T) -> Result<(), T> {
- let (new_inner, ret) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- unsafe {
- let p = p.get();
- if !(*p).sent() {
- return (*p).send(t);
- } else {
- let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
- match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
- oneshot::UpSuccess => {
- let ret = (*a.get()).send(t);
- (a, ret)
- }
- oneshot::UpDisconnected => (a, Err(t)),
- oneshot::UpWoke(task) => {
- // This send cannot panic because the task is
- // asleep (we're looking at it), so the receiver
- // can't go away.
- (*a.get()).send(t).ok().unwrap();
- task.wake().map(|t| t.reawaken());
- (a, Ok(()))
- }
- }
- }
- }
- }
- Stream(ref p) => return unsafe { (*p.get()).send(t) },
- Shared(ref p) => return unsafe { (*p.get()).send(t) },
- Sync(..) => unreachable!(),
- };
-
- unsafe {
- let tmp = Sender::new(Stream(new_inner));
- mem::swap(self.inner_mut(), tmp.inner_mut());
- }
- return ret;
- }
-}
-
-#[unstable]
-impl<T: Send> Clone for Sender<T> {
- fn clone(&self) -> Sender<T> {
- let (packet, sleeper) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
- unsafe {
- (*a.get()).postinit_lock();
- match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
- oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
- oneshot::UpWoke(task) => (a, Some(task))
- }
- }
- }
- Stream(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
- unsafe {
- (*a.get()).postinit_lock();
- match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
- stream::UpSuccess | stream::UpDisconnected => (a, None),
- stream::UpWoke(task) => (a, Some(task)),
- }
- }
- }
- Shared(ref p) => {
- unsafe { (*p.get()).clone_chan(); }
- return Sender::new(Shared(p.clone()));
- }
- Sync(..) => unreachable!(),
- };
-
- unsafe {
- (*packet.get()).inherit_blocker(sleeper);
-
- let tmp = Sender::new(Shared(packet.clone()));
- mem::swap(self.inner_mut(), tmp.inner_mut());
- }
- Sender::new(Shared(packet))
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Sender<T> {
- fn drop(&mut self) {
- match *unsafe { self.inner_mut() } {
- Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Sync(..) => unreachable!(),
- }
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// SyncSender
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> SyncSender<T> {
- fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
- SyncSender { inner: inner, _marker: marker::NoSync }
- }
-
- /// Sends a value on this synchronous channel.
- ///
- /// This function will *block* until space in the internal buffer becomes
- /// available or a receiver is available to hand off the message to.
- ///
- /// Note that a successful send does *not* guarantee that the receiver will
- /// ever see the data if there is a buffer on this channel. Messages may be
- /// enqueued in the internal buffer for the receiver to receive at a later
- /// time. If the buffer size is 0, however, it can be guaranteed that the
- /// receiver has indeed received the data if this function returns success.
- ///
- /// # Panics
- ///
- /// Similarly to `Sender::send`, this function will panic if the
- /// corresponding `Receiver` for this channel has disconnected. This
- /// behavior is used to propagate panics among tasks.
- ///
- /// If a panic is not desired, you can achieve the same semantics with the
- /// `SyncSender::send_opt` method which will not panic if the receiver
- /// disconnects.
- #[experimental = "this function is being considered candidate for removal \
- to adhere to the general guidelines of rust"]
- pub fn send(&self, t: T) {
- if self.send_opt(t).is_err() {
- panic!("sending on a closed channel");
- }
- }
-
- /// Send a value on a channel, returning it back if the receiver
- /// disconnected
- ///
- /// This method will *block* to send the value `t` on the channel, but if
- /// the value could not be sent due to the receiver disconnecting, the value
-    /// is returned to the caller. This function is similar to `try_send`,
- /// except that it will block if the channel is currently full.
- ///
- /// # Panics
- ///
- /// This function cannot panic.
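-    ///
-    /// # Example
-    ///
-    /// A sketch of recovering the value after the receiver disconnects
-    /// (mirroring the `send_opt3` test below):
-    ///
-    /// ```rust
-    /// let (tx, rx) = sync_channel::<int>(1);
-    /// assert_eq!(tx.send_opt(1), Ok(()));
-    /// drop(rx);
-    /// // The receiver is gone, so the value comes back instead of panicking.
-    /// assert_eq!(tx.send_opt(2), Err(2));
-    /// ```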
- #[unstable = "this function may be renamed to send() in the future"]
- pub fn send_opt(&self, t: T) -> Result<(), T> {
- unsafe { (*self.inner.get()).send(t) }
- }
-
- /// Attempts to send a value on this channel without blocking.
- ///
- /// This method differs from `send_opt` by returning immediately if the
- /// channel's buffer is full or no receiver is waiting to acquire some
- /// data. Compared with `send_opt`, this function has two failure cases
- /// instead of one (one for disconnection, one for a full buffer).
- ///
-    /// See `SyncSender::send` for notes about whether the receiver is
-    /// guaranteed to have received the data when this function succeeds.
- ///
- /// # Panics
- ///
-    /// This function cannot panic.
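-    ///
-    /// # Example
-    ///
-    /// A sketch of the two failure cases, assuming the `Full` and
-    /// `RecvDisconnected` variants are in scope (mirroring the `try_send`
-    /// tests below):
-    ///
-    /// ```rust
-    /// let (tx, rx) = sync_channel::<int>(1);
-    /// assert_eq!(tx.try_send(1), Ok(()));
-    /// assert_eq!(tx.try_send(2), Err(Full(2))); // buffer is full
-    /// drop(rx);
-    /// assert_eq!(tx.try_send(3), Err(RecvDisconnected(3))); // receiver gone
-    /// ```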
- #[unstable = "the return type of this function is candidate for \
- modification"]
- pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
- unsafe { (*self.inner.get()).try_send(t) }
- }
-}
-
-#[unstable]
-impl<T: Send> Clone for SyncSender<T> {
- fn clone(&self) -> SyncSender<T> {
- unsafe { (*self.inner.get()).clone_chan(); }
- return SyncSender::new(self.inner.clone());
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for SyncSender<T> {
- fn drop(&mut self) {
- unsafe { (*self.inner.get()).drop_chan(); }
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Receiver
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> Receiver<T> {
- fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
- }
-
- /// Blocks waiting for a value on this receiver
- ///
- /// This function will block if necessary to wait for a corresponding send
- /// on the channel from its paired `Sender` structure. This receiver will
- /// be woken up when data is ready, and the data will be returned.
- ///
- /// # Panics
- ///
- /// Similar to channels, this method will trigger a task panic if the
- /// other end of the channel has hung up (been deallocated). The purpose of
-    /// this is to propagate panics among tasks.
- ///
- /// If a panic is not desired, then there are two options:
- ///
-    /// * If blocking is still desired, the `recv_opt` method will return
-    ///   `Err(())` when the other end hangs up
- ///
- /// * If blocking is not desired, then the `try_recv` method will attempt to
- /// peek at a value on this receiver.
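-    ///
-    /// # Example
-    ///
-    /// A minimal sketch of the blocking hand-off between two tasks
-    /// (illustrative only):
-    ///
-    /// ```rust
-    /// let (tx, rx) = channel::<int>();
-    /// spawn(proc() {
-    ///     tx.send(10);
-    /// });
-    /// assert_eq!(rx.recv(), 10); // blocks until the other task sends
-    /// ```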
- #[experimental = "this function is being considered candidate for removal \
- to adhere to the general guidelines of rust"]
- pub fn recv(&self) -> T {
- match self.recv_opt() {
- Ok(t) => t,
- Err(()) => panic!("receiving on a closed channel"),
- }
- }
-
- /// Attempts to return a pending value on this receiver without blocking
- ///
-    /// This method will never block the caller in order to wait for data to
-    /// become available. Instead, it always returns immediately, yielding
-    /// any data that happens to be pending on the channel.
- ///
- /// This is useful for a flavor of "optimistic check" before deciding to
- /// block on a receiver.
- ///
- /// # Panics
- ///
- /// This function cannot panic.
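-    ///
-    /// # Example
-    ///
-    /// A sketch of the three possible results, assuming the `Empty` and
-    /// `Disconnected` variants are in scope (mirroring the `try_recv_states`
-    /// test below):
-    ///
-    /// ```rust
-    /// let (tx, rx) = channel::<int>();
-    /// assert_eq!(rx.try_recv(), Err(Empty)); // nothing sent yet
-    /// tx.send(1);
-    /// assert_eq!(rx.try_recv(), Ok(1));
-    /// drop(tx);
-    /// assert_eq!(rx.try_recv(), Err(Disconnected));
-    /// ```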
- #[unstable = "the return type of this function may be altered"]
- pub fn try_recv(&self) -> Result<T, TryRecvError> {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(oneshot::Empty) => return Err(Empty),
- Err(oneshot::Disconnected) => return Err(Disconnected),
- Err(oneshot::Upgraded(rx)) => rx,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(stream::Empty) => return Err(Empty),
- Err(stream::Disconnected) => return Err(Disconnected),
- Err(stream::Upgraded(rx)) => rx,
- }
- }
- Shared(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(shared::Empty) => return Err(Empty),
- Err(shared::Disconnected) => return Err(Disconnected),
- }
- }
- Sync(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(sync::Empty) => return Err(Empty),
- Err(sync::Disconnected) => return Err(Disconnected),
- }
- }
- };
- unsafe {
- mem::swap(self.inner_mut(),
- new_port.inner_mut());
- }
- }
- }
-
-    /// Attempts to wait for a value on this receiver, without panicking if the
-    /// corresponding channel has hung up.
-    ///
-    /// This method will always block if there is no data available on the
-    /// receiver, but it will not panic in the case that the channel has been
-    /// deallocated.
-    ///
-    /// In other words, this function has the same semantics as the `recv`
-    /// method except for the panic aspect.
- ///
- /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
- /// the value found on the receiver is returned.
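-    ///
-    /// # Example
-    ///
-    /// A short sketch of the non-panicking behavior (illustrative only):
-    ///
-    /// ```rust
-    /// let (tx, rx) = channel::<int>();
-    /// tx.send(1);
-    /// assert_eq!(rx.recv_opt(), Ok(1));
-    /// drop(tx);
-    /// assert_eq!(rx.recv_opt(), Err(())); // hung up, but no panic
-    /// ```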
- #[unstable = "this function may be renamed to recv()"]
- pub fn recv_opt(&self) -> Result<T, ()> {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
-                        Err(oneshot::Empty) => unreachable!(),
- Err(oneshot::Disconnected) => return Err(()),
- Err(oneshot::Upgraded(rx)) => rx,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
-                        Err(stream::Empty) => unreachable!(),
- Err(stream::Disconnected) => return Err(()),
- Err(stream::Upgraded(rx)) => rx,
- }
- }
- Shared(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
-                        Err(shared::Empty) => unreachable!(),
- Err(shared::Disconnected) => return Err(()),
- }
- }
- Sync(ref p) => return unsafe { (*p.get()).recv() }
- };
- unsafe {
- mem::swap(self.inner_mut(), new_port.inner_mut());
- }
- }
- }
-
- /// Returns an iterator which will block waiting for messages, but never
- /// `panic!`. It will return `None` when the channel has hung up.
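-    ///
-    /// # Example
-    ///
-    /// A sketch of draining a channel until all senders hang up
-    /// (illustrative only):
-    ///
-    /// ```rust
-    /// let (tx, rx) = channel::<int>();
-    /// spawn(proc() {
-    ///     for i in range(0i, 3) { tx.send(i); }
-    ///     // `tx` is dropped here, which ends the iteration below
-    /// });
-    /// let total: int = rx.iter().fold(0, |a, b| a + b);
-    /// assert_eq!(total, 3);
-    /// ```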
- #[unstable]
- pub fn iter<'a>(&'a self) -> Messages<'a, T> {
- Messages { rx: self }
- }
-}
-
-impl<T: Send> select::Packet for Receiver<T> {
- fn can_recv(&self) -> bool {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).can_recv() } {
- Ok(ret) => return ret,
- Err(upgrade) => upgrade,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).can_recv() } {
- Ok(ret) => return ret,
- Err(upgrade) => upgrade,
- }
- }
- Shared(ref p) => {
- return unsafe { (*p.get()).can_recv() };
- }
- Sync(ref p) => {
- return unsafe { (*p.get()).can_recv() };
- }
- };
- unsafe {
- mem::swap(self.inner_mut(),
- new_port.inner_mut());
- }
- }
- }
-
- fn start_selection(&self, mut task: BlockedTask) -> Result<(), BlockedTask>{
- loop {
- let (t, new_port) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).start_selection(task) } {
- oneshot::SelSuccess => return Ok(()),
- oneshot::SelCanceled(task) => return Err(task),
- oneshot::SelUpgraded(t, rx) => (t, rx),
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).start_selection(task) } {
- stream::SelSuccess => return Ok(()),
- stream::SelCanceled(task) => return Err(task),
- stream::SelUpgraded(t, rx) => (t, rx),
- }
- }
- Shared(ref p) => {
- return unsafe { (*p.get()).start_selection(task) };
- }
- Sync(ref p) => {
- return unsafe { (*p.get()).start_selection(task) };
- }
- };
- task = t;
- unsafe {
- mem::swap(self.inner_mut(),
- new_port.inner_mut());
- }
- }
- }
-
- fn abort_selection(&self) -> bool {
- let mut was_upgrade = false;
- loop {
- let result = match *unsafe { self.inner() } {
- Oneshot(ref p) => unsafe { (*p.get()).abort_selection() },
- Stream(ref p) => unsafe {
- (*p.get()).abort_selection(was_upgrade)
- },
- Shared(ref p) => return unsafe {
- (*p.get()).abort_selection(was_upgrade)
- },
- Sync(ref p) => return unsafe {
- (*p.get()).abort_selection()
- },
- };
- let new_port = match result { Ok(b) => return b, Err(p) => p };
- was_upgrade = true;
- unsafe {
- mem::swap(self.inner_mut(),
- new_port.inner_mut());
- }
- }
- }
-}
-
-#[unstable]
-impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
- fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Receiver<T> {
- fn drop(&mut self) {
- match *unsafe { self.inner_mut() } {
- Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Sync(ref mut p) => unsafe { (*p.get()).drop_port(); },
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::prelude::*;
-
- use std::os;
- use super::*;
-
- pub fn stress_factor() -> uint {
- match os::getenv("RUST_TEST_STRESS") {
- Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
- None => 1,
- }
- }
-
- test!(fn smoke() {
- let (tx, rx) = channel::<int>();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn drop_full() {
- let (tx, _rx) = channel();
- tx.send(box 1i);
- })
-
- test!(fn drop_full_shared() {
- let (tx, _rx) = channel();
- drop(tx.clone());
- drop(tx.clone());
- tx.send(box 1i);
- })
-
- test!(fn smoke_shared() {
- let (tx, rx) = channel::<int>();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- let tx = tx.clone();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_threads() {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- tx.send(1);
- });
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_port_gone() {
- let (tx, rx) = channel::<int>();
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone() {
- let (tx, rx) = channel::<int>();
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone2() {
- let (tx, rx) = channel::<int>();
- drop(rx);
- let tx2 = tx.clone();
- drop(tx);
- tx2.send(1);
- } #[should_fail])
-
- test!(fn port_gone_concurrent() {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- rx.recv();
- });
- loop { tx.send(1) }
- } #[should_fail])
-
- test!(fn port_gone_concurrent_shared() {
- let (tx, rx) = channel::<int>();
- let tx2 = tx.clone();
- spawn(proc() {
- rx.recv();
- });
- loop {
- tx.send(1);
- tx2.send(1);
- }
- } #[should_fail])
-
- test!(fn smoke_chan_gone() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- rx.recv();
- } #[should_fail])
-
- test!(fn smoke_chan_gone_shared() {
- let (tx, rx) = channel::<()>();
- let tx2 = tx.clone();
- drop(tx);
- drop(tx2);
- rx.recv();
- } #[should_fail])
-
- test!(fn chan_gone_concurrent() {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- tx.send(1);
- tx.send(1);
- });
- loop { rx.recv(); }
- } #[should_fail])
-
- test!(fn stress() {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- for _ in range(0u, 10000) { tx.send(1i); }
- });
- for _ in range(0u, 10000) {
- assert_eq!(rx.recv(), 1);
- }
- })
-
- test!(fn stress_shared() {
- static AMT: uint = 10000;
- static NTHREADS: uint = 8;
- let (tx, rx) = channel::<int>();
- let (dtx, drx) = channel::<()>();
-
- spawn(proc() {
- for _ in range(0, AMT * NTHREADS) {
- assert_eq!(rx.recv(), 1);
- }
- match rx.try_recv() {
- Ok(..) => panic!(),
- _ => {}
- }
- dtx.send(());
- });
-
- for _ in range(0, NTHREADS) {
- let tx = tx.clone();
- spawn(proc() {
- for _ in range(0, AMT) { tx.send(1); }
- });
- }
- drop(tx);
- drx.recv();
- })
-
- #[test]
- fn send_from_outside_runtime() {
- let (tx1, rx1) = channel::<()>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
- let tx4 = tx3.clone();
- spawn(proc() {
- tx1.send(());
- for _ in range(0i, 40) {
- assert_eq!(rx2.recv(), 1);
- }
- tx3.send(());
- });
- rx1.recv();
- spawn(proc() {
- for _ in range(0i, 40) {
- tx2.send(1);
- }
- tx4.send(());
- });
- rx3.recv();
- rx3.recv();
- }
-
- #[test]
- fn recv_from_outside_runtime() {
- let (tx, rx) = channel::<int>();
- let (dtx, drx) = channel();
- spawn(proc() {
- for _ in range(0i, 40) {
- assert_eq!(rx.recv(), 1);
- }
- dtx.send(());
- });
- for _ in range(0u, 40) {
- tx.send(1);
- }
- drx.recv();
- }
-
- #[test]
- fn no_runtime() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
- let tx4 = tx3.clone();
- spawn(proc() {
- assert_eq!(rx1.recv(), 1);
- tx2.send(2);
- tx4.send(());
- });
- spawn(proc() {
- tx1.send(1);
- assert_eq!(rx2.recv(), 2);
- tx3.send(());
- });
- rx3.recv();
- rx3.recv();
- }
-
- test!(fn oneshot_single_thread_close_port_first() {
- // Simple test of closing without sending
- let (_tx, rx) = channel::<int>();
- drop(rx);
- })
-
- test!(fn oneshot_single_thread_close_chan_first() {
- // Simple test of closing without sending
- let (tx, _rx) = channel::<int>();
- drop(tx);
- })
-
- test!(fn oneshot_single_thread_send_port_close() {
- // Testing that the sender cleans up the payload if receiver is closed
- let (tx, rx) = channel::<Box<int>>();
- drop(rx);
- tx.send(box 0);
- } #[should_fail])
-
- test!(fn oneshot_single_thread_recv_chan_close() {
- // Receiving on a closed chan will panic
- let res = task::try(proc() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- rx.recv();
- });
-        // The task panicked while receiving, so the result is an error
- assert!(res.is_err());
- })
-
- test!(fn oneshot_single_thread_send_then_recv() {
- let (tx, rx) = channel::<Box<int>>();
- tx.send(box 10);
- assert!(rx.recv() == box 10);
- })
-
- test!(fn oneshot_single_thread_try_send_open() {
- let (tx, rx) = channel::<int>();
- assert!(tx.send_opt(10).is_ok());
- assert!(rx.recv() == 10);
- })
-
- test!(fn oneshot_single_thread_try_send_closed() {
- let (tx, rx) = channel::<int>();
- drop(rx);
- assert!(tx.send_opt(10).is_err());
- })
-
- test!(fn oneshot_single_thread_try_recv_open() {
- let (tx, rx) = channel::<int>();
- tx.send(10);
- assert!(rx.recv_opt() == Ok(10));
- })
-
- test!(fn oneshot_single_thread_try_recv_closed() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- assert!(rx.recv_opt() == Err(()));
- })
-
- test!(fn oneshot_single_thread_peek_data() {
- let (tx, rx) = channel::<int>();
-        assert_eq!(rx.try_recv(), Err(Empty));
- tx.send(10);
- assert_eq!(rx.try_recv(), Ok(10));
- })
-
- test!(fn oneshot_single_thread_peek_close() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- assert_eq!(rx.try_recv(), Err(Disconnected));
- assert_eq!(rx.try_recv(), Err(Disconnected));
- })
-
- test!(fn oneshot_single_thread_peek_open() {
- let (_tx, rx) = channel::<int>();
- assert_eq!(rx.try_recv(), Err(Empty));
- })
-
- test!(fn oneshot_multi_task_recv_then_send() {
- let (tx, rx) = channel::<Box<int>>();
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
-
- tx.send(box 10);
- })
-
- test!(fn oneshot_multi_task_recv_then_close() {
- let (tx, rx) = channel::<Box<int>>();
- spawn(proc() {
- drop(tx);
- });
- let res = task::try(proc() {
- assert!(rx.recv() == box 10);
- });
- assert!(res.is_err());
- })
-
- test!(fn oneshot_multi_thread_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- drop(rx);
- });
- drop(tx);
- }
- })
-
- test!(fn oneshot_multi_thread_send_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- drop(rx);
- });
- let _ = task::try(proc() {
- tx.send(1);
- });
- }
- })
-
- test!(fn oneshot_multi_thread_recv_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- let res = task::try(proc() {
- rx.recv();
- });
- assert!(res.is_err());
- });
- spawn(proc() {
- spawn(proc() {
- drop(tx);
- });
- });
- }
- })
-
- test!(fn oneshot_multi_thread_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel();
- spawn(proc() {
- tx.send(box 10i);
- });
- spawn(proc() {
- assert!(rx.recv() == box 10i);
- });
- }
- })
-
- test!(fn stream_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel();
-
- send(tx, 0);
- recv(rx, 0);
-
- fn send(tx: Sender<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- tx.send(box i);
- send(tx, i + 1);
- });
- }
-
- fn recv(rx: Receiver<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- assert!(rx.recv() == box i);
- recv(rx, i + 1);
- });
- }
- }
- })
-
- test!(fn recv_a_lot() {
- // Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = channel();
- for _ in range(0i, 10000) { tx.send(()); }
- for _ in range(0i, 10000) { rx.recv(); }
- })
-
- test!(fn shared_chan_stress() {
- let (tx, rx) = channel();
- let total = stress_factor() + 100;
- for _ in range(0, total) {
- let tx = tx.clone();
- spawn(proc() {
- tx.send(());
- });
- }
-
- for _ in range(0, total) {
- rx.recv();
- }
- })
-
- test!(fn test_nested_recv_iter() {
- let (tx, rx) = channel::<int>();
- let (total_tx, total_rx) = channel::<int>();
-
- spawn(proc() {
- let mut acc = 0;
- for x in rx.iter() {
- acc += x;
- }
- total_tx.send(acc);
- });
-
- tx.send(3);
- tx.send(1);
- tx.send(2);
- drop(tx);
- assert_eq!(total_rx.recv(), 6);
- })
-
- test!(fn test_recv_iter_break() {
- let (tx, rx) = channel::<int>();
- let (count_tx, count_rx) = channel();
-
- spawn(proc() {
- let mut count = 0;
- for x in rx.iter() {
- if count >= 3 {
- break;
- } else {
- count += x;
- }
- }
- count_tx.send(count);
- });
-
- tx.send(2);
- tx.send(2);
- tx.send(2);
- let _ = tx.send_opt(2);
- drop(tx);
- assert_eq!(count_rx.recv(), 4);
- })
-
- test!(fn try_recv_states() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<()>();
- let (tx3, rx3) = channel::<()>();
- spawn(proc() {
- rx2.recv();
- tx1.send(1);
- tx3.send(());
- rx2.recv();
- drop(tx1);
- tx3.send(());
- });
-
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Ok(1));
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Err(Disconnected));
- })
-
-    // This bug used to cause a livelock inside the Receiver destructor,
-    // because the internal state of the Shared packet was corrupted
- test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
- let (tx, rx) = channel();
- let (tx2, rx2) = channel();
- spawn(proc() {
- rx.recv(); // wait on a oneshot
- drop(rx); // destroy a shared
- tx2.send(());
- });
- // make sure the other task has gone to sleep
- for _ in range(0u, 5000) { task::deschedule(); }
-
- // upgrade to a shared chan and send a message
- let t = tx.clone();
- drop(tx);
- t.send(());
-
- // wait for the child task to exit before we exit
- rx2.recv();
- })
-
- test!(fn sends_off_the_runtime() {
- use rustrt::thread::Thread;
-
- let (tx, rx) = channel();
- let t = Thread::start(proc() {
- for _ in range(0u, 1000) {
- tx.send(());
- }
- });
- for _ in range(0u, 1000) {
- rx.recv();
- }
- t.join();
- })
-
- test!(fn try_recvs_off_the_runtime() {
- use rustrt::thread::Thread;
-
- let (tx, rx) = channel();
- let (cdone, pdone) = channel();
- let t = Thread::start(proc() {
- let mut hits = 0u;
- while hits < 10 {
- match rx.try_recv() {
- Ok(()) => { hits += 1; }
- Err(Empty) => { Thread::yield_now(); }
- Err(Disconnected) => return,
- }
- }
- cdone.send(());
- });
- for _ in range(0u, 10) {
- tx.send(());
- }
- t.join();
- pdone.recv();
- })
-}
-
-#[cfg(test)]
-mod sync_tests {
- use std::prelude::*;
- use std::os;
-
- pub fn stress_factor() -> uint {
- match os::getenv("RUST_TEST_STRESS") {
- Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
- None => 1,
- }
- }
-
- test!(fn smoke() {
- let (tx, rx) = sync_channel::<int>(1);
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn drop_full() {
- let (tx, _rx) = sync_channel(1);
- tx.send(box 1i);
- })
-
- test!(fn smoke_shared() {
- let (tx, rx) = sync_channel::<int>(1);
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- let tx = tx.clone();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_threads() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- tx.send(1);
- });
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_port_gone() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone2() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(rx);
- let tx2 = tx.clone();
- drop(tx);
- tx2.send(1);
- } #[should_fail])
-
- test!(fn port_gone_concurrent() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- rx.recv();
- });
- loop { tx.send(1) }
- } #[should_fail])
-
- test!(fn port_gone_concurrent_shared() {
- let (tx, rx) = sync_channel::<int>(0);
- let tx2 = tx.clone();
- spawn(proc() {
- rx.recv();
- });
- loop {
- tx.send(1);
- tx2.send(1);
- }
- } #[should_fail])
-
- test!(fn smoke_chan_gone() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- rx.recv();
- } #[should_fail])
-
- test!(fn smoke_chan_gone_shared() {
- let (tx, rx) = sync_channel::<()>(0);
- let tx2 = tx.clone();
- drop(tx);
- drop(tx2);
- rx.recv();
- } #[should_fail])
-
- test!(fn chan_gone_concurrent() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- tx.send(1);
- tx.send(1);
- });
- loop { rx.recv(); }
- } #[should_fail])
-
- test!(fn stress() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- for _ in range(0u, 10000) { tx.send(1); }
- });
- for _ in range(0u, 10000) {
- assert_eq!(rx.recv(), 1);
- }
- })
-
- test!(fn stress_shared() {
- static AMT: uint = 1000;
- static NTHREADS: uint = 8;
- let (tx, rx) = sync_channel::<int>(0);
- let (dtx, drx) = sync_channel::<()>(0);
-
- spawn(proc() {
- for _ in range(0, AMT * NTHREADS) {
- assert_eq!(rx.recv(), 1);
- }
- match rx.try_recv() {
- Ok(..) => panic!(),
- _ => {}
- }
- dtx.send(());
- });
-
- for _ in range(0, NTHREADS) {
- let tx = tx.clone();
- spawn(proc() {
- for _ in range(0, AMT) { tx.send(1); }
- });
- }
- drop(tx);
- drx.recv();
- })
-
- test!(fn oneshot_single_thread_close_port_first() {
- // Simple test of closing without sending
- let (_tx, rx) = sync_channel::<int>(0);
- drop(rx);
- })
-
- test!(fn oneshot_single_thread_close_chan_first() {
- // Simple test of closing without sending
- let (tx, _rx) = sync_channel::<int>(0);
- drop(tx);
- })
-
- test!(fn oneshot_single_thread_send_port_close() {
- // Testing that the sender cleans up the payload if receiver is closed
- let (tx, rx) = sync_channel::<Box<int>>(0);
- drop(rx);
- tx.send(box 0);
- } #[should_fail])
-
- test!(fn oneshot_single_thread_recv_chan_close() {
- // Receiving on a closed chan will panic
- let res = task::try(proc() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- rx.recv();
- });
-        // The task panicked while receiving, so the result is an error
- assert!(res.is_err());
- })
-
- test!(fn oneshot_single_thread_send_then_recv() {
- let (tx, rx) = sync_channel::<Box<int>>(1);
- tx.send(box 10);
- assert!(rx.recv() == box 10);
- })
-
- test!(fn oneshot_single_thread_try_send_open() {
- let (tx, rx) = sync_channel::<int>(1);
- assert_eq!(tx.try_send(10), Ok(()));
- assert!(rx.recv() == 10);
- })
-
- test!(fn oneshot_single_thread_try_send_closed() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(rx);
- assert_eq!(tx.try_send(10), Err(RecvDisconnected(10)));
- })
-
- test!(fn oneshot_single_thread_try_send_closed2() {
- let (tx, _rx) = sync_channel::<int>(0);
- assert_eq!(tx.try_send(10), Err(Full(10)));
- })
-
- test!(fn oneshot_single_thread_try_recv_open() {
- let (tx, rx) = sync_channel::<int>(1);
- tx.send(10);
- assert!(rx.recv_opt() == Ok(10));
- })
-
- test!(fn oneshot_single_thread_try_recv_closed() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- assert!(rx.recv_opt() == Err(()));
- })
-
- test!(fn oneshot_single_thread_peek_data() {
- let (tx, rx) = sync_channel::<int>(1);
-        assert_eq!(rx.try_recv(), Err(Empty));
- tx.send(10);
- assert_eq!(rx.try_recv(), Ok(10));
- })
-
- test!(fn oneshot_single_thread_peek_close() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- assert_eq!(rx.try_recv(), Err(Disconnected));
- assert_eq!(rx.try_recv(), Err(Disconnected));
- })
-
- test!(fn oneshot_single_thread_peek_open() {
- let (_tx, rx) = sync_channel::<int>(0);
- assert_eq!(rx.try_recv(), Err(Empty));
- })
-
- test!(fn oneshot_multi_task_recv_then_send() {
- let (tx, rx) = sync_channel::<Box<int>>(0);
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
-
- tx.send(box 10);
- })
-
- test!(fn oneshot_multi_task_recv_then_close() {
- let (tx, rx) = sync_channel::<Box<int>>(0);
- spawn(proc() {
- drop(tx);
- });
- let res = task::try(proc() {
- assert!(rx.recv() == box 10);
- });
- assert!(res.is_err());
- })
-
- test!(fn oneshot_multi_thread_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- drop(rx);
- });
- drop(tx);
- }
- })
-
- test!(fn oneshot_multi_thread_send_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- drop(rx);
- });
- let _ = task::try(proc() {
- tx.send(1);
- });
- }
- })
-
- test!(fn oneshot_multi_thread_recv_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- let res = task::try(proc() {
- rx.recv();
- });
- assert!(res.is_err());
- });
- spawn(proc() {
- spawn(proc() {
- drop(tx);
- });
- });
- }
- })
-
- test!(fn oneshot_multi_thread_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<Box<int>>(0);
- spawn(proc() {
- tx.send(box 10i);
- });
- spawn(proc() {
- assert!(rx.recv() == box 10i);
- });
- }
- })
-
- test!(fn stream_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<Box<int>>(0);
-
- send(tx, 0);
- recv(rx, 0);
-
- fn send(tx: SyncSender<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- tx.send(box i);
- send(tx, i + 1);
- });
- }
-
- fn recv(rx: Receiver<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- assert!(rx.recv() == box i);
- recv(rx, i + 1);
- });
- }
- }
- })
-
- test!(fn recv_a_lot() {
- // Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = sync_channel(10000);
- for _ in range(0u, 10000) { tx.send(()); }
- for _ in range(0u, 10000) { rx.recv(); }
- })
-
- test!(fn shared_chan_stress() {
- let (tx, rx) = sync_channel(0);
- let total = stress_factor() + 100;
- for _ in range(0, total) {
- let tx = tx.clone();
- spawn(proc() {
- tx.send(());
- });
- }
-
- for _ in range(0, total) {
- rx.recv();
- }
- })
-
- test!(fn test_nested_recv_iter() {
- let (tx, rx) = sync_channel::<int>(0);
- let (total_tx, total_rx) = sync_channel::<int>(0);
-
- spawn(proc() {
- let mut acc = 0;
- for x in rx.iter() {
- acc += x;
- }
- total_tx.send(acc);
- });
-
- tx.send(3);
- tx.send(1);
- tx.send(2);
- drop(tx);
- assert_eq!(total_rx.recv(), 6);
- })
-
- test!(fn test_recv_iter_break() {
- let (tx, rx) = sync_channel::<int>(0);
- let (count_tx, count_rx) = sync_channel(0);
-
- spawn(proc() {
- let mut count = 0;
- for x in rx.iter() {
- if count >= 3 {
- break;
- } else {
- count += x;
- }
- }
- count_tx.send(count);
- });
-
- tx.send(2);
- tx.send(2);
- tx.send(2);
- let _ = tx.try_send(2);
- drop(tx);
- assert_eq!(count_rx.recv(), 4);
- })
-
- test!(fn try_recv_states() {
- let (tx1, rx1) = sync_channel::<int>(1);
- let (tx2, rx2) = sync_channel::<()>(1);
- let (tx3, rx3) = sync_channel::<()>(1);
- spawn(proc() {
- rx2.recv();
- tx1.send(1);
- tx3.send(());
- rx2.recv();
- drop(tx1);
- tx3.send(());
- });
-
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Ok(1));
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Err(Disconnected));
- })
-
-    // This bug used to cause a livelock inside the Receiver destructor,
-    // because the internal state of the Shared packet was corrupted
- test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
- let (tx, rx) = sync_channel::<()>(0);
- let (tx2, rx2) = sync_channel::<()>(0);
- spawn(proc() {
- rx.recv(); // wait on a oneshot
- drop(rx); // destroy a shared
- tx2.send(());
- });
- // make sure the other task has gone to sleep
- for _ in range(0u, 5000) { task::deschedule(); }
-
- // upgrade to a shared chan and send a message
- let t = tx.clone();
- drop(tx);
- t.send(());
-
- // wait for the child task to exit before we exit
- rx2.recv();
- })
-
- test!(fn try_recvs_off_the_runtime() {
- use rustrt::thread::Thread;
-
- let (tx, rx) = sync_channel::<()>(0);
- let (cdone, pdone) = channel();
- let t = Thread::start(proc() {
- let mut hits = 0u;
- while hits < 10 {
- match rx.try_recv() {
- Ok(()) => { hits += 1; }
- Err(Empty) => { Thread::yield_now(); }
- Err(Disconnected) => return,
- }
- }
- cdone.send(());
- });
- for _ in range(0u, 10) {
- tx.send(());
- }
- t.join();
- pdone.recv();
- })
-
- test!(fn send_opt1() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() { rx.recv(); });
- assert_eq!(tx.send_opt(1), Ok(()));
- })
-
- test!(fn send_opt2() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() { drop(rx); });
- assert_eq!(tx.send_opt(1), Err(1));
- })
-
- test!(fn send_opt3() {
- let (tx, rx) = sync_channel::<int>(1);
- assert_eq!(tx.send_opt(1), Ok(()));
- spawn(proc() { drop(rx); });
- assert_eq!(tx.send_opt(1), Err(1));
- })
-
- test!(fn send_opt4() {
- let (tx, rx) = sync_channel::<int>(0);
- let tx2 = tx.clone();
- let (done, donerx) = channel();
- let done2 = done.clone();
- spawn(proc() {
- assert_eq!(tx.send_opt(1), Err(1));
- done.send(());
- });
- spawn(proc() {
- assert_eq!(tx2.send_opt(2), Err(2));
- done2.send(());
- });
- drop(rx);
- donerx.recv();
- donerx.recv();
- })
-
- test!(fn try_send1() {
- let (tx, _rx) = sync_channel::<int>(0);
- assert_eq!(tx.try_send(1), Err(Full(1)));
- })
-
- test!(fn try_send2() {
- let (tx, _rx) = sync_channel::<int>(1);
- assert_eq!(tx.try_send(1), Ok(()));
- assert_eq!(tx.try_send(1), Err(Full(1)));
- })
-
- test!(fn try_send3() {
- let (tx, rx) = sync_channel::<int>(1);
- assert_eq!(tx.try_send(1), Ok(()));
- drop(rx);
- assert_eq!(tx.try_send(1), Err(RecvDisconnected(1)));
- })
-
- test!(fn try_send4() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- for _ in range(0u, 1000) { task::deschedule(); }
- assert_eq!(tx.try_send(1), Ok(()));
- });
- assert_eq!(rx.recv(), 1);
- } #[ignore(reason = "flaky on libnative")])
-
- test!(fn issue_15761() {
- fn repro() {
- let (tx1, rx1) = sync_channel::<()>(3);
- let (tx2, rx2) = sync_channel::<()>(3);
-
- spawn(proc() {
- rx1.recv();
- tx2.try_send(()).unwrap();
- });
-
- tx1.try_send(()).unwrap();
- rx2.recv();
- }
-
- for _ in range(0u, 100) {
- repro()
- }
- })
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Oneshot channels/ports
-///
-/// This is the initial flavor of channels/ports used for the comm module. It is
-/// an optimization for the one-use case of a channel. The major optimization of
-/// this type is to have one and exactly one allocation when the chan/port pair
-/// is created.
-///
-/// Another possible optimization would be to not use an Arc box, because in
-/// theory we know when the shared packet can be deallocated (no real need for
-/// the atomic reference counting), but I was having trouble figuring out how
-/// to destroy the data early in the drop of a Port.
-///
-/// # Implementation
-///
-/// Oneshots are implemented around one atomic uint variable. This variable
-/// indicates the state of the port/chan and also stores any task blocked on
-/// the port. All atomic operations happen on this one word.
-///
-/// In order to upgrade a oneshot channel, an upgrade is treated as a
-/// disconnect on behalf of the channel side of things (it can be thought of
-/// as consuming the port). This upgrade is then also stored in the shared
-/// packet.
-/// The one caveat to consider is that when a port sees a disconnected channel
-/// it must check for data because there is no "data plus upgrade" state.
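-///
-/// # Example of an upgrade
-///
-/// A conceptual sketch of the upgrade path: the second send below exhausts
-/// the oneshot's single slot, forcing the channel to upgrade itself into a
-/// stream behind the scenes.
-///
-/// ```rust
-/// let (tx, rx) = channel::<int>();
-/// tx.send(1); // uses the one pre-allocated oneshot slot
-/// tx.send(2); // triggers the upgrade described above
-/// assert_eq!(rx.recv(), 1);
-/// assert_eq!(rx.recv(), 2);
-/// ```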
-
-pub use self::Failure::*;
-pub use self::UpgradeResult::*;
-pub use self::SelectionResult::*;
-use self::MyUpgrade::*;
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::mem;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
-
-use atomic;
-use comm::Receiver;
-
-// Various states you can find a port in.
-const EMPTY: uint = 0;
-const DATA: uint = 1;
-const DISCONNECTED: uint = 2;
-
-pub struct Packet<T> {
- // Internal state of the chan/port pair (stores the blocked task as well)
- state: atomic::AtomicUint,
- // One-shot data slot location
- data: Option<T>,
- // when used for the second time, a oneshot channel must be upgraded, and
- // this contains the slot for the upgrade
- upgrade: MyUpgrade<T>,
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(BlockedTask),
-}
-
-pub enum SelectionResult<T> {
- SelCanceled(BlockedTask),
- SelUpgraded(BlockedTask, Receiver<T>),
- SelSuccess,
-}
-
-enum MyUpgrade<T> {
- NothingSent,
- SendUsed,
- GoUp(Receiver<T>),
-}
-
-impl<T: Send> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- data: None,
- upgrade: NothingSent,
- state: atomic::AtomicUint::new(EMPTY),
- }
- }
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // Sanity check
- match self.upgrade {
- NothingSent => {}
- _ => panic!("sending on a oneshot that's already sent on "),
- }
- assert!(self.data.is_none());
- self.data = Some(t);
- self.upgrade = SendUsed;
-
- match self.state.swap(DATA, atomic::SeqCst) {
- // Sent the data, no one was waiting
- EMPTY => Ok(()),
-
- // Couldn't send the data, the port hung up first. Return the data
- // back up the stack.
- DISCONNECTED => {
- Err(self.data.take().unwrap())
- }
-
- // Not possible, these are one-use channels
- DATA => unreachable!(),
-
-            // Anything else means that there was a task waiting on the other
-            // end. We leave the 'DATA' state inside so that task will pick it
-            // up once it wakes.
- n => unsafe {
- let t = BlockedTask::cast_from_uint(n);
- t.wake().map(|t| t.reawaken());
- Ok(())
- }
- }
- }
-
-    // Just tests whether this channel has been sent on or not; this is only
-    // safe to use from the sender.
- pub fn sent(&self) -> bool {
- match self.upgrade {
- NothingSent => false,
- _ => true,
- }
- }
-
- pub fn recv(&mut self) -> Result<T, Failure<T>> {
- // Attempt to not block the task (it's a little expensive). If it looks
- // like we're not empty, then immediately go through to `try_recv`.
- if self.state.load(atomic::SeqCst) == EMPTY {
- let t: Box<Task> = Local::take();
- t.deschedule(1, |task| {
- let n = unsafe { task.cast_to_uint() };
- match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
- // Nothing on the channel, we legitimately block
- EMPTY => Ok(()),
-
- // If there's data or it's a disconnected channel, then we
- // failed the cmpxchg, so we just wake ourselves back up
- DATA | DISCONNECTED => {
- unsafe { Err(BlockedTask::cast_from_uint(n)) }
- }
-
- // Only one thread is allowed to sleep on this port
- _ => unreachable!()
- }
- });
- }
-
- self.try_recv()
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
- match self.state.load(atomic::SeqCst) {
- EMPTY => Err(Empty),
-
- // We saw some data on the channel, but the channel can be used
- // again to send us an upgrade. As a result, we need to re-insert
- // into the channel that there's no data available (otherwise we'll
- // just see DATA next time). This is done as a cmpxchg because if
- // the state changes under our feet we'd rather just see that state
- // change.
- DATA => {
- self.state.compare_and_swap(DATA, EMPTY, atomic::SeqCst);
- match self.data.take() {
- Some(data) => Ok(data),
- None => unreachable!(),
- }
- }
-
- // There's no guarantee that we receive before an upgrade happens,
- // and an upgrade flags the channel as disconnected, so when we see
- // this we first need to check if there's data available and *then*
- // we go through and process the upgrade.
- DISCONNECTED => {
- match self.data.take() {
- Some(data) => Ok(data),
- None => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- SendUsed | NothingSent => Err(Disconnected),
- GoUp(upgrade) => Err(Upgraded(upgrade))
- }
- }
- }
- }
- _ => unreachable!()
- }
- }
-
- // Returns whether the upgrade was completed. If the upgrade wasn't
- // completed, then the port couldn't get sent to the other half (it will
- // never receive it).
- pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
- let prev = match self.upgrade {
- NothingSent => NothingSent,
- SendUsed => SendUsed,
- _ => panic!("upgrading again"),
- };
- self.upgrade = GoUp(up);
-
- match self.state.swap(DISCONNECTED, atomic::SeqCst) {
- // If the channel is empty or has data on it, then we're good to go.
- // Senders will check the data before the upgrade (in case we
- // plastered over the DATA state).
- DATA | EMPTY => UpSuccess,
-
- // If the other end is already disconnected, then we failed the
- // upgrade. Be sure to trash the port we were given.
- DISCONNECTED => { self.upgrade = prev; UpDisconnected }
-
- // If someone's waiting, we gotta wake them up
- n => UpWoke(unsafe { BlockedTask::cast_from_uint(n) })
- }
- }
-
- pub fn drop_chan(&mut self) {
- match self.state.swap(DISCONNECTED, atomic::SeqCst) {
- DATA | DISCONNECTED | EMPTY => {}
-
- // If someone's waiting, we gotta wake them up
- n => unsafe {
- let t = BlockedTask::cast_from_uint(n);
- t.wake().map(|t| t.reawaken());
- }
- }
- }
-
- pub fn drop_port(&mut self) {
- match self.state.swap(DISCONNECTED, atomic::SeqCst) {
-            // An empty channel has nothing to do, and a remotely disconnected
-            // channel also has nothing to do because we're about to run the
-            // drop glue
- DISCONNECTED | EMPTY => {}
-
- // There's data on the channel, so make sure we destroy it promptly.
- // This is why not using an arc is a little difficult (need the box
- // to stay valid while we take the data).
- DATA => { self.data.take().unwrap(); }
-
- // We're the only ones that can block on this port
- _ => unreachable!()
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
-    // If Ok, the value is whether this port has data; if Err, then the
-    // upgraded port needs to be checked instead of this one.
- pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
- match self.state.load(atomic::SeqCst) {
- EMPTY => Ok(false), // Welp, we tried
- DATA => Ok(true), // we have some un-acquired data
- DISCONNECTED if self.data.is_some() => Ok(true), // we have data
- DISCONNECTED => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- // The other end sent us an upgrade, so we need to
- // propagate upwards whether the upgrade can receive
- // data
- GoUp(upgrade) => Err(upgrade),
-
- // If the other end disconnected without sending an
- // upgrade, then we have data to receive (the channel is
- // disconnected).
- up => { self.upgrade = up; Ok(true) }
- }
- }
- _ => unreachable!(), // we're the "one blocker"
- }
- }
-
- // Attempts to start selection on this port. This can either succeed, fail
- // because there is data, or fail because there is an upgrade pending.
- pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
- let n = unsafe { task.cast_to_uint() };
- match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
- EMPTY => SelSuccess,
- DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
- DISCONNECTED if self.data.is_some() => {
- SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
- }
- DISCONNECTED => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- // The other end sent us an upgrade, so we need to
- // propagate upwards whether the upgrade can receive
- // data
- GoUp(upgrade) => {
- SelUpgraded(unsafe { BlockedTask::cast_from_uint(n) },
- upgrade)
- }
-
- // If the other end disconnected without sending an
- // upgrade, then we have data to receive (the channel is
- // disconnected).
- up => {
- self.upgrade = up;
- SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
- }
- }
- }
- _ => unreachable!(), // we're the "one blocker"
- }
- }
-
- // Remove a previous selecting task from this port. This ensures that the
- // blocked task will no longer be visible to any other threads.
- //
- // The return value indicates whether there's data on this port.
- pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
- let state = match self.state.load(atomic::SeqCst) {
-            // Each of these states means that no further activity will happen
-            // with regard to aborting the selection
- s @ EMPTY |
- s @ DATA |
- s @ DISCONNECTED => s,
-
- // If we've got a blocked task, then use an atomic to gain ownership
- // of it (may fail)
- n => self.state.compare_and_swap(n, EMPTY, atomic::SeqCst)
- };
-
- // Now that we've got ownership of our state, figure out what to do
- // about it.
- match state {
- EMPTY => unreachable!(),
- // our task used for select was stolen
- DATA => Ok(true),
-
- // If the other end has hung up, then we have complete ownership
- // of the port. First, check if there was data waiting for us. This
- // is possible if the other end sent something and then hung up.
- //
- // We then need to check to see if there was an upgrade requested,
- // and if so, the upgraded port needs to have its selection aborted.
- DISCONNECTED => {
- if self.data.is_some() {
- Ok(true)
- } else {
- match mem::replace(&mut self.upgrade, SendUsed) {
- GoUp(port) => Err(port),
- _ => Ok(true),
- }
- }
- }
-
-            // We woke ourselves up from select, so trash the task we acquired
-            // and report that we don't have any data.
- n => {
- let t = unsafe { BlockedTask::cast_from_uint(n) };
- t.trash();
- Ok(false)
- }
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.state.load(atomic::SeqCst), DISCONNECTED);
- }
-}
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Selection over an array of receivers
-//!
-//! This module contains the implementation machinery necessary for selecting
-//! over a number of receivers. One large goal of this module is to provide an
-//! efficient interface to selecting over any receiver of any type.
-//!
-//! This is achieved through an architecture of a "receiver set" in which
-//! receivers are added to a set and then the entire set is waited on at once.
-//! The set can be waited on multiple times without re-adding each receiver
-//! to the set.
-//!
-//! Usage of this module is currently encouraged to go through the `select!`
-//! macro. This macro allows binding variables to the received values of
-//! receivers with a much more natural syntax than using the `Select`
-//! structure directly.
-//!
-//! # Example
-//!
-//! ```rust
-//! let (tx1, rx1) = channel();
-//! let (tx2, rx2) = channel();
-//!
-//! tx1.send(1i);
-//! tx2.send(2i);
-//!
-//! select! {
-//! val = rx1.recv() => {
-//! assert_eq!(val, 1i);
-//! },
-//! val = rx2.recv() => {
-//! assert_eq!(val, 2i);
-//! }
-//! }
-//! ```
-
-#![allow(dead_code)]
-#![experimental = "This implementation, while likely sufficient, is unsafe and \
- likely to be error prone. At some point in the future this \
- module will likely be replaced, and it is currently \
- unknown how much API breakage that will cause. The ability \
- to select over a number of channels will remain forever, \
- but no guarantees beyond this are being made"]
-
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::cell::Cell;
-use core::kinds::marker;
-use core::mem;
-use core::uint;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
-
-use comm::Receiver;
-
-/// The "receiver set" of the select interface. This structure is used to manage
-/// a set of receivers which are being selected over.
-pub struct Select {
- head: *mut Handle<'static, ()>,
- tail: *mut Handle<'static, ()>,
- next_id: Cell<uint>,
- marker1: marker::NoSend,
-}
-
-/// A handle to a receiver which is currently a member of a `Select` set of
-/// receivers. This handle is used to keep the receiver in the set as well as
-/// interact with the underlying receiver.
-pub struct Handle<'rx, T:'rx> {
- /// The ID of this handle, used to compare against the return value of
- /// `Select::wait()`
- id: uint,
- selector: &'rx Select,
- next: *mut Handle<'static, ()>,
- prev: *mut Handle<'static, ()>,
- added: bool,
- packet: &'rx Packet+'rx,
-
-    // due to our fun transmutes, we make sure to place this at the end
-    // (nothing previous relies on T)
- rx: &'rx Receiver<T>,
-}
-
-struct Packets { cur: *mut Handle<'static, ()> }
-
-#[doc(hidden)]
-pub trait Packet {
- fn can_recv(&self) -> bool;
- fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>;
- fn abort_selection(&self) -> bool;
-}
-
-impl Select {
-    /// Creates a new selection structure. This set is initially empty, and
-    /// `wait` will panic!() if called on an empty set.
-    ///
-    /// Usage of this struct directly can sometimes be burdensome, and usage
-    /// is much easier through the `select!` macro.
- pub fn new() -> Select {
- Select {
- marker1: marker::NoSend,
- head: 0 as *mut Handle<'static, ()>,
- tail: 0 as *mut Handle<'static, ()>,
- next_id: Cell::new(1),
- }
- }
-
-    /// Creates a new handle into this receiver set for a new receiver. Note
-    /// that this does *not* add the receiver to the receiver set; for that
-    /// you must call the `add` method on the handle itself.
- pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> {
- let id = self.next_id.get();
- self.next_id.set(id + 1);
- Handle {
- id: id,
- selector: self,
- next: 0 as *mut Handle<'static, ()>,
- prev: 0 as *mut Handle<'static, ()>,
- added: false,
- rx: rx,
- packet: rx,
- }
- }
-
- /// Waits for an event on this receiver set. The returned value is *not* an
- /// index, but rather an id. This id can be queried against any active
- /// `Handle` structures (each one has an `id` method). The handle with
- /// the matching `id` will have some sort of event available on it. The
- /// event could either be that data is available or the corresponding
- /// channel has been closed.
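-    ///
-    /// # Example
-    ///
-    /// A sketch of matching the returned id against a handle (mirroring the
-    /// `preflight` tests below):
-    ///
-    /// ```rust
-    /// let (tx, rx) = channel::<int>();
-    /// tx.send(1);
-    /// let select = Select::new();
-    /// let mut handle = select.handle(&rx);
-    /// unsafe { handle.add(); }
-    /// assert_eq!(select.wait(), handle.id());
-    /// assert_eq!(handle.recv(), 1);
-    /// ```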
- pub fn wait(&self) -> uint {
- self.wait2(true)
- }
-
- /// Helper method for skipping the preflight checks during testing
- fn wait2(&self, do_preflight_checks: bool) -> uint {
- // Note that this is currently an inefficient implementation. We in
- // theory have knowledge about all receivers in the set ahead of time,
- // so this method shouldn't really have to iterate over all of them yet
- // again. The idea with this "receiver set" interface is to get the
- // interface right this time around, and later this implementation can
- // be optimized.
- //
- // This implementation can be summarized by:
- //
- // fn select(receivers) {
- // if any receiver ready { return ready index }
- // deschedule {
- // block on all receivers
- // }
- // unblock on all receivers
- // return ready index
- // }
- //
- // Most notably, the iterations over all of the receivers shouldn't be
- // necessary.
- unsafe {
- let mut amt = 0;
- for p in self.iter() {
- amt += 1;
- if do_preflight_checks && (*p).packet.can_recv() {
- return (*p).id;
- }
- }
- assert!(amt > 0);
-
- let mut ready_index = amt;
- let mut ready_id = uint::MAX;
- let mut iter = self.iter().enumerate();
-
- // Acquire a number of blocking contexts, and block on each one
- // sequentially until one fails. If one fails, then abort
- // immediately so we can go unblock on all the other receivers.
- let task: Box<Task> = Local::take();
- task.deschedule(amt, |task| {
- // Prepare for the block
- let (i, handle) = iter.next().unwrap();
- match (*handle).packet.start_selection(task) {
- Ok(()) => Ok(()),
- Err(task) => {
- ready_index = i;
- ready_id = (*handle).id;
- Err(task)
- }
- }
- });
-
- // Abort the selection process on each receiver. If the abort
- // process returns `true`, then that means that the receiver is
- // ready to receive some data. Note that this also means that the
-            // receiver may not yet have fully read the `to_wake` field and
-            // woken us up (although the wakeup is guaranteed to fail).
- //
-            // This situation happens in the window where a sender invokes
-            // increment(), sees -1, and then decides to wake up the task. After
- // all this is done, the sending thread will set `selecting` to
- // `false`. Until this is done, we cannot return. If we were to
- // return, then a sender could wake up a receiver which has gone
- // back to sleep after this call to `select`.
- //
-            // Note that there is a fairly small window between the moment an
-            // increment() decides it should wake a thread up and the moment
-            // the `selecting` bit is set to false. For now, the
-            // implementation just spins
- // in a yield loop. This is very distasteful, but this
- // implementation is already nowhere near what it should ideally be.
-            // A rewrite should focus on avoiding a yield loop, and for now
-            // this implementation is tiding us over until a more efficient
-            // "don't iterate over everything every time" implementation.
- for handle in self.iter().take(ready_index) {
- if (*handle).packet.abort_selection() {
- ready_id = (*handle).id;
- }
- }
-
- assert!(ready_id != uint::MAX);
- return ready_id;
- }
- }
-
- fn iter(&self) -> Packets { Packets { cur: self.head } }
-}
-
-impl<'rx, T: Send> Handle<'rx, T> {
- /// Retrieve the id of this handle.
- #[inline]
- pub fn id(&self) -> uint { self.id }
-
- /// Receive a value on the underlying receiver. Has the same semantics as
- /// `Receiver.recv`
- pub fn recv(&mut self) -> T { self.rx.recv() }
-    /// Block to receive a value on the underlying receiver, returning `Ok` on
-    /// success or `Err(())` if the channel disconnects. This function has the
-    /// same semantics as `Receiver.recv_opt`
- pub fn recv_opt(&mut self) -> Result<T, ()> { self.rx.recv_opt() }
-
- /// Adds this handle to the receiver set that the handle was created from. This
- /// method can be called multiple times, but it has no effect if `add` was
- /// called previously.
- ///
- /// This method is unsafe because it requires that the `Handle` is not moved
- /// while it is added to the `Select` set.
- pub unsafe fn add(&mut self) {
- if self.added { return }
- let selector: &mut Select = mem::transmute(&*self.selector);
- let me: *mut Handle<'static, ()> = mem::transmute(&*self);
-
- if selector.head.is_null() {
- selector.head = me;
- selector.tail = me;
- } else {
- (*me).prev = selector.tail;
- assert!((*me).next.is_null());
- (*selector.tail).next = me;
- selector.tail = me;
- }
- self.added = true;
- }
-
-    /// Removes this handle from the `Select` set. This method is unsafe
-    /// because there is no guarantee that the `Handle` was not moved since
-    /// `add` was called.
- pub unsafe fn remove(&mut self) {
- if !self.added { return }
-
- let selector: &mut Select = mem::transmute(&*self.selector);
- let me: *mut Handle<'static, ()> = mem::transmute(&*self);
-
- if self.prev.is_null() {
- assert_eq!(selector.head, me);
- selector.head = self.next;
- } else {
- (*self.prev).next = self.next;
- }
- if self.next.is_null() {
- assert_eq!(selector.tail, me);
- selector.tail = self.prev;
- } else {
- (*self.next).prev = self.prev;
- }
-
- self.next = 0 as *mut Handle<'static, ()>;
- self.prev = 0 as *mut Handle<'static, ()>;
-
- self.added = false;
- }
-}
-
-#[unsafe_destructor]
-impl Drop for Select {
- fn drop(&mut self) {
- assert!(self.head.is_null());
- assert!(self.tail.is_null());
- }
-}
-
-#[unsafe_destructor]
-impl<'rx, T: Send> Drop for Handle<'rx, T> {
- fn drop(&mut self) {
- unsafe { self.remove() }
- }
-}
-
-impl Iterator<*mut Handle<'static, ()>> for Packets {
- fn next(&mut self) -> Option<*mut Handle<'static, ()>> {
- if self.cur.is_null() {
- None
- } else {
- let ret = Some(self.cur);
- unsafe { self.cur = (*self.cur).next; }
- ret
- }
- }
-}
-
-#[cfg(test)]
-#[allow(unused_imports)]
-mod test {
- use std::prelude::*;
-
- use super::super::*;
-
- // Don't use the libstd version so we can pull in the right Select structure
- // (std::comm points at the wrong one)
- macro_rules! select {
- (
- $($name:pat = $rx:ident.$meth:ident() => $code:expr),+
- ) => ({
- use comm::Select;
- let sel = Select::new();
- $( let mut $rx = sel.handle(&$rx); )+
- unsafe {
- $( $rx.add(); )+
- }
- let ret = sel.wait();
- $( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
- { unreachable!() }
- })
- }
-
- test!(fn smoke() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- tx1.send(1);
- select! (
- foo = rx1.recv() => { assert_eq!(foo, 1); },
- _bar = rx2.recv() => { panic!() }
- )
- tx2.send(2);
- select! (
- _foo = rx1.recv() => { panic!() },
- bar = rx2.recv() => { assert_eq!(bar, 2) }
- )
- drop(tx1);
- select! (
- foo = rx1.recv_opt() => { assert_eq!(foo, Err(())); },
- _bar = rx2.recv() => { panic!() }
- )
- drop(tx2);
- select! (
- bar = rx2.recv_opt() => { assert_eq!(bar, Err(())); }
- )
- })
-
- test!(fn smoke2() {
- let (_tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (_tx3, rx3) = channel::<int>();
- let (_tx4, rx4) = channel::<int>();
- let (tx5, rx5) = channel::<int>();
- tx5.send(4);
- select! (
- _foo = rx1.recv() => { panic!("1") },
- _foo = rx2.recv() => { panic!("2") },
- _foo = rx3.recv() => { panic!("3") },
- _foo = rx4.recv() => { panic!("4") },
- foo = rx5.recv() => { assert_eq!(foo, 4); }
- )
- })
-
- test!(fn closed() {
- let (_tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- drop(tx2);
-
- select! (
- _a1 = rx1.recv_opt() => { panic!() },
- a2 = rx2.recv_opt() => { assert_eq!(a2, Err(())); }
- )
- })
-
- test!(fn unblocks() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<int>();
-
- spawn(proc() {
- for _ in range(0u, 20) { task::deschedule(); }
- tx1.send(1);
- rx3.recv();
- for _ in range(0u, 20) { task::deschedule(); }
- });
-
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- _b = rx2.recv() => { panic!() }
- )
- tx3.send(1);
- select! (
- a = rx1.recv_opt() => { assert_eq!(a, Err(())); },
- _b = rx2.recv() => { panic!() }
- )
- })
-
- test!(fn both_ready() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- for _ in range(0u, 20) { task::deschedule(); }
- tx1.send(1);
- tx2.send(2);
- rx3.recv();
- });
-
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- a = rx2.recv() => { assert_eq!(a, 2); }
- )
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- a = rx2.recv() => { assert_eq!(a, 2); }
- )
- assert_eq!(rx1.try_recv(), Err(Empty));
- assert_eq!(rx2.try_recv(), Err(Empty));
- tx3.send(());
- })
-
- test!(fn stress() {
- static AMT: int = 10000;
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- for i in range(0, AMT) {
- if i % 2 == 0 {
- tx1.send(i);
- } else {
- tx2.send(i);
- }
- rx3.recv();
- }
- });
-
- for i in range(0, AMT) {
- select! (
- i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1); },
- i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2); }
- )
- tx3.send(());
- }
- })
-
- test!(fn cloning() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- rx3.recv();
- tx1.clone();
- assert_eq!(rx3.try_recv(), Err(Empty));
- tx1.send(2);
- rx3.recv();
- });
-
- tx3.send(());
- select!(
- _i1 = rx1.recv() => {},
- _i2 = rx2.recv() => panic!()
- )
- tx3.send(());
- })
-
- test!(fn cloning2() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- rx3.recv();
- tx1.clone();
- assert_eq!(rx3.try_recv(), Err(Empty));
- tx1.send(2);
- rx3.recv();
- });
-
- tx3.send(());
- select!(
- _i1 = rx1.recv() => {},
- _i2 = rx2.recv() => panic!()
- )
- tx3.send(());
- })
-
- test!(fn cloning3() {
- let (tx1, rx1) = channel::<()>();
- let (tx2, rx2) = channel::<()>();
- let (tx3, rx3) = channel::<()>();
- spawn(proc() {
- let s = Select::new();
- let mut h1 = s.handle(&rx1);
- let mut h2 = s.handle(&rx2);
- unsafe { h2.add(); }
- unsafe { h1.add(); }
- assert_eq!(s.wait(), h2.id);
- tx3.send(());
- });
-
- for _ in range(0u, 1000) { task::deschedule(); }
- drop(tx1.clone());
- tx2.send(());
- rx3.recv();
- })
-
- test!(fn preflight1() {
- let (tx, rx) = channel();
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight2() {
- let (tx, rx) = channel();
- tx.send(());
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight3() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight4() {
- let (tx, rx) = channel();
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight5() {
- let (tx, rx) = channel();
- tx.send(());
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight6() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight7() {
- let (tx, rx) = channel::<()>();
- drop(tx);
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight8() {
- let (tx, rx) = channel();
- tx.send(());
- drop(tx);
- rx.recv();
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight9() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- drop(tx);
- rx.recv();
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn oneshot_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0u, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn stream_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- tx1.send(());
- tx1.send(());
- rx1.recv();
- rx1.recv();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0u, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn shared_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- drop(tx1.clone());
- tx1.send(());
- rx1.recv();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0u, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn sync1() {
- let (tx, rx) = sync_channel::<int>(1);
- tx.send(1);
- select! {
- n = rx.recv() => { assert_eq!(n, 1); }
- }
- })
-
- test!(fn sync2() {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- for _ in range(0u, 100) { task::deschedule() }
- tx.send(1);
- });
- select! {
- n = rx.recv() => { assert_eq!(n, 1); }
- }
- })
-
- test!(fn sync3() {
- let (tx1, rx1) = sync_channel::<int>(0);
- let (tx2, rx2): (Sender<int>, Receiver<int>) = channel();
- spawn(proc() { tx1.send(1); });
- spawn(proc() { tx2.send(2); });
- select! {
- n = rx1.recv() => {
- assert_eq!(n, 1);
- assert_eq!(rx2.recv(), 2);
- },
- n = rx2.recv() => {
- assert_eq!(n, 2);
- assert_eq!(rx1.recv(), 1);
- }
- }
- })
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Shared channels
-///
-/// This is the flavor of channels which are not necessarily optimized for any
-/// particular use case, but are the most general in how they are used. Shared
-/// channels are cloneable allowing for multiple senders.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module. You'll also note that the implementation of the shared and stream
-/// channels are quite similar, and this is no coincidence!
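-///
-/// As a minimal sketch of the multi-sender usage this flavor enables
-/// (assuming the era's `channel`/`spawn` API; illustrative only):
-///
-///     let (tx, rx) = channel::<int>();
-///     for _ in range(0u, 4) {
-///         let tx = tx.clone(); // cloning forces the shared flavor
-///         spawn(proc() { tx.send(1); });
-///     }
-///     let mut sum = 0;
-///     for _ in range(0u, 4) { sum += rx.recv(); }
-///     assert_eq!(sum, 4);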
-
-pub use self::Failure::*;
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::cmp;
-use core::int;
-use rustrt::local::Local;
-use rustrt::mutex::NativeMutex;
-use rustrt::task::{Task, BlockedTask};
-use rustrt::thread::Thread;
-
-use atomic;
-use mpsc_queue as mpsc;
-
-const DISCONNECTED: int = int::MIN;
-const FUDGE: int = 1024;
-#[cfg(test)]
-const MAX_STEALS: int = 5;
-#[cfg(not(test))]
-const MAX_STEALS: int = 1 << 20;
-
-pub struct Packet<T> {
- queue: mpsc::Queue<T>,
- cnt: atomic::AtomicInt, // How many items are on this channel
- steals: int, // How many times has a port received without blocking?
- to_wake: atomic::AtomicUint, // Task to wake up
-
- // The number of channels which are currently using this packet.
- channels: atomic::AtomicInt,
-
- // See the discussion in Port::drop and the channel send methods for what
- // these are used for
- port_dropped: atomic::AtomicBool,
- sender_drain: atomic::AtomicInt,
-
- // this lock protects various portions of this implementation during
- // select()
- select_lock: NativeMutex,
-}
-
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-impl<T: Send> Packet<T> {
- // Creation of a packet *must* be followed by a call to postinit_lock
- // and later by inherit_blocker
- pub fn new() -> Packet<T> {
- let p = Packet {
- queue: mpsc::Queue::new(),
- cnt: atomic::AtomicInt::new(0),
- steals: 0,
- to_wake: atomic::AtomicUint::new(0),
- channels: atomic::AtomicInt::new(2),
- port_dropped: atomic::AtomicBool::new(false),
- sender_drain: atomic::AtomicInt::new(0),
- select_lock: unsafe { NativeMutex::new() },
- };
- return p;
- }
-
- // This function should be used after the newly created Packet has
- // been wrapped in an Arc. Otherwise the mutex data would be
- // duplicated while cloning, which could cause problems on platforms
- // where it is represented by an opaque data structure.
- pub fn postinit_lock(&mut self) {
- unsafe { self.select_lock.lock_noguard() }
- }
-
- // This function is used at the creation of a shared packet to inherit a
- // previously blocked task. This is done to prevent spurious wakeups of
- // tasks in select().
- //
- // This can only be called at channel-creation time
- pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
- match task {
- Some(task) => {
- assert_eq!(self.cnt.load(atomic::SeqCst), 0);
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- self.to_wake.store(unsafe { task.cast_to_uint() },
- atomic::SeqCst);
- self.cnt.store(-1, atomic::SeqCst);
-
- // This store is a little sketchy. What's happening here is
- // that we're transferring a blocker from a oneshot or stream
- // channel to this shared channel. In doing so, we never
- // spuriously wake them up and rather only wake them up at the
- // appropriate time. This implementation of shared channels
- // assumes that any blocking recv() will undo the increment of
- // steals performed in try_recv() once the recv is complete.
- // This thread that we're inheriting, however, is not in the
- // middle of recv. Hence, the first time we wake them up,
- // they're going to wake up from their old port, move on to the
- // upgraded port, and then call the block recv() function.
- //
- // When calling this function, they'll find there's data
- // immediately available, counting it as a steal. This in fact
- // wasn't a steal because we appropriately blocked them waiting
- // for data.
- //
- // To offset this bad increment, we initially set the steal
- // count to -1. You'll find some special code in
- // abort_selection() as well to ensure that this -1 steal count
- // doesn't escape too far.
- self.steals = -1;
- }
- None => {}
- }
-
- // When the shared packet is constructed, we grabbed this lock. The
- // purpose of this lock is to ensure that abort_selection() doesn't
- // interfere with this method. After we unlock this lock, we're
- // signifying that we're done modifying self.cnt and self.to_wake and
- // the port is ready for the world to continue using it.
- unsafe { self.select_lock.unlock_noguard() }
- }
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // See Port::drop for what's going on
- if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
-
- // Note that the multiple sender case is a little trickier
- // semantically than the single sender case. The logic for
- // incrementing is "add and if disconnected store disconnected".
- // This could end up leading some senders to believe that there
- // wasn't a disconnect if in fact there was a disconnect. This means
- // that while one thread is attempting to re-store the disconnected
- // states, other threads could walk through merrily incrementing
- // this very-negative disconnected count. To prevent senders from
- // spuriously attempting to send when the channel is actually
- // disconnected, the count has a ranged check here.
- //
- // This is also done for another reason. Remember that the return
- // value of this function is:
- //
- // `Ok(())` == the data *may* be received (this essentially has no
- // meaning)
- // `Err(t)` == the data will *never* be received (this has a lot of
- // meaning)
- //
- // In the SPSC case, we have a check of 'queue.is_empty()' to see
- // whether the data was actually received, but this same condition
- // means nothing in a multi-producer context. As a result, this
- // preflight check serves as the definitive "this will never be
- // received". Once we get beyond this check, we have permanently
- // entered the realm of "this may be received"
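- //
- // Concretely: with FUDGE = 1024, any count within 1024 of
- // DISCONNECTED is treated as disconnected, which tolerates up to
- // 1024 racing senders incrementing past the sentinel before one of
- // them re-stores DISCONNECTED.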
- if self.cnt.load(atomic::SeqCst) < DISCONNECTED + FUDGE {
- return Err(t)
- }
-
- self.queue.push(t);
- match self.cnt.fetch_add(1, atomic::SeqCst) {
- -1 => {
- self.take_to_wake().wake().map(|t| t.reawaken());
- }
-
- // In this case, we have possibly failed to send our data, and
- // we need to consider re-popping the data in order to fully
- // destroy it. We must arbitrate among the multiple senders,
- // however, because the queues that we're using are
- // single-consumer queues. In order to do this, all exiting
- // pushers will use an atomic count in order to count those
- // flowing through. Pushers who see 0 are required to drain as
- // much as possible, and then can only exit when they are the
- // only pusher (otherwise they must try again).
- n if n < DISCONNECTED + FUDGE => {
- // see the comment in 'try' for a shared channel for why this
- // window of "not disconnected" is ok.
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
-
- if self.sender_drain.fetch_add(1, atomic::SeqCst) == 0 {
- loop {
- // drain the queue, for info on the thread yield see the
- // discussion in try_recv
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => {}
- mpsc::Empty => break,
- mpsc::Inconsistent => Thread::yield_now(),
- }
- }
- // maybe we're done, if we're not the last ones
- // here, then we need to go try again.
- if self.sender_drain.fetch_sub(1, atomic::SeqCst) == 1 {
- break
- }
- }
-
- // At this point, there may still be data on the queue,
- // but only if the count hasn't been incremented and
- // some other sender hasn't finished pushing data just
- // yet. That sender in question will drain its own data.
- }
- }
-
- // Can't make any assumptions about this case like in the SPSC case.
- _ => {}
- }
-
- Ok(())
- }
-
- pub fn recv(&mut self) -> Result<T, Failure> {
- // This code is essentially the exact same as that found in the stream
- // case (see stream.rs)
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- let task: Box<Task> = Local::take();
- task.deschedule(1, |task| {
- self.decrement(task)
- });
-
- match self.try_recv() {
- data @ Ok(..) => { self.steals -= 1; data }
- data => data,
- }
- }
-
- // Essentially the exact same thing as the stream decrement function.
- fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- let n = unsafe { task.cast_to_uint() };
- self.to_wake.store(n, atomic::SeqCst);
-
- let steals = self.steals;
- self.steals = 0;
-
- match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
- DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 { return Ok(()) }
- }
- }
-
- self.to_wake.store(0, atomic::SeqCst);
- Err(unsafe { BlockedTask::cast_from_uint(n) })
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure> {
- let ret = match self.queue.pop() {
- mpsc::Data(t) => Some(t),
- mpsc::Empty => None,
-
- // This is a bit of an interesting case. The channel is
- // reported as having data available, but our pop() has
- // failed due to the queue being in an inconsistent state.
- // This means that there is some pusher somewhere which has
- // yet to complete, but we are guaranteed that a pop will
- // eventually succeed. In this case, we spin in a yield loop
- // because the remote sender should finish their enqueue
- // operation "very quickly".
- //
- // Avoiding this yield loop would require a different queue
- // abstraction which provides the guarantee that after M
- // pushes have succeeded, at least M pops will succeed. The
- // current queues guarantee that if there are N active
- // pushes, you can pop N times once all N have finished.
- mpsc::Inconsistent => {
- let data;
- loop {
- Thread::yield_now();
- match self.queue.pop() {
- mpsc::Data(t) => { data = t; break }
- mpsc::Empty => panic!("inconsistent => empty"),
- mpsc::Inconsistent => {}
- }
- }
- Some(data)
- }
- };
- match ret {
- // See the discussion in the stream implementation for why we
- // might decrement steals.
- Some(data) => {
- if self.steals > MAX_STEALS {
- match self.cnt.swap(0, atomic::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
- }
- n => {
- let m = cmp::min(n, self.steals);
- self.steals -= m;
- self.bump(n - m);
- }
- }
- assert!(self.steals >= 0);
- }
- self.steals += 1;
- Ok(data)
- }
-
- // See the discussion in the stream implementation for why we try
- // again.
- None => {
- match self.cnt.load(atomic::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
- _ => {
- match self.queue.pop() {
- mpsc::Data(t) => Ok(t),
- mpsc::Empty => Err(Disconnected),
- // with no senders, an inconsistency is impossible.
- mpsc::Inconsistent => unreachable!(),
- }
- }
- }
- }
- }
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&mut self) {
- self.channels.fetch_add(1, atomic::SeqCst);
- }
-
- // Decrement the reference count on a channel. This is called whenever a
- // Chan is dropped and may end up waking up a receiver. It's the receiver's
- // responsibility on the other end to figure out that we've disconnected.
- pub fn drop_chan(&mut self) {
- match self.channels.fetch_sub(1, atomic::SeqCst) {
- 1 => {}
- n if n > 1 => return,
- n => panic!("bad number of channels left {}", n),
- }
-
- match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
- -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
- DISCONNECTED => {}
- n => { assert!(n >= 0); }
- }
- }
-
- // See the long discussion inside of stream.rs for why the queue is drained,
- // and why it is done in this fashion.
- pub fn drop_port(&mut self) {
- self.port_dropped.store(true, atomic::SeqCst);
- let mut steals = self.steals;
- while {
- let cnt = self.cnt.compare_and_swap(
- steals, DISCONNECTED, atomic::SeqCst);
- cnt != DISCONNECTED && cnt != steals
- } {
- // See the discussion in 'try_recv' for why we yield
- // control of this thread.
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => { steals += 1; }
- mpsc::Empty | mpsc::Inconsistent => break,
- }
- }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&mut self) -> BlockedTask {
- let task = self.to_wake.load(atomic::SeqCst);
- self.to_wake.store(0, atomic::SeqCst);
- assert!(task != 0);
- unsafe { BlockedTask::cast_from_uint(task) }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Helper function for select, tests whether this port can receive without
- // blocking (obviously not an atomic decision).
- //
- // This differs from the stream version because there's no need to peek
- // at the queue; we can just look at the local count.
- pub fn can_recv(&mut self) -> bool {
- let cnt = self.cnt.load(atomic::SeqCst);
- cnt == DISCONNECTED || cnt - self.steals > 0
- }
-
- // increment the count on the channel (used for selection)
- fn bump(&mut self, amt: int) -> int {
- match self.cnt.fetch_add(amt, atomic::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
- DISCONNECTED
- }
- n => n
- }
- }
-
- // Inserts the blocked task for selection on this port, returning it back if
- // the port already has data on it.
- //
- // The code here is the same as in stream.rs, except that it doesn't need to
- // peek at the channel to see if an upgrade is pending.
- pub fn start_selection(&mut self,
- task: BlockedTask) -> Result<(), BlockedTask> {
- match self.decrement(task) {
- Ok(()) => Ok(()),
- Err(task) => {
- let prev = self.bump(1);
- assert!(prev == DISCONNECTED || prev >= 0);
- return Err(task);
- }
- }
- }
-
- // Cancels a previous task waiting on this port, returning whether there's
- // data on the port.
- //
- // This is similar to the stream implementation (hence fewer comments), but
- // uses a different value for the "steals" variable.
- pub fn abort_selection(&mut self, _was_upgrade: bool) -> bool {
- // Before we do anything else, we bounce on this lock. The reason for
- // doing this is to ensure that any upgrade-in-progress is gone and
- // done with. Without this bounce, we can race with inherit_blocker
- // about looking at and dealing with to_wake. Once we have acquired the
- // lock, we are guaranteed that inherit_blocker is done.
- unsafe {
- let _guard = self.select_lock.lock();
- }
-
- // Like the stream implementation, we want to make sure that the count
- // on the channel goes non-negative. We don't know how negative the
- // stream currently is, so instead of using a steal value of 1, we load
- // the channel count and figure out what we should do to make it
- // positive.
- let steals = {
- let cnt = self.cnt.load(atomic::SeqCst);
- if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
- };
- let prev = self.bump(steals + 1);
-
- if prev == DISCONNECTED {
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- true
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
- if prev < 0 {
- self.take_to_wake().trash();
- } else {
- while self.to_wake.load(atomic::SeqCst) != 0 {
- Thread::yield_now();
- }
- }
- // if the number of steals is -1, it was the pre-emptive -1 steal
- // count from when we inherited a blocker. This is fine because
- // we're just going to overwrite it with a real value.
- assert!(self.steals == 0 || self.steals == -1);
- self.steals = steals;
- prev >= 0
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed without also removing
- // the `to_wake` assert.
- assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- assert_eq!(self.channels.load(atomic::SeqCst), 0);
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Stream channels
-///
-/// This is the flavor of channels which are optimized for one sender and one
-/// receiver. The sender will be upgraded to a shared channel if the channel is
-/// cloned.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module.
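-///
-/// A sketch of the upgrade path described above (assuming the era's
-/// `channel` API; illustrative only):
-///
-///     let (tx, rx) = channel::<int>();
-///     tx.send(1);           // stream flavor: one sender, one receiver
-///     let tx2 = tx.clone(); // cloning triggers the upgrade to a shared channel
-///     tx2.send(2);
-///     assert_eq!(rx.recv(), 1);
-///     assert_eq!(rx.recv(), 2);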
-
-pub use self::Failure::*;
-pub use self::UpgradeResult::*;
-pub use self::SelectionResult::*;
-use self::Message::*;
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::cmp;
-use core::int;
-use rustrt::local::Local;
-use rustrt::task::{Task, BlockedTask};
-use rustrt::thread::Thread;
-
-use atomic;
-use comm::Receiver;
-use spsc_queue as spsc;
-
-const DISCONNECTED: int = int::MIN;
-#[cfg(test)]
-const MAX_STEALS: int = 5;
-#[cfg(not(test))]
-const MAX_STEALS: int = 1 << 20;
-
-pub struct Packet<T> {
- queue: spsc::Queue<Message<T>>, // internal queue for all messages
-
- cnt: atomic::AtomicInt, // How many items are on this channel
- steals: int, // How many times has a port received without blocking?
- to_wake: atomic::AtomicUint, // Task to wake up
-
- port_dropped: atomic::AtomicBool, // flag if the channel has been destroyed.
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(BlockedTask),
-}
-
-pub enum SelectionResult<T> {
- SelSuccess,
- SelCanceled(BlockedTask),
- SelUpgraded(BlockedTask, Receiver<T>),
-}
-
-// Any message could contain an "upgrade request" to a new shared port, so the
-// internal queue is not a queue of T, but rather a queue of Message<T>
-enum Message<T> {
- Data(T),
- GoUp(Receiver<T>),
-}
-
-impl<T: Send> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- queue: unsafe { spsc::Queue::new(128) },
-
- cnt: atomic::AtomicInt::new(0),
- steals: 0,
- to_wake: atomic::AtomicUint::new(0),
-
- port_dropped: atomic::AtomicBool::new(false),
- }
- }
-
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // If the other port has deterministically gone away, then definitely
- // must return the data back up the stack. Otherwise, the data is
- // considered as being sent.
- if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
-
- match self.do_send(Data(t)) {
- UpSuccess | UpDisconnected => {},
- UpWoke(task) => { task.wake().map(|t| t.reawaken()); }
- }
- Ok(())
- }
- pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
- // If the port has gone away, then there's no need to proceed any
- // further.
- if self.port_dropped.load(atomic::SeqCst) { return UpDisconnected }
-
- self.do_send(GoUp(up))
- }
-
- fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
- self.queue.push(t);
- match self.cnt.fetch_add(1, atomic::SeqCst) {
- // As described in the mod's doc comment, -1 == wakeup
- -1 => UpWoke(self.take_to_wake()),
- // As described before, SPSC queues must be >= -2
- -2 => UpSuccess,
-
- // Be sure to preserve the disconnected state; the return value in
- // this case indicates whether our data was received or not, which
- // manifests itself in whether or not the queue is empty.
- //
- // Primarily, we are required to drain the queue here because the
- // port will never remove this data. We can only have at most one
- // item to drain (the port drains the rest).
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
- let first = self.queue.pop();
- let second = self.queue.pop();
- assert!(second.is_none());
-
- match first {
- Some(..) => UpSuccess, // we failed to send the data
- None => UpDisconnected, // we successfully sent data
- }
- }
-
- // Otherwise we just sent some data on a non-waiting queue, so just
- // make sure the world is sane and carry on!
- n => { assert!(n >= 0); UpSuccess }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&mut self) -> BlockedTask {
- let task = self.to_wake.load(atomic::SeqCst);
- self.to_wake.store(0, atomic::SeqCst);
- assert!(task != 0);
- unsafe { BlockedTask::cast_from_uint(task) }
- }
-
- // Decrements the count on the channel for a sleeper, returning the sleeper
- // back if it shouldn't sleep. Note that this is the location where we take
- // steals into account.
- fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- let n = unsafe { task.cast_to_uint() };
- self.to_wake.store(n, atomic::SeqCst);
-
- let steals = self.steals;
- self.steals = 0;
-
- match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
- DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 { return Ok(()) }
- }
- }
-
- self.to_wake.store(0, atomic::SeqCst);
- Err(unsafe { BlockedTask::cast_from_uint(n) })
- }
-
- pub fn recv(&mut self) -> Result<T, Failure<T>> {
- // Optimistic preflight check (scheduling is expensive).
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- // Welp, our channel has no data. Deschedule the current task and
- // initiate the blocking protocol.
- let task: Box<Task> = Local::take();
- task.deschedule(1, |task| {
- self.decrement(task)
- });
-
- match self.try_recv() {
- // Messages which actually popped from the queue shouldn't count as
- // a steal, so offset the decrement here (we already have our
- // "steal" factored into the channel count above).
- data @ Ok(..) |
- data @ Err(Upgraded(..)) => {
- self.steals -= 1;
- data
- }
-
- data => data,
- }
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
- match self.queue.pop() {
- // If we stole some data, record to that effect (this will be
- // factored into cnt later on).
- //
- // Note that we don't allow steals to grow without bound in order to
- // prevent eventual overflow of either steals or cnt as an overflow
- // would have catastrophic results. Sometimes, steals > cnt, but
- // other times cnt > steals, so we don't know the relation between
- // steals and cnt. This code path is executed only rarely, so we do
- // a pretty slow operation, of swapping 0 into cnt, taking steals
- // down as much as possible (without going negative), and then
- // adding back in whatever we couldn't factor into steals.
- Some(data) => {
- if self.steals > MAX_STEALS {
- match self.cnt.swap(0, atomic::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
- }
- n => {
- let m = cmp::min(n, self.steals);
- self.steals -= m;
- self.bump(n - m);
- }
- }
- assert!(self.steals >= 0);
- }
- self.steals += 1;
- match data {
- Data(t) => Ok(t),
- GoUp(up) => Err(Upgraded(up)),
- }
- }
-
- None => {
- match self.cnt.load(atomic::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
-
- // This is a little bit of a tricky case. We failed to pop
- // data above, and then we have viewed that the channel is
- // disconnected. In this window more data could have been
- // sent on the channel. It doesn't really make sense to
- // return that the channel is disconnected when there's
- // actually data on it, so be extra sure there's no data by
- // popping one more time.
- //
- // We can ignore steals because the other end is
- // disconnected and we'll never need to really factor in our
- // steals again.
- _ => {
- match self.queue.pop() {
- Some(Data(t)) => Ok(t),
- Some(GoUp(up)) => Err(Upgraded(up)),
- None => Err(Disconnected),
- }
- }
- }
- }
- }
- }
-
- pub fn drop_chan(&mut self) {
- // Dropping a channel is pretty simple, we just flag it as disconnected
- // and then wakeup a blocker if there is one.
- match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
- -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
- DISCONNECTED => {}
- n => { assert!(n >= 0); }
- }
- }
-
- pub fn drop_port(&mut self) {
- // Dropping a port seems like a fairly trivial thing. In theory all we
- // need to do is flag that we're disconnected and then everything else
- // can take over (we don't have anyone to wake up).
- //
- // The catch for Ports is that we want to drop the entire contents of
- // the queue. There are multiple reasons for having this property, the
- // largest of which is that if another chan is waiting in this channel
- // (but not received yet), then waiting on that port will cause a
- // deadlock.
- //
- // So if we accept that we must now destroy the entire contents of the
- // queue, this code may make a bit more sense. The tricky part is that
- // we can't let any in-flight sends go un-dropped, we have to make sure
- // *everything* is dropped and nothing new will come onto the channel.
-
- // The first thing we do is set a flag saying that we're done for. All
- // sends are gated on this flag, so we're immediately guaranteed that
- // there are a bounded number of active sends that we'll have to deal
- // with.
- self.port_dropped.store(true, atomic::SeqCst);
-
- // Now that we're guaranteed to deal with a bounded number of senders,
- // we need to drain the queue. This draining process happens atomically
- // with respect to the "count" of the channel. If the count is nonzero
- // (with steals taken into account), then there must be data on the
- // channel. In this case we drain everything and then try again. We will
- // continue to fail while active senders send data while we're dropping
- // data, but eventually we're guaranteed to break out of this loop
- // (because there is a bounded number of senders).
- let mut steals = self.steals;
- while {
- let cnt = self.cnt.compare_and_swap(
- steals, DISCONNECTED, atomic::SeqCst);
- cnt != DISCONNECTED && cnt != steals
- } {
- loop {
- match self.queue.pop() {
- Some(..) => { steals += 1; }
- None => break
- }
- }
- }
-
- // At this point in time, we have gated all future senders from sending,
- // and we have flagged the channel as being disconnected. The senders
- // still have some responsibility, however, because some sends may not
- // complete until after we flag the disconnection. There are more
- // details in the sending methods that see DISCONNECTED
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Tests to see whether this port can receive without blocking. If Ok is
- // returned, then that's the answer. If Err is returned, then the returned
- // port needs to be queried instead (an upgrade happened)
- pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
- // We peek at the queue to see if there's anything on it, and we use
- // this return value to determine if we should pop from the queue and
- // upgrade this channel immediately. If it looks like we've got an
- // upgrade pending, then go through the whole recv rigamarole to update
- // the internal state.
- match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.recv() {
- Err(Upgraded(port)) => Err(port),
- _ => unreachable!(),
- }
- }
- Some(..) => Ok(true),
- None => Ok(false)
- }
- }
-
- // increment the count on the channel (used for selection)
- fn bump(&mut self, amt: int) -> int {
- match self.cnt.fetch_add(amt, atomic::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomic::SeqCst);
- DISCONNECTED
- }
- n => n
- }
- }
-
- // Attempts to start selecting on this port. Like a oneshot, this can fail
- // immediately because of an upgrade.
- pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
- match self.decrement(task) {
- Ok(()) => SelSuccess,
- Err(task) => {
- let ret = match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.queue.pop() {
- Some(GoUp(port)) => SelUpgraded(task, port),
- _ => unreachable!(),
- }
- }
- Some(..) => SelCanceled(task),
- None => SelCanceled(task),
- };
- // Undo our decrement above, and we should be guaranteed that the
- // previous value is positive because we're not going to sleep
- let prev = self.bump(1);
- assert!(prev == DISCONNECTED || prev >= 0);
- return ret;
- }
- }
- }
-
- // Removes a previous task from being blocked in this port
- pub fn abort_selection(&mut self,
- was_upgrade: bool) -> Result<bool, Receiver<T>> {
- // If we're aborting selection after upgrading from a oneshot, then
- // we're guaranteed that no one is waiting. The only way that we could
- // have seen the upgrade is if data was actually sent on the channel
- // half again. For us, this means that there is guaranteed to be data on
- // this channel. Furthermore, we're guaranteed that there was no
- // start_selection previously, so there's no need to modify `self.cnt`
- // at all.
- //
- // Hence, because of these invariants, we immediately return `Ok(true)`.
- // Note that the data may not actually be sent on the channel just yet.
- // The other end could have flagged the upgrade but not sent data to
- // this end. This is fine because we know it's a small, bounded window
- // of time until the data is actually sent.
- if was_upgrade {
- assert_eq!(self.steals, 0);
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- return Ok(true)
- }
-
- // We want to make sure that the count on the channel goes non-negative,
- // and in the stream case we can have at most one steal, so just assume
- // that we had one steal.
- let steals = 1;
- let prev = self.bump(steals + 1);
-
- // If we were previously disconnected, then we know for sure that there
- // is no task in to_wake, so just keep going
- let has_data = if prev == DISCONNECTED {
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- true // there is data, that data is that we're disconnected
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
-
- // If the previous count was negative, then we just made things go
- // positive, hence we passed the -1 boundary and we're responsible
- // for removing the to_wake() field and trashing it.
- //
- // If the previous count was positive then we're in a tougher
- // situation. A possible race is that a sender just incremented
- // through -1 (meaning it's going to try to wake a task up), but it
- // hasn't yet read the to_wake. In order to prevent a future recv()
- // from waking up too early (this sender picking up the plastered
- // over to_wake), we spin loop here waiting for to_wake to be 0.
- // Note that this entire select() implementation needs an overhaul,
- // and this is *not* the worst part of it, so this is not done as a
- // final solution but rather out of necessity for now to get
- // something working.
- if prev < 0 {
- self.take_to_wake().trash();
- } else {
- while self.to_wake.load(atomic::SeqCst) != 0 {
- Thread::yield_now();
- }
- }
- assert_eq!(self.steals, 0);
- self.steals = steals;
-
- // if we were previously positive, then there's surely data to
- // receive
- prev >= 0
- };
-
- // Now that we've determined that this queue "has data", we peek at the
- // queue to see if the data is an upgrade or not. If it's an upgrade,
- // then we need to destroy this port and abort selection on the
- // upgraded port.
- if has_data {
- match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.queue.pop() {
- Some(GoUp(port)) => Err(port),
- _ => unreachable!(),
- }
- }
- _ => Ok(true),
- }
- } else {
- Ok(false)
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed without also removing
- // the `to_wake` assert.
- assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
- assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Synchronous channels/ports
-///
-/// This channel implementation differs significantly from the asynchronous
-/// implementations found next to it (oneshot/stream/shared). This is an
-/// implementation of a synchronous, bounded buffer channel.
-///
-/// Each channel is created with some amount of backing buffer, and sends will
-/// *block* until buffer space becomes available. A buffer size of 0 is valid,
-/// which means that every successful send is paired with a successful recv.
-///
-/// This flavor of channels defines a new `send_opt` method, which sends a
-/// message without panicking the task if the message cannot be delivered.
-///
-/// Another major difference is that send() will *always* return back the data
-/// if it couldn't be sent. This is because it is deterministically known when
-/// the data is received and when it is not received.
-///
-/// Implementation-wise, it can all be summed up with "use a mutex plus some
-/// logic". The mutex used here is an OS native mutex, meaning that no user code
-/// is run inside of the mutex (to prevent context switching). This
-/// implementation shares almost all code for the buffered and unbuffered cases
-/// of a synchronous channel. There are a few branches for the unbuffered case,
-/// but they're mostly just relevant to blocking senders.
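-///
-/// A sketch of the 0-capacity rendezvous behavior (assuming the era's
-/// `sync_channel`/`spawn` API; illustrative only):
-///
-///     let (tx, rx) = sync_channel::<int>(0);
-///     spawn(proc() {
-///         tx.send(1); // blocks until the receiver below arrives
-///     });
-///     assert_eq!(rx.recv(), 1);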
-
-use core::prelude::*;
-
-pub use self::Failure::*;
-use self::Blocker::*;
-
-use alloc::boxed::Box;
-use collections::Vec;
-use core::mem;
-use core::cell::UnsafeCell;
-use rustrt::local::Local;
-use rustrt::mutex::{NativeMutex, LockGuard};
-use rustrt::task::{Task, BlockedTask};
-
-use atomic;
-
-pub struct Packet<T> {
- /// Only field outside of the mutex. Just done for kicks, but mainly because
- /// the other shared channel already had the code implemented
- channels: atomic::AtomicUint,
-
- /// The state field is protected by this mutex
- lock: NativeMutex,
- state: UnsafeCell<State<T>>,
-}
-
-struct State<T> {
- disconnected: bool, // Is the channel disconnected yet?
- queue: Queue, // queue of senders waiting to send data
- blocker: Blocker, // currently blocked task on this channel
- buf: Buffer<T>, // storage for buffered messages
- cap: uint, // capacity of this channel
-
- /// A curious flag used to indicate whether a sender failed or succeeded in
- /// blocking. This is used to transmit information back to the task that it
- /// must dequeue its message from the buffer because it was not received.
- /// This is only relevant in the 0-buffer case. This obviously cannot be
- /// safely constructed, but it's guaranteed to always have a valid pointer
- /// value.
- canceled: Option<&'static mut bool>,
-}
-
-/// Possible flavors of tasks who can be blocked on this channel.
-enum Blocker {
- BlockedSender(BlockedTask),
- BlockedReceiver(BlockedTask),
- NoneBlocked
-}
-
-/// Simple queue for threading tasks together. Nodes are stack-allocated, so
-/// this structure is not safe at all
-struct Queue {
- head: *mut Node,
- tail: *mut Node,
-}
-
-struct Node {
- task: Option<BlockedTask>,
- next: *mut Node,
-}
-
-/// A simple ring-buffer
-struct Buffer<T> {
- buf: Vec<Option<T>>,
- start: uint,
- size: uint,
-}
-
-#[deriving(Show)]
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-/// Atomically blocks the current task, placing it into `slot`, unlocking `lock`
-/// in the meantime. This re-locks the mutex upon returning.
-fn wait(slot: &mut Blocker, f: fn(BlockedTask) -> Blocker,
- lock: &NativeMutex) {
- let me: Box<Task> = Local::take();
- me.deschedule(1, |task| {
- match mem::replace(slot, f(task)) {
- NoneBlocked => {}
- _ => unreachable!(),
- }
- unsafe { lock.unlock_noguard(); }
- Ok(())
- });
- unsafe { lock.lock_noguard(); }
-}
-
-/// Wakes up a task, dropping the lock at the correct time
-fn wakeup(task: BlockedTask, guard: LockGuard) {
- // We need to be careful to wake up the waiting task *outside* of the mutex
- // in case it incurs a context switch.
- mem::drop(guard);
- task.wake().map(|t| t.reawaken());
-}
-
-impl<T: Send> Packet<T> {
- pub fn new(cap: uint) -> Packet<T> {
- Packet {
- channels: atomic::AtomicUint::new(1),
- lock: unsafe { NativeMutex::new() },
- state: UnsafeCell::new(State {
- disconnected: false,
- blocker: NoneBlocked,
- cap: cap,
- canceled: None,
- queue: Queue {
- head: 0 as *mut Node,
- tail: 0 as *mut Node,
- },
- buf: Buffer {
- buf: Vec::from_fn(cap + if cap == 0 {1} else {0}, |_| None),
- start: 0,
- size: 0,
- },
- }),
- }
- }
-
- // Locks this channel, returning a guard for the state and the mutable state
- // itself. Care should be taken to ensure that the state does not escape the
- // guard!
- //
- // Note that we're ok promoting an & reference to an &mut reference because
- // the lock ensures that we're the only ones in the world with a pointer to
- // the state.
- fn lock<'a>(&'a self) -> (LockGuard<'a>, &'a mut State<T>) {
- unsafe {
- let guard = self.lock.lock();
- (guard, &mut *self.state.get())
- }
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
- let (guard, state) = self.lock();
-
- // wait for a slot to become available, and enqueue the data
- while !state.disconnected && state.buf.size() == state.buf.cap() {
- state.queue.enqueue(&self.lock);
- }
- if state.disconnected { return Err(t) }
- state.buf.enqueue(t);
-
- match mem::replace(&mut state.blocker, NoneBlocked) {
- // if our capacity is 0, then we need to wait for a receiver to be
- // available to take our data. After waiting, we check again to make
- // sure the port didn't go away in the meantime. If it did, we need
- // to hand back our data.
- NoneBlocked if state.cap == 0 => {
- let mut canceled = false;
- assert!(state.canceled.is_none());
- state.canceled = Some(unsafe { mem::transmute(&mut canceled) });
- wait(&mut state.blocker, BlockedSender, &self.lock);
- if canceled {Err(state.buf.dequeue())} else {Ok(())}
- }
-
- // success, we buffered some data
- NoneBlocked => Ok(()),
-
- // success, someone's about to receive our buffered data.
- BlockedReceiver(task) => { wakeup(task, guard); Ok(()) }
-
- BlockedSender(..) => panic!("lolwut"),
- }
- }
-
- pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
- let (guard, state) = self.lock();
- if state.disconnected {
- Err(super::RecvDisconnected(t))
- } else if state.buf.size() == state.buf.cap() {
- Err(super::Full(t))
- } else if state.cap == 0 {
- // With capacity 0, even though we have buffer space we can't
- // transfer the data unless there's a receiver waiting.
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => Err(super::Full(t)),
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(task) => {
- state.buf.enqueue(t);
- wakeup(task, guard);
- Ok(())
- }
- }
- } else {
- // If the buffer has some space and the capacity isn't 0, then we
- // just enqueue the data for later retrieval, ensuring to wake up
- // any blocked receiver if there is one.
- assert!(state.buf.size() < state.buf.cap());
- state.buf.enqueue(t);
- match mem::replace(&mut state.blocker, NoneBlocked) {
- BlockedReceiver(task) => wakeup(task, guard),
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- }
- Ok(())
- }
- }
-
- // Receives a message from this channel
- //
- // When reading this, remember that there can only ever be one receiver at
- // time.
- pub fn recv(&self) -> Result<T, ()> {
- let (guard, state) = self.lock();
-
- // Wait for the buffer to have something in it. No need for a while loop
- // because we're the only receiver.
- let mut waited = false;
- if !state.disconnected && state.buf.size() == 0 {
- wait(&mut state.blocker, BlockedReceiver, &self.lock);
- waited = true;
- }
- if state.disconnected && state.buf.size() == 0 { return Err(()) }
-
- // Pick up the data, wake up our neighbors, and carry on
- assert!(state.buf.size() > 0);
- let ret = state.buf.dequeue();
- self.wakeup_senders(waited, guard, state);
- return Ok(ret);
- }
-
- pub fn try_recv(&self) -> Result<T, Failure> {
- let (guard, state) = self.lock();
-
- // Easy cases first
- if state.disconnected { return Err(Disconnected) }
- if state.buf.size() == 0 { return Err(Empty) }
-
- // Be sure to wake up neighbors
- let ret = Ok(state.buf.dequeue());
- self.wakeup_senders(false, guard, state);
-
- return ret;
- }
-
- // Wake up pending senders after some data has been received
- //
- // * `waited` - flag if the receiver blocked to receive some data, or if it
- // just picked up some data on the way out
- // * `guard` - the lock guard that is held over this channel's lock
- fn wakeup_senders(&self, waited: bool,
- guard: LockGuard,
- state: &mut State<T>) {
- let pending_sender1: Option<BlockedTask> = state.queue.dequeue();
-
- // If this is a no-buffer channel (cap == 0), then if we didn't wait we
- // need to ACK the sender. If we waited, then the sender waking us up
- // was already the ACK.
- let pending_sender2 = if state.cap == 0 && !waited {
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedReceiver(..) => unreachable!(),
- BlockedSender(task) => {
- state.canceled.take();
- Some(task)
- }
- }
- } else {
- None
- };
- mem::drop((state, guard));
-
- // only outside of the lock do we wake up the pending tasks
- pending_sender1.map(|t| t.wake().map(|t| t.reawaken()));
- pending_sender2.map(|t| t.wake().map(|t| t.reawaken()));
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&self) {
- self.channels.fetch_add(1, atomic::SeqCst);
- }
-
- pub fn drop_chan(&self) {
- // Only flag the channel as disconnected if we're the last channel
- match self.channels.fetch_sub(1, atomic::SeqCst) {
- 1 => {}
- _ => return
- }
-
- // Not much to do other than wake up a receiver if one's there
- let (guard, state) = self.lock();
- if state.disconnected { return }
- state.disconnected = true;
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(task) => wakeup(task, guard),
- }
- }
-
- pub fn drop_port(&self) {
- let (guard, state) = self.lock();
-
- if state.disconnected { return }
- state.disconnected = true;
-
- // If the capacity is 0, then the sender may want its data back after
- // we're disconnected. Otherwise it's now our responsibility to destroy
- // the buffered data. As with many other portions of this code, this
- // needs to be careful to destroy the data *outside* of the lock to
- // prevent deadlock.
- let _data = if state.cap != 0 {
- mem::replace(&mut state.buf.buf, Vec::new())
- } else {
- Vec::new()
- };
- let mut queue = mem::replace(&mut state.queue, Queue {
- head: 0 as *mut Node,
- tail: 0 as *mut Node,
- });
-
- let waiter = match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedSender(task) => {
- *state.canceled.take().unwrap() = true;
- Some(task)
- }
- BlockedReceiver(..) => unreachable!(),
- };
- mem::drop((state, guard));
-
- loop {
- match queue.dequeue() {
- Some(task) => { task.wake().map(|t| t.reawaken()); }
- None => break,
- }
- }
- waiter.map(|t| t.wake().map(|t| t.reawaken()));
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Returns whether this port has data to receive (a disconnected
- // channel counts as "having data").
- pub fn can_recv(&self) -> bool {
- let (_g, state) = self.lock();
- state.disconnected || state.buf.size() > 0
- }
-
- // Attempts to start selection on this port. This can either succeed or fail
- // because there is data waiting.
- pub fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>{
- let (_g, state) = self.lock();
- if state.disconnected || state.buf.size() > 0 {
- Err(task)
- } else {
- match mem::replace(&mut state.blocker, BlockedReceiver(task)) {
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(..) => unreachable!(),
- }
- Ok(())
- }
- }
-
- // Remove a previous selecting task from this port. This ensures that the
- // blocked task will no longer be visible to any other threads.
- //
- // The return value indicates whether there's data on this port.
- pub fn abort_selection(&self) -> bool {
- let (_g, state) = self.lock();
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => true,
- BlockedSender(task) => {
- state.blocker = BlockedSender(task);
- true
- }
- BlockedReceiver(task) => { task.trash(); false }
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.channels.load(atomic::SeqCst), 0);
- let (_g, state) = self.lock();
- assert!(state.queue.dequeue().is_none());
- assert!(state.canceled.is_none());
- }
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-// Buffer, a simple ring buffer backed by Vec<T>
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T> Buffer<T> {
- fn enqueue(&mut self, t: T) {
- let pos = (self.start + self.size) % self.buf.len();
- self.size += 1;
- let prev = mem::replace(&mut self.buf[pos], Some(t));
- assert!(prev.is_none());
- }
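-
- // A worked example of the ring arithmetic (illustrative): with
- // `buf.len() == 4`, `start == 3`, and `size == 1`, `enqueue` writes
- // to index (3 + 1) % 4 == 0, wrapping past the end of the backing
- // Vec, while the next `dequeue` takes index 3 and advances `start`
- // back to 0.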
-
- fn dequeue(&mut self) -> T {
- let start = self.start;
- self.size -= 1;
- self.start = (self.start + 1) % self.buf.len();
- self.buf[start].take().unwrap()
- }
-
- fn size(&self) -> uint { self.size }
- fn cap(&self) -> uint { self.buf.len() }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
-////////////////////////////////////////////////////////////////////////////////
-
-impl Queue {
- fn enqueue(&mut self, lock: &NativeMutex) {
- let task: Box<Task> = Local::take();
- let mut node = Node {
- task: None,
- next: 0 as *mut Node,
- };
- task.deschedule(1, |task| {
- node.task = Some(task);
- if self.tail.is_null() {
- self.head = &mut node as *mut Node;
- self.tail = &mut node as *mut Node;
- } else {
- unsafe {
- (*self.tail).next = &mut node as *mut Node;
- self.tail = &mut node as *mut Node;
- }
- }
- unsafe { lock.unlock_noguard(); }
- Ok(())
- });
- unsafe { lock.lock_noguard(); }
- assert!(node.next.is_null());
- }
-
- fn dequeue(&mut self) -> Option<BlockedTask> {
- if self.head.is_null() {
- return None
- }
- let node = self.head;
- self.head = unsafe { (*node).next };
- if self.head.is_null() {
- self.tail = 0 as *mut Node;
- }
- unsafe {
- (*node).next = 0 as *mut Node;
- Some((*node).task.take().unwrap())
- }
- }
-}
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A (mostly) lock-free concurrent work-stealing deque
-//!
-//! This module contains an implementation of the Chase-Lev work stealing deque
-//! described in "Dynamic Circular Work-Stealing Deque". The implementation is
-//! heavily based on the pseudocode found in the paper.
-//!
-//! This implementation does not want to have the restriction of a garbage
-//! collector for reclamation of buffers, and instead it uses a shared pool of
-//! buffers. This shared pool is required for correctness in this
-//! implementation.
-//!
-//! The only lock-synchronized portions of this deque are the buffer allocation
-//! and deallocation portions. Otherwise all operations are lock-free.
-//!
-//! # Example
-//!
-//!     use std::sync::deque::BufferPool;
-//!
-//!     let mut pool = BufferPool::new();
-//!     let (mut worker, mut stealer) = pool.deque();
-//!
-//!     // Only the worker may push/pop
-//!     worker.push(1i);
-//!     worker.pop();
-//!
-//!     // Stealers take data from the other end of the deque
-//!     worker.push(1i);
-//!     stealer.steal();
-//!
-//!     // Stealers can be cloned to have many stealers stealing in parallel
-//!     worker.push(1i);
-//!     let mut stealer2 = stealer.clone();
-//!     stealer2.steal();
-
-#![experimental]
-
-// NB: the "buffer pool" strategy is not done for speed, but rather for
-// correctness. For more info, see the comment on `swap_buffer`
-
-// FIXME: all atomic operations in this module use a SeqCst ordering. That is
-// probably overkill
-
-pub use self::Stolen::*;
-
-use core::prelude::*;
-
-use alloc::arc::Arc;
-use alloc::heap::{allocate, deallocate};
-use alloc::boxed::Box;
-use collections::Vec;
-use core::kinds::marker;
-use core::mem::{forget, min_align_of, size_of, transmute};
-use core::ptr;
-use rustrt::exclusive::Exclusive;
-
-use atomic::{AtomicInt, AtomicPtr, SeqCst};
-
-// Once the queue is less than 1/K full, then it will be downsized. Note that
-// the deque requires that this number be less than 2.
-static K: int = 4;
-
-// Minimum number of bits that a buffer size should be. No buffer will resize to
-// under this value, and all deques will initially contain a buffer of this
-// size.
-//
-// The size in question is 1 << MIN_BITS
-static MIN_BITS: uint = 7;
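-//
-// For example, with MIN_BITS = 7 every deque starts with (and never shrinks
-// below) a buffer of 1 << 7 = 128 entries, and with K = 4 a 128-entry buffer
-// becomes a candidate for shrinking once it holds fewer than 128 / 4 = 32
-// elements.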
-
-struct Deque<T> {
- bottom: AtomicInt,
- top: AtomicInt,
- array: AtomicPtr<Buffer<T>>,
- pool: BufferPool<T>,
-}
-
-/// Worker half of the work-stealing deque. This worker has exclusive access to
-/// one side of the deque, and uses `push` and `pop` method to manipulate it.
-///
-/// There may only be one worker per deque.
-pub struct Worker<T> {
- deque: Arc<Deque<T>>,
- _noshare: marker::NoSync,
-}
-
-/// The stealing half of the work-stealing deque. Stealers have access to the
-/// opposite end of the deque from the worker, and they only have access to the
-/// `steal` method.
-pub struct Stealer<T> {
- deque: Arc<Deque<T>>,
- _noshare: marker::NoSync,
-}
-
-/// When stealing some data, this is an enumeration of the possible outcomes.
-#[deriving(PartialEq, Show)]
-pub enum Stolen<T> {
- /// The deque was empty at the time of stealing
- Empty,
- /// The stealer lost the race for stealing data, and a retry may return more
- /// data.
- Abort,
- /// The stealer has successfully stolen some data.
- Data(T),
-}
-
-/// The allocation pool for buffers used by work-stealing deques. Right now this
-/// structure is used for reclamation of memory after it is no longer in use by
-/// deques.
-///
-/// This data structure is protected by a mutex, but it is rarely used. Deques
-/// will only use this structure when allocating a new buffer or deallocating a
-/// previous one.
-pub struct BufferPool<T> {
- pool: Arc<Exclusive<Vec<Box<Buffer<T>>>>>,
-}
-
-/// An internal buffer used by the chase-lev deque. This structure is actually
-/// implemented as a circular buffer, and is used as the intermediate storage of
-/// the data in the deque.
-///
-/// This type is implemented with *T instead of Vec<T> for two reasons:
-///
-/// 1. There is nothing safe about using this buffer. This easily allows the
- /// same value to be read twice into Rust, and there is nothing to
-/// prevent this. The usage by the deque must ensure that one of the
-/// values is forgotten. Furthermore, we only ever want to manually run
-/// destructors for values in this buffer (on drop) because the bounds
-/// are defined by the deque it's owned by.
-///
-/// 2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
-/// LLVM is probably pretty good at doing this already.
-struct Buffer<T> {
- storage: *const T,
- log_size: uint,
-}
-
-impl<T: Send> BufferPool<T> {
- /// Allocates a new buffer pool which in turn can be used to allocate new
- /// deques.
- pub fn new() -> BufferPool<T> {
- BufferPool { pool: Arc::new(Exclusive::new(Vec::new())) }
- }
-
-    /// Allocates a new work-stealing deque which will send memory to and
-    /// receive memory from this buffer pool.
- pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
- let a = Arc::new(Deque::new(self.clone()));
- let b = a.clone();
- (Worker { deque: a, _noshare: marker::NoSync },
- Stealer { deque: b, _noshare: marker::NoSync })
- }
-
- fn alloc(&mut self, bits: uint) -> Box<Buffer<T>> {
- unsafe {
- let mut pool = self.pool.lock();
- match pool.iter().position(|x| x.size() >= (1 << bits)) {
- Some(i) => pool.remove(i).unwrap(),
- None => box Buffer::new(bits)
- }
- }
- }
-
- fn free(&self, buf: Box<Buffer<T>>) {
- unsafe {
- let mut pool = self.pool.lock();
- match pool.iter().position(|v| v.size() > buf.size()) {
- Some(i) => pool.insert(i, buf),
- None => pool.push(buf),
- }
- }
- }
-}
-
-impl<T: Send> Clone for BufferPool<T> {
- fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
-}
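-
-// Putting the pieces together: one pool can mint many (worker, stealer)
-// pairs. A single-task sketch of the expected behavior (illustrative only;
-// real uses hand the stealer to other tasks):
-#[allow(dead_code)]
-fn example_usage() {
-    let pool = BufferPool::<int>::new();
-    let (worker, stealer) = pool.deque();
-    worker.push(1);
-    assert_eq!(worker.pop(), Some(1)); // the worker pops from its own end
-    worker.push(2);
-    assert_eq!(stealer.steal(), Data(2)); // a stealer takes from the other end
-}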
-
-impl<T: Send> Worker<T> {
- /// Pushes data onto the front of this work queue.
- pub fn push(&self, t: T) {
- unsafe { self.deque.push(t) }
- }
- /// Pops data off the front of the work queue, returning `None` on an empty
- /// queue.
- pub fn pop(&self) -> Option<T> {
- unsafe { self.deque.pop() }
- }
-
- /// Gets access to the buffer pool that this worker is attached to. This can
- /// be used to create more deques which share the same buffer pool as this
- /// deque.
- pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
- &self.deque.pool
- }
-}
-
-impl<T: Send> Stealer<T> {
- /// Steals work off the end of the queue (opposite of the worker's end)
- pub fn steal(&self) -> Stolen<T> {
- unsafe { self.deque.steal() }
- }
-
- /// Gets access to the buffer pool that this stealer is attached to. This
- /// can be used to create more deques which share the same buffer pool as
- /// this deque.
- pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
- &self.deque.pool
- }
-}
-
-impl<T: Send> Clone for Stealer<T> {
- fn clone(&self) -> Stealer<T> {
- Stealer { deque: self.deque.clone(), _noshare: marker::NoSync }
- }
-}
-
-// Almost all of this code can be found directly in the paper so I'm not
-// personally going to heavily comment what's going on here.
-
-impl<T: Send> Deque<T> {
- fn new(mut pool: BufferPool<T>) -> Deque<T> {
- let buf = pool.alloc(MIN_BITS);
- Deque {
- bottom: AtomicInt::new(0),
- top: AtomicInt::new(0),
- array: AtomicPtr::new(unsafe { transmute(buf) }),
- pool: pool,
- }
- }
-
- unsafe fn push(&self, data: T) {
- let mut b = self.bottom.load(SeqCst);
- let t = self.top.load(SeqCst);
- let mut a = self.array.load(SeqCst);
- let size = b - t;
- if size >= (*a).size() - 1 {
-        // You won't find this code in the Chase-Lev deque paper. This is
- // alluded to in a small footnote, however. We always free a buffer
- // when growing in order to prevent leaks.
- a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
- b = self.bottom.load(SeqCst);
- }
- (*a).put(b, data);
- self.bottom.store(b + 1, SeqCst);
- }
-
- unsafe fn pop(&self) -> Option<T> {
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
- let b = b - 1;
- self.bottom.store(b, SeqCst);
- let t = self.top.load(SeqCst);
- let size = b - t;
- if size < 0 {
- self.bottom.store(t, SeqCst);
- return None;
- }
- let data = (*a).get(b);
- if size > 0 {
- self.maybe_shrink(b, t);
- return Some(data);
- }
- if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
- self.bottom.store(t + 1, SeqCst);
- return Some(data);
- } else {
- self.bottom.store(t + 1, SeqCst);
- forget(data); // someone else stole this value
- return None;
- }
- }
-
- unsafe fn steal(&self) -> Stolen<T> {
- let t = self.top.load(SeqCst);
- let old = self.array.load(SeqCst);
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
- let size = b - t;
- if size <= 0 { return Empty }
- if size % (*a).size() == 0 {
- if a == old && t == self.top.load(SeqCst) {
- return Empty
- }
- return Abort
- }
- let data = (*a).get(t);
- if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
- Data(data)
- } else {
- forget(data); // someone else stole this value
- Abort
- }
- }
-
- unsafe fn maybe_shrink(&self, b: int, t: int) {
- let a = self.array.load(SeqCst);
- if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
- self.swap_buffer(b, a, (*a).resize(b, t, -1));
- }
- }
-
- // Helper routine not mentioned in the paper which is used in growing and
- // shrinking buffers to swap in a new buffer into place. As a bit of a
-    // recap, the whole reason we need a buffer pool rather than just
-    // calling malloc/free directly is that stealers can continue using buffers
-    // after this method has called 'free' on them. The continued usage is simply
- // a read followed by a forget, but we must make sure that the memory can
- // continue to be read after we flag this buffer for reclamation.
- unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
- buf: Buffer<T>) -> *mut Buffer<T> {
- let newbuf: *mut Buffer<T> = transmute(box buf);
- self.array.store(newbuf, SeqCst);
- let ss = (*newbuf).size();
- self.bottom.store(b + ss, SeqCst);
- let t = self.top.load(SeqCst);
- if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
- self.bottom.store(b, SeqCst);
- }
- self.pool.free(transmute(old));
- return newbuf;
- }
-}
-
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Deque<T> {
- fn drop(&mut self) {
- let t = self.top.load(SeqCst);
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
-        // Free whatever is leftover in the deque, and then move the buffer
- // back into the pool.
- for i in range(t, b) {
- let _: T = unsafe { (*a).get(i) };
- }
- self.pool.free(unsafe { transmute(a) });
- }
-}
-
-#[inline]
-fn buffer_alloc_size<T>(log_size: uint) -> uint {
- (1 << log_size) * size_of::<T>()
-}
-
-impl<T: Send> Buffer<T> {
- unsafe fn new(log_size: uint) -> Buffer<T> {
- let size = buffer_alloc_size::<T>(log_size);
- let buffer = allocate(size, min_align_of::<T>());
- if buffer.is_null() { ::alloc::oom() }
- Buffer {
- storage: buffer as *const T,
- log_size: log_size,
- }
- }
-
- fn size(&self) -> int { 1 << self.log_size }
-
- // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
- fn mask(&self) -> int { (1 << self.log_size) - 1 }
-
- unsafe fn elem(&self, i: int) -> *const T {
- self.storage.offset(i & self.mask())
- }
-
- // This does not protect against loading duplicate values of the same cell,
- // nor does this clear out the contents contained within. Hence, this is a
- // very unsafe method which the caller needs to treat specially in case a
- // race is lost.
- unsafe fn get(&self, i: int) -> T {
- ptr::read(self.elem(i))
- }
-
- // Unsafe because this unsafely overwrites possibly uninitialized or
- // initialized data.
- unsafe fn put(&self, i: int, t: T) {
- ptr::write(self.elem(i) as *mut T, t);
- }
-
- // Again, unsafe because this has incredibly dubious ownership violations.
- // It is assumed that this buffer is immediately dropped.
- unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
- // NB: not entirely obvious, but thanks to 2's complement,
- // casting delta to uint and then adding gives the desired
- // effect.
- let buf = Buffer::new(self.log_size + delta as uint);
- for i in range(t, b) {
- buf.put(i, self.get(i));
- }
- return buf;
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Buffer<T> {
- fn drop(&mut self) {
- // It is assumed that all buffers are empty on drop.
- let size = buffer_alloc_size::<T>(self.log_size);
- unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::prelude::*;
- use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
-
- use std::mem;
- use rustrt::thread::Thread;
- use std::rand;
- use std::rand::Rng;
- use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
- AtomicUint, INIT_ATOMIC_UINT};
- use std::vec;
-
- #[test]
- fn smoke() {
- let pool = BufferPool::new();
- let (w, s) = pool.deque();
- assert_eq!(w.pop(), None);
- assert_eq!(s.steal(), Empty);
- w.push(1i);
- assert_eq!(w.pop(), Some(1));
- w.push(1);
- assert_eq!(s.steal(), Data(1));
- w.push(1);
- assert_eq!(s.clone().steal(), Data(1));
- }
-
- #[test]
- fn stealpush() {
- static AMT: int = 100000;
- let pool = BufferPool::<int>::new();
- let (w, s) = pool.deque();
- let t = Thread::start(proc() {
- let mut left = AMT;
- while left > 0 {
- match s.steal() {
- Data(i) => {
- assert_eq!(i, 1);
- left -= 1;
- }
- Abort | Empty => {}
- }
- }
- });
-
- for _ in range(0, AMT) {
- w.push(1);
- }
-
- t.join();
- }
-
- #[test]
- fn stealpush_large() {
- static AMT: int = 100000;
- let pool = BufferPool::<(int, int)>::new();
- let (w, s) = pool.deque();
- let t = Thread::start(proc() {
- let mut left = AMT;
- while left > 0 {
- match s.steal() {
- Data((1, 10)) => { left -= 1; }
- Data(..) => panic!(),
- Abort | Empty => {}
- }
- }
- });
-
- for _ in range(0, AMT) {
- w.push((1, 10));
- }
-
- t.join();
- }
-
- fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
- nthreads: int, amt: uint) {
- for _ in range(0, amt) {
- w.push(box 20);
- }
- let mut remaining = AtomicUint::new(amt);
- let unsafe_remaining: *mut AtomicUint = &mut remaining;
-
- let threads = range(0, nthreads).map(|_| {
- let s = s.clone();
- Thread::start(proc() {
- unsafe {
- while (*unsafe_remaining).load(SeqCst) > 0 {
- match s.steal() {
- Data(box 20) => {
- (*unsafe_remaining).fetch_sub(1, SeqCst);
- }
- Data(..) => panic!(),
- Abort | Empty => {}
- }
- }
- }
- })
- }).collect::<Vec<Thread<()>>>();
-
- while remaining.load(SeqCst) > 0 {
- match w.pop() {
- Some(box 20) => { remaining.fetch_sub(1, SeqCst); }
- Some(..) => panic!(),
- None => {}
- }
- }
-
- for thread in threads.into_iter() {
- thread.join();
- }
- }
-
- #[test]
- fn run_stampede() {
- let pool = BufferPool::<Box<int>>::new();
- let (w, s) = pool.deque();
- stampede(w, s, 8, 10000);
- }
-
- #[test]
- fn many_stampede() {
- static AMT: uint = 4;
- let pool = BufferPool::<Box<int>>::new();
- let threads = range(0, AMT).map(|_| {
- let (w, s) = pool.deque();
- Thread::start(proc() {
- stampede(w, s, 4, 10000);
- })
- }).collect::<Vec<Thread<()>>>();
-
- for thread in threads.into_iter() {
- thread.join();
- }
- }
-
- #[test]
- fn stress() {
- static AMT: int = 100000;
- static NTHREADS: int = 8;
- static DONE: AtomicBool = INIT_ATOMIC_BOOL;
- static HITS: AtomicUint = INIT_ATOMIC_UINT;
- let pool = BufferPool::<int>::new();
- let (w, s) = pool.deque();
-
- let threads = range(0, NTHREADS).map(|_| {
- let s = s.clone();
- Thread::start(proc() {
- loop {
- match s.steal() {
- Data(2) => { HITS.fetch_add(1, SeqCst); }
- Data(..) => panic!(),
- _ if DONE.load(SeqCst) => break,
- _ => {}
- }
- }
- })
- }).collect::<Vec<Thread<()>>>();
-
- let mut rng = rand::task_rng();
- let mut expected = 0;
- while expected < AMT {
- if rng.gen_range(0i, 3) == 2 {
- match w.pop() {
- None => {}
- Some(2) => { HITS.fetch_add(1, SeqCst); },
- Some(_) => panic!(),
- }
- } else {
- expected += 1;
- w.push(2);
- }
- }
-
- while HITS.load(SeqCst) < AMT as uint {
- match w.pop() {
- None => {}
- Some(2) => { HITS.fetch_add(1, SeqCst); },
- Some(_) => panic!(),
- }
- }
- DONE.store(true, SeqCst);
-
- for thread in threads.into_iter() {
- thread.join();
- }
-
- assert_eq!(HITS.load(SeqCst), expected as uint);
- }
-
- #[test]
- #[cfg_attr(windows, ignore)] // apparently windows scheduling is weird?
- fn no_starvation() {
- static AMT: int = 10000;
- static NTHREADS: int = 4;
- static DONE: AtomicBool = INIT_ATOMIC_BOOL;
- let pool = BufferPool::<(int, uint)>::new();
- let (w, s) = pool.deque();
-
- let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
- let s = s.clone();
- let unique_box = box AtomicUint::new(0);
- let thread_box = unsafe {
- *mem::transmute::<&Box<AtomicUint>,
- *const *mut AtomicUint>(&unique_box)
- };
- (Thread::start(proc() {
- unsafe {
- loop {
- match s.steal() {
- Data((1, 2)) => {
- (*thread_box).fetch_add(1, SeqCst);
- }
- Data(..) => panic!(),
- _ if DONE.load(SeqCst) => break,
- _ => {}
- }
- }
- }
- }), unique_box)
- }));
-
- let mut rng = rand::task_rng();
- let mut myhit = false;
- 'outer: loop {
- for _ in range(0, rng.gen_range(0, AMT)) {
- if !myhit && rng.gen_range(0i, 3) == 2 {
- match w.pop() {
- None => {}
- Some((1, 2)) => myhit = true,
- Some(_) => panic!(),
- }
- } else {
- w.push((1, 2));
- }
- }
-
- for slot in hits.iter() {
- let amt = slot.load(SeqCst);
- if amt == 0 { continue 'outer; }
- }
- if myhit {
- break
- }
- }
-
- DONE.store(true, SeqCst);
-
- for thread in threads.into_iter() {
- thread.join();
- }
- }
-}
+++ /dev/null
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Core concurrency-enabled mechanisms and primitives.
-//!
-//! This crate contains the implementations of Rust's core synchronization
-//! primitives. This includes channels, mutexes, condition variables, etc.
-//!
-//! The interface of this crate is experimental, and it is not recommended to
-//! use this crate specifically. Instead, its functionality is reexported
-//! through `std::sync`.
-
-#![crate_name = "sync"]
-#![experimental]
-#![crate_type = "rlib"]
-#![crate_type = "dylib"]
-#![license = "MIT/ASL2"]
-#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
- html_favicon_url = "http://www.rust-lang.org/favicon.ico",
- html_root_url = "http://doc.rust-lang.org/nightly/",
- html_playground_url = "http://play.rust-lang.org/")]
-
-#![feature(phase, globs, macro_rules, unsafe_destructor)]
-#![feature(import_shadowing)]
-#![deny(missing_docs)]
-#![no_std]
-
-#[phase(plugin, link)] extern crate core;
-extern crate alloc;
-extern crate collections;
-extern crate rustrt;
-
-#[cfg(test)] extern crate test;
-#[cfg(test)] #[phase(plugin, link)] extern crate std;
-
-pub use alloc::arc::{Arc, Weak};
-pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
- RWLock, RWLockReadGuard, RWLockWriteGuard};
-
-// The mutex/rwlock in this module are not meant for reexport
-pub use raw::{Semaphore, SemaphoreGuard};
-
-// Core building blocks for all primitives in this crate
-
-pub mod atomic;
-
-// Concurrent data structures
-
-pub mod spsc_queue;
-pub mod mpsc_queue;
-pub mod mpmc_bounded_queue;
-pub mod deque;
-
-// Low-level concurrency primitives
-
-pub mod raw;
-pub mod mutex;
-pub mod one;
-
-// Message-passing based communication
-
-pub mod comm;
-
-// Higher level primitives based on those above
-
-mod lock;
-
-#[cfg(not(test))]
-mod std {
- pub use core::{fmt, option, cmp, clone};
-}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Wrappers for safe, shared, mutable memory between tasks
-//!
-//! The wrappers in this module build on the primitives from `sync::raw` to
-//! provide safe interfaces around using the primitive locks. These primitives
-//! implement a technique called "poisoning": if a task panics while holding a
-//! lock, all future attempts to use that lock will panic.
-//!
-//! For example, if two tasks are contending on a mutex and one of them panics
-//! after grabbing the lock, the second task will immediately panic because the
-//! lock is now poisoned.
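-//!
-//! A minimal sketch of that behavior (`task::try` captures each panic):
-//!
-//! ```rust
-//! use std::task;
-//! use sync::{Mutex, Arc};
-//!
-//! let arc = Arc::new(Mutex::new(1i));
-//! let arc2 = arc.clone();
-//! // This task panics while holding the lock, poisoning it.
-//! let _ = task::try(proc() {
-//!     let _lock = arc2.lock();
-//!     panic!();
-//! });
-//! // A later attempt to lock now panics as well.
-//! let result = task::try(proc() {
-//!     let _lock = arc.lock();
-//! });
-//! assert!(result.is_err());
-//! ```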
-
-use core::prelude::*;
-
-use self::Inner::*;
-
-use core::cell::UnsafeCell;
-use rustrt::local::Local;
-use rustrt::task::Task;
-
-use raw;
-
-/****************************************************************************
- * Poisoning helpers
- ****************************************************************************/
-
-struct PoisonOnFail<'a> {
- flag: &'a mut bool,
- failed: bool,
-}
-
-fn failing() -> bool {
- Local::borrow(None::<Task>).unwinder.unwinding()
-}
-
-impl<'a> PoisonOnFail<'a> {
- fn check(flag: bool, name: &str) {
- if flag {
- panic!("Poisoned {} - another task failed inside!", name);
- }
- }
-
- fn new<'a>(flag: &'a mut bool, name: &str) -> PoisonOnFail<'a> {
- PoisonOnFail::check(*flag, name);
- PoisonOnFail {
- flag: flag,
- failed: failing()
- }
- }
-}
-
-#[unsafe_destructor]
-impl<'a> Drop for PoisonOnFail<'a> {
- fn drop(&mut self) {
- if !self.failed && failing() {
- *self.flag = true;
- }
- }
-}
-
-/****************************************************************************
- * Condvar
- ****************************************************************************/
-
-enum Inner<'a> {
- InnerMutex(raw::MutexGuard<'a>),
- InnerRWLock(raw::RWLockWriteGuard<'a>),
-}
-
-impl<'b> Inner<'b> {
- fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> {
- match *self {
- InnerMutex(ref m) => &m.cond,
- InnerRWLock(ref m) => &m.cond,
- }
- }
-}
-
-/// A condition variable, a mechanism for unlock-and-descheduling and
-/// signaling, for use with the lock types.
-pub struct Condvar<'a> {
- name: &'static str,
- // n.b. Inner must be after PoisonOnFail because we must set the poison flag
- // *inside* the mutex, and struct fields are destroyed top-to-bottom
- // (destroy the lock guard last).
- poison: PoisonOnFail<'a>,
- inner: Inner<'a>,
-}
-
-impl<'a> Condvar<'a> {
- /// Atomically exit the associated lock and block until a signal is sent.
- ///
- /// wait() is equivalent to wait_on(0).
- ///
- /// # Panics
- ///
- /// A task which is killed while waiting on a condition variable will wake
- /// up, panic, and unlock the associated lock as it unwinds.
- #[inline]
- pub fn wait(&self) { self.wait_on(0) }
-
- /// Atomically exit the associated lock and block on a specified condvar
- /// until a signal is sent on that same condvar.
- ///
-    /// The associated lock must have been initialized with an appropriate
- /// number of condvars. The condvar_id must be between 0 and num_condvars-1
- /// or else this call will fail.
- #[inline]
- pub fn wait_on(&self, condvar_id: uint) {
- assert!(!*self.poison.flag);
- self.inner.cond().wait_on(condvar_id);
- // This is why we need to wrap sync::condvar.
- PoisonOnFail::check(*self.poison.flag, self.name);
- }
-
- /// Wake up a blocked task. Returns false if there was no blocked task.
- #[inline]
- pub fn signal(&self) -> bool { self.signal_on(0) }
-
- /// Wake up a blocked task on a specified condvar (as
- /// sync::cond.signal_on). Returns false if there was no blocked task.
- #[inline]
- pub fn signal_on(&self, condvar_id: uint) -> bool {
- assert!(!*self.poison.flag);
- self.inner.cond().signal_on(condvar_id)
- }
-
- /// Wake up all blocked tasks. Returns the number of tasks woken.
- #[inline]
- pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
-
- /// Wake up all blocked tasks on a specified condvar (as
- /// sync::cond.broadcast_on). Returns the number of tasks woken.
- #[inline]
- pub fn broadcast_on(&self, condvar_id: uint) -> uint {
- assert!(!*self.poison.flag);
- self.inner.cond().broadcast_on(condvar_id)
- }
-}
-
-/****************************************************************************
- * Mutex
- ****************************************************************************/
-
-/// A wrapper type which provides synchronized access to the underlying data, of
-/// type `T`. A mutex always provides exclusive access, and concurrent requests
-/// will block while the mutex is already locked.
-///
-/// # Example
-///
-/// ```
-/// use sync::{Mutex, Arc};
-///
-/// let mutex = Arc::new(Mutex::new(1i));
-/// let mutex2 = mutex.clone();
-///
-/// spawn(proc() {
-/// let mut val = mutex2.lock();
-/// *val += 1;
-/// val.cond.signal();
-/// });
-///
-/// let value = mutex.lock();
-/// while *value != 2 {
-/// value.cond.wait();
-/// }
-/// ```
-pub struct Mutex<T> {
- lock: raw::Mutex,
- failed: UnsafeCell<bool>,
- data: UnsafeCell<T>,
-}
-
-/// A guard which is created by locking a mutex. Through this guard the
-/// underlying data can be accessed.
-pub struct MutexGuard<'a, T:'a> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- /// Inner condition variable connected to the locked mutex that this guard
- /// was created from. This can be used for atomic-unlock-and-deschedule.
- pub cond: Condvar<'a>,
-}
-
-impl<T: Send> Mutex<T> {
- /// Creates a new mutex to protect the user-supplied data.
- pub fn new(user_data: T) -> Mutex<T> {
- Mutex::new_with_condvars(user_data, 1)
- }
-
- /// Create a new mutex, with a specified number of associated condvars.
- ///
- /// This will allow calling wait_on/signal_on/broadcast_on with condvar IDs
-    /// between 0 and num_condvars-1. (If num_condvars is 0, locking will still
-    /// be allowed, but any operations on the condvar will fail.)
- pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex<T> {
- Mutex {
- lock: raw::Mutex::new_with_condvars(num_condvars),
- failed: UnsafeCell::new(false),
- data: UnsafeCell::new(user_data),
- }
- }
-
- /// Access the underlying mutable data with mutual exclusion from other
- /// tasks. The returned value is an RAII guard which will unlock the mutex
- /// when dropped. All concurrent tasks attempting to lock the mutex will
- /// block while the returned value is still alive.
- ///
- /// # Panics
- ///
- /// Panicking while inside the Mutex will unlock the Mutex while unwinding, so
- /// that other tasks won't block forever. It will also poison the Mutex:
- /// any tasks that subsequently try to access it (including those already
- /// blocked on the mutex) will also panic immediately.
- #[inline]
- pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
- let guard = self.lock.lock();
-
- // These two accesses are safe because we're guaranteed at this point
- // that we have exclusive access to this mutex. We are indeed able to
- // promote ourselves from &Mutex to `&mut T`
- let poison = unsafe { &mut *self.failed.get() };
- let data = unsafe { &mut *self.data.get() };
-
- MutexGuard {
- _data: data,
- cond: Condvar {
- name: "Mutex",
- poison: PoisonOnFail::new(poison, "Mutex"),
- inner: InnerMutex(guard),
- },
- }
- }
-}
-
-impl<'a, T: Send> Deref<T> for MutexGuard<'a, T> {
- fn deref<'a>(&'a self) -> &'a T { &*self._data }
-}
-impl<'a, T: Send> DerefMut<T> for MutexGuard<'a, T> {
- fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
-}
-
-/****************************************************************************
- * R/W lock protected lock
- ****************************************************************************/
-
-/// A dual-mode reader-writer lock. The data can be accessed mutably or
-/// immutably, and immutably-accessing tasks may run concurrently.
-///
-/// # Example
-///
-/// ```
-/// use sync::{RWLock, Arc};
-///
-/// let lock1 = Arc::new(RWLock::new(1i));
-/// let lock2 = lock1.clone();
-///
-/// spawn(proc() {
-/// let mut val = lock2.write();
-/// *val = 3;
-/// let val = val.downgrade();
-/// println!("{}", *val);
-/// });
-///
-/// let val = lock1.read();
-/// println!("{}", *val);
-/// ```
-pub struct RWLock<T> {
- lock: raw::RWLock,
- failed: UnsafeCell<bool>,
- data: UnsafeCell<T>,
-}
-
-/// A guard which is created by locking an rwlock in write mode. Through this
-/// guard the underlying data can be accessed.
-pub struct RWLockWriteGuard<'a, T:'a> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- /// Inner condition variable that can be used to sleep on the write mode of
- /// this rwlock.
- pub cond: Condvar<'a>,
-}
-
-/// A guard which is created by locking an rwlock in read mode. Through this
-/// guard the underlying data can be accessed.
-pub struct RWLockReadGuard<'a, T:'a> {
- // FIXME #12808: strange names to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a T,
- _guard: raw::RWLockReadGuard<'a>,
-}
-
-impl<T: Send + Sync> RWLock<T> {
- /// Create a reader/writer lock with the supplied data.
- pub fn new(user_data: T) -> RWLock<T> {
- RWLock::new_with_condvars(user_data, 1)
- }
-
- /// Create a reader/writer lock with the supplied data and a specified number
- /// of condvars (as sync::RWLock::new_with_condvars).
- pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock<T> {
- RWLock {
- lock: raw::RWLock::new_with_condvars(num_condvars),
- failed: UnsafeCell::new(false),
- data: UnsafeCell::new(user_data),
- }
- }
-
- /// Access the underlying data mutably. Locks the rwlock in write mode;
- /// other readers and writers will block.
- ///
- /// # Panics
- ///
- /// Panicking while inside the lock will unlock the lock while unwinding, so
-    /// that other tasks won't block forever. As with Mutex.lock, it will also poison
- /// the lock, so subsequent readers and writers will both also panic.
- #[inline]
- pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a, T> {
- let guard = self.lock.write();
-
- // These two accesses are safe because we're guaranteed at this point
- // that we have exclusive access to this rwlock. We are indeed able to
- // promote ourselves from &RWLock to `&mut T`
- let poison = unsafe { &mut *self.failed.get() };
- let data = unsafe { &mut *self.data.get() };
-
- RWLockWriteGuard {
- _data: data,
- cond: Condvar {
- name: "RWLock",
- poison: PoisonOnFail::new(poison, "RWLock"),
- inner: InnerRWLock(guard),
- },
- }
- }
-
- /// Access the underlying data immutably. May run concurrently with other
- /// reading tasks.
- ///
- /// # Panics
- ///
- /// Panicking will unlock the lock while unwinding. However, unlike all other
- /// access modes, this will not poison the lock.
- pub fn read<'a>(&'a self) -> RWLockReadGuard<'a, T> {
- let guard = self.lock.read();
- PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
- RWLockReadGuard {
- _guard: guard,
- _data: unsafe { &*self.data.get() },
- }
- }
-}
-
-impl<'a, T: Send + Sync> RWLockWriteGuard<'a, T> {
- /// Consumes this write lock token, returning a new read lock token.
- ///
- /// This will allow pending readers to come into the lock.
- pub fn downgrade(self) -> RWLockReadGuard<'a, T> {
- let RWLockWriteGuard { _data, cond } = self;
- // convert the data to read-only explicitly
- let data = &*_data;
- let guard = match cond.inner {
- InnerMutex(..) => unreachable!(),
- InnerRWLock(guard) => guard.downgrade()
- };
- RWLockReadGuard { _guard: guard, _data: data }
- }
-}
-
-impl<'a, T: Send + Sync> Deref<T> for RWLockReadGuard<'a, T> {
- fn deref<'a>(&'a self) -> &'a T { self._data }
-}
-impl<'a, T: Send + Sync> Deref<T> for RWLockWriteGuard<'a, T> {
- fn deref<'a>(&'a self) -> &'a T { &*self._data }
-}
-impl<'a, T: Send + Sync> DerefMut<T> for RWLockWriteGuard<'a, T> {
- fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
-}
-
-/****************************************************************************
- * Barrier
- ****************************************************************************/
-
-/// A barrier enables multiple tasks to synchronize the beginning
-/// of some computation.
-///
-/// ```rust
-/// use sync::{Arc, Barrier};
-///
-/// let barrier = Arc::new(Barrier::new(10));
-/// for _ in range(0u, 10) {
-/// let c = barrier.clone();
-/// // The same messages will be printed together.
-/// // You will NOT see any interleaving.
-/// spawn(proc() {
-/// println!("before wait");
-/// c.wait();
-/// println!("after wait");
-/// });
-/// }
-/// ```
-pub struct Barrier {
- lock: Mutex<BarrierState>,
- num_tasks: uint,
-}
-
-// The inner state of a double barrier
-struct BarrierState {
- count: uint,
- generation_id: uint,
-}
-
-impl Barrier {
- /// Create a new barrier that can block a given number of tasks.
- pub fn new(num_tasks: uint) -> Barrier {
- Barrier {
- lock: Mutex::new(BarrierState {
- count: 0,
- generation_id: 0,
- }),
- num_tasks: num_tasks,
- }
- }
-
- /// Block the current task until a certain number of tasks is waiting.
- pub fn wait(&self) {
- let mut lock = self.lock.lock();
- let local_gen = lock.generation_id;
- lock.count += 1;
- if lock.count < self.num_tasks {
- // We need a while loop to guard against spurious wakeups.
- // http://en.wikipedia.org/wiki/Spurious_wakeup
- while local_gen == lock.generation_id &&
- lock.count < self.num_tasks {
- lock.cond.wait();
- }
- } else {
- lock.count = 0;
- lock.generation_id += 1;
- lock.cond.broadcast();
- }
- }
-}
-
-/****************************************************************************
- * Tests
- ****************************************************************************/
-
-#[cfg(test)]
-mod tests {
- use std::prelude::*;
- use std::comm::Empty;
- use std::task;
- use std::task::try_future;
-
- use Arc;
- use super::{Mutex, Barrier, RWLock};
-
- #[test]
- fn test_mutex_arc_condvar() {
- let arc = Arc::new(Mutex::new(false));
- let arc2 = arc.clone();
- let (tx, rx) = channel();
- task::spawn(proc() {
- // wait until parent gets in
- rx.recv();
- let mut lock = arc2.lock();
- *lock = true;
- lock.cond.signal();
- });
-
- let lock = arc.lock();
- tx.send(());
- assert!(!*lock);
- while !*lock {
- lock.cond.wait();
- }
- }
-
- #[test] #[should_fail]
- fn test_arc_condvar_poison() {
- let arc = Arc::new(Mutex::new(1i));
- let arc2 = arc.clone();
- let (tx, rx) = channel();
-
- spawn(proc() {
- rx.recv();
- let lock = arc2.lock();
- lock.cond.signal();
- // Parent should fail when it wakes up.
- panic!();
- });
-
- let lock = arc.lock();
- tx.send(());
- while *lock == 1 {
- lock.cond.wait();
- }
- }
-
- #[test] #[should_fail]
- fn test_mutex_arc_poison() {
- let arc = Arc::new(Mutex::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.lock();
- assert_eq!(*lock, 2);
- });
- let lock = arc.lock();
- assert_eq!(*lock, 1);
- }
-
- #[test]
- fn test_mutex_arc_nested() {
- // Tests nested mutexes and access
- // to underlying data.
- let arc = Arc::new(Mutex::new(1i));
- let arc2 = Arc::new(Mutex::new(arc));
- task::spawn(proc() {
- let lock = arc2.lock();
- let lock2 = lock.deref().lock();
- assert_eq!(*lock2, 1);
- });
- }
-
- #[test]
- fn test_mutex_arc_access_in_unwind() {
- let arc = Arc::new(Mutex::new(1i));
- let arc2 = arc.clone();
- let _ = task::try::<()>(proc() {
- struct Unwinder {
- i: Arc<Mutex<int>>,
- }
- impl Drop for Unwinder {
- fn drop(&mut self) {
- let mut lock = self.i.lock();
- *lock += 1;
- }
- }
- let _u = Unwinder { i: arc2 };
- panic!();
- });
- let lock = arc.lock();
- assert_eq!(*lock, 2);
- }
-
- #[test] #[should_fail]
- fn test_rw_arc_poison_wr() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
- });
- let lock = arc.read();
- assert_eq!(*lock, 1);
- }
- #[test] #[should_fail]
- fn test_rw_arc_poison_ww() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
- });
- let lock = arc.write();
- assert_eq!(*lock, 1);
- }
- #[test]
- fn test_rw_arc_no_poison_rr() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
- });
- let lock = arc.read();
- assert_eq!(*lock, 1);
- }
- #[test]
- fn test_rw_arc_no_poison_rw() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
- });
- let lock = arc.write();
- assert_eq!(*lock, 1);
- }
- #[test]
- fn test_rw_arc_no_poison_dr() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try(proc() {
- let lock = arc2.write().downgrade();
- assert_eq!(*lock, 2);
- });
- let lock = arc.write();
- assert_eq!(*lock, 1);
- }
-
- #[test]
- fn test_rw_arc() {
- let arc = Arc::new(RWLock::new(0i));
- let arc2 = arc.clone();
- let (tx, rx) = channel();
-
- task::spawn(proc() {
- let mut lock = arc2.write();
- for _ in range(0u, 10) {
- let tmp = *lock;
- *lock = -1;
- task::deschedule();
- *lock = tmp + 1;
- }
- tx.send(());
- });
-
- // Readers try to catch the writer in the act
- let mut children = Vec::new();
- for _ in range(0u, 5) {
- let arc3 = arc.clone();
- children.push(try_future(proc() {
- let lock = arc3.read();
- assert!(*lock >= 0);
- }));
- }
-
- // Wait for children to pass their asserts
- for r in children.iter_mut() {
- assert!(r.get_ref().is_ok());
- }
-
- // Wait for writer to finish
- rx.recv();
- let lock = arc.read();
- assert_eq!(*lock, 10);
- }
-
- #[test]
- fn test_rw_arc_access_in_unwind() {
- let arc = Arc::new(RWLock::new(1i));
- let arc2 = arc.clone();
- let _ = task::try::<()>(proc() {
- struct Unwinder {
- i: Arc<RWLock<int>>,
- }
- impl Drop for Unwinder {
- fn drop(&mut self) {
- let mut lock = self.i.write();
- *lock += 1;
- }
- }
- let _u = Unwinder { i: arc2 };
- panic!();
- });
- let lock = arc.read();
- assert_eq!(*lock, 2);
- }
-
- #[test]
- fn test_rw_downgrade() {
- // (1) A downgrader gets in write mode and does cond.wait.
- // (2) A writer gets in write mode, sets state to 42, and does signal.
- // (3) Downgrader wakes, sets state to 31337.
- // (4) tells writer and all other readers to contend as it downgrades.
- // (5) Writer attempts to set state back to 42, while downgraded task
- // and all reader tasks assert that it's 31337.
- let arc = Arc::new(RWLock::new(0i));
-
- // Reader tasks
- let mut reader_convos = Vec::new();
- for _ in range(0u, 10) {
- let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
- reader_convos.push((tx1, rx2));
- let arcn = arc.clone();
- task::spawn(proc() {
- rx1.recv(); // wait for downgrader to give go-ahead
- let lock = arcn.read();
- assert_eq!(*lock, 31337);
- tx2.send(());
- });
- }
-
- // Writer task
- let arc2 = arc.clone();
- let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
- task::spawn(proc() {
- rx1.recv();
- {
- let mut lock = arc2.write();
- assert_eq!(*lock, 0);
- *lock = 42;
- lock.cond.signal();
- }
- rx1.recv();
- {
- let mut lock = arc2.write();
- // This shouldn't happen until after the downgrade read
- // section, and all other readers, finish.
- assert_eq!(*lock, 31337);
- *lock = 42;
- }
- tx2.send(());
- });
-
- // Downgrader (us)
- let mut lock = arc.write();
- tx1.send(()); // send to another writer who will wake us up
- while *lock == 0 {
- lock.cond.wait();
- }
- assert_eq!(*lock, 42);
- *lock = 31337;
- // send to other readers
- for &(ref mut rc, _) in reader_convos.iter_mut() {
- rc.send(())
- }
- let lock = lock.downgrade();
- // complete handshake with other readers
- for &(_, ref mut rp) in reader_convos.iter_mut() {
- rp.recv()
- }
- tx1.send(()); // tell writer to try again
- assert_eq!(*lock, 31337);
- drop(lock);
-
- rx2.recv(); // complete handshake with writer
- }
-
- #[cfg(test)]
- fn test_rw_write_cond_downgrade_read_race_helper() {
- // Tests that when a downgrader hands off the "reader cloud" lock
- // because of a contending reader, a writer can't race to get it
- // instead, which would result in readers_and_writers. This tests
- // the raw module rather than this one, but it's here because an
- // rwarc gives us extra shared state to help check for the race.
- let x = Arc::new(RWLock::new(true));
- let (tx, rx) = channel();
-
- // writer task
- let xw = x.clone();
- task::spawn(proc() {
- let mut lock = xw.write();
- tx.send(()); // tell downgrader it's ok to go
- lock.cond.wait();
- // The core of the test is here: the condvar reacquire path
- // must involve order_lock, so that it cannot race with a reader
- // trying to receive the "reader cloud lock hand-off".
- *lock = false;
- });
-
- rx.recv(); // wait for writer to get in
-
- let lock = x.write();
- assert!(*lock);
- // make writer contend in the cond-reacquire path
- lock.cond.signal();
- // make a reader task to trigger the "reader cloud lock" handoff
- let xr = x.clone();
- let (tx, rx) = channel();
- task::spawn(proc() {
- tx.send(());
- drop(xr.read());
- });
- rx.recv(); // wait for reader task to exist
-
- let lock = lock.downgrade();
- // if writer mistakenly got in, make sure it mutates state
- // before we assert on it
- for _ in range(0u, 5) { task::deschedule(); }
- // make sure writer didn't get in.
- assert!(*lock);
- }
- #[test]
- fn test_rw_write_cond_downgrade_read_race() {
- // Ideally the above test case would have deschedule statements in it
- // that helped to expose the race nearly 100% of the time... but adding
- // deschedules in the intuitively-right locations made it even less
- // likely, and I wasn't sure why :( . This is a mediocre "next best"
- // option.
- for _ in range(0u, 8) {
- test_rw_write_cond_downgrade_read_race_helper();
- }
- }
-
- /************************************************************************
- * Barrier tests
- ************************************************************************/
- #[test]
- fn test_barrier() {
- let barrier = Arc::new(Barrier::new(10));
- let (tx, rx) = channel();
-
- for _ in range(0u, 9) {
- let c = barrier.clone();
- let tx = tx.clone();
- spawn(proc() {
- c.wait();
- tx.send(true);
- });
- }
-
- // At this point, all spawned tasks should be blocked,
- // so we shouldn't get anything from the port
- assert!(match rx.try_recv() {
- Err(Empty) => true,
- _ => false,
- });
-
- barrier.wait();
- // Now, the barrier is cleared and we should get data.
- for _ in range(0u, 9) {
- rx.recv();
- }
- }
-}
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-#![experimental]
-#![allow(missing_docs, dead_code)]
-
-// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
-
-use core::prelude::*;
-
-use alloc::arc::Arc;
-use collections::Vec;
-use core::num::UnsignedInt;
-use core::cell::UnsafeCell;
-
-use atomic::{AtomicUint,Relaxed,Release,Acquire};
-
-struct Node<T> {
- sequence: AtomicUint,
- value: Option<T>,
-}
-
-struct State<T> {
- pad0: [u8, ..64],
- buffer: Vec<UnsafeCell<Node<T>>>,
- mask: uint,
- pad1: [u8, ..64],
- enqueue_pos: AtomicUint,
- pad2: [u8, ..64],
- dequeue_pos: AtomicUint,
- pad3: [u8, ..64],
-}
-
-pub struct Queue<T> {
- state: Arc<State<T>>,
-}
-
-impl<T: Send> State<T> {
- fn with_capacity(capacity: uint) -> State<T> {
- let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
- if capacity < 2 {
- 2u
- } else {
- // use next power of 2 as capacity
- capacity.next_power_of_two()
- }
- } else {
- capacity
- };
- let buffer = Vec::from_fn(capacity, |i| {
- UnsafeCell::new(Node { sequence:AtomicUint::new(i), value: None })
- });
- State{
- pad0: [0, ..64],
- buffer: buffer,
- mask: capacity-1,
- pad1: [0, ..64],
- enqueue_pos: AtomicUint::new(0),
- pad2: [0, ..64],
- dequeue_pos: AtomicUint::new(0),
- pad3: [0, ..64],
- }
- }
-
- fn push(&self, value: T) -> bool {
- let mask = self.mask;
- let mut pos = self.enqueue_pos.load(Relaxed);
- loop {
- let node = &self.buffer[pos & mask];
- let seq = unsafe { (*node.get()).sequence.load(Acquire) };
- let diff: int = seq as int - pos as int;
-
- if diff == 0 {
- let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
- if enqueue_pos == pos {
- unsafe {
- (*node.get()).value = Some(value);
- (*node.get()).sequence.store(pos+1, Release);
- }
- break
- } else {
- pos = enqueue_pos;
- }
- } else if diff < 0 {
- return false
- } else {
- pos = self.enqueue_pos.load(Relaxed);
- }
- }
- true
- }
-
- fn pop(&self) -> Option<T> {
- let mask = self.mask;
- let mut pos = self.dequeue_pos.load(Relaxed);
- loop {
- let node = &self.buffer[pos & mask];
- let seq = unsafe { (*node.get()).sequence.load(Acquire) };
- let diff: int = seq as int - (pos + 1) as int;
- if diff == 0 {
- let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
- if dequeue_pos == pos {
- unsafe {
- let value = (*node.get()).value.take();
- (*node.get()).sequence.store(pos + mask + 1, Release);
- return value
- }
- } else {
- pos = dequeue_pos;
- }
- } else if diff < 0 {
- return None
- } else {
- pos = self.dequeue_pos.load(Relaxed);
- }
- }
- }
-}
-
-impl<T: Send> Queue<T> {
- pub fn with_capacity(capacity: uint) -> Queue<T> {
- Queue{
- state: Arc::new(State::with_capacity(capacity))
- }
- }
-
- pub fn push(&self, value: T) -> bool {
- self.state.push(value)
- }
-
- pub fn pop(&self) -> Option<T> {
- self.state.pop()
- }
-}
-
-impl<T: Send> Clone for Queue<T> {
- fn clone(&self) -> Queue<T> {
- Queue { state: self.state.clone() }
- }
-}
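-
-// A single-task sketch of the bounded semantics (illustrative only): `push`
-// reports failure once the buffer is full, and requested capacities are
-// rounded up to a power of two.
-fn example_bounded() {
-    let q = Queue::with_capacity(2);
-    assert!(q.push(1u));
-    assert!(q.push(2u));
-    assert!(!q.push(3u)); // full: capacity is exactly 2
-    assert_eq!(q.pop(), Some(1u)); // FIFO order
-    assert!(q.push(3u)); // space is available again after the pop
-}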
-
-#[cfg(test)]
-mod tests {
- use std::prelude::*;
- use super::Queue;
-
- #[test]
- fn test() {
- let nthreads = 8u;
- let nmsgs = 1000u;
- let q = Queue::with_capacity(nthreads*nmsgs);
- assert_eq!(None, q.pop());
- let (tx, rx) = channel();
-
- for _ in range(0, nthreads) {
- let q = q.clone();
- let tx = tx.clone();
- spawn(proc() {
- let q = q;
- for i in range(0, nmsgs) {
- assert!(q.push(i));
- }
- tx.send(());
- });
- }
-
- let mut completion_rxs = vec![];
- for _ in range(0, nthreads) {
- let (tx, rx) = channel();
- completion_rxs.push(rx);
- let q = q.clone();
- spawn(proc() {
- let q = q;
- let mut i = 0u;
- loop {
- match q.pop() {
- None => {},
- Some(_) => {
- i += 1;
- if i == nmsgs { break }
- }
- }
- }
- tx.send(i);
- });
- }
-
- for rx in completion_rxs.iter_mut() {
- assert_eq!(nmsgs, rx.recv());
- }
- for _ in range(0, nthreads) {
- rx.recv();
- }
- }
-}
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-//! A mostly lock-free multi-producer, single consumer queue.
-//!
-//! This module contains an implementation of a concurrent MPSC queue. This
-//! queue can be used to share data between tasks, and is also used as the
-//! building block of channels in rust.
-//!
-//! Note that the current implementation of this queue has a caveat in its
-//! `pop` method; see that method's documentation for more information. Due to
-//! this caveat, this queue may not be appropriate for all use-cases.
-
-#![experimental]
-
-// http://www.1024cores.net/home/lock-free-algorithms
-// /queues/non-intrusive-mpsc-node-based-queue
-
-pub use self::PopResult::*;
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::mem;
-use core::cell::UnsafeCell;
-
-use atomic::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
-
-/// A result of the `pop` function.
-pub enum PopResult<T> {
- /// Some data has been popped
- Data(T),
- /// The queue is empty
- Empty,
- /// The queue is in an inconsistent state. Popping data should succeed, but
-    /// some pushers have yet to make enough progress in order to allow a pop
-    /// to succeed. It is recommended that a `pop()` occur "in the near future"
-    /// in order to see if the sender has made progress or not.
- Inconsistent,
-}
-
-struct Node<T> {
- next: AtomicPtr<Node<T>>,
- value: Option<T>,
-}
-
-/// The multi-producer single-consumer structure. This is not cloneable, but it
-/// may be safely shared so long as it is guaranteed that there is only one
-/// popper at a time (many pushers are allowed).
-pub struct Queue<T> {
- head: AtomicPtr<Node<T>>,
- tail: UnsafeCell<*mut Node<T>>,
-}
-
-impl<T> Node<T> {
- unsafe fn new(v: Option<T>) -> *mut Node<T> {
- mem::transmute(box Node {
- next: AtomicPtr::new(0 as *mut Node<T>),
- value: v,
- })
- }
-}
-
-impl<T: Send> Queue<T> {
- /// Creates a new queue that is safe to share among multiple producers and
- /// one consumer.
- pub fn new() -> Queue<T> {
- let stub = unsafe { Node::new(None) };
- Queue {
- head: AtomicPtr::new(stub),
- tail: UnsafeCell::new(stub),
- }
- }
-
- /// Pushes a new value onto this queue.
- pub fn push(&self, t: T) {
- unsafe {
- let n = Node::new(Some(t));
- let prev = self.head.swap(n, AcqRel);
- (*prev).next.store(n, Release);
- }
- }
-
- /// Pops some data from this queue.
- ///
- /// Note that the current implementation means that this function cannot
- /// return `Option<T>`. It is possible for this queue to be in an
- /// inconsistent state where many pushes have succeeded and completely
- /// finished, but pops cannot return `Some(t)`. This inconsistent state
- /// happens when a pusher is pre-empted at an inopportune moment.
- ///
-    /// This inconsistent state means that this queue does indeed have data, but
-    /// it does not have access to it at this time.
- pub fn pop(&self) -> PopResult<T> {
- unsafe {
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
-
- if !next.is_null() {
- *self.tail.get() = next;
- assert!((*tail).value.is_none());
- assert!((*next).value.is_some());
- let ret = (*next).value.take().unwrap();
- let _: Box<Node<T>> = mem::transmute(tail);
- return Data(ret);
- }
-
- if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
- }
- }
-
- /// Attempts to pop data from this queue, but doesn't attempt too hard. This
- /// will canonicalize inconsistent states to a `None` value.
- pub fn casual_pop(&self) -> Option<T> {
- match self.pop() {
- Data(t) => Some(t),
- Empty | Inconsistent => None,
- }
- }
-}
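-
-// Because of the `Inconsistent` state, consumers usually retry rather than
-// give up. A minimal illustrative sketch (not used elsewhere in this module)
-// that spins until the queue is either drained or truly empty:
-#[allow(dead_code)]
-fn pop_or_spin<T: Send>(q: &Queue<T>) -> Option<T> {
-    loop {
-        match q.pop() {
-            Data(t) => return Some(t), // a value was popped
-            Empty => return None,      // the queue is truly empty
-            Inconsistent => {}         // a pusher is mid-flight; retry
-        }
-    }
-}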
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Queue<T> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.tail.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Relaxed);
- let _: Box<Node<T>> = mem::transmute(cur);
- cur = next;
- }
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::prelude::*;
-
- use alloc::arc::Arc;
-
- use super::{Queue, Data, Empty, Inconsistent};
-
- #[test]
- fn test_full() {
- let q = Queue::new();
- q.push(box 1i);
- q.push(box 2i);
- }
-
- #[test]
- fn test() {
- let nthreads = 8u;
- let nmsgs = 1000u;
- let q = Queue::new();
- match q.pop() {
- Empty => {}
- Inconsistent | Data(..) => panic!()
- }
- let (tx, rx) = channel();
- let q = Arc::new(q);
-
- for _ in range(0, nthreads) {
- let tx = tx.clone();
- let q = q.clone();
- spawn(proc() {
- for i in range(0, nmsgs) {
- q.push(i);
- }
- tx.send(());
- });
- }
-
- let mut i = 0u;
- while i < nthreads * nmsgs {
- match q.pop() {
- Empty | Inconsistent => {},
- Data(_) => { i += 1 }
- }
- }
- drop(tx);
- for _ in range(0, nthreads) {
- rx.recv();
- }
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A simple native mutex implementation. Warning: this API is likely
-//! to change soon.
-
-#![allow(dead_code)]
-
-use core::prelude::*;
-use alloc::boxed::Box;
-use rustrt::mutex;
-
-pub const LOCKED: uint = 1 << 0;
-pub const BLOCKED: uint = 1 << 1;
-
-/// A mutual exclusion primitive useful for protecting shared data
-///
-/// This mutex will properly block tasks waiting for the lock to become
-/// available. The mutex can also be statically initialized or created via a
-/// `new` constructor.
-///
-/// # Example
-///
-/// ```rust
-/// use sync::mutex::Mutex;
-///
-/// let m = Mutex::new();
-/// let guard = m.lock();
-/// // do some work
-/// drop(guard); // unlock the lock
-/// ```
-pub struct Mutex {
- // Note that this static mutex is in a *box*, not inlined into the struct
- // itself. This is done for memory safety reasons with the usage of a
-    // StaticNativeMutex inside the `StaticMutex` below. Once a native mutex has
- // been used once, its address can never change (it can't be moved). This
- // mutex type can be safely moved at any time, so to ensure that the native
- // mutex is used correctly we box the inner lock to give it a constant
- // address.
- lock: Box<StaticMutex>,
-}
-
-/// The static mutex type is provided to allow for static allocation of mutexes.
-///
-/// Note that this is a separate type because using a Mutex correctly means that
-/// it needs to have a destructor run. In Rust, statics are not allowed to have
-/// destructors. As a result, a `StaticMutex` has one extra method when compared
-/// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
-/// documentation can be found directly on the method.
-///
-/// # Example
-///
-/// ```rust
-/// use sync::mutex::{StaticMutex, MUTEX_INIT};
-///
-/// static LOCK: StaticMutex = MUTEX_INIT;
-///
-/// {
-/// let _g = LOCK.lock();
-/// // do some productive work
-/// }
-/// // lock is unlocked here.
-/// ```
-pub struct StaticMutex {
- lock: mutex::StaticNativeMutex,
-}
-
-/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
-/// dropped (falls out of scope), the lock will be unlocked.
-#[must_use]
-pub struct Guard<'a> {
- guard: mutex::LockGuard<'a>,
-}
-
-fn lift_guard(guard: mutex::LockGuard) -> Guard {
- Guard { guard: guard }
-}
-
-/// Static initialization of a mutex. This constant can be used to initialize
-/// other mutex constants.
-pub const MUTEX_INIT: StaticMutex = StaticMutex {
- lock: mutex::NATIVE_MUTEX_INIT
-};
-
-impl StaticMutex {
- /// Attempts to grab this lock, see `Mutex::try_lock`
- pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
- unsafe { self.lock.trylock().map(lift_guard) }
- }
-
- /// Acquires this lock, see `Mutex::lock`
- pub fn lock<'a>(&'a self) -> Guard<'a> {
- lift_guard(unsafe { self.lock.lock() })
- }
-
- /// Deallocates resources associated with this static mutex.
- ///
- /// This method is unsafe because it provides no guarantees that there are
- /// no active users of this mutex, and safety is not guaranteed if there are
- /// active users of this mutex.
- ///
- /// This method is required to ensure that there are no memory leaks on
- /// *all* platforms. It may be the case that some platforms do not leak
- /// memory if this method is not called, but this is not guaranteed to be
- /// true on all platforms.
- pub unsafe fn destroy(&self) {
- self.lock.destroy()
- }
-}
-
-impl Mutex {
- /// Creates a new mutex in an unlocked state ready for use.
- pub fn new() -> Mutex {
- Mutex {
- lock: box StaticMutex {
- lock: unsafe { mutex::StaticNativeMutex::new() },
- }
- }
- }
-
- /// Attempts to acquire this lock.
- ///
- /// If the lock could not be acquired at this time, then `None` is returned.
- /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
- /// guard is dropped.
- ///
- /// This function does not block.
- pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
- self.lock.try_lock()
- }
-
- /// Acquires a mutex, blocking the current task until it is able to do so.
- ///
-    /// This function will block the local task until it is able to acquire
- /// the mutex. Upon returning, the task is the only task with the mutex
- /// held. An RAII guard is returned to allow scoped unlock of the lock. When
- /// the guard goes out of scope, the mutex will be unlocked.
- pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
-}
-
-impl Drop for Mutex {
- fn drop(&mut self) {
- // This is actually safe b/c we know that there is no further usage of
- // this mutex (it's up to the user to arrange for a mutex to get
- // dropped, that's not our job)
- unsafe { self.lock.destroy() }
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::prelude::*;
- use super::{Mutex, StaticMutex, MUTEX_INIT};
-
- #[test]
- fn smoke() {
- let m = Mutex::new();
- drop(m.lock());
- drop(m.lock());
- }
-
- #[test]
- fn smoke_static() {
- static M: StaticMutex = MUTEX_INIT;
- unsafe {
- drop(M.lock());
- drop(M.lock());
- M.destroy();
- }
- }
-
- #[test]
- fn lots_and_lots() {
- static M: StaticMutex = MUTEX_INIT;
- static mut CNT: uint = 0;
- static J: uint = 1000;
- static K: uint = 3;
-
- fn inc() {
- for _ in range(0, J) {
- unsafe {
- let _g = M.lock();
- CNT += 1;
- }
- }
- }
-
- let (tx, rx) = channel();
- for _ in range(0, K) {
- let tx2 = tx.clone();
- spawn(proc() { inc(); tx2.send(()); });
- let tx2 = tx.clone();
- spawn(proc() { inc(); tx2.send(()); });
- }
-
- drop(tx);
- for _ in range(0, 2 * K) {
- rx.recv();
- }
- assert_eq!(unsafe {CNT}, J * K * 2);
- unsafe {
- M.destroy();
- }
- }
-
- #[test]
- fn trylock() {
- let m = Mutex::new();
- assert!(m.try_lock().is_some());
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A "once initialization" primitive
-//!
-//! This primitive is meant to be used to run one-time initialization. An
-//! example use case would be for initializing an FFI library.
-
-use core::prelude::*;
-
-use core::int;
-use core::atomic;
-
-use mutex::{StaticMutex, MUTEX_INIT};
-
-/// A synchronization primitive which can be used to run a one-time global
-/// initialization. Useful for one-time initialization for FFI or related
-/// functionality. This type can only be constructed with the `ONCE_INIT`
-/// value.
-///
-/// # Example
-///
-/// ```rust
-/// use sync::one::{Once, ONCE_INIT};
-///
-/// static START: Once = ONCE_INIT;
-///
-/// START.doit(|| {
-/// // run initialization here
-/// });
-/// ```
-pub struct Once {
- mutex: StaticMutex,
- cnt: atomic::AtomicInt,
- lock_cnt: atomic::AtomicInt,
-}
-
-/// Initialization value for static `Once` values.
-pub const ONCE_INIT: Once = Once {
- mutex: MUTEX_INIT,
- cnt: atomic::INIT_ATOMIC_INT,
- lock_cnt: atomic::INIT_ATOMIC_INT,
-};
-
-impl Once {
- /// Perform an initialization routine once and only once. The given closure
- /// will be executed if this is the first time `doit` has been called, and
- /// otherwise the routine will *not* be invoked.
- ///
- /// This method will block the calling task if another initialization
- /// routine is currently running.
- ///
- /// When this function returns, it is guaranteed that some initialization
- /// has run and completed (it may not be the closure specified).
- pub fn doit(&self, f: ||) {
- // Optimize common path: load is much cheaper than fetch_add.
- if self.cnt.load(atomic::SeqCst) < 0 {
- return
- }
-
- // Implementation-wise, this would seem like a fairly trivial primitive.
- // The sticking point is that our mutexes currently require an
- // allocation, and usage of a `Once` shouldn't leak this allocation.
- //
- // This means that there must be a deterministic destroyer of the mutex
- // contained within (because it's not needed after the initialization
- // has run).
- //
- // The general scheme here is to gate all future threads once
- // initialization has completed with a "very negative" count, and to
- // allow threads through to lock the mutex if they see a non-negative
- // count. For all threads grabbing the mutex, exactly one of them should
- // be responsible for unlocking the mutex, and this should only be done
- // once everyone else is done with the mutex.
- //
- // This atomicity is achieved by swapping a very negative value into the
- // shared count when the initialization routine has completed. This will
- // read the number of threads which will at some point attempt to
- // acquire the mutex. This count is then squirreled away in a separate
- // variable, and the last person on the way out of the mutex is then
- // responsible for destroying the mutex.
- //
- // It is crucial that the negative value is swapped in *after* the
- // initialization routine has completed because otherwise new threads
- // calling `doit` will return immediately before the initialization has
- // completed.
-
- let prev = self.cnt.fetch_add(1, atomic::SeqCst);
- if prev < 0 {
- // Make sure we never overflow: we'll never have int::MIN simultaneous
- // calls to `doit`, so this value can't wrap back around to 0
- self.cnt.store(int::MIN, atomic::SeqCst);
- return
- }
-
- // If the count is negative, then someone else finished the job,
- // otherwise we run the job and record how many people will try to grab
- // this lock
- let guard = self.mutex.lock();
- if self.cnt.load(atomic::SeqCst) > 0 {
- f();
- let prev = self.cnt.swap(int::MIN, atomic::SeqCst);
- self.lock_cnt.store(prev, atomic::SeqCst);
- }
- drop(guard);
-
- // Last one out cleans up after everyone else, no leaks!
- if self.lock_cnt.fetch_add(-1, atomic::SeqCst) == 1 {
- unsafe { self.mutex.destroy() }
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::prelude::*;
- use std::task;
- use super::{ONCE_INIT, Once};
-
- #[test]
- fn smoke_once() {
- static O: Once = ONCE_INIT;
- let mut a = 0i;
- O.doit(|| a += 1);
- assert_eq!(a, 1);
- O.doit(|| a += 1);
- assert_eq!(a, 1);
- }
-
- #[test]
- fn stampede_once() {
- static O: Once = ONCE_INIT;
- static mut run: bool = false;
-
- let (tx, rx) = channel();
- for _ in range(0u, 10) {
- let tx = tx.clone();
- spawn(proc() {
- for _ in range(0u, 4) { task::deschedule() }
- unsafe {
- O.doit(|| {
- assert!(!run);
- run = true;
- });
- assert!(run);
- }
- tx.send(());
- });
- }
-
- unsafe {
- O.doit(|| {
- assert!(!run);
- run = true;
- });
- assert!(run);
- }
-
- for _ in range(0u, 10) {
- rx.recv();
- }
- }
-}
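The file removed above is the "once initialization" primitive. A minimal sketch of how it was used across tasks, based on its own doc example and the `stampede_once` test (the `sync::one` path and `fn main` scaffolding are assumptions):

```rust
extern crate sync;

use sync::one::{Once, ONCE_INIT};

static START: Once = ONCE_INIT;

fn init_library() {
    // Exactly one caller runs the closure; every other caller blocks
    // until some initialization has completed, then returns.
    START.doit(|| {
        // e.g. initialize an FFI library here
    });
}

fn main() {
    for _ in range(0u, 10) {
        spawn(proc() { init_library(); });
    }
    init_library();
}
```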
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Raw concurrency primitives you know and love.
-//!
-//! These primitives are not recommended for general use, but are provided for
-//! flavorful use-cases. It is recommended to use the types at the top of the
-//! `sync` crate which wrap values directly and provide safer abstractions for
-//! containing data.
-
-use core::prelude::*;
-use self::ReacquireOrderLock::*;
-
-use core::atomic;
-use core::finally::Finally;
-use core::kinds::marker;
-use core::mem;
-use core::cell::UnsafeCell;
-use collections::Vec;
-
-use mutex;
-use comm::{Receiver, Sender, channel};
-
-/****************************************************************************
- * Internals
- ****************************************************************************/
-
-// Each waiting task receives on one of these.
-type WaitEnd = Receiver<()>;
-type SignalEnd = Sender<()>;
-// A doubly-ended queue of waiting tasks.
-struct WaitQueue {
- head: Receiver<SignalEnd>,
- tail: Sender<SignalEnd>,
-}
-
-impl WaitQueue {
- fn new() -> WaitQueue {
- let (block_tail, block_head) = channel();
- WaitQueue { head: block_head, tail: block_tail }
- }
-
- // Signals one live task from the queue.
- fn signal(&self) -> bool {
- match self.head.try_recv() {
- Ok(ch) => {
- // Send a wakeup signal. If the waiter was killed, its port will
- // have closed. Keep trying until we get a live task.
- if ch.send_opt(()).is_ok() {
- true
- } else {
- self.signal()
- }
- }
- _ => false
- }
- }
-
- fn broadcast(&self) -> uint {
- let mut count = 0;
- loop {
- match self.head.try_recv() {
- Ok(ch) => {
- if ch.send_opt(()).is_ok() {
- count += 1;
- }
- }
- _ => break
- }
- }
- count
- }
-
- fn wait_end(&self) -> WaitEnd {
- let (signal_end, wait_end) = channel();
- self.tail.send(signal_end);
- wait_end
- }
-}
-
-// The building-block used to make semaphores, mutexes, and rwlocks.
-struct Sem<Q> {
- lock: mutex::Mutex,
- // N.B., we need Sem to be `Sync`, but the WaitQueue type is not send/share
- // (for good reason). We have an internal invariant on this semaphore,
- // however, that the queue is never accessed outside of a locked
- // context.
- inner: UnsafeCell<SemInner<Q>>
-}
-
-struct SemInner<Q> {
- count: int,
- waiters: WaitQueue,
- // Can be either unit or another waitqueue. Some sems shouldn't come with
- // a condition variable attached, others should.
- blocked: Q,
-}
-
-#[must_use]
-struct SemGuard<'a, Q:'a> {
- sem: &'a Sem<Q>,
-}
-
-impl<Q: Send> Sem<Q> {
- fn new(count: int, q: Q) -> Sem<Q> {
- assert!(count >= 0,
- "semaphores cannot be initialized with negative values");
- Sem {
- lock: mutex::Mutex::new(),
- inner: UnsafeCell::new(SemInner {
- waiters: WaitQueue::new(),
- count: count,
- blocked: q,
- })
- }
- }
-
- unsafe fn with(&self, f: |&mut SemInner<Q>|) {
- let _g = self.lock.lock();
- // This &mut is safe because, due to the lock, we are the only one who can touch the data
- f(&mut *self.inner.get())
- }
-
- pub fn acquire(&self) {
- unsafe {
- let mut waiter_nobe = None;
- self.with(|state| {
- state.count -= 1;
- if state.count < 0 {
- // Create waiter nobe, enqueue ourself, and tell
- // outer scope we need to block.
- waiter_nobe = Some(state.waiters.wait_end());
- }
- });
- // Uncomment if you wish to test for sem races. Not
- // valgrind-friendly.
- /* for _ in range(0u, 1000) { task::deschedule(); } */
- // Need to wait outside the exclusive.
- if waiter_nobe.is_some() {
- let _ = waiter_nobe.unwrap().recv();
- }
- }
- }
-
- pub fn release(&self) {
- unsafe {
- self.with(|state| {
- state.count += 1;
- if state.count <= 0 {
- state.waiters.signal();
- }
- })
- }
- }
-
- pub fn access<'a>(&'a self) -> SemGuard<'a, Q> {
- self.acquire();
- SemGuard { sem: self }
- }
-}
-
-#[unsafe_destructor]
-impl<'a, Q: Send> Drop for SemGuard<'a, Q> {
- fn drop(&mut self) {
- self.sem.release();
- }
-}
-
-impl Sem<Vec<WaitQueue>> {
- fn new_and_signal(count: int, num_condvars: uint) -> Sem<Vec<WaitQueue>> {
- let mut queues = Vec::new();
- for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); }
- Sem::new(count, queues)
- }
-
- // The only other places that condvars get built are rwlock.write_cond()
- // and rwlock_write_mode.
- pub fn access_cond<'a>(&'a self) -> SemCondGuard<'a> {
- SemCondGuard {
- guard: self.access(),
- cvar: Condvar { sem: self, order: Nothing, nocopy: marker::NoCopy },
- }
- }
-}
-
-// FIXME(#3598): Want to use an Option down below, but we need a custom enum
-// that's not polymorphic to get around the fact that lifetimes are invariant
-// inside of type parameters.
-enum ReacquireOrderLock<'a> {
- Nothing, // c.c
- Just(&'a Semaphore),
-}
-
-/// A mechanism for atomic-unlock-and-deschedule blocking and signalling.
-pub struct Condvar<'a> {
- // The 'Sem' object associated with this condvar. This is the one that's
- // atomically-unlocked-and-descheduled upon and reacquired during wakeup.
- sem: &'a Sem<Vec<WaitQueue> >,
- // This is (can be) an extra semaphore which is held around the reacquire
- // operation on the first one. This is only used in cvars associated with
- // rwlocks, and is needed to ensure that, when a downgrader is trying to
- // hand off the access lock (which would be the first field, here), a 2nd
- // writer waking up from a cvar wait can't race with a reader to steal it.
- // See the comment in write_cond for more detail.
- order: ReacquireOrderLock<'a>,
- // Make sure condvars are non-copyable.
- nocopy: marker::NoCopy,
-}
-
-impl<'a> Condvar<'a> {
- /// Atomically drop the associated lock, and block until a signal is sent.
- ///
- /// # Panics
- ///
- /// A task which is killed while waiting on a condition variable will wake
- /// up, panic, and unlock the associated lock as it unwinds.
- pub fn wait(&self) { self.wait_on(0) }
-
- /// As wait(), but can specify which of multiple condition variables to
- /// wait on. Only a signal_on() or broadcast_on() with the same condvar_id
- /// will wake this thread.
- ///
- /// The associated lock must have been initialized with an appropriate
- /// number of condvars. The condvar_id must be between 0 and num_condvars-1
- /// or else this call will panic.
- ///
- /// wait() is equivalent to wait_on(0).
- pub fn wait_on(&self, condvar_id: uint) {
- let mut wait_end = None;
- let mut out_of_bounds = None;
- // Release lock, 'atomically' enqueuing ourselves in so doing.
- unsafe {
- self.sem.with(|state| {
- if condvar_id < state.blocked.len() {
- // Drop the lock.
- state.count += 1;
- if state.count <= 0 {
- state.waiters.signal();
- }
- // Create waiter nobe, and enqueue ourself to
- // be woken up by a signaller.
- wait_end = Some(state.blocked[condvar_id].wait_end());
- } else {
- out_of_bounds = Some(state.blocked.len());
- }
- })
- }
-
- // If deschedule checks start getting inserted anywhere, we can be
- // killed before or after enqueueing.
- check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || {
- // Unconditionally "block". (Might not actually block if a
- // signaller already sent -- I mean 'unconditionally' in contrast
- // with acquire().)
- (|| {
- let _ = wait_end.take().unwrap().recv();
- }).finally(|| {
- // Reacquire the condvar.
- match self.order {
- Just(lock) => {
- let _g = lock.access();
- self.sem.acquire();
- }
- Nothing => self.sem.acquire(),
- }
- })
- })
- }
-
- /// Wake up a blocked task. Returns false if there was no blocked task.
- pub fn signal(&self) -> bool { self.signal_on(0) }
-
- /// As signal, but with a specified condvar_id. See wait_on.
- pub fn signal_on(&self, condvar_id: uint) -> bool {
- unsafe {
- let mut out_of_bounds = None;
- let mut result = false;
- self.sem.with(|state| {
- if condvar_id < state.blocked.len() {
- result = state.blocked[condvar_id].signal();
- } else {
- out_of_bounds = Some(state.blocked.len());
- }
- });
- check_cvar_bounds(out_of_bounds,
- condvar_id,
- "cond.signal_on()",
- || result)
- }
- }
-
- /// Wake up all blocked tasks. Returns the number of tasks woken.
- pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
-
- /// As broadcast, but with a specified condvar_id. See wait_on.
- pub fn broadcast_on(&self, condvar_id: uint) -> uint {
- let mut out_of_bounds = None;
- let mut queue = None;
- unsafe {
- self.sem.with(|state| {
- if condvar_id < state.blocked.len() {
- // To avoid :broadcast_heavy, we make a new waitqueue,
- // swap it out with the old one, and broadcast on the
- // old one outside of the little-lock.
- queue = Some(mem::replace(&mut state.blocked[condvar_id],
- WaitQueue::new()));
- } else {
- out_of_bounds = Some(state.blocked.len());
- }
- });
- check_cvar_bounds(out_of_bounds,
- condvar_id,
- "cond.signal_on()",
- || {
- queue.take().unwrap().broadcast()
- })
- }
- }
-}
-
-// Checks whether a condvar ID was out of bounds, and panics if so, or does
-// something else next on success.
-#[inline]
-fn check_cvar_bounds<U>(
- out_of_bounds: Option<uint>,
- id: uint,
- act: &str,
- blk: || -> U)
- -> U {
- match out_of_bounds {
- Some(0) =>
- panic!("{} with illegal ID {} - this lock has no condvars!", act, id),
- Some(length) =>
- panic!("{} with illegal ID {} - ID must be less than {}", act, id, length),
- None => blk()
- }
-}
-
-#[must_use]
-struct SemCondGuard<'a> {
- guard: SemGuard<'a, Vec<WaitQueue>>,
- cvar: Condvar<'a>,
-}
-
-/****************************************************************************
- * Semaphores
- ****************************************************************************/
-
-/// A counting, blocking, bounded-waiting semaphore.
-pub struct Semaphore {
- sem: Sem<()>,
-}
-
-/// An RAII guard used to represent an acquired resource to a semaphore. When
-/// dropped, this value will release the resource back to the semaphore.
-#[must_use]
-pub struct SemaphoreGuard<'a> {
- _guard: SemGuard<'a, ()>,
-}
-
-impl Semaphore {
- /// Create a new semaphore with the specified count.
- ///
- /// # Panics
- ///
- /// This function will panic if `count` is negative.
- pub fn new(count: int) -> Semaphore {
- Semaphore { sem: Sem::new(count, ()) }
- }
-
- /// Acquire a resource represented by the semaphore. Blocks if necessary
- /// until resource(s) become available.
- pub fn acquire(&self) { self.sem.acquire() }
-
- /// Release a held resource represented by the semaphore. Wakes a blocked
- /// contending task, if any exist. Won't block the caller.
- pub fn release(&self) { self.sem.release() }
-
- /// Acquire a resource of this semaphore, returning an RAII guard which will
- /// release the resource when dropped.
- pub fn access<'a>(&'a self) -> SemaphoreGuard<'a> {
- SemaphoreGuard { _guard: self.sem.access() }
- }
-}
-
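Before the deletion moves on to the wrapped mutex, a short sketch of how the `Semaphore` above was used, mirroring the `test_sem_multi_resource` test at the bottom of this file (the `sync::Arc` and `sync::raw` paths plus `fn main` scaffolding are assumptions):

```rust
extern crate sync;

use sync::Arc;
use sync::raw::Semaphore;

fn main() {
    // Two units of the resource, so both tasks can hold access at once.
    let s = Arc::new(Semaphore::new(2));
    let s2 = s.clone();
    spawn(proc() {
        let _g = s2.access(); // one unit; released when the guard drops
    });
    let _g = s.access();      // the other unit
}
```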
-/****************************************************************************
- * Mutexes
- ****************************************************************************/
-
-/// A blocking, bounded-waiting, mutual exclusion lock with an associated
-/// FIFO condition variable.
-///
-/// # Panics
-///
- /// A task which panics while holding a mutex will unlock the mutex as it
-/// unwinds.
-pub struct Mutex {
- sem: Sem<Vec<WaitQueue>>,
-}
-
-/// An RAII structure which is used to gain access to a mutex's condition
-/// variable. Additionally, when a value of this type is dropped, the
-/// corresponding mutex is also unlocked.
-#[must_use]
-pub struct MutexGuard<'a> {
- _guard: SemGuard<'a, Vec<WaitQueue>>,
- /// Inner condition variable which is connected to the outer mutex, and can
- /// be used for atomic-unlock-and-deschedule.
- pub cond: Condvar<'a>,
-}
-
-impl Mutex {
- /// Create a new mutex, with one associated condvar.
- pub fn new() -> Mutex { Mutex::new_with_condvars(1) }
-
- /// Create a new mutex, with a specified number of associated condvars. This
- /// will allow calling wait_on/signal_on/broadcast_on with condvar IDs
- /// between 0 and num_condvars-1. (If num_condvars is 0, `lock` will still
- /// be allowed, but any operations on the condvar will panic.)
- pub fn new_with_condvars(num_condvars: uint) -> Mutex {
- Mutex { sem: Sem::new_and_signal(1, num_condvars) }
- }
-
- /// Acquires ownership of this mutex, returning an RAII guard which will
- /// unlock the mutex when dropped. The associated condition variable can
- /// also be accessed through the returned guard.
- pub fn lock<'a>(&'a self) -> MutexGuard<'a> {
- let SemCondGuard { guard, cvar } = self.sem.access_cond();
- MutexGuard { _guard: guard, cond: cvar }
- }
-}
-
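A sketch of this wrapped mutex together with its built-in condvar, following the `test_mutex_cond_wait` test below; `wait()` atomically releases the lock, blocks, and reacquires on wakeup (paths and scaffolding assumed):

```rust
extern crate sync;

use sync::Arc;
use sync::raw::Mutex;

fn main() {
    let m = Arc::new(Mutex::new());
    let m2 = m.clone();

    let lock = m.lock();
    spawn(proc() {
        // The parent's wait() below releases the mutex, so the child
        // can lock it and signal the condvar the parent is blocked on.
        let lock = m2.lock();
        lock.cond.signal();
    });
    lock.cond.wait(); // atomically unlock, block, then relock
}
```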
-/****************************************************************************
- * Reader-writer locks
- ****************************************************************************/
-
-// NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
-
-/// A blocking, no-starvation, reader-writer lock with an associated condvar.
-///
-/// # Panics
-///
-/// A task which panics while holding an rwlock will unlock the rwlock as it
-/// unwinds.
-pub struct RWLock {
- order_lock: Semaphore,
- access_lock: Sem<Vec<WaitQueue>>,
-
- // The only way the count flag is ever accessed is with xadd. Since it is
- // a read-modify-write operation, multiple xadds on different cores will
- // always be consistent with respect to each other, so a monotonic/relaxed
- // consistency ordering suffices (i.e., no extra barriers are needed).
- //
- // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
- // acquire/release orderings superfluously. Change these someday.
- read_count: atomic::AtomicUint,
-}
-
-/// An RAII helper which is created by acquiring a read lock on an RWLock. When
-/// dropped, this will unlock the RWLock.
-#[must_use]
-pub struct RWLockReadGuard<'a> {
- lock: &'a RWLock,
-}
-
-/// An RAII helper which is created by acquiring a write lock on an RWLock. When
-/// dropped, this will unlock the RWLock.
-///
-/// A value of this type can also be consumed to downgrade to a read-only lock.
-#[must_use]
-pub struct RWLockWriteGuard<'a> {
- lock: &'a RWLock,
- /// Inner condition variable that is connected to the write-mode of the
- /// outer rwlock.
- pub cond: Condvar<'a>,
-}
-
-impl RWLock {
- /// Create a new rwlock, with one associated condvar.
- pub fn new() -> RWLock { RWLock::new_with_condvars(1) }
-
- /// Create a new rwlock, with a specified number of associated condvars.
- /// Similar to mutex_with_condvars.
- pub fn new_with_condvars(num_condvars: uint) -> RWLock {
- RWLock {
- order_lock: Semaphore::new(1),
- access_lock: Sem::new_and_signal(1, num_condvars),
- read_count: atomic::AtomicUint::new(0),
- }
- }
-
- /// Acquires a read-lock, returning an RAII guard that will unlock the lock
- /// when dropped. Calls to 'read' from other tasks may run concurrently with
- /// this one.
- pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
- let _guard = self.order_lock.access();
- let old_count = self.read_count.fetch_add(1, atomic::Acquire);
- if old_count == 0 {
- self.access_lock.acquire();
- }
- RWLockReadGuard { lock: self }
- }
-
- /// Acquire a write-lock, returning an RAII guard that will unlock the lock
- /// when dropped. No calls to 'read' or 'write' from other tasks will run
- /// concurrently with this one.
- ///
- /// You can also downgrade a write to a read by calling the `downgrade`
- /// method on the returned guard. Additionally, the guard will contain a
- /// `Condvar` attached to this lock.
- ///
- /// # Example
- ///
- /// ```rust
- /// use sync::raw::RWLock;
- ///
- /// let lock = RWLock::new();
- /// let write = lock.write();
- /// // ... exclusive access ...
- /// let read = write.downgrade();
- /// // ... shared access ...
- /// drop(read);
- /// ```
- pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a> {
- let _g = self.order_lock.access();
- self.access_lock.acquire();
-
- // It's important to thread our order lock into the condvar, so that
- // when a cond.wait() wakes up, the waiter holds the order lock while reacquiring the
- // access lock. If we permitted a waking-up writer to "cut in line",
- // there could arise a subtle race when a downgrader attempts to hand
- // off the reader cloud lock to a waiting reader. This race is tested
- // in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like:
- // T1 (writer) T2 (downgrader) T3 (reader)
- // [in cond.wait()]
- // [locks for writing]
- // [holds access_lock]
- // [is signalled, perhaps by
- // downgrader or a 4th thread]
- // tries to lock access(!)
- // lock order_lock
- // xadd read_count[0->1]
- // tries to lock access
- // [downgrade]
- // xadd read_count[1->2]
- // unlock access
- // Since T1 contended on the access lock before T3 did, it will steal
- // the lock handoff. Adding order_lock in the condvar reacquire path
- // solves this because T1 will hold order_lock while waiting on access,
- // which will cause T3 to have to wait until T1 finishes its write,
- // which can't happen until T2 finishes the downgrade-read entirely.
- // The astute reader will also note that making waking writers use the
- // order_lock is better for not starving readers.
- RWLockWriteGuard {
- lock: self,
- cond: Condvar {
- sem: &self.access_lock,
- order: Just(&self.order_lock),
- nocopy: marker::NoCopy,
- }
- }
- }
-}
-
-impl<'a> RWLockWriteGuard<'a> {
- /// Consumes this write lock and converts it into a read lock.
- pub fn downgrade(self) -> RWLockReadGuard<'a> {
- let lock = self.lock;
- // Don't run the destructor of the write guard, we're in charge of
- // things from now on
- unsafe { mem::forget(self) }
-
- let old_count = lock.read_count.fetch_add(1, atomic::Release);
- // If another reader was already blocking, we need to hand-off
- // the "reader cloud" access lock to them.
- if old_count != 0 {
- // Guaranteed not to let another writer in, because
- // another reader was holding the order_lock. Hence they
- // must be the one to get the access_lock (because all
- // access_locks are acquired with order_lock held). See
- // the comment in write_cond for more justification.
- lock.access_lock.release();
- }
- RWLockReadGuard { lock: lock }
- }
-}
-
-#[unsafe_destructor]
-impl<'a> Drop for RWLockWriteGuard<'a> {
- fn drop(&mut self) {
- self.lock.access_lock.release();
- }
-}
-
-#[unsafe_destructor]
-impl<'a> Drop for RWLockReadGuard<'a> {
- fn drop(&mut self) {
- let old_count = self.lock.read_count.fetch_sub(1, atomic::Release);
- assert!(old_count > 0);
- if old_count == 1 {
- // Note: this release used to be outside of a locked access
- // to exclusive-protected state. If this code is ever
- // converted back to such (instead of using atomic ops),
- // this access MUST NOT go inside the exclusive access.
- self.lock.access_lock.release();
- }
- }
-}
-
-/****************************************************************************
- * Tests
- ****************************************************************************/
-
-#[cfg(test)]
-mod tests {
- pub use self::RWLockMode::*;
-
- use std::prelude::*;
-
- use Arc;
- use super::{Semaphore, Mutex, RWLock, Condvar};
-
- use std::mem;
- use std::result;
- use std::task;
-
- /************************************************************************
- * Semaphore tests
- ************************************************************************/
- #[test]
- fn test_sem_acquire_release() {
- let s = Semaphore::new(1);
- s.acquire();
- s.release();
- s.acquire();
- }
- #[test]
- fn test_sem_basic() {
- let s = Semaphore::new(1);
- let _g = s.access();
- }
- #[test]
- #[should_fail]
- fn test_sem_basic2() {
- Semaphore::new(-1);
- }
- #[test]
- fn test_sem_as_mutex() {
- let s = Arc::new(Semaphore::new(1));
- let s2 = s.clone();
- task::spawn(proc() {
- let _g = s2.access();
- for _ in range(0u, 5) { task::deschedule(); }
- });
- let _g = s.access();
- for _ in range(0u, 5) { task::deschedule(); }
- }
- #[test]
- fn test_sem_as_cvar() {
- /* Child waits and parent signals */
- let (tx, rx) = channel();
- let s = Arc::new(Semaphore::new(0));
- let s2 = s.clone();
- task::spawn(proc() {
- s2.acquire();
- tx.send(());
- });
- for _ in range(0u, 5) { task::deschedule(); }
- s.release();
- let _ = rx.recv();
-
- /* Parent waits and child signals */
- let (tx, rx) = channel();
- let s = Arc::new(Semaphore::new(0));
- let s2 = s.clone();
- task::spawn(proc() {
- for _ in range(0u, 5) { task::deschedule(); }
- s2.release();
- let _ = rx.recv();
- });
- s.acquire();
- tx.send(());
- }
- #[test]
- fn test_sem_multi_resource() {
- // Parent and child both get in the critical section at the same
- // time, and shake hands.
- let s = Arc::new(Semaphore::new(2));
- let s2 = s.clone();
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- task::spawn(proc() {
- let _g = s2.access();
- let _ = rx2.recv();
- tx1.send(());
- });
- let _g = s.access();
- tx2.send(());
- let _ = rx1.recv();
- }
- #[test]
- fn test_sem_runtime_friendly_blocking() {
- // Force the runtime to schedule two threads on the same sched_loop.
- // When one blocks, it should schedule the other one.
- let s = Arc::new(Semaphore::new(1));
- let s2 = s.clone();
- let (tx, rx) = channel();
- {
- let _g = s.access();
- task::spawn(proc() {
- tx.send(());
- drop(s2.access());
- tx.send(());
- });
- rx.recv(); // wait for child to come alive
- for _ in range(0u, 5) { task::deschedule(); } // let the child contend
- }
- rx.recv(); // wait for child to be done
- }
- /************************************************************************
- * Mutex tests
- ************************************************************************/
- #[test]
- fn test_mutex_lock() {
- // Unsafely achieve shared state, and do the textbook
- // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance.
- let (tx, rx) = channel();
- let m = Arc::new(Mutex::new());
- let m2 = m.clone();
- let mut sharedstate = box 0;
- {
- let ptr: *mut int = &mut *sharedstate;
- task::spawn(proc() {
- access_shared(ptr, &m2, 10);
- tx.send(());
- });
- }
- {
- access_shared(&mut *sharedstate, &m, 10);
- let _ = rx.recv();
-
- assert_eq!(*sharedstate, 20);
- }
-
- fn access_shared(sharedstate: *mut int, m: &Arc<Mutex>, n: uint) {
- for _ in range(0u, n) {
- let _g = m.lock();
- let oldval = unsafe { *sharedstate };
- task::deschedule();
- unsafe { *sharedstate = oldval + 1; }
- }
- }
- }
- #[test]
- fn test_mutex_cond_wait() {
- let m = Arc::new(Mutex::new());
-
- // Child wakes up parent
- {
- let lock = m.lock();
- let m2 = m.clone();
- task::spawn(proc() {
- let lock = m2.lock();
- let woken = lock.cond.signal();
- assert!(woken);
- });
- lock.cond.wait();
- }
- // Parent wakes up child
- let (tx, rx) = channel();
- let m3 = m.clone();
- task::spawn(proc() {
- let lock = m3.lock();
- tx.send(());
- lock.cond.wait();
- tx.send(());
- });
- rx.recv(); // Wait until child gets in the mutex
- {
- let lock = m.lock();
- let woken = lock.cond.signal();
- assert!(woken);
- }
- rx.recv(); // Wait until child wakes up
- }
-
- fn test_mutex_cond_broadcast_helper(num_waiters: uint) {
- let m = Arc::new(Mutex::new());
- let mut rxs = Vec::new();
-
- for _ in range(0u, num_waiters) {
- let mi = m.clone();
- let (tx, rx) = channel();
- rxs.push(rx);
- task::spawn(proc() {
- let lock = mi.lock();
- tx.send(());
- lock.cond.wait();
- tx.send(());
- });
- }
-
- // wait until all children get in the mutex
- for rx in rxs.iter_mut() { rx.recv(); }
- {
- let lock = m.lock();
- let num_woken = lock.cond.broadcast();
- assert_eq!(num_woken, num_waiters);
- }
- // wait until all children wake up
- for rx in rxs.iter_mut() { rx.recv(); }
- }
- #[test]
- fn test_mutex_cond_broadcast() {
- test_mutex_cond_broadcast_helper(12);
- }
- #[test]
- fn test_mutex_cond_broadcast_none() {
- test_mutex_cond_broadcast_helper(0);
- }
- #[test]
- fn test_mutex_cond_no_waiter() {
- let m = Arc::new(Mutex::new());
- let m2 = m.clone();
- let _ = task::try(proc() {
- drop(m.lock());
- });
- let lock = m2.lock();
- assert!(!lock.cond.signal());
- }
- #[test]
- fn test_mutex_killed_simple() {
- use std::any::Any;
-
- // Mutex must get automatically unlocked if panicked/killed within.
- let m = Arc::new(Mutex::new());
- let m2 = m.clone();
-
- let result: result::Result<(), Box<Any + Send>> = task::try(proc() {
- let _lock = m2.lock();
- panic!();
- });
- assert!(result.is_err());
- // child task must have finished by the time try returns
- drop(m.lock());
- }
- #[test]
- fn test_mutex_cond_signal_on_0() {
- // Tests that signal_on(0) is equivalent to signal().
- let m = Arc::new(Mutex::new());
- let lock = m.lock();
- let m2 = m.clone();
- task::spawn(proc() {
- let lock = m2.lock();
- lock.cond.signal_on(0);
- });
- lock.cond.wait();
- }
- #[test]
- fn test_mutex_no_condvars() {
- let result = task::try(proc() {
- let m = Mutex::new_with_condvars(0);
- m.lock().cond.wait();
- });
- assert!(result.is_err());
- let result = task::try(proc() {
- let m = Mutex::new_with_condvars(0);
- m.lock().cond.signal();
- });
- assert!(result.is_err());
- let result = task::try(proc() {
- let m = Mutex::new_with_condvars(0);
- m.lock().cond.broadcast();
- });
- assert!(result.is_err());
- }
- /************************************************************************
- * Reader/writer lock tests
- ************************************************************************/
- #[cfg(test)]
- pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead }
- #[cfg(test)]
- fn lock_rwlock_in_mode(x: &Arc<RWLock>, mode: RWLockMode, blk: ||) {
- match mode {
- Read => { let _g = x.read(); blk() }
- Write => { let _g = x.write(); blk() }
- Downgrade => { let _g = x.write(); blk() }
- DowngradeRead => { let _g = x.write().downgrade(); blk() }
- }
- }
- #[cfg(test)]
- fn test_rwlock_exclusion(x: Arc<RWLock>,
- mode1: RWLockMode,
- mode2: RWLockMode) {
- // Test mutual exclusion between readers and writers. Just like the
- // mutex mutual exclusion test, a ways above.
- let (tx, rx) = channel();
- let x2 = x.clone();
- let mut sharedstate = box 0;
- {
- let ptr: *const int = &*sharedstate;
- task::spawn(proc() {
- let sharedstate: &mut int =
- unsafe { mem::transmute(ptr) };
- access_shared(sharedstate, &x2, mode1, 10);
- tx.send(());
- });
- }
- {
- access_shared(&mut *sharedstate, &x, mode2, 10);
- let _ = rx.recv();
-
- assert_eq!(*sharedstate, 20);
- }
-
- fn access_shared(sharedstate: &mut int, x: &Arc<RWLock>,
- mode: RWLockMode, n: uint) {
- for _ in range(0u, n) {
- lock_rwlock_in_mode(x, mode, || {
- let oldval = *sharedstate;
- task::deschedule();
- *sharedstate = oldval + 1;
- })
- }
- }
- }
- #[test]
- fn test_rwlock_readers_wont_modify_the_data() {
- test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Read);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Downgrade);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Read);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead);
- test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write);
- }
- #[test]
- fn test_rwlock_writers_and_writers() {
- test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Downgrade);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write);
- test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade);
- }
- #[cfg(test)]
- fn test_rwlock_handshake(x: Arc<RWLock>,
- mode1: RWLockMode,
- mode2: RWLockMode,
- make_mode2_go_first: bool) {
- // Much like sem_multi_resource.
- let x2 = x.clone();
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- task::spawn(proc() {
- if !make_mode2_go_first {
- rx2.recv(); // parent sends to us once it locks, or ...
- }
- lock_rwlock_in_mode(&x2, mode2, || {
- if make_mode2_go_first {
- tx1.send(()); // ... we send to it once we lock
- }
- rx2.recv();
- tx1.send(());
- })
- });
- if make_mode2_go_first {
- rx1.recv(); // child sends to us once it locks, or ...
- }
- lock_rwlock_in_mode(&x, mode1, || {
- if !make_mode2_go_first {
- tx2.send(()); // ... we send to it once we lock
- }
- tx2.send(());
- rx1.recv();
- })
- }
- #[test]
- fn test_rwlock_readers_and_readers() {
- test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false);
- // The downgrader needs to get in before the reader gets in, otherwise
- // they cannot end up reading at the same time.
- test_rwlock_handshake(Arc::new(RWLock::new()), DowngradeRead, Read, false);
- test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true);
- // Two downgrade_reads can never both end up reading at the same time.
- }
- #[test]
- fn test_rwlock_downgrade_unlock() {
- // Tests that downgrade can unlock the lock in both modes
- let x = Arc::new(RWLock::new());
- lock_rwlock_in_mode(&x, Downgrade, || { });
- test_rwlock_handshake(x, Read, Read, false);
- let y = Arc::new(RWLock::new());
- lock_rwlock_in_mode(&y, DowngradeRead, || { });
- test_rwlock_exclusion(y, Write, Write);
- }
- #[test]
- fn test_rwlock_read_recursive() {
- let x = RWLock::new();
- let _g1 = x.read();
- let _g2 = x.read();
- }
- #[test]
- fn test_rwlock_cond_wait() {
- // As test_mutex_cond_wait above.
- let x = Arc::new(RWLock::new());
-
- // Child wakes up parent
- {
- let lock = x.write();
- let x2 = x.clone();
- task::spawn(proc() {
- let lock = x2.write();
- assert!(lock.cond.signal());
- });
- lock.cond.wait();
- }
- // Parent wakes up child
- let (tx, rx) = channel();
- let x3 = x.clone();
- task::spawn(proc() {
- let lock = x3.write();
- tx.send(());
- lock.cond.wait();
- tx.send(());
- });
- rx.recv(); // Wait until child gets in the rwlock
- drop(x.read()); // Must be able to get in as a reader
- {
- let x = x.write();
- assert!(x.cond.signal());
- }
- rx.recv(); // Wait until child wakes up
- drop(x.read()); // Just for good measure
- }
- #[cfg(test)]
- fn test_rwlock_cond_broadcast_helper(num_waiters: uint) {
- // Much like the mutex broadcast test. Downgrade-enabled.
- fn lock_cond(x: &Arc<RWLock>, blk: |c: &Condvar|) {
- let lock = x.write();
- blk(&lock.cond);
- }
-
- let x = Arc::new(RWLock::new());
- let mut rxs = Vec::new();
-
- for _ in range(0u, num_waiters) {
- let xi = x.clone();
- let (tx, rx) = channel();
- rxs.push(rx);
- task::spawn(proc() {
- lock_cond(&xi, |cond| {
- tx.send(());
- cond.wait();
- tx.send(());
- })
- });
- }
-
- // wait until all children get in the mutex
- for rx in rxs.iter_mut() { let _ = rx.recv(); }
- lock_cond(&x, |cond| {
- let num_woken = cond.broadcast();
- assert_eq!(num_woken, num_waiters);
- });
- // wait until all children wake up
- for rx in rxs.iter_mut() { let _ = rx.recv(); }
- }
- #[test]
- fn test_rwlock_cond_broadcast() {
- test_rwlock_cond_broadcast_helper(0);
- test_rwlock_cond_broadcast_helper(12);
- }
- #[cfg(test)]
- fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) {
- use std::any::Any;
-
- // Mutex must get automatically unlocked if panicked/killed within.
- let x = Arc::new(RWLock::new());
- let x2 = x.clone();
-
- let result: result::Result<(), Box<Any + Send>> = task::try(proc() {
- lock_rwlock_in_mode(&x2, mode1, || {
- panic!();
- })
- });
- assert!(result.is_err());
- // child task must have finished by the time try returns
- lock_rwlock_in_mode(&x, mode2, || { })
- }
- #[test]
- fn test_rwlock_reader_killed_writer() {
- rwlock_kill_helper(Read, Write);
- }
- #[test]
- fn test_rwlock_writer_killed_reader() {
- rwlock_kill_helper(Write, Read);
- }
- #[test]
- fn test_rwlock_reader_killed_reader() {
- rwlock_kill_helper(Read, Read);
- }
- #[test]
- fn test_rwlock_writer_killed_writer() {
- rwlock_kill_helper(Write, Write);
- }
- #[test]
- fn test_rwlock_kill_downgrader() {
- rwlock_kill_helper(Downgrade, Read);
- rwlock_kill_helper(Read, Downgrade);
- rwlock_kill_helper(Downgrade, Write);
- rwlock_kill_helper(Write, Downgrade);
- rwlock_kill_helper(DowngradeRead, Read);
- rwlock_kill_helper(Read, DowngradeRead);
- rwlock_kill_helper(DowngradeRead, Write);
- rwlock_kill_helper(Write, DowngradeRead);
- rwlock_kill_helper(DowngradeRead, Downgrade);
- rwlock_kill_helper(DowngradeRead, Downgrade);
- rwlock_kill_helper(Downgrade, DowngradeRead);
- rwlock_kill_helper(Downgrade, DowngradeRead);
- }
-}
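Closing out this file: a minimal sketch of the `RWLock` being removed, combining the `read`/`write` calls exercised by `test_rwlock_exclusion` with the `downgrade` example from the doc comment above (paths and scaffolding assumed):

```rust
extern crate sync;

use sync::Arc;
use sync::raw::RWLock;

fn main() {
    let lock = Arc::new(RWLock::new());
    let lock2 = lock.clone();

    spawn(proc() {
        // Readers may share the lock with each other...
        let _r = lock2.read();
    });

    // ...while a writer excludes everyone, and can atomically
    // downgrade to shared access once its mutation is done.
    let w = lock.write();
    let _r = w.downgrade();
}
```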
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
-
-//! A single-producer single-consumer concurrent queue
-//!
-//! This module contains the implementation of an SPSC queue which can be used
-//! concurrently between two tasks. This data structure is safe to use and
-//! enforces the semantics that there is one pusher and one popper.
-
-#![experimental]
-
-use core::prelude::*;
-
-use alloc::boxed::Box;
-use core::mem;
-use core::cell::UnsafeCell;
-use alloc::arc::Arc;
-
-use atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
-
-// Node within the linked list queue of messages to send
-struct Node<T> {
- // FIXME: this could be an uninitialized T if we're careful enough, and
- // that would reduce memory usage (and be a bit faster).
- // is it worth it?
- value: Option<T>, // nullable for re-use of nodes
- next: AtomicPtr<Node<T>>, // next node in the queue
-}
-
-/// The single-producer single-consumer queue. This structure is not cloneable,
-/// but it can be safely shared in an Arc if it is guaranteed that there
-/// is only one popper and one pusher touching the queue at any one point in
-/// time.
-pub struct Queue<T> {
- // consumer fields
- tail: UnsafeCell<*mut Node<T>>, // where to pop from
- tail_prev: AtomicPtr<Node<T>>, // last node popped; producer reclaims from here
-
- // producer fields
- head: UnsafeCell<*mut Node<T>>, // where to push to
- first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
- tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
-
- // Cache maintenance fields. Additions and subtractions are stored
- // separately in order to allow them to use nonatomic addition/subtraction.
- cache_bound: uint,
- cache_additions: AtomicUint,
- cache_subtractions: AtomicUint,
-}
-
-/// A safe abstraction for the consumer in a single-producer single-consumer
-/// queue.
-pub struct Consumer<T> {
- inner: Arc<Queue<T>>
-}
-
-impl<T: Send> Consumer<T> {
- /// Attempts to pop the value from the head of the queue, returning `None`
- /// if the queue is empty.
- pub fn pop(&mut self) -> Option<T> {
- self.inner.pop()
- }
-
- /// Attempts to peek at the head of the queue, returning `None` if the queue
- /// is empty.
- pub fn peek<'a>(&'a mut self) -> Option<&'a mut T> {
- self.inner.peek()
- }
-}
-
-/// A safe abstraction for the producer in a single-producer single-consumer
-/// queue.
-pub struct Producer<T> {
- inner: Arc<Queue<T>>
-}
-
-impl<T: Send> Producer<T> {
- /// Pushes a new value onto the queue.
- pub fn push(&mut self, t: T) {
- self.inner.push(t)
- }
-}
-
-impl<T: Send> Node<T> {
- fn new() -> *mut Node<T> {
- unsafe {
- mem::transmute(box Node {
- value: None,
- next: AtomicPtr::new(0 as *mut Node<T>),
- })
- }
- }
-}
-
-/// Creates a new queue with a consumer-producer pair.
-///
-/// The producer returned is connected to the consumer to push all data to
-/// the consumer.
-///
-/// # Arguments
-///
-/// * `bound` - This queue implementation is implemented with a linked
-/// list, and this means that a push is always a malloc. In
-/// order to amortize this cost, an internal cache of nodes is
-/// maintained to prevent a malloc from always being
-/// necessary. This bound is the limit on the size of the
-/// cache (if desired). If the value is 0, then the cache has
-/// no bound. Otherwise, the cache will never grow larger than
- /// `bound` (although the queue itself could be much larger).
-pub fn queue<T: Send>(bound: uint) -> (Consumer<T>, Producer<T>) {
- let q = unsafe { Queue::new(bound) };
- let arc = Arc::new(q);
- let consumer = Consumer { inner: arc.clone() };
- let producer = Producer { inner: arc };
-
- (consumer, producer)
-}
-
-impl<T: Send> Queue<T> {
- /// Creates a new queue.
- ///
- /// This is unsafe as the type system doesn't enforce a single
- /// consumer-producer relationship. It also allows the consumer to `pop`
- /// items while there is a `peek` active due to all methods having a
- /// non-mutable receiver.
- ///
- /// # Arguments
- ///
- /// * `bound` - This queue implementation is implemented with a linked
- /// list, and this means that a push is always a malloc. In
- /// order to amortize this cost, an internal cache of nodes is
- /// maintained to prevent a malloc from always being
- /// necessary. This bound is the limit on the size of the
- /// cache (if desired). If the value is 0, then the cache has
- /// no bound. Otherwise, the cache will never grow larger than
- /// `bound` (although the queue itself could be much larger).
- pub unsafe fn new(bound: uint) -> Queue<T> {
- let n1 = Node::new();
- let n2 = Node::new();
- (*n1).next.store(n2, Relaxed);
- Queue {
- tail: UnsafeCell::new(n2),
- tail_prev: AtomicPtr::new(n1),
- head: UnsafeCell::new(n2),
- first: UnsafeCell::new(n1),
- tail_copy: UnsafeCell::new(n1),
- cache_bound: bound,
- cache_additions: AtomicUint::new(0),
- cache_subtractions: AtomicUint::new(0),
- }
- }
-
- /// Pushes a new value onto this queue. Note that to use this function
- /// safely, it must be externally guaranteed that there is only one pusher.
- pub fn push(&self, t: T) {
- unsafe {
- // Acquire a node (which either uses a cached one or allocates a new
- // one), and then append this to the 'head' node.
- let n = self.alloc();
- assert!((*n).value.is_none());
- (*n).value = Some(t);
- (*n).next.store(0 as *mut Node<T>, Relaxed);
- (**self.head.get()).next.store(n, Release);
- *self.head.get() = n;
- }
- }
-
- unsafe fn alloc(&self) -> *mut Node<T> {
- // First try to see if we can consume the 'first' node for our uses.
- // We try to avoid as many atomic instructions as possible here, so
- // the addition to cache_subtractions is not atomic (plus we're the
- // only one subtracting from the cache).
- if *self.first.get() != *self.tail_copy.get() {
- if self.cache_bound > 0 {
- let b = self.cache_subtractions.load(Relaxed);
- self.cache_subtractions.store(b + 1, Relaxed);
- }
- let ret = *self.first.get();
- *self.first.get() = (*ret).next.load(Relaxed);
- return ret;
- }
- // If the above fails, then update our copy of the tail and try
- // again.
- *self.tail_copy.get() = self.tail_prev.load(Acquire);
- if *self.first.get() != *self.tail_copy.get() {
- if self.cache_bound > 0 {
- let b = self.cache_subtractions.load(Relaxed);
- self.cache_subtractions.store(b + 1, Relaxed);
- }
- let ret = *self.first.get();
- *self.first.get() = (*ret).next.load(Relaxed);
- return ret;
- }
- // If all of that fails, then we have to allocate a new node
- // (there's nothing in the node cache).
- Node::new()
- }
-
- /// Attempts to pop a value from this queue. Remember that to use this type
- /// safely you must ensure that there is only one popper at a time.
- pub fn pop(&self) -> Option<T> {
- unsafe {
- // The `tail` node is not actually a used node, but rather a
- // sentinel from where we should start popping from. Hence, look at
- // tail's next field and see if we can use it. If we do a pop, then
- // the current tail node is a candidate for going into the cache.
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
- if next.is_null() { return None }
- assert!((*next).value.is_some());
- let ret = (*next).value.take();
-
- *self.tail.get() = next;
- if self.cache_bound == 0 {
- self.tail_prev.store(tail, Release);
- } else {
- // FIXME: this is dubious with overflow.
- let additions = self.cache_additions.load(Relaxed);
- let subtractions = self.cache_subtractions.load(Relaxed);
- let size = additions - subtractions;
-
- if size < self.cache_bound {
- self.tail_prev.store(tail, Release);
- self.cache_additions.store(additions + 1, Relaxed);
- } else {
- (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed);
- // We have successfully erased all references to 'tail', so
- // now we can safely drop it.
- let _: Box<Node<T>> = mem::transmute(tail);
- }
- }
- return ret;
- }
- }
-
- /// Attempts to peek at the head of the queue, returning `None` if the queue
- /// has no data currently
- ///
- /// # Warning
- /// The reference returned is invalid if it is not used before the consumer
- /// pops the value off the queue. If the producer then pushes another value
- /// onto the queue, it will overwrite the value pointed to by the reference.
- pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
- // This is essentially the same as above with all the popping bits
- // stripped out.
- unsafe {
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
- if next.is_null() { return None }
- return (*next).value.as_mut();
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Queue<T> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.first.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Relaxed);
- let _n: Box<Node<T>> = mem::transmute(cur);
- cur = next;
- }
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use std::prelude::*;
-
- use super::{queue};
-
- #[test]
- fn smoke() {
- let (mut consumer, mut producer) = queue(0);
- producer.push(1i);
- producer.push(2);
- assert_eq!(consumer.pop(), Some(1i));
- assert_eq!(consumer.pop(), Some(2));
- assert_eq!(consumer.pop(), None);
- producer.push(3);
- producer.push(4);
- assert_eq!(consumer.pop(), Some(3));
- assert_eq!(consumer.pop(), Some(4));
- assert_eq!(consumer.pop(), None);
- }
-
- #[test]
- fn peek() {
- let (mut consumer, mut producer) = queue(0);
- producer.push(vec![1i]);
-
- // Ensure the borrowchecker works
- match consumer.peek() {
- Some(vec) => match vec.as_slice() {
- // Note that `pop` is not allowed here due to borrow
- [1] => {}
- _ => return
- },
- None => unreachable!()
- }
-
- consumer.pop();
- }
-
- #[test]
- fn drop_full() {
- let (_, mut producer) = queue(0);
- producer.push(box 1i);
- producer.push(box 2i);
- }
-
- #[test]
- fn smoke_bound() {
- let (mut consumer, mut producer) = queue(1);
- producer.push(1i);
- producer.push(2);
- assert_eq!(consumer.pop(), Some(1));
- assert_eq!(consumer.pop(), Some(2));
- assert_eq!(consumer.pop(), None);
- producer.push(3);
- producer.push(4);
- assert_eq!(consumer.pop(), Some(3));
- assert_eq!(consumer.pop(), Some(4));
- assert_eq!(consumer.pop(), None);
- }
-
- #[test]
- fn stress() {
- stress_bound(0);
- stress_bound(1);
-
- fn stress_bound(bound: uint) {
- let (consumer, mut producer) = queue(bound);
-
- let (tx, rx) = channel();
- spawn(proc() {
- // Move the consumer to a local mutable slot
- let mut consumer = consumer;
- for _ in range(0u, 100000) {
- loop {
- match consumer.pop() {
- Some(1i) => break,
- Some(_) => panic!(),
- None => {}
- }
- }
- }
- tx.send(());
- });
- for _ in range(0i, 100000) {
- producer.push(1);
- }
- rx.recv();
- }
- }
-}
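Finally, a sketch of the single-producer single-consumer queue deleted above, following the pattern of its `stress` test: one task owns the `Consumer`, the other owns the `Producer`. The `sync::spsc_queue` path and scaffolding are assumptions:

```rust
extern crate sync;

use sync::spsc_queue::queue;

fn main() {
    // bound = 0 leaves the internal node cache unbounded.
    let (consumer, mut producer) = queue(0);

    spawn(proc() {
        // The sole popper lives on this task.
        let mut consumer = consumer;
        let mut seen = 0u;
        while seen < 100 {
            match consumer.pop() {
                Some(_) => seen += 1,
                None => {} // queue momentarily empty; spin
            }
        }
    });

    // The sole pusher stays on this task.
    for i in range(0u, 100) {
        producer.push(i);
    }
}
```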