1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Communication primitives for concurrent tasks
13 //! Rust makes it very difficult to share data among tasks to prevent race
14 //! conditions and to improve parallelism, but there is often a need for
15 //! communication between concurrent tasks. The primitives defined in this
16 //! module are the building blocks for synchronization in Rust.
18 //! This module provides message-based communication over channels, concretely
19 //! defined among three types:
25 //! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
26 //! senders are clone-able such that many tasks can send simultaneously to one
27 //! receiver. These channels are *task blocking*, not *thread blocking*. This
28 //! means that if one task is blocked on a channel, other tasks can continue to
31 //! Rust channels come in one of two flavors:
33 //! 1. An asynchronous, infinitely buffered channel. The `channel()` function
34 //! will return a `(Sender, Receiver)` tuple where all sends will be
35 //! **asynchronous** (they never block). The channel conceptually has an
38 //! 2. A synchronous, bounded channel. The `sync_channel()` function will return
39 //! a `(SyncSender, Receiver)` tuple where the storage for pending messages
40 //! is a pre-allocated buffer of a fixed size. All sends will be
41 //! **synchronous** by blocking until there is buffer space available. Note
42 //! that a bound of 0 is allowed, causing the channel to become a
43 //! "rendezvous" channel where each sender atomically hands off a message to
46 //! ## Panic Propagation
48 //! In addition to being a core primitive for communicating in rust, channels
49 //! are the points at which panics are propagated among tasks. Whenever one
50 //! half of the channel is closed, the other half will have its next operation
51 //! `panic!`. The purpose of this is to allow propagation of panics among tasks
52 //! that are linked to one another via channels.
54 //! There are methods on both senders and receivers to perform their
55 //! respective operations without panicking, however.
57 //! ## Runtime Requirements
59 //! The channel types defined in this module generally have very few runtime
60 //! requirements in order to operate. The major requirement they have is for a
61 //! local rust `Task` to be available if any *blocking* operation is performed.
63 //! If a local `Task` is not available (for example an FFI callback), then the
64 //! `send` operation is safe on a `Sender` (as well as a `send_opt`) as well as
65 //! the `try_send` method on a `SyncSender`, but no other operations are
66 //! guaranteed to be safe.
73 //! // Create a simple streaming channel
74 //! let (tx, rx) = channel();
78 //! assert_eq!(rx.recv(), 10i);
84 //! // Create a shared channel which can be sent along from many tasks
85 //! // where tx is the sending half (tx for transmission), and rx is the receiving
86 //! // half (rx for receiving).
87 //! let (tx, rx) = channel();
88 //! for i in range(0i, 10i) {
89 //! let tx = tx.clone();
95 //! for _ in range(0i, 10i) {
96 //! let j = rx.recv();
97 //! assert!(0 <= j && j < 10);
101 //! Propagating panics:
104 //! // The call to recv() will panic!() because the channel has already hung
105 //! // up (or been deallocated)
106 //! let (tx, rx) = channel::<int>();
111 //! Synchronous channels:
114 //! let (tx, rx) = sync_channel::<int>(0);
116 //! // This will wait for the parent task to start receiving
122 //! Reading from a channel with a timeout requires using a Timer together
123 //! with the channel. You can use the select! macro to select either and
124 //! handle the timeout case. This first example will break out of the loop
125 //! after 10 seconds no matter what:
128 //! use std::io::timer::Timer;
129 //! use std::time::Duration;
131 //! let (tx, rx) = channel::<int>();
132 //! let mut timer = Timer::new().unwrap();
133 //! let timeout = timer.oneshot(Duration::seconds(10));
137 //! val = rx.recv() => println!("Received {}", val),
138 //! () = timeout.recv() => {
139 //! println!("timed out, total time was more than 10 seconds")
146 //! This second example is more costly since it allocates a new timer every
147 //! time a message is received, but it allows you to timeout after the channel
148 //! has been inactive for 5 seconds:
151 //! use std::io::timer::Timer;
152 //! use std::time::Duration;
154 //! let (tx, rx) = channel::<int>();
155 //! let mut timer = Timer::new().unwrap();
158 //! let timeout = timer.oneshot(Duration::seconds(5));
161 //! val = rx.recv() => println!("Received {}", val),
162 //! () = timeout.recv() => {
163 //! println!("timed out, no message received in 5 seconds")
170 // A description of how Rust's channel implementation works
172 // Channels are supposed to be the basic building block for all other
173 // concurrent primitives that are used in Rust. As a result, the channel type
174 // needs to be highly optimized, flexible, and broad enough for use everywhere.
176 // The choice of implementation of all channels is to be built on lock-free data
177 // structures. The channels themselves are then consequently also lock-free data
178 // structures. As always with lock-free code, this is a very "here be dragons"
179 // territory, especially because I'm unaware of any academic papers which have
180 // gone into great length about channels of these flavors.
182 // ## Flavors of channels
184 // From the perspective of a consumer of this library, there is only one flavor
185 // of channel. This channel can be used as a stream and cloned to allow multiple
186 // senders. Under the hood, however, there are actually three flavors of
189 // * Oneshots - these channels are highly optimized for the one-send use case.
190 // They contain as few atomics as possible and involve one and
191 // exactly one allocation.
192 // * Streams - these channels are optimized for the non-shared use case. They
193 // use a different concurrent queue which is more tailored for this
194 // use case. The initial allocation of this flavor of channel is not
196 // * Shared - this is the most general form of channel that this module offers,
197 // a channel with multiple senders. This type is as optimized as it
198 // can be, but the previous two types mentioned are much faster for
201 // ## Concurrent queues
203 // The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
204 // recv() obviously blocks. This means that under the hood there must be some
205 // shared and concurrent queue holding all of the actual data.
207 // With two flavors of channels, two flavors of queues are also used. We have
208 // chosen to use queues from a well-known author which are abbreviated as SPSC
209 // and MPSC (single producer, single consumer and multiple producer, single
210 // consumer). SPSC queues are used for streams while MPSC queues are used for
213 // ### SPSC optimizations
215 // The SPSC queue found online is essentially a linked list of nodes where one
216 // half of the nodes are the "queue of data" and the other half of nodes are a
217 // cache of unused nodes. The unused nodes are used such that an allocation is
218 // not required on every push() and a free doesn't need to happen on every
221 // As found online, however, the cache of nodes is of an infinite size. This
222 // means that if a channel at one point in its life had 50k items in the queue,
223 // then the queue will always have the capacity for 50k items. I believed that
224 // this was an unnecessary limitation of the implementation, so I have altered
225 // the queue to optionally have a bound on the cache size.
227 // By default, streams will have an unbounded SPSC queue with a small-ish cache
228 // size. The hope is that the cache is still large enough to have very fast
229 // send() operations while not too large such that millions of channels can
232 // ### MPSC optimizations
234 // Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
235 // a linked list under the hood to earn its unboundedness, but I have not put
236 // forth much effort into having a cache of nodes similar to the SPSC queue.
238 // For now, I believe that this is "ok" because shared channels are not the most
239 // common type, but soon we may wish to revisit this queue choice and determine
240 // another candidate for backend storage of shared channels.
242 // ## Overview of the Implementation
244 // Now that there's a little background on the concurrent queues used, it's
245 // worth going into much more detail about the channels themselves. The basic
246 // pseudocode for a send/recv are:
250 // queue.push(t) return if queue.pop()
251 // if increment() == -1 deschedule {
252 // wakeup() if decrement() > 0
253 // cancel_deschedule()
257 // As mentioned before, there are no locks in this implementation, only atomic
258 // instructions are used.
260 // ### The internal atomic counter
262 // Every channel has a shared counter with each half to keep track of the size
263 // of the queue. This counter is used to abort descheduling by the receiver and
264 // to know when to wake up on the sending side.
266 // As seen in the pseudocode, senders will increment this count and receivers
267 // will decrement the count. The theory behind this is that if a sender sees a
268 // -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
269 // then it doesn't need to block.
271 // The recv() method has a beginning call to pop(), and if successful, it needs
272 // to decrement the count. It is a crucial implementation detail that this
273 // decrement does *not* happen to the shared counter. If this were the case,
274 // then it would be possible for the counter to be very negative when there were
275 // no receivers waiting, in which case the senders would have to determine when
276 // it was actually appropriate to wake up a receiver.
278 // Instead, the "steal count" is kept track of separately (not atomically
279 // because it's only used by receivers), and then the decrement() call when
280 // descheduling will lump in all of the recent steals into one large decrement.
282 // The implication of this is that if a sender sees a -1 count, then there's
283 // guaranteed to be a waiter waiting!
285 // ## Native Implementation
287 // A major goal of these channels is to work seamlessly on and off the runtime.
288 // All of the previous race conditions have been worded in terms of
289 // scheduler-isms (which is obviously not available without the runtime).
291 // For now, native usage of channels (off the runtime) will fall back onto
292 // mutexes/cond vars for descheduling/atomic decisions. The no-contention path
293 // is still entirely lock-free, the "deschedule" blocks above are surrounded by
294 // a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
295 // condition variable.
299 // Being able to support selection over channels has greatly influenced this
300 // design, and not only does selection need to work inside the runtime, but also
301 // outside the runtime.
303 // The implementation is fairly straightforward. The goal of select() is not to
304 // return some data, but only to return which channel can receive data without
305 // blocking. The implementation is essentially the entire blocking procedure
306 // followed by an increment as soon as its woken up. The cancellation procedure
307 // involves an increment and swapping out of to_wake to acquire ownership of the
310 // Sadly this current implementation requires multiple allocations, so I have
311 // seen the throughput of select() be much worse than it should be. I do not
312 // believe that there is anything fundamental which needs to change about these
313 // channels, however, in order to support a more efficient select().
317 // And now that you've seen all the races that I found and attempted to fix,
318 // here's the code for you to find some more!
320 use core::prelude::*;
322 pub use self::TryRecvError::*;
323 pub use self::TrySendError::*;
327 use core::kinds::marker;
329 use core::cell::UnsafeCell;
330 use rustrt::task::BlockedTask;
332 pub use comm::select::{Select, Handle};
// Fragment of the `test!` helper macro: it wraps each supplied `fn` body in a
// `#[test] fn f()` so the test invocations further down (`test!(fn foo() {..})`)
// expand to ordinary unit tests. NOTE(review): the macro's opening line is not
// present in this listing.
335 { fn $name:ident() $b:block $(#[$a:meta])*} => (
337 #![allow(unused_imports)]
347 $(#[$a])* #[test] fn f() { $b }
358 /// The receiving-half of Rust's channel type. This half can only be owned by
361 pub struct Receiver<T> {
// Interior mutability over the backing flavor: receive operations may swap in
// an upgraded packet (oneshot -> stream -> shared) behind a shared `&self`
// (see the `mem::swap` calls in `try_recv`/`recv_opt`).
362 inner: UnsafeCell<Flavor<T>>,
363 // can't share in an arc
// NoSync marker: a Receiver can be moved between tasks but never shared.
364 _marker: marker::NoSync,
367 /// An iterator over messages on a receiver, this iterator will block
368 /// whenever `next` is called, waiting for a new message, and `None` will be
369 /// returned when the corresponding channel has hung up.
// Borrows the receiver for 'a; `next` delegates to `Receiver::recv_opt`
// (see the `Iterator` impl below).
371 pub struct Messages<'a, T:'a> {
375 /// The sending-half of Rust's asynchronous channel type. This half can only be
376 /// owned by one task, but it can be cloned to send to other tasks.
378 pub struct Sender<T> {
// Interior mutability: `send_opt` and `clone` may swap an upgraded flavor
// into place behind `&self`.
379 inner: UnsafeCell<Flavor<T>>,
380 // can't share in an arc
// NoSync marker: a Sender can be moved between tasks but never shared.
381 _marker: marker::NoSync,
384 /// The sending-half of Rust's synchronous channel type. This half can only be
385 /// owned by one task, but it can be cloned to send to other tasks.
386 #[unstable = "this type may be renamed, but it will always exist"]
387 pub struct SyncSender<T> {
// Sync channels always use the `sync::Packet` backend directly; unlike the
// async `Sender`, no flavor upgrades ever occur.
388 inner: Arc<UnsafeCell<sync::Packet<T>>>,
389 // can't share in an arc
390 _marker: marker::NoSync,
393 /// This enumeration is the list of the possible reasons that try_recv could not
394 /// return data when called.
395 #[deriving(PartialEq, Clone, Show)]
396 #[experimental = "this is likely to be removed in changing try_recv()"]
397 pub enum TryRecvError {
398 /// This channel is currently empty, but the sender(s) have not yet
399 /// disconnected, so data may yet become available.
// (variant declaration omitted from this listing; `try_recv` returns it
// as `Err(Empty)`)
401 /// This channel's sending half has become disconnected, and there will
402 /// never be any more data received on this channel
// (variant declaration omitted from this listing; `try_recv` returns it
// as `Err(Disconnected)`)
406 /// This enumeration is the list of the possible error outcomes for the
407 /// `SyncSender::try_send` method.
408 #[deriving(PartialEq, Clone, Show)]
409 #[experimental = "this is likely to be removed in changing try_send()"]
410 pub enum TrySendError<T> {
411 /// The data could not be sent on the channel because it would require that
412 /// the callee block to send the data.
414 /// If this is a buffered channel, then the buffer is full at this time. If
415 /// this is not a buffered channel, then there is no receiver available to
416 /// acquire the data.
// (variant declaration omitted from this listing; carries the rejected value)
418 /// This channel's receiving half has disconnected, so the data could not be
419 /// sent. The data is returned back to the callee in this case.
// (variant declaration omitted from this listing; carries the rejected value)
// The four concrete channel backends a Sender/Receiver pair can be in.
// Channels start as Oneshot and are upgraded in place (see `Sender::send_opt`
// and `Clone for Sender`) to Stream and then Shared; Sync is chosen at
// construction by `sync_channel` and never changes.
424 Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
425 Stream(Arc<UnsafeCell<stream::Packet<T>>>),
426 Shared(Arc<UnsafeCell<shared::Packet<T>>>),
427 Sync(Arc<UnsafeCell<sync::Packet<T>>>),
// Internal helper trait giving Sender/Receiver uniform (unsafe) access to
// their `UnsafeCell<Flavor<T>>` so the upgrade machinery can read and swap
// the backing packet.
431 trait UnsafeFlavor<T> {
432 fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>>;
// Unsafe: the caller must ensure no aliasing access to the flavor exists
// while the returned mutable reference is live.
433 unsafe fn inner_mut<'a>(&'a self) -> &'a mut Flavor<T> {
434 &mut *self.inner_unsafe().get()
// Unsafe: the caller must ensure the flavor is not being mutated concurrently.
436 unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
437 &*self.inner_unsafe().get()
// Wire both halves up to the helper trait; each just exposes its `inner` cell.
440 impl<T> UnsafeFlavor<T> for Sender<T> {
441 fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
445 impl<T> UnsafeFlavor<T> for Receiver<T> {
446 fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
451 /// Creates a new asynchronous channel, returning the sender/receiver halves.
453 /// All data sent on the sender will become available on the receiver, and no
454 /// send will block the calling task (this channel has an "infinite buffer").
459 /// // tx is the sending half (tx for transmission), and rx is the receiving
460 /// // half (rx for receiving).
461 /// let (tx, rx) = channel();
463 /// // Spawn off an expensive computation
465 /// # fn expensive_computation() {}
466 /// tx.send(expensive_computation());
469 /// // Do some useful work for awhile
471 /// // Let's see what that answer was
472 /// println!("{}", rx.recv());
475 pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
// Start on the cheapest (oneshot) backend; it is upgraded to a stream or
// shared backend on demand (see `Sender::send_opt` and `Clone for Sender`).
476 let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
477 (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
480 /// Creates a new synchronous, bounded channel.
482 /// Like asynchronous channels, the `Receiver` will block until a message
483 /// becomes available. These channels differ greatly in the semantics of the
484 /// sender from asynchronous channels, however.
486 /// This channel has an internal buffer on which messages will be queued. When
487 /// the internal buffer becomes full, future sends will *block* waiting for the
488 /// buffer to open up. Note that a buffer size of 0 is valid, in which case this
489 /// becomes a "rendezvous channel" where each send will not return until a recv
490 /// is paired with it.
492 /// As with asynchronous channels, all senders will panic in `send` if the
493 /// `Receiver` has been destroyed.
498 /// let (tx, rx) = sync_channel(1);
500 /// // this returns immediately
504 /// // this will block until the previous message has been received
508 /// assert_eq!(rx.recv(), 1i);
509 /// assert_eq!(rx.recv(), 2i);
// NOTE(review): the unstable message below contains a typo ("is is"); fixing
// it would change a string token, which is out of scope for this doc pass.
511 #[unstable = "this function may be renamed to more accurately reflect the type \
512 of channel that is is creating"]
513 pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
// Sync channels use a single fixed backend sized by `bound`; no upgrades.
514 let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
515 (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
518 ////////////////////////////////////////////////////////////////////////////////
520 ////////////////////////////////////////////////////////////////////////////////
522 impl<T: Send> Sender<T> {
// Internal constructor: wrap the chosen flavor in the interior-mutability cell.
523 fn new(inner: Flavor<T>) -> Sender<T> {
525 inner: UnsafeCell::new(inner),
526 _marker: marker::NoSync,
530 /// Sends a value along this channel to be received by the corresponding
533 /// Rust channels are infinitely buffered so this method will never block.
537 /// This function will panic if the other end of the channel has hung up.
538 /// This means that if the corresponding receiver has fallen out of scope,
539 /// this function will trigger a panic message saying that a message is
540 /// being sent on a closed channel.
542 /// Note that if this function does *not* panic, it does not mean that the
543 /// data will be successfully received. All sends are placed into a queue,
544 /// so it is possible for a send to succeed (the other end is alive), but
545 /// then the other end could immediately disconnect.
547 /// The purpose of this functionality is to propagate panics among tasks.
548 /// If a panic is not desired, then consider using the `send_opt` method
549 #[experimental = "this function is being considered candidate for removal \
550 to adhere to the general guidelines of rust"]
551 pub fn send(&self, t: T) {
// Panicking wrapper around the non-panicking `send_opt`.
552 if self.send_opt(t).is_err() {
553 panic!("sending on a closed channel");
557 /// Attempts to send a value on this channel, returning it back if it could
560 /// A successful send occurs when it is determined that the other end of
561 /// the channel has not hung up already. An unsuccessful send would be one
562 /// where the corresponding receiver has already been deallocated. Note
563 /// that a return value of `Err` means that the data will never be
564 /// received, but a return value of `Ok` does *not* mean that the data
565 /// will be received. It is possible for the corresponding receiver to
566 /// hang up immediately after this function returns `Ok`.
568 /// Like `send`, this method will never block.
572 /// This method will never panic, it will return the message back to the
573 /// caller if the other end is disconnected
578 /// let (tx, rx) = channel();
580 /// // This send is always successful
581 /// assert_eq!(tx.send_opt(1i), Ok(()));
583 /// // This send will fail because the receiver is gone
585 /// assert_eq!(tx.send_opt(1i), Err(1));
587 #[unstable = "this function may be renamed to send() in the future"]
588 pub fn send_opt(&self, t: T) -> Result<(), T> {
// A oneshot channel that must send again upgrades itself to a stream
// backend; the upgraded flavor is swapped into `self.inner` at the bottom.
589 let (new_inner, ret) = match *unsafe { self.inner() } {
596 let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
597 match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
598 oneshot::UpSuccess => {
599 let ret = (*a.get()).send(t);
602 oneshot::UpDisconnected => (a, Err(t)),
603 oneshot::UpWoke(task) => {
604 // This send cannot panic because the task is
605 // asleep (we're looking at it), so the receiver
607 (*a.get()).send(t).ok().unwrap();
608 task.wake().map(|t| t.reawaken());
// Already-upgraded flavors delegate straight to their packet.
615 Stream(ref p) => return unsafe { (*p.get()).send(t) },
616 Shared(ref p) => return unsafe { (*p.get()).send(t) },
// A Sender is never constructed over a sync packet (that's SyncSender).
617 Sync(..) => unreachable!(),
// Install the upgraded stream flavor in place of the oneshot.
621 let tmp = Sender::new(Stream(new_inner));
622 mem::swap(self.inner_mut(), tmp.inner_mut());
// Cloning a Sender means multiple senders may exist, so oneshot/stream
// flavors must first be upgraded to the shared (multi-producer) backend.
629 impl<T: Send> Clone for Sender<T> {
630 fn clone(&self) -> Sender<T> {
// `sleeper` is a receiver task caught mid-block by the upgrade; it is
// handed to the new shared packet below via `inherit_blocker`.
631 let (packet, sleeper) = match *unsafe { self.inner() } {
633 let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
635 (*a.get()).postinit_lock();
636 match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
637 oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
638 oneshot::UpWoke(task) => (a, Some(task))
643 let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
645 (*a.get()).postinit_lock();
646 match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
647 stream::UpSuccess | stream::UpDisconnected => (a, None),
648 stream::UpWoke(task) => (a, Some(task)),
// Already shared: just bump the channel count and clone the Arc.
653 unsafe { (*p.get()).clone_chan(); }
654 return Sender::new(Shared(p.clone()));
656 Sync(..) => unreachable!(),
660 (*packet.get()).inherit_blocker(sleeper);
// Swap the shared flavor into `self`, then hand back the clone.
662 let tmp = Sender::new(Shared(packet.clone()));
663 mem::swap(self.inner_mut(), tmp.inner_mut());
665 Sender::new(Shared(packet))
// Dropping a Sender notifies the backing packet that one sending half is
// gone, whatever flavor the channel currently has.
670 impl<T: Send> Drop for Sender<T> {
672 match *unsafe { self.inner_mut() } {
673 Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
674 Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
675 Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
// Sync packets belong to SyncSender, never to Sender.
676 Sync(..) => unreachable!(),
681 ////////////////////////////////////////////////////////////////////////////////
683 ////////////////////////////////////////////////////////////////////////////////
685 impl<T: Send> SyncSender<T> {
// Internal constructor; the packet is shared with the Receiver via the Arc.
686 fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
687 SyncSender { inner: inner, _marker: marker::NoSync }
690 /// Sends a value on this synchronous channel.
692 /// This function will *block* until space in the internal buffer becomes
693 /// available or a receiver is available to hand off the message to.
695 /// Note that a successful send does *not* guarantee that the receiver will
696 /// ever see the data if there is a buffer on this channel. Messages may be
697 /// enqueued in the internal buffer for the receiver to receive at a later
698 /// time. If the buffer size is 0, however, it can be guaranteed that the
699 /// receiver has indeed received the data if this function returns success.
703 /// Similarly to `Sender::send`, this function will panic if the
704 /// corresponding `Receiver` for this channel has disconnected. This
705 /// behavior is used to propagate panics among tasks.
707 /// If a panic is not desired, you can achieve the same semantics with the
708 /// `SyncSender::send_opt` method which will not panic if the receiver
710 #[experimental = "this function is being considered candidate for removal \
711 to adhere to the general guidelines of rust"]
712 pub fn send(&self, t: T) {
// Panicking wrapper around the non-panicking `send_opt`.
713 if self.send_opt(t).is_err() {
714 panic!("sending on a closed channel");
718 /// Send a value on a channel, returning it back if the receiver
721 /// This method will *block* to send the value `t` on the channel, but if
722 /// the value could not be sent due to the receiver disconnecting, the value
723 /// is returned back to the callee. This function is similar to `try_send`,
724 /// except that it will block if the channel is currently full.
728 /// This function cannot panic.
729 #[unstable = "this function may be renamed to send() in the future"]
730 pub fn send_opt(&self, t: T) -> Result<(), T> {
// Delegate directly to the sync packet; no flavor dispatch needed.
731 unsafe { (*self.inner.get()).send(t) }
734 /// Attempts to send a value on this channel without blocking.
736 /// This method differs from `send_opt` by returning immediately if the
737 /// channel's buffer is full or no receiver is waiting to acquire some
738 /// data. Compared with `send_opt`, this function has two failure cases
739 /// instead of one (one for disconnection, one for a full buffer).
741 /// See `SyncSender::send` for notes about guarantees of whether the
742 /// receiver has received the data or not if this function is successful.
746 /// This function cannot panic
747 #[unstable = "the return type of this function is candidate for \
749 pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
750 unsafe { (*self.inner.get()).try_send(t) }
// Cloning a SyncSender just bumps the packet's channel count and shares the
// same Arc; no upgrade machinery is involved for sync channels.
755 impl<T: Send> Clone for SyncSender<T> {
756 fn clone(&self) -> SyncSender<T> {
757 unsafe { (*self.inner.get()).clone_chan(); }
758 return SyncSender::new(self.inner.clone());
// Dropping a SyncSender notifies the packet that one sending half is gone.
763 impl<T: Send> Drop for SyncSender<T> {
765 unsafe { (*self.inner.get()).drop_chan(); }
769 ////////////////////////////////////////////////////////////////////////////////
771 ////////////////////////////////////////////////////////////////////////////////
773 impl<T: Send> Receiver<T> {
// Internal constructor: wrap the chosen flavor in the interior-mutability cell.
774 fn new(inner: Flavor<T>) -> Receiver<T> {
775 Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
778 /// Blocks waiting for a value on this receiver
780 /// This function will block if necessary to wait for a corresponding send
781 /// on the channel from its paired `Sender` structure. This receiver will
782 /// be woken up when data is ready, and the data will be returned.
786 /// Similar to channels, this method will trigger a task panic if the
787 /// other end of the channel has hung up (been deallocated). The purpose of
788 /// this is to propagate panics among tasks.
790 /// If a panic is not desired, then there are two options:
792 /// * If blocking is still desired, the `recv_opt` method will return `Err`
793 /// when the other end hangs up
795 /// * If blocking is not desired, then the `try_recv` method will attempt to
796 /// peek at a value on this receiver.
797 #[experimental = "this function is being considered candidate for removal \
798 to adhere to the general guidelines of rust"]
799 pub fn recv(&self) -> T {
// Panicking wrapper around the non-panicking `recv_opt`.
800 match self.recv_opt() {
802 Err(()) => panic!("receiving on a closed channel"),
806 /// Attempts to return a pending value on this receiver without blocking
808 /// This method will never block the caller in order to wait for data to
809 /// become available. Instead, this will always return immediately with a
810 /// possible option of pending data on the channel.
812 /// This is useful for a flavor of "optimistic check" before deciding to
813 /// block on a receiver.
817 /// This function cannot panic.
818 #[unstable = "the return type of this function may be altered"]
819 pub fn try_recv(&self) -> Result<T, TryRecvError> {
// An `Upgraded` error carries the replacement receiver; it is swapped into
// `self.inner` below and (in the full source) the attempt is retried.
821 let new_port = match *unsafe { self.inner() } {
823 match unsafe { (*p.get()).try_recv() } {
824 Ok(t) => return Ok(t),
825 Err(oneshot::Empty) => return Err(Empty),
826 Err(oneshot::Disconnected) => return Err(Disconnected),
827 Err(oneshot::Upgraded(rx)) => rx,
831 match unsafe { (*p.get()).try_recv() } {
832 Ok(t) => return Ok(t),
833 Err(stream::Empty) => return Err(Empty),
834 Err(stream::Disconnected) => return Err(Disconnected),
835 Err(stream::Upgraded(rx)) => rx,
// Shared and sync flavors are terminal: they can never be upgraded.
839 match unsafe { (*p.get()).try_recv() } {
840 Ok(t) => return Ok(t),
841 Err(shared::Empty) => return Err(Empty),
842 Err(shared::Disconnected) => return Err(Disconnected),
846 match unsafe { (*p.get()).try_recv() } {
847 Ok(t) => return Ok(t),
848 Err(sync::Empty) => return Err(Empty),
849 Err(sync::Disconnected) => return Err(Disconnected),
// Install the upgraded flavor in place.
854 mem::swap(self.inner_mut(),
855 new_port.inner_mut());
860 /// Attempt to wait for a value on this receiver, but does not panic if the
861 /// corresponding channel has hung up.
863 /// This implementation of iterators for ports will always block if there is
864 /// no data available on the receiver, but it will not panic in the case
865 /// that the channel has been deallocated.
867 /// In other words, this function has the same semantics as the `recv`
868 /// method except for the panic aspect.
870 /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
871 /// the value found on the receiver is returned.
872 #[unstable = "this function may be renamed to recv()"]
873 pub fn recv_opt(&self) -> Result<T, ()> {
// Same upgrade-and-retry shape as `try_recv`, but using the blocking
// `recv` of each packet; a blocking recv can never observe Empty.
875 let new_port = match *unsafe { self.inner() } {
877 match unsafe { (*p.get()).recv() } {
878 Ok(t) => return Ok(t),
879 Err(oneshot::Empty) => return unreachable!(),
880 Err(oneshot::Disconnected) => return Err(()),
881 Err(oneshot::Upgraded(rx)) => rx,
885 match unsafe { (*p.get()).recv() } {
886 Ok(t) => return Ok(t),
887 Err(stream::Empty) => return unreachable!(),
888 Err(stream::Disconnected) => return Err(()),
889 Err(stream::Upgraded(rx)) => rx,
893 match unsafe { (*p.get()).recv() } {
894 Ok(t) => return Ok(t),
895 Err(shared::Empty) => return unreachable!(),
896 Err(shared::Disconnected) => return Err(()),
// Sync packets return the Result directly; no upgrade possible.
899 Sync(ref p) => return unsafe { (*p.get()).recv() }
902 mem::swap(self.inner_mut(), new_port.inner_mut());
907 /// Returns an iterator which will block waiting for messages, but never
908 /// `panic!`. It will return `None` when the channel has hung up.
910 pub fn iter<'a>(&'a self) -> Messages<'a, T> {
911 Messages { rx: self }
// Hooks a Receiver into the `select!` machinery. Each method follows the same
// pattern as try_recv/recv_opt: upgradable flavors (oneshot/stream) may hand
// back a replacement receiver, which is swapped into `self.inner`.
915 impl<T: Send> select::Packet for Receiver<T> {
// Non-blocking check used by select: can a recv succeed right now?
916 fn can_recv(&self) -> bool {
918 let new_port = match *unsafe { self.inner() } {
920 match unsafe { (*p.get()).can_recv() } {
921 Ok(ret) => return ret,
922 Err(upgrade) => upgrade,
926 match unsafe { (*p.get()).can_recv() } {
927 Ok(ret) => return ret,
928 Err(upgrade) => upgrade,
// Shared and sync flavors are terminal; delegate and return directly.
932 return unsafe { (*p.get()).can_recv() };
935 return unsafe { (*p.get()).can_recv() };
939 mem::swap(self.inner_mut(),
940 new_port.inner_mut());
// Register `task` to be woken when data (or disconnection) arrives.
945 fn start_selection(&self, mut task: BlockedTask) -> Result<(), BlockedTask>{
947 let (t, new_port) = match *unsafe { self.inner() } {
949 match unsafe { (*p.get()).start_selection(task) } {
950 oneshot::SelSuccess => return Ok(()),
951 oneshot::SelCanceled(task) => return Err(task),
952 oneshot::SelUpgraded(t, rx) => (t, rx),
956 match unsafe { (*p.get()).start_selection(task) } {
957 stream::SelSuccess => return Ok(()),
958 stream::SelCanceled(task) => return Err(task),
959 stream::SelUpgraded(t, rx) => (t, rx),
963 return unsafe { (*p.get()).start_selection(task) };
966 return unsafe { (*p.get()).start_selection(task) };
971 mem::swap(self.inner_mut(),
972 new_port.inner_mut());
// Cancel a pending selection; returns whether data was ready anyway.
977 fn abort_selection(&self) -> bool {
978 let mut was_upgrade = false;
// Err(p) from an abort carries an upgraded port to swap in below.
980 let result = match *unsafe { self.inner() } {
981 Oneshot(ref p) => unsafe { (*p.get()).abort_selection() },
982 Stream(ref p) => unsafe {
983 (*p.get()).abort_selection(was_upgrade)
985 Shared(ref p) => return unsafe {
986 (*p.get()).abort_selection(was_upgrade)
988 Sync(ref p) => return unsafe {
989 (*p.get()).abort_selection()
992 let new_port = match result { Ok(b) => return b, Err(p) => p };
995 mem::swap(self.inner_mut(),
996 new_port.inner_mut());
// Blocking iterator: each `next` is a `recv_opt`, so iteration ends with
// `None` once the channel hangs up, without panicking.
1003 impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
1004 fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
// Dropping the Receiver notifies the backing packet that the receiving half
// is gone, for every flavor (subsequent sends will observe disconnection).
1007 #[unsafe_destructor]
1008 impl<T: Send> Drop for Receiver<T> {
1009 fn drop(&mut self) {
1010 match *unsafe { self.inner_mut() } {
1011 Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
1012 Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
1013 Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
1014 Sync(ref mut p) => unsafe { (*p.get()).drop_port(); },
1026 pub fn stress_factor() -> uint {
1027 match os::getenv("RUST_TEST_STRESS") {
1028 Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
1034 let (tx, rx) = channel::<int>();
1036 assert_eq!(rx.recv(), 1);
1039 test!(fn drop_full() {
1040 let (tx, _rx) = channel();
1044 test!(fn drop_full_shared() {
1045 let (tx, _rx) = channel();
1051 test!(fn smoke_shared() {
1052 let (tx, rx) = channel::<int>();
1054 assert_eq!(rx.recv(), 1);
1055 let tx = tx.clone();
1057 assert_eq!(rx.recv(), 1);
1060 test!(fn smoke_threads() {
1061 let (tx, rx) = channel::<int>();
1065 assert_eq!(rx.recv(), 1);
1068 test!(fn smoke_port_gone() {
1069 let (tx, rx) = channel::<int>();
1074 test!(fn smoke_shared_port_gone() {
1075 let (tx, rx) = channel::<int>();
1080 test!(fn smoke_shared_port_gone2() {
1081 let (tx, rx) = channel::<int>();
1083 let tx2 = tx.clone();
1088 test!(fn port_gone_concurrent() {
1089 let (tx, rx) = channel::<int>();
1096 test!(fn port_gone_concurrent_shared() {
1097 let (tx, rx) = channel::<int>();
1098 let tx2 = tx.clone();
1108 test!(fn smoke_chan_gone() {
1109 let (tx, rx) = channel::<int>();
1114 test!(fn smoke_chan_gone_shared() {
1115 let (tx, rx) = channel::<()>();
1116 let tx2 = tx.clone();
1122 test!(fn chan_gone_concurrent() {
1123 let (tx, rx) = channel::<int>();
1132 let (tx, rx) = channel::<int>();
1134 for _ in range(0u, 10000) { tx.send(1i); }
1136 for _ in range(0u, 10000) {
1137 assert_eq!(rx.recv(), 1);
1141 test!(fn stress_shared() {
1142 static AMT: uint = 10000;
1143 static NTHREADS: uint = 8;
1144 let (tx, rx) = channel::<int>();
1145 let (dtx, drx) = channel::<()>();
1148 for _ in range(0, AMT * NTHREADS) {
1149 assert_eq!(rx.recv(), 1);
1151 match rx.try_recv() {
1158 for _ in range(0, NTHREADS) {
1159 let tx = tx.clone();
1161 for _ in range(0, AMT) { tx.send(1); }
1169 fn send_from_outside_runtime() {
1170 let (tx1, rx1) = channel::<()>();
1171 let (tx2, rx2) = channel::<int>();
1172 let (tx3, rx3) = channel::<()>();
1173 let tx4 = tx3.clone();
1176 for _ in range(0i, 40) {
1177 assert_eq!(rx2.recv(), 1);
1183 for _ in range(0i, 40) {
1193 fn recv_from_outside_runtime() {
1194 let (tx, rx) = channel::<int>();
1195 let (dtx, drx) = channel();
1197 for _ in range(0i, 40) {
1198 assert_eq!(rx.recv(), 1);
1202 for _ in range(0u, 40) {
1210 let (tx1, rx1) = channel::<int>();
1211 let (tx2, rx2) = channel::<int>();
1212 let (tx3, rx3) = channel::<()>();
1213 let tx4 = tx3.clone();
1215 assert_eq!(rx1.recv(), 1);
1221 assert_eq!(rx2.recv(), 2);
1228 test!(fn oneshot_single_thread_close_port_first() {
1229 // Simple test of closing without sending
1230 let (_tx, rx) = channel::<int>();
1234 test!(fn oneshot_single_thread_close_chan_first() {
1235 // Simple test of closing without sending
1236 let (tx, _rx) = channel::<int>();
1240 test!(fn oneshot_single_thread_send_port_close() {
1241 // Testing that the sender cleans up the payload if receiver is closed
1242 let (tx, rx) = channel::<Box<int>>();
1247 test!(fn oneshot_single_thread_recv_chan_close() {
1248 // Receiving on a closed chan will panic
1249 let res = task::try(proc() {
1250 let (tx, rx) = channel::<int>();
1255 assert!(res.is_err());
1258 test!(fn oneshot_single_thread_send_then_recv() {
1259 let (tx, rx) = channel::<Box<int>>();
1261 assert!(rx.recv() == box 10);
1264 test!(fn oneshot_single_thread_try_send_open() {
1265 let (tx, rx) = channel::<int>();
1266 assert!(tx.send_opt(10).is_ok());
1267 assert!(rx.recv() == 10);
1270 test!(fn oneshot_single_thread_try_send_closed() {
1271 let (tx, rx) = channel::<int>();
1273 assert!(tx.send_opt(10).is_err());
1276 test!(fn oneshot_single_thread_try_recv_open() {
1277 let (tx, rx) = channel::<int>();
1279 assert!(rx.recv_opt() == Ok(10));
1282 test!(fn oneshot_single_thread_try_recv_closed() {
1283 let (tx, rx) = channel::<int>();
1285 assert!(rx.recv_opt() == Err(()));
1288 test!(fn oneshot_single_thread_peek_data() {
1289 let (tx, rx) = channel::<int>();
1290 assert_eq!(rx.try_recv(), Err(Empty))
1292 assert_eq!(rx.try_recv(), Ok(10));
1295 test!(fn oneshot_single_thread_peek_close() {
1296 let (tx, rx) = channel::<int>();
1298 assert_eq!(rx.try_recv(), Err(Disconnected));
1299 assert_eq!(rx.try_recv(), Err(Disconnected));
1302 test!(fn oneshot_single_thread_peek_open() {
1303 let (_tx, rx) = channel::<int>();
1304 assert_eq!(rx.try_recv(), Err(Empty));
1307 test!(fn oneshot_multi_task_recv_then_send() {
1308 let (tx, rx) = channel::<Box<int>>();
1310 assert!(rx.recv() == box 10);
1316 test!(fn oneshot_multi_task_recv_then_close() {
1317 let (tx, rx) = channel::<Box<int>>();
1321 let res = task::try(proc() {
1322 assert!(rx.recv() == box 10);
1324 assert!(res.is_err());
1327 test!(fn oneshot_multi_thread_close_stress() {
1328 for _ in range(0, stress_factor()) {
1329 let (tx, rx) = channel::<int>();
1337 test!(fn oneshot_multi_thread_send_close_stress() {
1338 for _ in range(0, stress_factor()) {
1339 let (tx, rx) = channel::<int>();
1343 let _ = task::try(proc() {
1349 test!(fn oneshot_multi_thread_recv_close_stress() {
1350 for _ in range(0, stress_factor()) {
1351 let (tx, rx) = channel::<int>();
1353 let res = task::try(proc() {
1356 assert!(res.is_err());
1366 test!(fn oneshot_multi_thread_send_recv_stress() {
1367 for _ in range(0, stress_factor()) {
1368 let (tx, rx) = channel();
1373 assert!(rx.recv() == box 10i);
1378 test!(fn stream_send_recv_stress() {
1379 for _ in range(0, stress_factor()) {
1380 let (tx, rx) = channel();
1385 fn send(tx: Sender<Box<int>>, i: int) {
1386 if i == 10 { return }
1394 fn recv(rx: Receiver<Box<int>>, i: int) {
1395 if i == 10 { return }
1398 assert!(rx.recv() == box i);
1405 test!(fn recv_a_lot() {
1406 // Regression test that we don't run out of stack in scheduler context
1407 let (tx, rx) = channel();
1408 for _ in range(0i, 10000) { tx.send(()); }
1409 for _ in range(0i, 10000) { rx.recv(); }
1412 test!(fn shared_chan_stress() {
1413 let (tx, rx) = channel();
1414 let total = stress_factor() + 100;
1415 for _ in range(0, total) {
1416 let tx = tx.clone();
1422 for _ in range(0, total) {
1427 test!(fn test_nested_recv_iter() {
1428 let (tx, rx) = channel::<int>();
1429 let (total_tx, total_rx) = channel::<int>();
1433 for x in rx.iter() {
1443 assert_eq!(total_rx.recv(), 6);
1446 test!(fn test_recv_iter_break() {
1447 let (tx, rx) = channel::<int>();
1448 let (count_tx, count_rx) = channel();
1452 for x in rx.iter() {
1459 count_tx.send(count);
1465 let _ = tx.send_opt(2);
1467 assert_eq!(count_rx.recv(), 4);
1470 test!(fn try_recv_states() {
1471 let (tx1, rx1) = channel::<int>();
1472 let (tx2, rx2) = channel::<()>();
1473 let (tx3, rx3) = channel::<()>();
1483 assert_eq!(rx1.try_recv(), Err(Empty));
1486 assert_eq!(rx1.try_recv(), Ok(1));
1487 assert_eq!(rx1.try_recv(), Err(Empty));
1490 assert_eq!(rx1.try_recv(), Err(Disconnected));
1493 // This bug used to end up in a livelock inside of the Receiver destructor
1494 // because the internal state of the Shared packet was corrupted
1495 test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
1496 let (tx, rx) = channel();
1497 let (tx2, rx2) = channel();
1499 rx.recv(); // wait on a oneshot
1500 drop(rx); // destroy a shared
1503 // make sure the other task has gone to sleep
1504 for _ in range(0u, 5000) { task::deschedule(); }
1506 // upgrade to a shared chan and send a message
1511 // wait for the child task to exit before we exit
1515 test!(fn sends_off_the_runtime() {
1516 use rustrt::thread::Thread;
1518 let (tx, rx) = channel();
1519 let t = Thread::start(proc() {
1520 for _ in range(0u, 1000) {
1524 for _ in range(0u, 1000) {
1530 test!(fn try_recvs_off_the_runtime() {
1531 use rustrt::thread::Thread;
1533 let (tx, rx) = channel();
1534 let (cdone, pdone) = channel();
1535 let t = Thread::start(proc() {
1538 match rx.try_recv() {
1539 Ok(()) => { hits += 1; }
1540 Err(Empty) => { Thread::yield_now(); }
1541 Err(Disconnected) => return,
1546 for _ in range(0u, 10) {
1559 pub fn stress_factor() -> uint {
1560 match os::getenv("RUST_TEST_STRESS") {
1561 Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
1567 let (tx, rx) = sync_channel::<int>(1);
1569 assert_eq!(rx.recv(), 1);
1572 test!(fn drop_full() {
1573 let (tx, _rx) = sync_channel(1);
1577 test!(fn smoke_shared() {
1578 let (tx, rx) = sync_channel::<int>(1);
1580 assert_eq!(rx.recv(), 1);
1581 let tx = tx.clone();
1583 assert_eq!(rx.recv(), 1);
1586 test!(fn smoke_threads() {
1587 let (tx, rx) = sync_channel::<int>(0);
1591 assert_eq!(rx.recv(), 1);
1594 test!(fn smoke_port_gone() {
1595 let (tx, rx) = sync_channel::<int>(0);
1600 test!(fn smoke_shared_port_gone2() {
1601 let (tx, rx) = sync_channel::<int>(0);
1603 let tx2 = tx.clone();
1608 test!(fn port_gone_concurrent() {
1609 let (tx, rx) = sync_channel::<int>(0);
1616 test!(fn port_gone_concurrent_shared() {
1617 let (tx, rx) = sync_channel::<int>(0);
1618 let tx2 = tx.clone();
1628 test!(fn smoke_chan_gone() {
1629 let (tx, rx) = sync_channel::<int>(0);
1634 test!(fn smoke_chan_gone_shared() {
1635 let (tx, rx) = sync_channel::<()>(0);
1636 let tx2 = tx.clone();
1642 test!(fn chan_gone_concurrent() {
1643 let (tx, rx) = sync_channel::<int>(0);
1652 let (tx, rx) = sync_channel::<int>(0);
1654 for _ in range(0u, 10000) { tx.send(1); }
1656 for _ in range(0u, 10000) {
1657 assert_eq!(rx.recv(), 1);
1661 test!(fn stress_shared() {
1662 static AMT: uint = 1000;
1663 static NTHREADS: uint = 8;
1664 let (tx, rx) = sync_channel::<int>(0);
1665 let (dtx, drx) = sync_channel::<()>(0);
1668 for _ in range(0, AMT * NTHREADS) {
1669 assert_eq!(rx.recv(), 1);
1671 match rx.try_recv() {
1678 for _ in range(0, NTHREADS) {
1679 let tx = tx.clone();
1681 for _ in range(0, AMT) { tx.send(1); }
1688 test!(fn oneshot_single_thread_close_port_first() {
1689 // Simple test of closing without sending
1690 let (_tx, rx) = sync_channel::<int>(0);
1694 test!(fn oneshot_single_thread_close_chan_first() {
1695 // Simple test of closing without sending
1696 let (tx, _rx) = sync_channel::<int>(0);
1700 test!(fn oneshot_single_thread_send_port_close() {
1701 // Testing that the sender cleans up the payload if receiver is closed
1702 let (tx, rx) = sync_channel::<Box<int>>(0);
1707 test!(fn oneshot_single_thread_recv_chan_close() {
1708 // Receiving on a closed chan will panic
1709 let res = task::try(proc() {
1710 let (tx, rx) = sync_channel::<int>(0);
1715 assert!(res.is_err());
1718 test!(fn oneshot_single_thread_send_then_recv() {
1719 let (tx, rx) = sync_channel::<Box<int>>(1);
1721 assert!(rx.recv() == box 10);
1724 test!(fn oneshot_single_thread_try_send_open() {
1725 let (tx, rx) = sync_channel::<int>(1);
1726 assert_eq!(tx.try_send(10), Ok(()));
1727 assert!(rx.recv() == 10);
1730 test!(fn oneshot_single_thread_try_send_closed() {
1731 let (tx, rx) = sync_channel::<int>(0);
1733 assert_eq!(tx.try_send(10), Err(RecvDisconnected(10)));
1736 test!(fn oneshot_single_thread_try_send_closed2() {
1737 let (tx, _rx) = sync_channel::<int>(0);
1738 assert_eq!(tx.try_send(10), Err(Full(10)));
1741 test!(fn oneshot_single_thread_try_recv_open() {
1742 let (tx, rx) = sync_channel::<int>(1);
1744 assert!(rx.recv_opt() == Ok(10));
1747 test!(fn oneshot_single_thread_try_recv_closed() {
1748 let (tx, rx) = sync_channel::<int>(0);
1750 assert!(rx.recv_opt() == Err(()));
1753 test!(fn oneshot_single_thread_peek_data() {
1754 let (tx, rx) = sync_channel::<int>(1);
1755 assert_eq!(rx.try_recv(), Err(Empty))
1757 assert_eq!(rx.try_recv(), Ok(10));
1760 test!(fn oneshot_single_thread_peek_close() {
1761 let (tx, rx) = sync_channel::<int>(0);
1763 assert_eq!(rx.try_recv(), Err(Disconnected));
1764 assert_eq!(rx.try_recv(), Err(Disconnected));
1767 test!(fn oneshot_single_thread_peek_open() {
1768 let (_tx, rx) = sync_channel::<int>(0);
1769 assert_eq!(rx.try_recv(), Err(Empty));
1772 test!(fn oneshot_multi_task_recv_then_send() {
1773 let (tx, rx) = sync_channel::<Box<int>>(0);
1775 assert!(rx.recv() == box 10);
1781 test!(fn oneshot_multi_task_recv_then_close() {
1782 let (tx, rx) = sync_channel::<Box<int>>(0);
1786 let res = task::try(proc() {
1787 assert!(rx.recv() == box 10);
1789 assert!(res.is_err());
1792 test!(fn oneshot_multi_thread_close_stress() {
1793 for _ in range(0, stress_factor()) {
1794 let (tx, rx) = sync_channel::<int>(0);
1802 test!(fn oneshot_multi_thread_send_close_stress() {
1803 for _ in range(0, stress_factor()) {
1804 let (tx, rx) = sync_channel::<int>(0);
1808 let _ = task::try(proc() {
1814 test!(fn oneshot_multi_thread_recv_close_stress() {
1815 for _ in range(0, stress_factor()) {
1816 let (tx, rx) = sync_channel::<int>(0);
1818 let res = task::try(proc() {
1821 assert!(res.is_err());
1831 test!(fn oneshot_multi_thread_send_recv_stress() {
1832 for _ in range(0, stress_factor()) {
1833 let (tx, rx) = sync_channel::<Box<int>>(0);
1838 assert!(rx.recv() == box 10i);
1843 test!(fn stream_send_recv_stress() {
1844 for _ in range(0, stress_factor()) {
1845 let (tx, rx) = sync_channel::<Box<int>>(0);
1850 fn send(tx: SyncSender<Box<int>>, i: int) {
1851 if i == 10 { return }
1859 fn recv(rx: Receiver<Box<int>>, i: int) {
1860 if i == 10 { return }
1863 assert!(rx.recv() == box i);
1870 test!(fn recv_a_lot() {
1871 // Regression test that we don't run out of stack in scheduler context
1872 let (tx, rx) = sync_channel(10000);
1873 for _ in range(0u, 10000) { tx.send(()); }
1874 for _ in range(0u, 10000) { rx.recv(); }
1877 test!(fn shared_chan_stress() {
1878 let (tx, rx) = sync_channel(0);
1879 let total = stress_factor() + 100;
1880 for _ in range(0, total) {
1881 let tx = tx.clone();
1887 for _ in range(0, total) {
1892 test!(fn test_nested_recv_iter() {
1893 let (tx, rx) = sync_channel::<int>(0);
1894 let (total_tx, total_rx) = sync_channel::<int>(0);
1898 for x in rx.iter() {
1908 assert_eq!(total_rx.recv(), 6);
1911 test!(fn test_recv_iter_break() {
1912 let (tx, rx) = sync_channel::<int>(0);
1913 let (count_tx, count_rx) = sync_channel(0);
1917 for x in rx.iter() {
1924 count_tx.send(count);
1930 let _ = tx.try_send(2);
1932 assert_eq!(count_rx.recv(), 4);
1935 test!(fn try_recv_states() {
1936 let (tx1, rx1) = sync_channel::<int>(1);
1937 let (tx2, rx2) = sync_channel::<()>(1);
1938 let (tx3, rx3) = sync_channel::<()>(1);
1948 assert_eq!(rx1.try_recv(), Err(Empty));
1951 assert_eq!(rx1.try_recv(), Ok(1));
1952 assert_eq!(rx1.try_recv(), Err(Empty));
1955 assert_eq!(rx1.try_recv(), Err(Disconnected));
1958 // This bug used to end up in a livelock inside of the Receiver destructor
1959 // because the internal state of the Shared packet was corrupted
1960 test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
1961 let (tx, rx) = sync_channel::<()>(0);
1962 let (tx2, rx2) = sync_channel::<()>(0);
1964 rx.recv(); // wait on a oneshot
1965 drop(rx); // destroy a shared
1968 // make sure the other task has gone to sleep
1969 for _ in range(0u, 5000) { task::deschedule(); }
1971 // upgrade to a shared chan and send a message
1976 // wait for the child task to exit before we exit
1980 test!(fn try_recvs_off_the_runtime() {
1981 use rustrt::thread::Thread;
1983 let (tx, rx) = sync_channel::<()>(0);
1984 let (cdone, pdone) = channel();
1985 let t = Thread::start(proc() {
1988 match rx.try_recv() {
1989 Ok(()) => { hits += 1; }
1990 Err(Empty) => { Thread::yield_now(); }
1991 Err(Disconnected) => return,
1996 for _ in range(0u, 10) {
2003 test!(fn send_opt1() {
2004 let (tx, rx) = sync_channel::<int>(0);
2005 spawn(proc() { rx.recv(); });
2006 assert_eq!(tx.send_opt(1), Ok(()));
2009 test!(fn send_opt2() {
2010 let (tx, rx) = sync_channel::<int>(0);
2011 spawn(proc() { drop(rx); });
2012 assert_eq!(tx.send_opt(1), Err(1));
2015 test!(fn send_opt3() {
2016 let (tx, rx) = sync_channel::<int>(1);
2017 assert_eq!(tx.send_opt(1), Ok(()));
2018 spawn(proc() { drop(rx); });
2019 assert_eq!(tx.send_opt(1), Err(1));
2022 test!(fn send_opt4() {
2023 let (tx, rx) = sync_channel::<int>(0);
2024 let tx2 = tx.clone();
2025 let (done, donerx) = channel();
2026 let done2 = done.clone();
2028 assert_eq!(tx.send_opt(1), Err(1));
2032 assert_eq!(tx2.send_opt(2), Err(2));
2040 test!(fn try_send1() {
2041 let (tx, _rx) = sync_channel::<int>(0);
2042 assert_eq!(tx.try_send(1), Err(Full(1)));
2045 test!(fn try_send2() {
2046 let (tx, _rx) = sync_channel::<int>(1);
2047 assert_eq!(tx.try_send(1), Ok(()));
2048 assert_eq!(tx.try_send(1), Err(Full(1)));
2051 test!(fn try_send3() {
2052 let (tx, rx) = sync_channel::<int>(1);
2053 assert_eq!(tx.try_send(1), Ok(()));
2055 assert_eq!(tx.try_send(1), Err(RecvDisconnected(1)));
2058 test!(fn try_send4() {
2059 let (tx, rx) = sync_channel::<int>(0);
2061 for _ in range(0u, 1000) { task::deschedule(); }
2062 assert_eq!(tx.try_send(1), Ok(()));
2064 assert_eq!(rx.recv(), 1);
2065 } #[ignore(reason = "flaky on libnative")])
2067 test!(fn issue_15761() {
2069 let (tx1, rx1) = sync_channel::<()>(3);
2070 let (tx2, rx2) = sync_channel::<()>(3);
2074 tx2.try_send(()).unwrap();
2077 tx1.try_send(()).unwrap();
2081 for _ in range(0u, 100) {