/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of Dmitry Vyukov.
 */
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two tasks. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
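//!
//! A minimal usage sketch (not part of the original docs; marked `ignore`
//! because this is an internal module and the single-producer/single-consumer
//! discipline is left to the caller):
//!
//! ```ignore
//! // Assumes the `Queue` type defined below is in scope.
//! let q = unsafe { Queue::new(0) }; // 0 = no bound on the internal node cache
//! q.push(1);                        // producer side
//! q.push(2);
//! assert_eq!(q.pop(), Some(1));     // consumer side, FIFO order
//! assert_eq!(q.pop(), Some(2));
//! assert_eq!(q.pop(), None);        // empty queue yields None
//! ```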
#![unstable(feature = "std_misc")]
use alloc::boxed::{self, Box};
use core::ptr;
use core::cell::UnsafeCell;

use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
// Node within the linked list queue of messages to send
struct Node<T> {
    // FIXME: this could be an uninitialized T if we're careful enough, and
    // that would reduce memory usage (and be a bit faster).
    value: Option<T>,         // nullable for re-use of nodes
    next: AtomicPtr<Node<T>>, // next node in the queue
}
/// The single-producer single-consumer queue. This structure is not cloneable,
/// but it can be safely shared in an `Arc` if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
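///
/// An illustrative sketch of that sharing pattern (hypothetical variable names,
/// mirroring the `stress` test at the bottom of this file; marked `ignore`
/// because this is an internal module):
///
/// ```ignore
/// let q = Arc::new(unsafe { Queue::new(0) });
/// let producer = q.clone();   // moved into exactly one pushing thread
/// let consumer = q;           // kept by exactly one popping thread
/// let t = thread::spawn(move || producer.push(1));
/// while consumer.pop().is_none() {}
/// t.join().unwrap();
/// ```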
pub struct Queue<T> {
    // consumer fields
    tail: UnsafeCell<*mut Node<T>>, // where to pop from
    tail_prev: AtomicPtr<Node<T>>,  // last node the consumer retired, published for reuse by the producer

    // producer fields
    head: UnsafeCell<*mut Node<T>>,      // where to push to
    first: UnsafeCell<*mut Node<T>>,     // where to get new nodes from
    tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail

    // Cache maintenance fields. Additions and subtractions are stored
    // separately in order to allow them to use nonatomic addition/subtraction.
    cache_bound: usize,
    cache_additions: AtomicUsize,
    cache_subtractions: AtomicUsize,
}
unsafe impl<T: Send> Send for Queue<T> { }

unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
    fn new() -> *mut Node<T> {
        unsafe {
            boxed::into_raw(box Node {
                value: None,
                next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
            })
        }
    }
}

impl<T> Queue<T> {
    /// Creates a new queue.
    ///
    /// This is unsafe as the type system doesn't enforce a single
    /// consumer-producer relationship. It also allows the consumer to `pop`
    /// items while there is a `peek` active due to all methods having a
    /// non-mutable receiver.
    ///
    /// # Arguments
    ///
    ///   * `bound` - This queue is implemented with a linked list, which means
    ///               that a push always performs an allocation. In order to
    ///               amortize this cost, an internal cache of nodes is
    ///               maintained to prevent an allocation from always being
    ///               necessary. This bound is the limit on the size of the
    ///               cache (if desired). If the value is 0, then the cache has
    ///               no bound. Otherwise, the cache will never grow larger than
    ///               `bound` (although the queue itself could be much larger).
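    ///
    /// For illustration (hypothetical bounds, marked `ignore` because this is
    /// an internal module): with a bound of 128 at most 128 popped nodes are
    /// kept around for reuse by `push`, while a bound of 0 recycles every
    /// popped node.
    ///
    /// ```ignore
    /// let capped = unsafe { Queue::new(128) };   // node cache limited to 128 nodes
    /// let uncapped = unsafe { Queue::new(0) };   // node cache grows as needed
    /// capped.push(1);
    /// uncapped.push(1);
    /// ```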
    pub unsafe fn new(bound: usize) -> Queue<T> {
        // Two empty nodes: the consumer starts popping after `n2`, while `n1`
        // anchors the producer's list of reusable nodes.
        let n1 = Node::new();
        let n2 = Node::new();
        (*n1).next.store(n2, Ordering::Relaxed);
        Queue {
            tail: UnsafeCell::new(n2),
            tail_prev: AtomicPtr::new(n1),
            head: UnsafeCell::new(n2),
            first: UnsafeCell::new(n1),
            tail_copy: UnsafeCell::new(n1),
            cache_bound: bound,
            cache_additions: AtomicUsize::new(0),
            cache_subtractions: AtomicUsize::new(0),
        }
    }
    /// Pushes a new value onto this queue. Note that to use this function
    /// safely, it must be externally guaranteed that there is only one pusher.
    pub fn push(&self, t: T) {
        unsafe {
            // Acquire a node (which either uses a cached one or allocates a new
            // one), and then append this to the 'head' node.
            let n = self.alloc();
            assert!((*n).value.is_none());
            (*n).value = Some(t);
            (*n).next.store(ptr::null_mut(), Ordering::Relaxed);
            (**self.head.get()).next.store(n, Ordering::Release);
            *self.head.get() = n;
        }
    }
    unsafe fn alloc(&self) -> *mut Node<T> {
        // First try to see if we can consume the 'first' node for our uses.
        // We try to avoid as many atomic instructions as possible here, so
        // the addition to cache_subtractions is not atomic (plus we're the
        // only one subtracting from the cache).
        if *self.first.get() != *self.tail_copy.get() {
            if self.cache_bound > 0 {
                let b = self.cache_subtractions.load(Ordering::Relaxed);
                self.cache_subtractions.store(b + 1, Ordering::Relaxed);
            }
            let ret = *self.first.get();
            *self.first.get() = (*ret).next.load(Ordering::Relaxed);
            return ret;
        }
        // If the above fails, then update our copy of the tail and try
        // again.
        *self.tail_copy.get() = self.tail_prev.load(Ordering::Acquire);
        if *self.first.get() != *self.tail_copy.get() {
            if self.cache_bound > 0 {
                let b = self.cache_subtractions.load(Ordering::Relaxed);
                self.cache_subtractions.store(b + 1, Ordering::Relaxed);
            }
            let ret = *self.first.get();
            *self.first.get() = (*ret).next.load(Ordering::Relaxed);
            return ret;
        }
        // If all of that fails, then we have to allocate a new node
        // (there's nothing in the node cache).
        Node::new()
    }
    /// Attempts to pop a value from this queue. Remember that to use this type
    /// safely you must ensure that there is only one popper at a time.
    pub fn pop(&self) -> Option<T> {
        unsafe {
            // The `tail` node is not actually a used node, but rather a
            // sentinel from where we should start popping from. Hence, look at
            // tail's next field and see if we can use it. If we do a pop, then
            // the current tail node is a candidate for going into the cache.
            let tail = *self.tail.get();
            let next = (*tail).next.load(Ordering::Acquire);
            if next.is_null() { return None }
            assert!((*next).value.is_some());
            let ret = (*next).value.take();

            *self.tail.get() = next;
            if self.cache_bound == 0 {
                self.tail_prev.store(tail, Ordering::Release);
            } else {
                // FIXME: this is dubious with overflow.
                let additions = self.cache_additions.load(Ordering::Relaxed);
                let subtractions = self.cache_subtractions.load(Ordering::Relaxed);
                let size = additions - subtractions;

                if size < self.cache_bound {
                    self.tail_prev.store(tail, Ordering::Release);
                    self.cache_additions.store(additions + 1, Ordering::Relaxed);
                } else {
                    // The cache is full, so splice `tail` out of the reuse list.
                    (*self.tail_prev.load(Ordering::Relaxed))
                          .next.store(next, Ordering::Relaxed);
                    // We have successfully erased all references to 'tail', so
                    // now we can safely drop it.
                    let _: Box<Node<T>> = Box::from_raw(tail);
                }
            }
            ret
        }
    }
    /// Attempts to peek at the head of the queue, returning `None` if the queue
    /// has no data currently.
    ///
    /// # Warning
    ///
    /// The reference returned is invalid if it is not used before the consumer
    /// pops the value off the queue. If the producer then pushes another value
    /// onto the queue, it will overwrite the value pointed to by the reference.
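    ///
    /// An illustrative sketch of peeking and then popping (not part of the
    /// original docs; marked `ignore` because this is an internal module):
    ///
    /// ```ignore
    /// let q = unsafe { Queue::new(0) };
    /// q.push(1);
    /// if let Some(v) = q.peek() {
    ///     *v += 1;                  // mutate the queued value in place
    /// }
    /// assert_eq!(q.pop(), Some(2)); // the pop observes the mutation
    /// ```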
    pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
        // This is essentially the same as above with all the popping bits
        // stripped out.
        unsafe {
            let tail = *self.tail.get();
            let next = (*tail).next.load(Ordering::Acquire);
            if next.is_null() { return None }
            return (*next).value.as_mut();
        }
    }
}
impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        unsafe {
            // Walk the whole node list (cached and live nodes alike) and free it.
            let mut cur = *self.first.get();
            while !cur.is_null() {
                let next = (*cur).next.load(Ordering::Relaxed);
                let _n: Box<Node<T>> = Box::from_raw(cur);
                cur = next;
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use prelude::v1::*;

    use sync::Arc;
    use super::Queue;
    use thread;
    use sync::mpsc::channel;

    #[test]
    fn smoke() {
        unsafe {
            let queue = Queue::new(0);
            queue.push(1);
            queue.push(2);
            assert_eq!(queue.pop(), Some(1));
            assert_eq!(queue.pop(), Some(2));
            assert_eq!(queue.pop(), None);
            queue.push(3);
            queue.push(4);
            assert_eq!(queue.pop(), Some(3));
            assert_eq!(queue.pop(), Some(4));
            assert_eq!(queue.pop(), None);
        }
    }
    #[test]
    fn peek() {
        unsafe {
            let queue = Queue::new(0);
            queue.push(vec![1]);

            // Ensure the borrowchecker works
            match queue.peek() {
                Some(vec) => match &**vec {
                    // Note that `pop` is not allowed here due to borrow
                    [1] => {}
                    _ => return
                },
                None => unreachable!()
            }

            queue.pop();
        }
    }
    #[test]
    fn drop_full() {
        unsafe {
            let q: Queue<Box<_>> = Queue::new(0);
            q.push(box 1);
            q.push(box 2);
        }
    }
    #[test]
    fn smoke_bound() {
        unsafe {
            let q = Queue::new(0);
            q.push(1);
            q.push(2);
            assert_eq!(q.pop(), Some(1));
            assert_eq!(q.pop(), Some(2));
            assert_eq!(q.pop(), None);
            q.push(3);
            q.push(4);
            assert_eq!(q.pop(), Some(3));
            assert_eq!(q.pop(), Some(4));
            assert_eq!(q.pop(), None);
        }
    }
    #[test]
    fn stress() {
        unsafe {
            stress_bound(0);
            stress_bound(1);
        }

        unsafe fn stress_bound(bound: usize) {
            let q = Arc::new(Queue::new(bound));

            let (tx, rx) = channel();
            let q2 = q.clone();
            let _t = thread::spawn(move|| {
                // Consumer: wait for each pushed item in turn.
                for _ in 0..100000 {
                    loop {
                        match q2.pop() {
                            Some(1) => break,
                            Some(_) => panic!(),
                            None => {}
                        }
                    }
                }
                tx.send(()).unwrap();
            });
            // Producer: push the same number of items, then wait for the
            // consumer to signal completion.
            for _ in 0..100000 {
                q.push(1);
            }
            rx.recv().unwrap();
        }
    }
}