/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of Dmitry Vyukov.
 */

// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue

//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two threads. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
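//!
//! A minimal usage sketch (illustrative only, not a doc test: this module is
//! internal and `Queue::new` is unsafe, so the snippet assumes the type is
//! reachable from the calling code):
//!
//! ```ignore
//! use std::sync::Arc;
//! use std::thread;
//!
//! let queue = Arc::new(unsafe { Queue::new(0) });
//!
//! // Exactly one producer thread...
//! let producer = queue.clone();
//! let t = thread::spawn(move || {
//!     for i in 0..4 {
//!         producer.push(i);
//!     }
//! });
//! t.join().unwrap();
//!
//! // ...and exactly one consumer (here, the parent thread after the join).
//! while let Some(i) = queue.pop() {
//!     println!("got {}", i);
//! }
//! ```
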
use alloc::boxed::Box;
use core::ptr;
use core::cell::UnsafeCell;

use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

// Node within the linked list queue of messages to send
//
// FIXME: this could be an uninitialized T if we're careful enough, and
// that would reduce memory usage (and be a bit faster).
struct Node<T> {
    value: Option<T>,          // nullable for re-use of nodes
    next: AtomicPtr<Node<T>>,  // next node in the queue
}

/// The single-producer single-consumer queue. This structure is not cloneable,
/// but it can be safely shared in an Arc if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
pub struct Queue<T> {
    // consumer fields
    tail: UnsafeCell<*mut Node<T>>, // where to pop from
    tail_prev: AtomicPtr<Node<T>>,  // where to pop to

    // producer fields
    head: UnsafeCell<*mut Node<T>>,      // where to push to
    first: UnsafeCell<*mut Node<T>>,     // where to get new nodes from
    tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail

    // Cache maintenance fields. Additions and subtractions are stored
    // separately in order to allow them to use nonatomic addition/subtraction.
    cache_bound: usize,
    cache_additions: AtomicUsize,
    cache_subtractions: AtomicUsize,
}

unsafe impl<T: Send> Send for Queue<T> { }

unsafe impl<T: Send> Sync for Queue<T> { }

impl<T> Node<T> {
    fn new() -> *mut Node<T> {
        Box::into_raw(box Node {
            value: None,
            next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
        })
    }
}

impl<T> Queue<T> {
    /// Creates a new queue.
    ///
    /// This is unsafe as the type system doesn't enforce a single
    /// consumer-producer relationship. It also allows the consumer to `pop`
    /// items while there is a `peek` active due to all methods having a
    /// non-mutable receiver.
    ///
    /// # Arguments
    ///
    ///   * `bound` - This queue implementation is implemented with a linked
    ///               list, and this means that a push is always a malloc. In
    ///               order to amortize this cost, an internal cache of nodes is
    ///               maintained to prevent a malloc from always being
    ///               necessary. This bound is the limit on the size of the
    ///               cache (if desired). If the value is 0, then the cache has
    ///               no bound. Otherwise, the cache will never grow larger than
    ///               `bound` (although the queue itself could be much larger).
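    ///
    /// A construction sketch (illustrative, not a doc test; the values are
    /// arbitrary):
    ///
    /// ```ignore
    /// // Unbounded node cache: popped nodes are always recycled.
    /// let unbounded = unsafe { Queue::new(0) };
    ///
    /// // Keep at most 128 spare nodes; beyond that, popped nodes are freed.
    /// let bounded = unsafe { Queue::new(128) };
    /// ```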
    pub unsafe fn new(bound: usize) -> Queue<T> {
        let n1 = Node::new();
        let n2 = Node::new();
        (*n1).next.store(n2, Ordering::Relaxed);
        Queue {
            tail: UnsafeCell::new(n2),
            tail_prev: AtomicPtr::new(n1),
            head: UnsafeCell::new(n2),
            first: UnsafeCell::new(n1),
            tail_copy: UnsafeCell::new(n1),
            cache_bound: bound,
            cache_additions: AtomicUsize::new(0),
            cache_subtractions: AtomicUsize::new(0),
        }
    }

    /// Pushes a new value onto this queue. Note that to use this function
    /// safely, it must be externally guaranteed that there is only one pusher.
    pub fn push(&self, t: T) {
        unsafe {
            // Acquire a node (which either uses a cached one or allocates a new
            // one), and then append this to the 'head' node.
            let n = self.alloc();
            assert!((*n).value.is_none());
            (*n).value = Some(t);
            (*n).next.store(ptr::null_mut(), Ordering::Relaxed);
            (**self.head.get()).next.store(n, Ordering::Release);
            *self.head.get() = n;
        }
    }

    unsafe fn alloc(&self) -> *mut Node<T> {
        // First try to see if we can consume the 'first' node for our uses.
        // We try to avoid as many atomic instructions as possible here, so
        // the addition to cache_subtractions is not atomic (plus we're the
        // only one subtracting from the cache).
        if *self.first.get() != *self.tail_copy.get() {
            if self.cache_bound > 0 {
                let b = self.cache_subtractions.load(Ordering::Relaxed);
                self.cache_subtractions.store(b + 1, Ordering::Relaxed);
            }
            let ret = *self.first.get();
            *self.first.get() = (*ret).next.load(Ordering::Relaxed);
            return ret;
        }
        // If the above fails, then update our copy of the tail and try
        // again.
        *self.tail_copy.get() = self.tail_prev.load(Ordering::Acquire);
        if *self.first.get() != *self.tail_copy.get() {
            if self.cache_bound > 0 {
                let b = self.cache_subtractions.load(Ordering::Relaxed);
                self.cache_subtractions.store(b + 1, Ordering::Relaxed);
            }
            let ret = *self.first.get();
            *self.first.get() = (*ret).next.load(Ordering::Relaxed);
            return ret;
        }
        // If all of that fails, then we have to allocate a new node
        // (there's nothing in the node cache).
        Node::new()
    }

    /// Attempts to pop a value from this queue. Remember that to use this type
    /// safely you must ensure that there is only one popper at a time.
    pub fn pop(&self) -> Option<T> {
        unsafe {
            // The `tail` node is not actually a used node, but rather a
            // sentinel from where we should start popping from. Hence, look at
            // tail's next field and see if we can use it. If we do a pop, then
            // the current tail node is a candidate for going into the cache.
            let tail = *self.tail.get();
            let next = (*tail).next.load(Ordering::Acquire);
            if next.is_null() { return None }
            assert!((*next).value.is_some());
            let ret = (*next).value.take();

            *self.tail.get() = next;
            if self.cache_bound == 0 {
                self.tail_prev.store(tail, Ordering::Release);
            } else {
                // FIXME: this is dubious with overflow.
                let additions = self.cache_additions.load(Ordering::Relaxed);
                let subtractions = self.cache_subtractions.load(Ordering::Relaxed);
                let size = additions - subtractions;

                if size < self.cache_bound {
                    self.tail_prev.store(tail, Ordering::Release);
                    self.cache_additions.store(additions + 1, Ordering::Relaxed);
                } else {
                    (*self.tail_prev.load(Ordering::Relaxed))
                        .next.store(next, Ordering::Relaxed);
                    // We have successfully erased all references to 'tail', so
                    // now we can safely drop it.
                    let _: Box<Node<T>> = Box::from_raw(tail);
                }
            }
            ret
        }
    }

    /// Attempts to peek at the head of the queue, returning `None` if the queue
    /// has no data currently.
    ///
    /// # Warning
    /// The reference returned is invalid if it is not used before the consumer
    /// pops the value off the queue. If the producer then pushes another value
    /// onto the queue, it will overwrite the value pointed to by the reference.
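    ///
    /// A sketch of the intended discipline (illustrative, not a doc test):
    ///
    /// ```ignore
    /// if let Some(front) = queue.peek() {
    ///     // Use (or mutate) the value while it is still the front element...
    ///     *front += 1;
    /// }
    /// // ...and only afterwards let the consumer pop or the producer push again.
    /// let _ = queue.pop();
    /// ```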
    pub fn peek(&self) -> Option<&mut T> {
        // This is essentially the same as above with all the popping bits
        // stripped out.
        unsafe {
            let tail = *self.tail.get();
            let next = (*tail).next.load(Ordering::Acquire);
            if next.is_null() { None } else { (*next).value.as_mut() }
        }
    }
}

impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        unsafe {
            let mut cur = *self.first.get();
            while !cur.is_null() {
                let next = (*cur).next.load(Ordering::Relaxed);
                let _n: Box<Node<T>> = Box::from_raw(cur);
                cur = next;
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use sync::Arc;
    use super::Queue;
    use thread;
    use sync::mpsc::channel;

    #[test]
    fn smoke() {
        unsafe {
            let queue = Queue::new(0);
            queue.push(1);
            queue.push(2);
            assert_eq!(queue.pop(), Some(1));
            assert_eq!(queue.pop(), Some(2));
            assert_eq!(queue.pop(), None);
            queue.push(3);
            queue.push(4);
            assert_eq!(queue.pop(), Some(3));
            assert_eq!(queue.pop(), Some(4));
            assert_eq!(queue.pop(), None);
        }
    }

    #[test]
    fn peek() {
        unsafe {
            let queue = Queue::new(0);
            queue.push(vec![1]);

            // Ensure the borrowchecker works
            match queue.peek() {
                Some(vec) => match &**vec {
                    // Note that `pop` is not allowed here due to borrow
                    [1] => {}
                    _ => return,
                },
                None => unreachable!()
            }

            queue.pop();
        }
    }

    #[test]
    fn drop_full() {
        unsafe {
            let q: Queue<Box<_>> = Queue::new(0);
            q.push(box 1);
            q.push(box 2);
        }
    }

    #[test]
    fn smoke_bound() {
        unsafe {
            let q = Queue::new(0);
            q.push(1);
            q.push(2);
            assert_eq!(q.pop(), Some(1));
            assert_eq!(q.pop(), Some(2));
            assert_eq!(q.pop(), None);
            q.push(3);
            q.push(4);
            assert_eq!(q.pop(), Some(3));
            assert_eq!(q.pop(), Some(4));
            assert_eq!(q.pop(), None);
        }
    }

    #[test]
    fn stress() {
        unsafe {
            stress_bound(0);
            stress_bound(1);
        }

        unsafe fn stress_bound(bound: usize) {
            let q = Arc::new(Queue::new(bound));

            let (tx, rx) = channel();
            let q2 = q.clone();
            let _t = thread::spawn(move|| {
                for _ in 0..100000 {
                    loop {
                        match q2.pop() {
                            Some(1) => break,
                            Some(_) => panic!(),
                            None => {}
                        }
                    }
                }
                tx.send(()).unwrap();
            });
            for _ in 0..100000 {
                q.push(1);
            }
            rx.recv().unwrap();
        }
    }
}