Improves documentation and consistency, removes stray empty lines, etc.
DEPS_alloc := core libc native:jemalloc
DEPS_debug := std
DEPS_rustrt := alloc core libc collections native:rustrt_native
-DEPS_std := core libc rand alloc collections rustrt \
+DEPS_std := core libc rand alloc collections rustrt sync \
native:rust_builtin native:backtrace
DEPS_graphviz := std
DEPS_green := std native:context_switch
DEPS_rustuv := std native:uv native:uv_support
DEPS_native := std
DEPS_syntax := std term serialize log fmt_macros debug
-DEPS_rustc := syntax native:rustllvm flate arena serialize sync getopts \
+DEPS_rustc := syntax native:rustllvm flate arena serialize getopts \
time log graphviz debug
-DEPS_rustdoc := rustc native:hoedown serialize sync getopts \
+DEPS_rustdoc := rustc native:hoedown serialize getopts \
test time debug
DEPS_flate := std native:miniz
DEPS_arena := std
DEPS_term := std log
DEPS_semver := std
DEPS_uuid := std serialize
-DEPS_sync := std alloc
+DEPS_sync := core alloc rustrt collections
DEPS_getopts := std
DEPS_collections := core alloc
DEPS_fourcc := rustc syntax std
DEPS_hexfloat := rustc syntax std
DEPS_num := std
DEPS_test := std getopts serialize term time regex native:rust_test_helpers
-DEPS_time := std serialize sync
+DEPS_time := std serialize
DEPS_rand := core
DEPS_url := std
-DEPS_log := std sync
+DEPS_log := std
DEPS_regex := std
DEPS_regex_macros = rustc syntax std regex
DEPS_fmt_macros = std
The basic example below illustrates this.
~~~
-extern crate sync;
+use std::sync::Future;
# fn main() {
# fn make_a_sandwich() {};
12586269025
}
-let mut delayed_fib = sync::Future::spawn(proc() fib(50));
+let mut delayed_fib = Future::spawn(proc() fib(50));
make_a_sandwich();
println!("fib(50) = {}", delayed_fib.get())
# }
be distributed on the available cores.
~~~
-# extern crate sync;
+# use std::sync::Future;
fn partial_sum(start: uint) -> f64 {
let mut local_sum = 0f64;
for num in range(start*100000, (start+1)*100000) {
}
fn main() {
- let mut futures = Vec::from_fn(1000, |ind| sync::Future::spawn( proc() { partial_sum(ind) }));
+ let mut futures = Vec::from_fn(1000, |ind| Future::spawn( proc() { partial_sum(ind) }));
let mut final_res = 0f64;
for ft in futures.mut_iter() {
a single large vector of floats. Each task needs the full vector to perform its duty.
~~~
-extern crate sync;
-
-use sync::Arc;
use std::rand;
+use std::sync::Arc;
fn pnorm(nums: &[f64], p: uint) -> f64 {
nums.iter().fold(0.0, |a, b| a + b.powf(p as f64)).powf(1.0 / (p as f64))
created by the line
~~~
-# extern crate sync;
# use std::rand;
-# use sync::Arc;
+# use std::sync::Arc;
# fn main() {
# let numbers = Vec::from_fn(1000000, |_| rand::random::<f64>());
let numbers_arc=Arc::new(numbers);
reference to the underlying vector as if it were local.
~~~
-# extern crate sync;
# use std::rand;
-# use sync::Arc;
+# use std::sync::Arc;
# fn pnorm(nums: &[f64], p: uint) -> f64 { 4.0 }
# fn main() {
# let numbers=Vec::from_fn(1000000, |_| rand::random::<f64>());
Here is the function that implements the child task:
~~~
-extern crate sync;
+use std::comm::DuplexStream;
# fn main() {
-fn stringifier(channel: &sync::DuplexStream<String, uint>) {
+fn stringifier(channel: &DuplexStream<String, uint>) {
let mut value: uint;
loop {
value = channel.recv();
Here is the code for the parent task:
~~~
-extern crate sync;
+use std::comm::duplex;
# use std::task::spawn;
-# use sync::DuplexStream;
-# fn stringifier(channel: &sync::DuplexStream<String, uint>) {
+# use std::comm::DuplexStream;
+# fn stringifier(channel: &DuplexStream<String, uint>) {
# let mut value: uint;
# loop {
# value = channel.recv();
# }
# fn main() {
-let (from_child, to_child) = sync::duplex();
+let (from_child, to_child) = duplex();
spawn(proc() {
stringifier(&to_child);
Here's some code:
```
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn main() {
let numbers = vec![1,2,3];
and modify it to mutate the shared state:
```
-extern crate sync;
-use sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex};
fn main() {
let numbers = vec![1,2,3];
not have a destructor.
~~~
-use std::gc::Gc;
+use std::gc::GC;
// A fixed-size array allocated in a garbage-collected box
-let x = Gc::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+let x = box(GC) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let y = x; // does not perform a move, unlike with `Rc`
let z = x;
-assert!(*z.borrow() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+assert!(*z == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
~~~
With shared ownership, mutability cannot be inherited so the boxes are always immutable. However,
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
- "libstd/sync/mpsc_queue.rs", # BSD
- "libstd/sync/spsc_queue.rs", # BSD
- "libstd/sync/mpmc_bounded_queue.rs", # BSD
+ "libsync/mpsc_queue.rs", # BSD
+ "libsync/spsc_queue.rs", # BSD
+ "libsync/mpmc_bounded_queue.rs", # BSD
"libsync/mpsc_intrusive.rs", # BSD
"test/bench/shootout-fannkuch-redux.rs", # BSD
"test/bench/shootout-meteor.rs", # BSD
/// task.
///
/// ```rust
-/// extern crate sync;
-///
-/// use sync::Arc;
+/// use std::sync::Arc;
///
/// fn main() {
/// let numbers = Vec::from_fn(100, |i| i as f32);
use std::task;
use std::vec::Vec;
use super::{Arc, Weak};
- use sync::Mutex;
+ use std::sync::Mutex;
struct Canary(*mut atomics::AtomicUint);
alloc as *mut u8
}
-// hack for libcore
-#[no_mangle]
-#[doc(hidden)]
-#[deprecated]
-#[cfg(not(test))]
-pub unsafe extern "C" fn rust_allocate(size: uint, align: uint) -> *mut u8 {
- allocate(size, align)
-}
-
-// hack for libcore
-#[no_mangle]
-#[doc(hidden)]
-#[deprecated]
-#[cfg(not(test))]
-pub unsafe extern "C" fn rust_deallocate(ptr: *mut u8, size: uint, align: uint) {
- deallocate(ptr, size, align)
-}
-
#[cfg(test)]
mod bench {
extern crate test;
box 10
})
}
-
- #[bench]
- fn alloc_owned_big(b: &mut Bencher) {
- b.iter(|| {
- box [10, ..1000]
- })
- }
}
// Allow testing this library
#[cfg(test)] extern crate debug;
-#[cfg(test)] extern crate sync;
#[cfg(test)] extern crate native;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate std;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate log;
#[test]
fn gc_inside() {
// see issue #11532
- use std::gc::Gc;
- let a = Rc::new(RefCell::new(Gc::new(1)));
+ use std::gc::GC;
+ let a = Rc::new(RefCell::new(box(GC) 1));
assert!(a.try_borrow_mut().is_some());
}
}
}
-impl<S: Writer, T: Hash<S>> Hash<S> for ~[T] {
- #[inline]
- fn hash(&self, state: &mut S) {
- self.as_slice().hash(state);
- }
-}
-
impl<S: Writer, T: Hash<S>> Hash<S> for Vec<T> {
#[inline]
fn hash(&self, state: &mut S) {
}
}
-impl<S: Writer, T: Hash<S>> Hash<S> for @T {
- #[inline]
- fn hash(&self, state: &mut S) {
- (**self).hash(state);
- }
-}
-
impl<S: Writer, T: Hash<S>> Hash<S> for Rc<T> {
#[inline]
fn hash(&self, state: &mut S) {
use str::Str;
use string::String;
use slice::{Vector, ImmutableVector};
+ use vec::Vec;
use super::super::{Hash, Writer};
use super::{SipState, hash, hash_with_keys};
s
}
- fn result_bytes(h: u64) -> ~[u8] {
- box [(h >> 0) as u8,
+ fn result_bytes(h: u64) -> Vec<u8> {
+ vec![(h >> 0) as u8,
(h >> 8) as u8,
(h >> 16) as u8,
(h >> 24) as u8,
pub use priority_queue::PriorityQueue;
pub use ringbuf::RingBuf;
pub use smallintmap::SmallIntMap;
+pub use string::String;
pub use treemap::{TreeMap, TreeSet};
pub use trie::{TrieMap, TrieSet};
+pub use vec::Vec;
mod macros;
use core::prelude::*;
-use alloc::heap::{allocate, deallocate};
use core::cmp;
-use core::finally::try_finally;
use core::mem::size_of;
-use core::mem::transmute;
use core::mem;
use core::ptr;
use core::iter::{range_step, MultiplicativeIterator};
/// Generates even and odd permutations alternately.
pub struct Permutations<T> {
swaps: ElementSwaps,
- v: ~[T],
+ v: Vec<T>,
}
-impl<T: Clone> Iterator<~[T]> for Permutations<T> {
+impl<T: Clone> Iterator<Vec<T>> for Permutations<T> {
#[inline]
- fn next(&mut self) -> Option<~[T]> {
+ fn next(&mut self) -> Option<Vec<T>> {
match self.swaps.next() {
None => None,
Some((0,0)) => Some(self.v.clone()),
Some((a, b)) => {
let elt = self.v.clone();
- self.v.swap(a, b);
+ self.v.as_mut_slice().swap(a, b);
Some(elt)
}
}
/// Extension methods for vector slices with cloneable elements
pub trait CloneableVector<T> {
/// Copy `self` into a new owned vector
- fn to_owned(&self) -> ~[T];
+ fn to_owned(&self) -> Vec<T>;
/// Convert `self` into an owned vector, not making a copy if possible.
- fn into_owned(self) -> ~[T];
+ fn into_owned(self) -> Vec<T>;
}
/// Extension methods for vector slices
impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
/// Returns a copy of `v`.
#[inline]
- fn to_owned(&self) -> ~[T] {
- use RawVec = core::raw::Vec;
- use core::num::{CheckedAdd, CheckedMul};
- use core::ptr;
-
- let len = self.len();
- let data_size = len.checked_mul(&mem::size_of::<T>());
- let data_size = data_size.expect("overflow in to_owned()");
- let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
- let size = size.expect("overflow in to_owned()");
-
- unsafe {
- // this should pass the real required alignment
- let ret = allocate(size, 8) as *mut RawVec<()>;
-
- let a_size = mem::size_of::<T>();
- let a_size = if a_size == 0 {1} else {a_size};
- (*ret).fill = len * a_size;
- (*ret).alloc = len * a_size;
-
- // Be careful with the following loop. We want it to be optimized
- // to a memcpy (or something similarly fast) when T is Copy. LLVM
- // is easily confused, so any extra operations during the loop can
- // prevent this optimization.
- let mut i = 0;
- let p = &mut (*ret).data as *mut _ as *mut T;
- try_finally(
- &mut i, (),
- |i, ()| while *i < len {
- ptr::write(
- &mut(*p.offset(*i as int)),
- self.unsafe_ref(*i).clone());
- *i += 1;
- },
- |i| if *i < len {
- // we must be failing, clean up after ourselves
- for j in range(0, *i as int) {
- ptr::read(&*p.offset(j));
- }
- // FIXME: #13994 (should pass align and size here)
- deallocate(ret as *mut u8, 0, 8);
- });
- mem::transmute(ret)
- }
- }
+ fn to_owned(&self) -> Vec<T> { Vec::from_slice(*self) }
#[inline(always)]
- fn into_owned(self) -> ~[T] { self.to_owned() }
-}
-
-/// Extension methods for owned vectors
-impl<T: Clone> CloneableVector<T> for ~[T] {
- #[inline]
- fn to_owned(&self) -> ~[T] { self.clone() }
-
- #[inline(always)]
- fn into_owned(self) -> ~[T] { self }
+ fn into_owned(self) -> Vec<T> { self.to_owned() }
}
/// Extension methods for vectors containing `Clone` elements.
}
-/// Extension methods for owned vectors.
-pub trait OwnedVector<T> {
- /// Creates a consuming iterator, that is, one that moves each
- /// value out of the vector (from start to end). The vector cannot
- /// be used after calling this.
- ///
- /// # Examples
- ///
- /// ```rust
- /// let v = ~["a".to_string(), "b".to_string()];
- /// for s in v.move_iter() {
- /// // s has type ~str, not &~str
- /// println!("{}", s);
- /// }
- /// ```
- fn move_iter(self) -> MoveItems<T>;
-
- /**
- * Partitions the vector into two vectors `(A,B)`, where all
- * elements of `A` satisfy `f` and all elements of `B` do not.
- */
- fn partition(self, f: |&T| -> bool) -> (Vec<T>, Vec<T>);
-}
-
-impl<T> OwnedVector<T> for ~[T] {
- #[inline]
- fn move_iter(self) -> MoveItems<T> {
- unsafe {
- let iter = transmute(self.iter());
- let ptr = transmute(self);
- MoveItems { allocation: ptr, iter: iter }
- }
- }
-
- #[inline]
- fn partition(self, f: |&T| -> bool) -> (Vec<T>, Vec<T>) {
- let mut lefts = Vec::new();
- let mut rights = Vec::new();
-
- for elt in self.move_iter() {
- if f(&elt) {
- lefts.push(elt);
- } else {
- rights.push(elt);
- }
- }
-
- (lefts, rights)
- }
-}
-
fn insertion_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
let len = v.len() as int;
let buf_v = v.as_mut_ptr();
* * start - The index into `src` to start copying from
 * * end - The index into `src` to stop copying from
*/
- fn move_from(self, src: ~[T], start: uint, end: uint) -> uint;
+ fn move_from(self, src: Vec<T>, start: uint, end: uint) -> uint;
}
impl<'a,T> MutableVectorAllocating<'a, T> for &'a mut [T] {
}
#[inline]
- fn move_from(self, mut src: ~[T], start: uint, end: uint) -> uint {
+ fn move_from(self, mut src: Vec<T>, start: uint, end: uint) -> uint {
for (a, b) in self.mut_iter().zip(src.mut_slice(start, end).mut_iter()) {
mem::swap(a, b);
}
pub use core::slice::raw::{shift_ptr, pop_ptr};
}
-/// An iterator that moves out of a vector.
-pub struct MoveItems<T> {
- allocation: *mut u8, // the block of memory allocated for the vector
- iter: Items<'static, T>
-}
-
-impl<T> Iterator<T> for MoveItems<T> {
- #[inline]
- fn next(&mut self) -> Option<T> {
- unsafe {
- self.iter.next().map(|x| ptr::read(x))
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) {
- self.iter.size_hint()
- }
-}
-
-impl<T> DoubleEndedIterator<T> for MoveItems<T> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- unsafe {
- self.iter.next_back().map(|x| ptr::read(x))
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for MoveItems<T> {
- fn drop(&mut self) {
- // destroy the remaining elements
- for _x in *self {}
- unsafe {
- // FIXME: #13994 (should pass align and size here)
- deallocate(self.allocation, 0, 8)
- }
- }
-}
-
#[cfg(test)]
mod tests {
use std::cell::Cell;
#[test]
fn test_get() {
- let mut a = box [11];
- assert_eq!(a.get(1), None);
- a = box [11, 12];
- assert_eq!(a.get(1).unwrap(), &12);
- a = box [11, 12, 13];
- assert_eq!(a.get(1).unwrap(), &12);
+ let mut a = vec![11];
+ assert_eq!(a.as_slice().get(1), None);
+ a = vec![11, 12];
+ assert_eq!(a.as_slice().get(1).unwrap(), &12);
+ a = vec![11, 12, 13];
+ assert_eq!(a.as_slice().get(1).unwrap(), &12);
}
#[test]
fn test_head() {
- let mut a = box [];
- assert_eq!(a.head(), None);
- a = box [11];
- assert_eq!(a.head().unwrap(), &11);
- a = box [11, 12];
- assert_eq!(a.head().unwrap(), &11);
+ let mut a = vec![];
+ assert_eq!(a.as_slice().head(), None);
+ a = vec![11];
+ assert_eq!(a.as_slice().head().unwrap(), &11);
+ a = vec![11, 12];
+ assert_eq!(a.as_slice().head().unwrap(), &11);
}
#[test]
fn test_tail() {
- let mut a = box [11];
+ let mut a = vec![11];
assert_eq!(a.tail(), &[]);
- a = box [11, 12];
+ a = vec![11, 12];
assert_eq!(a.tail(), &[12]);
}
#[test]
#[should_fail]
fn test_tail_empty() {
- let a: ~[int] = box [];
+ let a: Vec<int> = vec![];
a.tail();
}
#[test]
fn test_tailn() {
- let mut a = box [11, 12, 13];
+ let mut a = vec![11, 12, 13];
assert_eq!(a.tailn(0), &[11, 12, 13]);
- a = box [11, 12, 13];
+ a = vec![11, 12, 13];
assert_eq!(a.tailn(2), &[13]);
}
#[test]
#[should_fail]
fn test_tailn_empty() {
- let a: ~[int] = box [];
+ let a: Vec<int> = vec![];
a.tailn(2);
}
#[test]
fn test_init() {
- let mut a = box [11];
+ let mut a = vec![11];
assert_eq!(a.init(), &[]);
- a = box [11, 12];
+ a = vec![11, 12];
assert_eq!(a.init(), &[11]);
}
#[test]
#[should_fail]
fn test_init_empty() {
- let a: ~[int] = box [];
+ let a: Vec<int> = vec![];
a.init();
}
#[test]
fn test_initn() {
- let mut a = box [11, 12, 13];
- assert_eq!(a.initn(0), &[11, 12, 13]);
- a = box [11, 12, 13];
- assert_eq!(a.initn(2), &[11]);
+ let mut a = vec![11, 12, 13];
+ assert_eq!(a.as_slice().initn(0), &[11, 12, 13]);
+ a = vec![11, 12, 13];
+ assert_eq!(a.as_slice().initn(2), &[11]);
}
#[test]
#[should_fail]
fn test_initn_empty() {
- let a: ~[int] = box [];
- a.initn(2);
+ let a: Vec<int> = vec![];
+ a.as_slice().initn(2);
}
#[test]
fn test_last() {
- let mut a = box [];
- assert_eq!(a.last(), None);
- a = box [11];
- assert_eq!(a.last().unwrap(), &11);
- a = box [11, 12];
- assert_eq!(a.last().unwrap(), &12);
+ let mut a = vec![];
+ assert_eq!(a.as_slice().last(), None);
+ a = vec![11];
+ assert_eq!(a.as_slice().last().unwrap(), &11);
+ a = vec![11, 12];
+ assert_eq!(a.as_slice().last().unwrap(), &12);
}
#[test]
let vec_fixed = [1, 2, 3, 4];
let v_a = vec_fixed.slice(1u, vec_fixed.len()).to_owned();
assert_eq!(v_a.len(), 3u);
+ let v_a = v_a.as_slice();
assert_eq!(v_a[0], 2);
assert_eq!(v_a[1], 3);
assert_eq!(v_a[2], 4);
let vec_stack = &[1, 2, 3];
let v_b = vec_stack.slice(1u, 3u).to_owned();
assert_eq!(v_b.len(), 2u);
+ let v_b = v_b.as_slice();
assert_eq!(v_b[0], 2);
assert_eq!(v_b[1], 3);
// Test `Box<[T]>`
- let vec_unique = box [1, 2, 3, 4, 5, 6];
+ let vec_unique = vec![1, 2, 3, 4, 5, 6];
let v_d = vec_unique.slice(1u, 6u).to_owned();
assert_eq!(v_d.len(), 5u);
+ let v_d = v_d.as_slice();
assert_eq!(v_d[0], 2);
assert_eq!(v_d[1], 3);
assert_eq!(v_d[2], 4);
let (min_size, max_opt) = it.size_hint();
assert_eq!(min_size, 3*2);
assert_eq!(max_opt.unwrap(), 3*2);
- assert_eq!(it.next(), Some(box [1,2,3]));
- assert_eq!(it.next(), Some(box [1,3,2]));
- assert_eq!(it.next(), Some(box [3,1,2]));
+ assert_eq!(it.next(), Some(vec![1,2,3]));
+ assert_eq!(it.next(), Some(vec![1,3,2]));
+ assert_eq!(it.next(), Some(vec![3,1,2]));
let (min_size, max_opt) = it.size_hint();
assert_eq!(min_size, 3);
assert_eq!(max_opt.unwrap(), 3);
- assert_eq!(it.next(), Some(box [3,2,1]));
- assert_eq!(it.next(), Some(box [2,3,1]));
- assert_eq!(it.next(), Some(box [2,1,3]));
+ assert_eq!(it.next(), Some(vec![3,2,1]));
+ assert_eq!(it.next(), Some(vec![2,3,1]));
+ assert_eq!(it.next(), Some(vec![2,1,3]));
assert_eq!(it.next(), None);
}
{
fn test_position_elem() {
assert!([].position_elem(&1).is_none());
- let v1 = box [1, 2, 3, 3, 2, 5];
- assert_eq!(v1.position_elem(&1), Some(0u));
- assert_eq!(v1.position_elem(&2), Some(1u));
- assert_eq!(v1.position_elem(&5), Some(5u));
- assert!(v1.position_elem(&4).is_none());
+ let v1 = vec![1, 2, 3, 3, 2, 5];
+ assert_eq!(v1.as_slice().position_elem(&1), Some(0u));
+ assert_eq!(v1.as_slice().position_elem(&2), Some(1u));
+ assert_eq!(v1.as_slice().position_elem(&5), Some(5u));
+ assert!(v1.as_slice().position_elem(&4).is_none());
}
#[test]
#[test]
fn test_reverse() {
- let mut v: ~[int] = box [10, 20];
- assert_eq!(v[0], 10);
- assert_eq!(v[1], 20);
+ let mut v: Vec<int> = vec![10, 20];
+ assert_eq!(*v.get(0), 10);
+ assert_eq!(*v.get(1), 20);
v.reverse();
- assert_eq!(v[0], 20);
- assert_eq!(v[1], 10);
+ assert_eq!(*v.get(0), 20);
+ assert_eq!(*v.get(1), 10);
- let mut v3: ~[int] = box [];
+ let mut v3: Vec<int> = vec![];
v3.reverse();
assert!(v3.is_empty());
}
#[test]
fn test_partition() {
- assert_eq!((box []).partition(|x: &int| *x < 3), (vec![], vec![]));
- assert_eq!((box [1, 2, 3]).partition(|x: &int| *x < 4), (vec![1, 2, 3], vec![]));
- assert_eq!((box [1, 2, 3]).partition(|x: &int| *x < 2), (vec![1], vec![2, 3]));
- assert_eq!((box [1, 2, 3]).partition(|x: &int| *x < 0), (vec![], vec![1, 2, 3]));
+ assert_eq!((vec![]).partition(|x: &int| *x < 3), (vec![], vec![]));
+ assert_eq!((vec![1, 2, 3]).partition(|x: &int| *x < 4), (vec![1, 2, 3], vec![]));
+ assert_eq!((vec![1, 2, 3]).partition(|x: &int| *x < 2), (vec![1], vec![2, 3]));
+ assert_eq!((vec![1, 2, 3]).partition(|x: &int| *x < 0), (vec![], vec![1, 2, 3]));
}
#[test]
#[test]
fn test_concat() {
- let v: [~[int], ..0] = [];
+ let v: [Vec<int>, ..0] = [];
assert_eq!(v.concat_vec(), vec![]);
- assert_eq!([box [1], box [2,3]].concat_vec(), vec![1, 2, 3]);
+ assert_eq!([vec![1], vec![2,3]].concat_vec(), vec![1, 2, 3]);
assert_eq!([&[1], &[2,3]].concat_vec(), vec![1, 2, 3]);
}
#[test]
fn test_connect() {
- let v: [~[int], ..0] = [];
+ let v: [Vec<int>, ..0] = [];
assert_eq!(v.connect_vec(&0), vec![]);
- assert_eq!([box [1], box [2, 3]].connect_vec(&0), vec![1, 0, 2, 3]);
- assert_eq!([box [1], box [2], box [3]].connect_vec(&0), vec![1, 0, 2, 0, 3]);
+ assert_eq!([vec![1], vec![2, 3]].connect_vec(&0), vec![1, 0, 2, 3]);
+ assert_eq!([vec![1], vec![2], vec![3]].connect_vec(&0), vec![1, 0, 2, 0, 3]);
assert_eq!([&[1], &[2, 3]].connect_vec(&0), vec![1, 0, 2, 3]);
assert_eq!([&[1], &[2], &[3]].connect_vec(&0), vec![1, 0, 2, 0, 3]);
#[test]
fn test_move_iterator() {
- let xs = box [1u,2,3,4,5];
+ let xs = vec![1u,2,3,4,5];
assert_eq!(xs.move_iter().fold(0, |a: uint, b: uint| 10*a + b), 12345);
}
#[test]
fn test_move_rev_iterator() {
- let xs = box [1u,2,3,4,5];
+ let xs = vec![1u,2,3,4,5];
assert_eq!(xs.move_iter().rev().fold(0, |a: uint, b: uint| 10*a + b), 54321);
}
#[test]
fn test_move_from() {
let mut a = [1,2,3,4,5];
- let b = box [6,7,8];
+ let b = vec![6,7,8];
assert_eq!(a.move_from(b, 0, 3), 3);
assert!(a == [6,7,8,4,5]);
let mut a = [7,2,8,1];
- let b = box [3,1,4,1,5,9];
+ let b = vec![3,1,4,1,5,9];
assert_eq!(a.move_from(b, 0, 6), 4);
assert!(a == [3,1,4,1]);
let mut a = [1,2,3,4];
- let b = box [5,6,7,8,9,0];
+ let b = vec![5,6,7,8,9,0];
assert_eq!(a.move_from(b, 2, 3), 1);
assert!(a == [7,2,3,4]);
let mut a = [1,2,3,4,5];
- let b = box [5,6,7,8,9,0];
+ let b = vec![5,6,7,8,9,0];
assert_eq!(a.mut_slice(2,4).move_from(b,1,6), 2);
assert!(a == [1,2,6,7,5]);
}
assert_eq!(format!("{}", x.as_slice()), x_str);
})
)
- let empty: ~[int] = box [];
+ let empty: Vec<int> = vec![];
test_show_vec!(empty, "[]".to_string());
- test_show_vec!(box [1], "[1]".to_string());
- test_show_vec!(box [1, 2, 3], "[1, 2, 3]".to_string());
- test_show_vec!(box [box [], box [1u], box [1u, 1u]],
+ test_show_vec!(vec![1], "[1]".to_string());
+ test_show_vec!(vec![1, 2, 3], "[1, 2, 3]".to_string());
+ test_show_vec!(vec![vec![], vec![1u], vec![1u, 1u]],
"[[], [1], [1, 1]]".to_string());
let empty_mut: &mut [int] = &mut[];
);
t!(&[int]);
- t!(~[int]);
t!(Vec<int>);
}
});
}
- #[bench]
- fn zero_1kb_fixed_repeat(b: &mut Bencher) {
- b.iter(|| {
- box [0u8, ..1024]
- });
- }
-
#[bench]
fn zero_1kb_loop_set(b: &mut Bencher) {
b.iter(|| {
use str::StrAllocating;
unsafe {
- let a = ~[65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8];
+ let a = vec![65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8];
let b = a.as_ptr();
let c = from_buf_len(b, 3u);
assert_eq!(c, "AAA".to_string());
assert!(half_a_million_letter_a() ==
unsafe {raw::slice_bytes(letters.as_slice(),
0u,
- 500000)}.to_owned());
+ 500000)}.to_string());
}
#[test]
assert_eq!("", data.slice(3, 3));
assert_eq!("华", data.slice(30, 33));
- fn a_million_letter_X() -> String {
+ fn a_million_letter_x() -> String {
let mut i = 0;
let mut rs = String::new();
while i < 100000 {
}
rs
}
- fn half_a_million_letter_X() -> String {
+ fn half_a_million_letter_x() -> String {
let mut i = 0;
let mut rs = String::new();
while i < 100000 {
}
rs
}
- let letters = a_million_letter_X();
- assert!(half_a_million_letter_X() ==
- letters.as_slice().slice(0u, 3u * 500000u).to_owned());
+ let letters = a_million_letter_x();
+ assert!(half_a_million_letter_x() ==
+ letters.as_slice().slice(0u, 3u * 500000u).to_string());
}
#[test]
#[test]
fn test_raw_from_c_str() {
unsafe {
- let a = box [65, 65, 65, 65, 65, 65, 65, 0];
+ let a = vec![65, 65, 65, 65, 65, 65, 65, 0];
let b = a.as_ptr();
let c = raw::from_c_str(b);
assert_eq!(c, "AAAAAAA".to_string());
#[test]
fn test_char_at() {
let s = "ศไทย中华Việt Nam";
- let v = box ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
+ let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
let mut pos = 0;
for ch in v.iter() {
assert!(s.char_at(pos) == *ch);
#[test]
fn test_char_at_reverse() {
let s = "ศไทย中华Việt Nam";
- let v = box ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
+ let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
let mut pos = s.len();
for ch in v.iter().rev() {
assert!(s.char_at_reverse(pos) == *ch);
#[test]
fn test_iterator() {
let s = "ศไทย中华Việt Nam";
- let v = box ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
+ let v = ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
let mut pos = 0;
let mut it = s.chars();
#[test]
fn test_rev_iterator() {
let s = "ศไทย中华Việt Nam";
- let v = box ['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ'];
+ let v = ['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ'];
let mut pos = 0;
let mut it = s.chars().rev();
use core::prelude::*;
use alloc::heap::{allocate, reallocate, deallocate};
-use RawVec = core::raw::Vec;
use core::raw::Slice;
use core::cmp::max;
use core::default::Default;
use core::uint;
use {Collection, Mutable};
-use slice::{MutableOrdVector, OwnedVector, MutableVectorAllocating};
+use slice::{MutableOrdVector, MutableVectorAllocating, CloneableVector};
use slice::{Items, MutItems};
/// An owned, growable vector.
impl<T: Eq> Eq for Vec<T> {}
+impl<T: PartialEq, V: Vector<T>> Equiv<V> for Vec<T> {
+ #[inline]
+ fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() }
+}
+
impl<T: Ord> Ord for Vec<T> {
#[inline]
fn cmp(&self, other: &Vec<T>) -> Ordering {
}
}
+impl<T: Clone> CloneableVector<T> for Vec<T> {
+ fn to_owned(&self) -> Vec<T> { self.clone() }
+ fn into_owned(self) -> Vec<T> { self }
+}
+
// FIXME: #13996: need a way to mark the return value as `noalias`
#[inline(never)]
unsafe fn alloc_or_realloc<T>(ptr: *mut T, size: uint, old_size: uint) -> *mut T {
(ts, us)
}
-/// Mechanism to convert from a `Vec<T>` to a `[T]`.
-///
-/// In a post-DST world this will be used to convert to any `Ptr<[T]>`.
-///
-/// This could be implemented on more types than just pointers to vectors, but
-/// the recommended approach for those types is to implement `FromIterator`.
-// FIXME(#12938): Update doc comment when DST lands
-pub trait FromVec<T> {
- /// Convert a `Vec<T>` into the receiver type.
- fn from_vec(v: Vec<T>) -> Self;
-}
-
-impl<T> FromVec<T> for ~[T] {
- fn from_vec(mut v: Vec<T>) -> ~[T] {
- let len = v.len();
- let data_size = len.checked_mul(&mem::size_of::<T>());
- let data_size = data_size.expect("overflow in from_vec()");
- let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
- let size = size.expect("overflow in from_vec()");
-
- // In a post-DST world, we can attempt to reuse the Vec allocation by calling
- // shrink_to_fit() on it. That may involve a reallocation+memcpy, but that's no
- // different than what we're doing manually here.
-
- let vp = v.as_mut_ptr();
-
- unsafe {
- let ret = allocate(size, 8) as *mut RawVec<()>;
-
- let a_size = mem::size_of::<T>();
- let a_size = if a_size == 0 {1} else {a_size};
- (*ret).fill = len * a_size;
- (*ret).alloc = len * a_size;
-
- ptr::copy_nonoverlapping_memory(&mut (*ret).data as *mut _ as *mut u8,
- vp as *u8, data_size);
-
- // we've transferred ownership of the contents from v, but we can't drop it
- // as it still needs to free its own allocation.
- v.set_len(0);
-
- mem::transmute(ret)
- }
- }
-}
-
/// Unsafe operations
pub mod raw {
use super::Vec;
mod tests {
use std::prelude::*;
use std::mem::size_of;
- use std::kinds::marker;
- use super::{unzip, raw, FromVec, Vec};
+ use super::{unzip, raw, Vec};
#[test]
fn test_small_vec_struct() {
assert_eq!(b, vec![1, 2, 3]);
// Test on-heap copy-from-buf.
- let c = box [1, 2, 3, 4, 5];
+ let c = vec![1, 2, 3, 4, 5];
let ptr = c.as_ptr();
let d = raw::from_buf(ptr, 5u);
assert_eq!(d, vec![1, 2, 3, 4, 5]);
}
}
- #[test]
- fn test_from_vec() {
- let a = vec![1u, 2, 3];
- let b: ~[uint] = FromVec::from_vec(a);
- assert_eq!(b.as_slice(), &[1u, 2, 3]);
-
- let a = vec![];
- let b: ~[u8] = FromVec::from_vec(a);
- assert_eq!(b.as_slice(), &[]);
-
- let a = vec!["one".to_string(), "two".to_string()];
- let b: ~[String] = FromVec::from_vec(a);
- assert_eq!(b.as_slice(), &["one".to_string(), "two".to_string()]);
-
- struct Foo {
- x: uint,
- nocopy: marker::NoCopy
- }
-
- let a = vec![Foo{x: 42, nocopy: marker::NoCopy}, Foo{x: 84, nocopy: marker::NoCopy}];
- let b: ~[Foo] = FromVec::from_vec(a);
- assert_eq!(b.len(), 2);
- assert_eq!(b[0].x, 42);
- assert_eq!(b[1].x, 84);
- }
-
#[test]
fn test_vec_truncate_drop() {
static mut drops: uint = 0;
///
/// # Examples
///
- /// ```ignore
- /// # // FIXME: Needs PR #12430
- /// extern crate sync;
- ///
- /// use sync::Arc;
+ /// ```rust
+ /// use std::sync::Arc;
/// use std::sync::atomics::{AtomicBool, SeqCst};
+ /// use std::task::deschedule;
///
/// fn main() {
/// let spinlock = Arc::new(AtomicBool::new(false));
- /// let spinlock_clone = spin_lock.clone();
+ /// let spinlock_clone = spinlock.clone();
///
/// spawn(proc() {
/// with_lock(&spinlock, || println!("task 1 in lock"));
/// f();
///
/// // Release the lock
- /// spinlock.store(false);
+ /// spinlock.store(false, SeqCst);
/// }
/// ```
#[inline]
}
}
-impl<T> Clone for @T {
- /// Return a shallow copy of the managed box.
- #[inline]
- fn clone(&self) -> @T { *self }
-}
-
impl<'a, T> Clone for &'a T {
/// Return a shallow copy of the reference.
#[inline]
mod test {
use prelude::*;
use realstd::owned::Box;
+ use realstd::gc::{Gc, GC};
fn realclone<T: ::realstd::clone::Clone>(t: &T) -> T {
use realstd::clone::Clone;
#[test]
fn test_managed_clone() {
- let a = @5i;
- let b: @int = a.clone();
- assert_eq!(a, b);
+ let a = box(GC) 5i;
+ let b: Gc<int> = realclone(&a);
+ assert!(a == b);
}
#[test]
fn test_fn_a() -> f64 { 1.0 }
fn test_fn_b<T: Empty>(x: T) -> T { x }
- fn test_fn_c(_: int, _: f64, _: ~[int], _: int, _: int, _: int) {}
+ fn test_fn_c(_: int, _: f64, _: int, _: int, _: int) {}
let _ = test_fn_a.clone();
let _ = test_fn_b::<int>.clone();
fn cmp(&self, other: &&'a mut T) -> Ordering { (**self).cmp(*other) }
}
impl<'a, T: Eq> Eq for &'a mut T {}
-
- // @ pointers
- impl<T:PartialEq> PartialEq for @T {
- #[inline]
- fn eq(&self, other: &@T) -> bool { *(*self) == *(*other) }
- #[inline]
- fn ne(&self, other: &@T) -> bool { *(*self) != *(*other) }
- }
- impl<T:PartialOrd> PartialOrd for @T {
- #[inline]
- fn lt(&self, other: &@T) -> bool { *(*self) < *(*other) }
- #[inline]
- fn le(&self, other: &@T) -> bool { *(*self) <= *(*other) }
- #[inline]
- fn ge(&self, other: &@T) -> bool { *(*self) >= *(*other) }
- #[inline]
- fn gt(&self, other: &@T) -> bool { *(*self) > *(*other) }
- }
- impl<T: Ord> Ord for @T {
- #[inline]
- fn cmp(&self, other: &@T) -> Ordering { (**self).cmp(*other) }
- }
- impl<T: Eq> Eq for @T {}
}
#[cfg(test)]
default_impl!(f32, 0.0f32)
default_impl!(f64, 0.0f64)
-
-impl<T: Default + 'static> Default for @T {
- fn default() -> @T { @Default::default() }
-}
#![allow(dead_code, missing_doc)]
use fmt;
-use intrinsics;
+#[cfg(not(test))] use intrinsics;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="fail_"]
// Implementations of the core formatting traits
-impl<T: Show> Show for @T {
- fn fmt(&self, f: &mut Formatter) -> Result { secret_show(&**self, f) }
-}
impl<'a, T: Show> Show for &'a T {
fn fmt(&self, f: &mut Formatter) -> Result { secret_show(*self, f) }
}
}
}
-impl<T: Show> Show for ~[T] {
- fn fmt(&self, f: &mut Formatter) -> Result {
- secret_show(&self.as_slice(), f)
- }
-}
-
impl Show for () {
fn fmt(&self, f: &mut Formatter) -> Result {
f.pad("()")
fn visit_char(&mut self) -> bool;
+ #[cfg(stage0)]
fn visit_estr_box(&mut self) -> bool;
+ #[cfg(stage0)]
fn visit_estr_uniq(&mut self) -> bool;
fn visit_estr_slice(&mut self) -> bool;
fn visit_estr_fixed(&mut self, n: uint, sz: uint, align: uint) -> bool;
fn visit_ptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
fn visit_rptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
+ #[cfg(stage0)]
fn visit_evec_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
+ #[cfg(stage0)]
fn visit_evec_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
translated to the `loop` below.
```rust
-let values = ~[1, 2, 3];
+let values = vec![1, 2, 3];
// "Syntactical sugar" taking advantage of an iterator
for &x in values.iter() {
/// }
/// sum
/// }
- /// let x = ~[1,2,3,7,8,9];
+ /// let x = vec![1,2,3,7,8,9];
/// assert_eq!(process(x.move_iter()), 1006);
/// ```
#[inline]
#[test]
fn test_iterator_peekable() {
- let xs = box [0u, 1, 2, 3, 4, 5];
+ let xs = vec![0u, 1, 2, 3, 4, 5];
let mut it = xs.iter().map(|&x|x).peekable();
assert_eq!(it.peek().unwrap(), &0);
assert_eq!(it.next().unwrap(), 0);
#[test]
fn test_double_ended_chain() {
let xs = [1, 2, 3, 4, 5];
- let ys = box [7, 9, 11];
+ let ys = [7, 9, 11];
let mut it = xs.iter().chain(ys.iter()).rev();
assert_eq!(it.next().unwrap(), &11)
assert_eq!(it.next().unwrap(), &9)
fn test_rposition() {
fn f(xy: &(int, char)) -> bool { let (_x, y) = *xy; y == 'b' }
fn g(xy: &(int, char)) -> bool { let (_x, y) = *xy; y == 'd' }
- let v = box [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')];
+ let v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')];
assert_eq!(v.iter().rposition(f), Some(3u));
assert!(v.iter().rposition(g).is_none());
#[test]
fn test_random_access_chain() {
let xs = [1, 2, 3, 4, 5];
- let ys = box [7, 9, 11];
+ let ys = [7, 9, 11];
let mut it = xs.iter().chain(ys.iter());
assert_eq!(it.idx(0).unwrap(), &1);
assert_eq!(it.idx(5).unwrap(), &7);
}
#[test]
- fn test_MinMaxResult() {
+ fn test_min_max_result() {
let r: MinMaxResult<int> = NoElements;
assert_eq!(r.into_option(), None)
pub mod tuple;
pub mod fmt;
-// FIXME: this module should not exist. Once owned allocations are no longer a
-// language type, this module can move outside to the owned allocation
-// crate.
-mod should_not_exist;
-
#[doc(hidden)]
mod core {
pub use failure;
format_args_method!($dst, write_fmt, $($arg)*)
})
)
+
+#[macro_export]
+macro_rules! unreachable( () => (fail!("unreachable code")) )
}
}
- fn R(i: Rc<RefCell<int>>) -> R {
+ fn r(i: Rc<RefCell<int>>) -> R {
R {
i: i
}
let i = Rc::new(RefCell::new(0));
{
- let x = R(realclone(&i));
+ let x = r(realclone(&i));
let opt = Some(x);
let _y = opt.unwrap();
}
}
#[cfg(test)]
-pub mod ptr_tests {
+#[allow(deprecated, experimental)]
+pub mod test {
use super::*;
use prelude::*;
use libc;
use realstd::str;
use realstd::str::Str;
+ use realstd::vec::Vec;
+ use realstd::collections::Collection;
use slice::{ImmutableVector, MutableVector};
#[test]
assert_eq!(p.fst, 50);
assert_eq!(p.snd, 60);
- let v0 = box [32000u16, 32001u16, 32002u16];
- let mut v1 = box [0u16, 0u16, 0u16];
+ let v0 = vec![32000u16, 32001u16, 32002u16];
+ let mut v1 = vec![0u16, 0u16, 0u16];
copy_memory(v1.as_mut_ptr().offset(1),
v0.as_ptr().offset(1), 1);
- assert!((v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16));
+ assert!((*v1.get(0) == 0u16 &&
+ *v1.get(1) == 32001u16 &&
+ *v1.get(2) == 0u16));
copy_memory(v1.as_mut_ptr(),
v0.as_ptr().offset(2), 1);
- assert!((v1[0] == 32002u16 && v1[1] == 32001u16 &&
- v1[2] == 0u16));
+ assert!((*v1.get(0) == 32002u16 &&
+ *v1.get(1) == 32001u16 &&
+ *v1.get(2) == 0u16));
copy_memory(v1.as_mut_ptr().offset(2),
v0.as_ptr(), 1u);
- assert!((v1[0] == 32002u16 && v1[1] == 32001u16 &&
- v1[2] == 32000u16));
+ assert!((*v1.get(0) == 32002u16 &&
+ *v1.get(1) == 32001u16 &&
+ *v1.get(2) == 32000u16));
}
}
"hello".with_c_str(|p0| {
"there".with_c_str(|p1| {
"thing".with_c_str(|p2| {
- let v = box [p0, p1, p2, null()];
+ let v = vec![p0, p1, p2, null()];
unsafe {
assert_eq!(buf_len(v.as_ptr()), 3u);
}
#[test]
fn test_ptr_addition() {
unsafe {
- let xs = box [5, ..16];
+ let xs = Vec::from_elem(16, 5);
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
ptr = ptr.offset(1);
}
- let mut xs_mut = xs.clone();
+ let mut xs_mut = xs;
let mut m_ptr = xs_mut.as_mut_ptr();
let m_end = m_ptr.offset(16);
m_ptr = m_ptr.offset(1);
}
- assert_eq!(xs_mut, box [10, ..16]);
+ assert!(xs_mut == Vec::from_elem(16, 10));
}
}
#[test]
fn test_ptr_subtraction() {
unsafe {
- let xs = box [0,1,2,3,4,5,6,7,8,9];
+ let xs = vec![0,1,2,3,4,5,6,7,8,9];
let mut idx = 9i8;
let ptr = xs.as_ptr();
idx = idx - 1i8;
}
- let mut xs_mut = xs.clone();
+ let mut xs_mut = xs;
let m_start = xs_mut.as_mut_ptr();
let mut m_ptr = m_start.offset(9);
m_ptr = m_ptr.offset(-1);
}
- assert_eq!(xs_mut, box [0,2,4,6,8,10,12,14,16,18]);
+ assert!(xs_mut == vec![0,2,4,6,8,10,12,14,16,18]);
}
}
let one = "oneOne".to_c_str();
let two = "twoTwo".to_c_str();
let three = "threeThree".to_c_str();
- let arr = box [
+ let arr = vec![
one.with_ref(|buf| buf),
two.with_ref(|buf| buf),
- three.with_ref(|buf| buf),
+ three.with_ref(|buf| buf)
];
let expected_arr = [
one, two, three
let one = "oneOne".to_c_str();
let two = "twoTwo".to_c_str();
let three = "threeThree".to_c_str();
- let arr = box [
+ let arr = vec![
one.with_ref(|buf| buf),
two.with_ref(|buf| buf),
three.with_ref(|buf| buf),
// fake a null terminator
- null(),
+ null()
];
let expected_arr = [
one, two, three
pub data: T,
}
-/// The representation of a Rust vector
-pub struct Vec<T> {
- pub fill: uint,
- pub alloc: uint,
- pub data: T,
-}
-
-/// The representation of a Rust string
-pub type String = Vec<u8>;
-
/// The representation of a Rust slice
pub struct Slice<T> {
pub data: *T,
impl<'a, T> Repr<Slice<T>> for &'a [T] {}
impl<'a> Repr<Slice<u8>> for &'a str {}
-impl<T> Repr<*Box<T>> for @T {}
-impl<T> Repr<*Vec<T>> for ~[T] {}
#[cfg(test)]
mod tests {
}
}
- impl<T:PartialEq> PartialEq for ~[T] {
- #[inline]
- fn eq(&self, other: &~[T]) -> bool { self.as_slice() == *other }
- #[inline]
- fn ne(&self, other: &~[T]) -> bool { !self.eq(other) }
- }
-
impl<'a,T:Eq> Eq for &'a [T] {}
- impl<T:Eq> Eq for ~[T] {}
-
impl<'a,T:PartialEq, V: Vector<T>> Equiv<V> for &'a [T] {
#[inline]
fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() }
}
- impl<'a,T:PartialEq, V: Vector<T>> Equiv<V> for ~[T] {
- #[inline]
- fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() }
- }
-
impl<'a,T:Ord> Ord for &'a [T] {
fn cmp(&self, other: & &'a [T]) -> Ordering {
order::cmp(self.iter(), other.iter())
}
}
- impl<T: Ord> Ord for ~[T] {
- #[inline]
- fn cmp(&self, other: &~[T]) -> Ordering { self.as_slice().cmp(&other.as_slice()) }
- }
-
impl<'a, T: PartialOrd> PartialOrd for &'a [T] {
fn lt(&self, other: & &'a [T]) -> bool {
order::lt(self.iter(), other.iter())
order::gt(self.iter(), other.iter())
}
}
-
- impl<T: PartialOrd> PartialOrd for ~[T] {
- #[inline]
- fn lt(&self, other: &~[T]) -> bool { self.as_slice() < other.as_slice() }
- #[inline]
- fn le(&self, other: &~[T]) -> bool { self.as_slice() <= other.as_slice() }
- #[inline]
- fn ge(&self, other: &~[T]) -> bool { self.as_slice() >= other.as_slice() }
- #[inline]
- fn gt(&self, other: &~[T]) -> bool { self.as_slice() > other.as_slice() }
- }
}
#[cfg(test)]
fn as_slice<'a>(&'a self) -> &'a [T] { *self }
}
-impl<T> Vector<T> for ~[T] {
- #[inline(always)]
- fn as_slice<'a>(&'a self) -> &'a [T] { let v: &'a [T] = *self; v }
-}
-
impl<'a, T> Collection for &'a [T] {
/// Returns the length of a vector
#[inline]
}
}
-impl<T> Collection for ~[T] {
- /// Returns the length of a vector
- #[inline]
- fn len(&self) -> uint {
- self.as_slice().len()
- }
-}
-
/// Extension methods for vectors
pub trait ImmutableVector<'a, T> {
/**
/// # Example
///
/// ```rust
- /// let mut v = ~["foo".to_string(), "bar".to_string(), "baz".to_string()];
+ /// let mut v = ["foo".to_string(), "bar".to_string(), "baz".to_string()];
///
/// unsafe {
/// // `"baz".to_string()` is deallocated.
impl<'a, T> Default for &'a [T] {
fn default() -> &'a [T] { &[] }
}
-
-impl<T> Default for ~[T] {
- fn default() -> ~[T] { ~[] }
-}
true
}
+ #[cfg(stage0)]
fn visit_estr_box(&mut self) -> bool {
true
}
+ #[cfg(stage0)]
fn visit_estr_uniq(&mut self) -> bool {
- self.align_to::<~str>();
- if ! self.inner.visit_estr_uniq() { return false; }
- self.bump_past::<~str>();
- true
+ false
}
fn visit_estr_slice(&mut self) -> bool {
true
}
+ #[cfg(stage0)]
fn visit_evec_box(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool {
true
}
- fn visit_evec_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
- self.align_to::<~[u8]>();
- if ! self.inner.visit_evec_uniq(mtbl, inner) { return false; }
- self.bump_past::<~[u8]>();
- true
+ #[cfg(stage0)]
+ fn visit_evec_uniq(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool {
+ false
}
fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
true
}
- pub fn write_unboxed_vec_repr(&mut self, _: uint, v: &raw::Vec<()>, inner: *TyDesc) -> bool {
- self.write_vec_range(&v.data, v.fill, inner)
- }
-
fn write_escaped_char(&mut self, ch: char, is_str: bool) -> bool {
try!(self, match ch {
'\t' => self.writer.write("\\t".as_bytes()),
})
}
+ #[cfg(stage0)]
fn visit_estr_box(&mut self) -> bool {
- true
+ false
}
+ #[cfg(stage0)]
fn visit_estr_uniq(&mut self) -> bool {
- self.get::<~str>(|this, s| {
- try!(this, this.writer.write(['~' as u8]));
- this.write_escaped_slice(*s)
- })
+ false
}
fn visit_estr_slice(&mut self) -> bool {
})
}
- fn visit_evec_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
- self.get::<&raw::Box<raw::Vec<()>>>(|this, b| {
- try!(this, this.writer.write(['@' as u8]));
- this.write_mut_qualifier(mtbl);
- this.write_unboxed_vec_repr(mtbl, &b.data, inner)
- })
+ #[cfg(stage0)]
+ fn visit_evec_box(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool {
+ true
}
- fn visit_evec_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
- self.get::<&raw::Vec<()>>(|this, b| {
- try!(this, this.writer.write("box ".as_bytes()));
- this.write_unboxed_vec_repr(mtbl, *b, inner)
- })
+ #[cfg(stage0)]
+ fn visit_evec_uniq(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool {
+ true
}
fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![feature(phase)]
-#![deny(deprecated_owned_vector)]
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate log;
#[cfg(test, not(stage0))] #[phase(plugin, link)] extern crate log;
use std::rand::Rng;
#[test]
- #[allow(deprecated_owned_vector)]
fn test_flate_round_trip() {
let mut r = rand::task_rng();
let mut words = vec!();
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
-#![deny(deprecated_owned_vector)]
#![feature(plugin_registrar, managed_boxes)]
extern crate syntax;
html_playground_url = "http://play.rust-lang.org/")]
#![feature(globs, phase)]
#![deny(missing_doc)]
-#![deny(deprecated_owned_vector)]
#[cfg(test)] extern crate debug;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate log;
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
-#![deny(deprecated_owned_vector)]
use std::cell::Cell;
use std::{cmp, os, path};
// of the contents of `Vec<T>`, since we anticipate that to be a
// frequent way to dynamically construct a vector.
-/// MaybeOwnedVector<'a,T> abstracts over `Vec<T>`, `~[T]`, `&'a [T]`.
+/// MaybeOwnedVector<'a,T> abstracts over `Vec<T>` and `&'a [T]`.
///
/// Some clients will have a pre-allocated vector ready to hand off in
/// a slice; others will want to create the set on the fly and hand
-/// off ownership, via either `Growable` or `FixedLen` depending on
-/// which kind of vector they have constructed. (The `FixedLen`
-/// variant is provided for interoperability with `std::slice` methods
-/// that return `~[T]`.)
+/// off ownership, via `Growable`.
pub enum MaybeOwnedVector<'a,T> {
Growable(Vec<T>),
- FixedLen(~[T]),
Borrowed(&'a [T]),
}
fn into_maybe_owned(self) -> MaybeOwnedVector<'a,T> { Growable(self) }
}
-impl<'a,T> IntoMaybeOwnedVector<'a,T> for ~[T] {
- #[inline]
- fn into_maybe_owned(self) -> MaybeOwnedVector<'a,T> { FixedLen(self) }
-}
-
impl<'a,T> IntoMaybeOwnedVector<'a,T> for &'a [T] {
#[inline]
fn into_maybe_owned(self) -> MaybeOwnedVector<'a,T> { Borrowed(self) }
pub fn iter(&'a self) -> slice::Items<'a,T> {
match self {
&Growable(ref v) => v.iter(),
- &FixedLen(ref v) => v.iter(),
&Borrowed(ref v) => v.iter(),
}
}
fn as_slice<'a>(&'a self) -> &'a [T] {
match self {
&Growable(ref v) => v.as_slice(),
- &FixedLen(ref v) => v.as_slice(),
&Borrowed(ref v) => v.as_slice(),
}
}
impl<'a,T:Clone> CloneableVector<T> for MaybeOwnedVector<'a,T> {
/// Returns a copy of `self`.
- fn to_owned(&self) -> ~[T] {
+ fn to_owned(&self) -> Vec<T> {
self.as_slice().to_owned()
}
/// Convert `self` into an owned slice, not making a copy if possible.
- fn into_owned(self) -> ~[T] {
+ fn into_owned(self) -> Vec<T> {
match self {
Growable(v) => v.as_slice().to_owned(),
- FixedLen(v) => v,
Borrowed(v) => v.to_owned(),
}
}
pub fn into_vec(self) -> Vec<T> {
match self {
Growable(v) => v,
- FixedLen(v) => Vec::from_slice(v.as_slice()),
Borrowed(v) => Vec::from_slice(v),
}
}
type Registers = [uint, ..22];
#[cfg(windows, target_arch = "x86_64")]
-fn new_regs() -> Box<Registers> { box [0, .. 34] }
+fn new_regs() -> Box<Registers> { box() ([0, .. 34]) }
#[cfg(not(windows), target_arch = "x86_64")]
-fn new_regs() -> Box<Registers> { box {let v = [0, .. 22]; v} }
+fn new_regs() -> Box<Registers> { box() ([0, .. 22]) }
#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
// NB this does *not* include globs, please keep it that way.
#![feature(macro_rules, phase)]
#![allow(visible_private_types)]
-#![deny(deprecated_owned_vector)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate rustuv;
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
-
-#![deny(deprecated_owned_vector)]
#![feature(plugin_registrar, managed_boxes)]
extern crate syntax;
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules)]
-#![deny(missing_doc, deprecated_owned_vector)]
-
-extern crate sync;
+#![deny(missing_doc)]
use std::fmt;
use std::io::LineBufferedWriter;
use std::os;
use std::rt;
use std::slice;
-
-use sync::one::{Once, ONCE_INIT};
+use std::sync::{Once, ONCE_INIT};
use directive::LOG_LEVEL_NAMES;
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
-#![deny(deprecated_owned_vector)]
-
extern crate rand;
pub use bigint::{BigInt, BigUint};
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, phase)]
-#![deny(missing_doc, deprecated_owned_vector)]
+#![deny(missing_doc)]
#[cfg(test)]
extern crate stdtest = "test";
}
unsafe fn configure_llvm(sess: &Session) {
- use sync::one::{Once, ONCE_INIT};
+ use std::sync::{Once, ONCE_INIT};
static mut INIT: Once = ONCE_INIT;
// Copy what clang does by turning on loop vectorization at O2 and
block: ast::P<ast::Block>,
mut out: W) -> io::IoResult<()> {
let ty_cx = &analysis.ty_cx;
- let cfg = cfg::CFG::new(ty_cx, block);
+ let cfg = cfg::CFG::new(ty_cx, &*block);
let lcfg = LabelledCFG { ast_map: &ty_cx.map,
cfg: &cfg,
name: format!("block{}", block.id).to_string(), };
pub fn main_args(args: &[String]) -> int {
let owned_args = args.to_owned();
- monitor(proc() run_compiler(owned_args));
+ monitor(proc() run_compiler(owned_args.as_slice()));
0
}
use syntax::{ast, fold, attr};
use syntax::codemap;
+use std::gc::Gc;
+
struct Context<'a> {
in_cfg: |attrs: &[ast::Attribute]|: 'a -> bool,
}
fn fold_item_underscore(&mut self, item: &ast::Item_) -> ast::Item_ {
fold_item_underscore(self, item)
}
- fn fold_expr(&mut self, expr: @ast::Expr) -> @ast::Expr {
+ fn fold_expr(&mut self, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
fold_expr(self, expr)
}
}
}
fn fold_mod(cx: &mut Context, m: &ast::Mod) -> ast::Mod {
- let filtered_items: Vec<&@ast::Item> = m.items.iter()
- .filter(|&a| item_in_cfg(cx, *a))
+ let filtered_items: Vec<&Gc<ast::Item>> = m.items.iter()
+ .filter(|a| item_in_cfg(cx, &***a))
.collect();
let flattened_items = filtered_items.move_iter()
.flat_map(|&x| cx.fold_item(x).move_iter())
}
}
-fn filter_foreign_item(cx: &mut Context, item: @ast::ForeignItem)
- -> Option<@ast::ForeignItem> {
- if foreign_item_in_cfg(cx, item) {
+fn filter_foreign_item(cx: &mut Context, item: Gc<ast::ForeignItem>)
+ -> Option<Gc<ast::ForeignItem>> {
+ if foreign_item_in_cfg(cx, &*item) {
Some(item)
} else {
None
fn fold_item_underscore(cx: &mut Context, item: &ast::Item_) -> ast::Item_ {
let item = match *item {
ast::ItemImpl(ref a, ref b, c, ref methods) => {
- let methods = methods.iter().filter(|m| method_in_cfg(cx, **m))
+ let methods = methods.iter().filter(|m| method_in_cfg(cx, &***m))
.map(|x| *x).collect();
ast::ItemImpl((*a).clone(), (*b).clone(), c, methods)
}
.collect();
ast::ItemTrait((*a).clone(), b, (*c).clone(), methods)
}
- ast::ItemStruct(def, ref generics) => {
- ast::ItemStruct(fold_struct(cx, def), generics.clone())
+ ast::ItemStruct(ref def, ref generics) => {
+ ast::ItemStruct(fold_struct(cx, &**def), generics.clone())
}
ast::ItemEnum(ref def, ref generics) => {
let mut variants = def.variants.iter().map(|c| c.clone()).
} else {
Some(match v.node.kind {
ast::TupleVariantKind(..) => v,
- ast::StructVariantKind(def) => {
- let def = fold_struct(cx, def);
- @codemap::Spanned {
+ ast::StructVariantKind(ref def) => {
+ let def = fold_struct(cx, &**def);
+ box(GC) codemap::Spanned {
node: ast::Variant_ {
- kind: ast::StructVariantKind(def),
+ kind: ast::StructVariantKind(def.clone()),
..v.node.clone()
},
..*v
fold::noop_fold_item_underscore(&item, cx)
}
-fn fold_struct(cx: &mut Context, def: &ast::StructDef) -> @ast::StructDef {
+fn fold_struct(cx: &mut Context, def: &ast::StructDef) -> Gc<ast::StructDef> {
let mut fields = def.fields.iter().map(|c| c.clone()).filter(|m| {
(cx.in_cfg)(m.node.attrs.as_slice())
});
- @ast::StructDef {
+ box(GC) ast::StructDef {
fields: fields.collect(),
ctor_id: def.ctor_id,
super_struct: def.super_struct.clone(),
}
}
-fn retain_stmt(cx: &mut Context, stmt: @ast::Stmt) -> bool {
+fn retain_stmt(cx: &mut Context, stmt: Gc<ast::Stmt>) -> bool {
match stmt.node {
ast::StmtDecl(decl, _) => {
match decl.node {
- ast::DeclItem(item) => {
- item_in_cfg(cx, item)
+ ast::DeclItem(ref item) => {
+ item_in_cfg(cx, &**item)
}
_ => true
}
}
fn fold_block(cx: &mut Context, b: ast::P<ast::Block>) -> ast::P<ast::Block> {
- let resulting_stmts: Vec<&@ast::Stmt> =
+ let resulting_stmts: Vec<&Gc<ast::Stmt>> =
b.stmts.iter().filter(|&a| retain_stmt(cx, *a)).collect();
let resulting_stmts = resulting_stmts.move_iter()
- .flat_map(|&stmt| cx.fold_stmt(stmt).move_iter())
+ .flat_map(|stmt| cx.fold_stmt(&**stmt).move_iter())
.collect();
let filtered_view_items = b.view_items.iter().filter_map(|a| {
filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
})
}
-fn fold_expr(cx: &mut Context, expr: @ast::Expr) -> @ast::Expr {
+fn fold_expr(cx: &mut Context, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
let expr = match expr.node {
ast::ExprMatch(ref m, ref arms) => {
let arms = arms.iter()
.filter(|a| (cx.in_cfg)(a.attrs.as_slice()))
.map(|a| a.clone())
.collect();
- @ast::Expr {
+ box(GC) ast::Expr {
id: expr.id,
span: expr.span.clone(),
node: ast::ExprMatch(m.clone(), arms),
// Determine if an item should be translated in the current crate
// configuration based on the item's attributes
-fn in_cfg(cfg: &[@ast::MetaItem], attrs: &[ast::Attribute]) -> bool {
+fn in_cfg(cfg: &[Gc<ast::MetaItem>], attrs: &[ast::Attribute]) -> bool {
attr::test_cfg(cfg, attrs.iter().map(|x| *x))
}
use syntax::util::small_vector::SmallVector;
use std::mem;
+use std::gc::Gc;
pub static VERSION: &'static str = "0.11.0-pre";
krate
}
- fn fold_item(&mut self, item: @ast::Item) -> SmallVector<@ast::Item> {
+ fn fold_item(&mut self, item: Gc<ast::Item>) -> SmallVector<Gc<ast::Item>> {
if !no_prelude(item.attrs.as_slice()) {
// only recur if there wasn't `#![no_implicit_prelude]`
// on this item, i.e. this means that the prelude is not
// implicitly imported though the whole subtree
- fold::noop_fold_item(item, self)
+ fold::noop_fold_item(&*item, self)
} else {
SmallVector::one(item)
}
}),
};
- let vp = @codemap::dummy_spanned(ast::ViewPathGlob(prelude_path, ast::DUMMY_NODE_ID));
+ let vp = box(GC) codemap::dummy_spanned(ast::ViewPathGlob(prelude_path,
+ ast::DUMMY_NODE_ID));
let vi2 = ast::ViewItem {
node: ast::ViewItemUse(vp),
attrs: Vec::new(),
use front::std_inject::with_version;
use std::cell::RefCell;
+use std::gc::Gc;
use std::slice;
-use std::vec::Vec;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
}
}
- fn fold_item(&mut self, i: @ast::Item) -> SmallVector<@ast::Item> {
+ fn fold_item(&mut self, i: Gc<ast::Item>) -> SmallVector<Gc<ast::Item>> {
self.cx.path.borrow_mut().push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.borrow().as_slice()));
}
}
- let res = fold::noop_fold_item(i, self);
+ let res = fold::noop_fold_item(&*i, self);
self.cx.path.borrow_mut().pop();
res
}
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
- fn nomain(item: @ast::Item) -> @ast::Item {
- @ast::Item {
+ fn nomain(item: Gc<ast::Item>) -> Gc<ast::Item> {
+ box(GC) ast::Item {
attrs: item.attrs.iter().filter_map(|attr| {
if !attr.name().equiv(&("main")) {
Some(*attr)
})
}
-fn is_test_fn(cx: &TestCtxt, i: @ast::Item) -> bool {
+fn is_test_fn(cx: &TestCtxt, i: Gc<ast::Item>) -> bool {
let has_test_attr = attr::contains_name(i.attrs.as_slice(), "test");
- fn has_test_signature(i: @ast::Item) -> bool {
+ fn has_test_signature(i: Gc<ast::Item>) -> bool {
match &i.node {
&ast::ItemFn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
return has_test_attr && has_test_signature(i);
}
-fn is_bench_fn(cx: &TestCtxt, i: @ast::Item) -> bool {
+fn is_bench_fn(cx: &TestCtxt, i: Gc<ast::Item>) -> bool {
let has_bench_attr = attr::contains_name(i.attrs.as_slice(), "bench");
- fn has_test_signature(i: @ast::Item) -> bool {
+ fn has_test_signature(i: Gc<ast::Item>) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
return has_bench_attr && has_test_signature(i);
}
-fn is_ignored(cx: &TestCtxt, i: @ast::Item) -> bool {
+fn is_ignored(cx: &TestCtxt, i: Gc<ast::Item>) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
attr.check_name("ignore") && match attr.meta_item_list() {
})
}
-fn should_fail(i: @ast::Item) -> bool {
+fn should_fail(i: Gc<ast::Item>) -> bool {
attr::contains_name(i.attrs.as_slice(), "should_fail")
}
let id_test = token::str_to_ident("test");
let (vi, vis) = if cx.is_test_crate {
(ast::ViewItemUse(
- @nospan(ast::ViewPathSimple(id_test,
+ box(GC) nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)),
ast::DUMMY_NODE_ID))),
ast::Public)
}
}
-fn mk_test_module(cx: &TestCtxt) -> @ast::Item {
+fn mk_test_module(cx: &TestCtxt) -> Gc<ast::Item> {
// Link to test crate
let view_items = vec!(mk_std(cx));
debug!("Synthetic test module:\n{}\n", pprust::item_to_str(&item));
- return @item;
+ box(GC) item
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
}
}
-fn mk_tests(cx: &TestCtxt) -> @ast::Item {
+fn mk_tests(cx: &TestCtxt) -> Gc<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
}
}
-fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
+fn mk_test_descs(cx: &TestCtxt) -> Gc<ast::Expr> {
debug!("building test vector from {} tests", cx.testfns.borrow().len());
- @ast::Expr {
+ box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
- node: ast::ExprVstore(@ast::Expr {
+ node: ast::ExprVstore(box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(cx.testfns.borrow().iter().map(|test| {
mk_test_desc_and_fn_rec(cx, test)
}
}
-fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
+fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> Gc<ast::Expr> {
let span = test.span;
let path = test.path.clone();
ast_util::path_name_i(path.as_slice()).as_slice()),
ast::CookedStr));
- let name_expr = @ast::Expr {
+ let name_expr = box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
- node: ast::ExprLit(@name_lit),
+ node: ast::ExprLit(box(GC) name_lit),
span: span
};
let fn_path = path_node_global(path);
- let fn_expr = @ast::Expr {
+ let fn_expr = box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
extern crate graphviz;
extern crate libc;
extern crate serialize;
-extern crate sync;
extern crate syntax;
extern crate time;
pub fn get_item_attrs(cstore: &cstore::CStore,
def_id: ast::DefId,
- f: |Vec<ast::Attribute> |) {
+ f: |Vec<ast::Attribute>|) {
let cdata = cstore.get_crate_data(def_id.krate);
decoder::get_item_attrs(&*cdata, def_id.node, f)
}
use middle::typeck;
use middle::astencode::vtable_decoder_helpers;
-use std::u64;
-use std::hash;
+use std::gc::Gc;
use std::hash::Hash;
-use std::io;
+use std::hash;
use std::io::extensions::u64_from_be_bytes;
-use std::option;
+use std::io;
use std::rc::Rc;
+use std::u64;
use serialize::ebml::reader;
use serialize::ebml;
use serialize::Decodable;
result
}
-fn get_meta_items(md: ebml::Doc) -> Vec<@ast::MetaItem> {
- let mut items: Vec<@ast::MetaItem> = Vec::new();
+fn get_meta_items(md: ebml::Doc) -> Vec<Gc<ast::MetaItem>> {
+ let mut items: Vec<Gc<ast::MetaItem>> = Vec::new();
reader::tagged_docs(md, tag_meta_item_word, |meta_item_doc| {
let nd = reader::get_doc(meta_item_doc, tag_meta_item_name);
let n = token::intern_and_get_ident(nd.as_str_slice());
fn get_attributes(md: ebml::Doc) -> Vec<ast::Attribute> {
let mut attrs: Vec<ast::Attribute> = Vec::new();
match reader::maybe_get_doc(md, tag_attributes) {
- option::Some(attrs_d) => {
+ Some(attrs_d) => {
reader::tagged_docs(attrs_d, tag_attribute, |attr_doc| {
let meta_items = get_meta_items(attr_doc);
// Currently it's only possible to have a single meta item on
true
});
}
- option::None => ()
+ None => ()
}
return attrs;
}
use util::nodemap::{NodeMap, NodeSet};
use serialize::Encodable;
-use std::mem;
use std::cell::RefCell;
-use std::hash;
+use std::gc::Gc;
use std::hash::Hash;
+use std::hash;
use std::io::MemWriter;
+use std::mem;
use std::str;
use std::collections::HashMap;
use syntax::abi;
/// * For enums, iterates through the node IDs of the variants.
///
/// * For newtype structs, iterates through the node ID of the constructor.
-fn each_auxiliary_node_id(item: @Item, callback: |NodeId| -> bool) -> bool {
+fn each_auxiliary_node_id(item: Gc<Item>, callback: |NodeId| -> bool) -> bool {
let mut continue_ = true;
match item.node {
ItemEnum(ref enum_def, _) => {
impl_path: PathElems,
is_default_impl: bool,
parent_id: NodeId,
- ast_method_opt: Option<@Method>) {
+ ast_method_opt: Option<Gc<Method>>) {
debug!("encode_info_for_method: {:?} {}", m.def_id,
token::get_ident(m.ident));
is_default_impl ||
should_inline(ast_method.attrs.as_slice()) {
encode_inlined_item(ecx, ebml_w,
- IIMethodRef(local_def(parent_id), false, ast_method));
+ IIMethodRef(local_def(parent_id), false,
+ &*ast_method));
} else {
encode_symbol(ecx, ebml_w, m.def_id.node);
}
}
encode_method_sort(ebml_w, 'p');
encode_inlined_item(ecx, ebml_w,
- IIMethodRef(def_id, true, m));
+ IIMethodRef(def_id, true, &*m));
encode_method_argument_names(ebml_w, &*m.decl);
}
}
wr.write_be_u32(n as u32);
}
-fn encode_meta_item(ebml_w: &mut Encoder, mi: @MetaItem) {
+fn encode_meta_item(ebml_w: &mut Encoder, mi: Gc<MetaItem>) {
match mi.node {
MetaWord(ref name) => {
ebml_w.start_tag(tag_meta_item_word);
use std::io::MemWriter;
use std::mem;
use std::rc::Rc;
-use std::string::String;
use serialize::ebml::reader;
use serialize::ebml;
#[cfg(test)] use syntax::parse;
#[cfg(test)] use syntax::print::pprust;
+#[cfg(test)] use std::gc::Gc;
struct DecodeContext<'a> {
cdata: &'a cstore::crate_metadata,
match ii {
ast::IIItem(i) => {
debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
- syntax::print::pprust::item_to_str(i));
+ syntax::print::pprust::item_to_str(&*i));
}
_ => { }
}
def::DefUse(did) => def::DefUse(did.tr(xcx)),
def::DefUpvar(nid1, def, nid2, nid3) => {
def::DefUpvar(xcx.tr_id(nid1),
- @(*def).tr(xcx),
+ box(GC) (*def).tr(xcx),
xcx.tr_id(nid2),
xcx.tr_id(nid3))
}
// Testing of astencode_gen
#[cfg(test)]
-fn encode_item_ast(ebml_w: &mut Encoder, item: @ast::Item) {
+fn encode_item_ast(ebml_w: &mut Encoder, item: Gc<ast::Item>) {
ebml_w.start_tag(c::tag_tree as uint);
(*item).encode(ebml_w);
ebml_w.end_tag();
}
#[cfg(test)]
-fn decode_item_ast(par_doc: ebml::Doc) -> @ast::Item {
+fn decode_item_ast(par_doc: ebml::Doc) -> Gc<ast::Item> {
let chi_doc = par_doc.get(c::tag_tree as uint);
let mut d = reader::Decoder::new(chi_doc);
- @Decodable::decode(&mut d).unwrap()
+ box(GC) Decodable::decode(&mut d).unwrap()
}
#[cfg(test)]
}
#[cfg(test)]
-fn roundtrip(in_item: Option<@ast::Item>) {
+fn roundtrip(in_item: Option<Gc<ast::Item>>) {
use std::io::MemWriter;
let in_item = in_item.unwrap();
impl<'a> visit::Visitor<()> for StaticInitializerCtxt<'a> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
match ex.node {
- ast::ExprAddrOf(mutbl, base) => {
- let base_cmt = self.bccx.cat_expr(base);
+ ast::ExprAddrOf(mutbl, ref base) => {
+ let base_cmt = self.bccx.cat_expr(&**base);
let borrow_kind = ty::BorrowKind::from_mutbl(mutbl);
// Check that we don't allow borrows of unsafe static items.
if check_aliasability(self.bccx, ex.span, euv::AddrOf,
// flow dependent conditions.
match item.node {
ast::ItemStatic(_, _, ex) => {
- gather_loans::gather_loans_in_static_initializer(this, ex);
+ gather_loans::gather_loans_in_static_initializer(this, &*ex);
}
_ => {
visit::walk_item(this, item, ());
move_data::MoveExpr => {
let (expr_ty, expr_span) = match self.tcx.map.find(move.id) {
Some(ast_map::NodeExpr(expr)) => {
- (ty::expr_ty_adjusted(self.tcx, expr), expr.span)
+ (ty::expr_ty_adjusted(self.tcx, &*expr), expr.span)
}
r => {
self.tcx.sess.bug(format!("MoveExpr({:?}) maps to \
move_data::Captured => {
let (expr_ty, expr_span) = match self.tcx.map.find(move.id) {
Some(ast_map::NodeExpr(expr)) => {
- (ty::expr_ty_adjusted(self.tcx, expr), expr.span)
+ (ty::expr_ty_adjusted(self.tcx, &*expr), expr.span)
}
r => {
self.tcx.sess.bug(format!("Captured({:?}) maps to \
use syntax::ast_util;
use util::nodemap::NodeMap;
+use std::gc::Gc;
+
struct CFGBuilder<'a> {
tcx: &'a ty::ctxt,
exit_map: NodeMap<CFGIndex>,
impl<'a> CFGBuilder<'a> {
fn block(&mut self, blk: &ast::Block, pred: CFGIndex) -> CFGIndex {
let mut stmts_exit = pred;
- for &stmt in blk.stmts.iter() {
- stmts_exit = self.stmt(stmt, stmts_exit);
+ for stmt in blk.stmts.iter() {
+ stmts_exit = self.stmt(stmt.clone(), stmts_exit);
}
- let expr_exit = self.opt_expr(blk.expr, stmts_exit);
+ let expr_exit = self.opt_expr(blk.expr.clone(), stmts_exit);
self.add_node(blk.id, [expr_exit])
}
- fn stmt(&mut self, stmt: @ast::Stmt, pred: CFGIndex) -> CFGIndex {
+ fn stmt(&mut self, stmt: Gc<ast::Stmt>, pred: CFGIndex) -> CFGIndex {
match stmt.node {
- ast::StmtDecl(decl, _) => {
- self.decl(decl, pred)
+ ast::StmtDecl(ref decl, _) => {
+ self.decl(&**decl, pred)
}
- ast::StmtExpr(expr, _) | ast::StmtSemi(expr, _) => {
- self.expr(expr, pred)
+ ast::StmtExpr(ref expr, _) | ast::StmtSemi(ref expr, _) => {
+ self.expr(expr.clone(), pred)
}
ast::StmtMac(..) => {
}
}
- fn decl(&mut self, decl: @ast::Decl, pred: CFGIndex) -> CFGIndex {
+ fn decl(&mut self, decl: &ast::Decl, pred: CFGIndex) -> CFGIndex {
match decl.node {
- ast::DeclLocal(local) => {
- let init_exit = self.opt_expr(local.init, pred);
- self.pat(local.pat, init_exit)
+ ast::DeclLocal(ref local) => {
+ let init_exit = self.opt_expr(local.init.clone(), pred);
+ self.pat(&*local.pat, init_exit)
}
ast::DeclItem(_) => {
}
}
- fn pat(&mut self, pat: @ast::Pat, pred: CFGIndex) -> CFGIndex {
+ fn pat(&mut self, pat: &ast::Pat, pred: CFGIndex) -> CFGIndex {
match pat.node {
ast::PatIdent(_, _, None) |
ast::PatEnum(_, None) |
self.add_node(pat.id, [pred])
}
- ast::PatBox(subpat) |
- ast::PatRegion(subpat) |
- ast::PatIdent(_, _, Some(subpat)) => {
- let subpat_exit = self.pat(subpat, pred);
+ ast::PatBox(ref subpat) |
+ ast::PatRegion(ref subpat) |
+ ast::PatIdent(_, _, Some(ref subpat)) => {
+ let subpat_exit = self.pat(&**subpat, pred);
self.add_node(pat.id, [subpat_exit])
}
ast::PatEnum(_, Some(ref subpats)) |
ast::PatTup(ref subpats) => {
let pats_exit =
- self.pats_all(subpats.iter().map(|p| *p), pred);
+ self.pats_all(subpats.iter().map(|p| p.clone()), pred);
self.add_node(pat.id, [pats_exit])
}
ast::PatStruct(_, ref subpats, _) => {
let pats_exit =
- self.pats_all(subpats.iter().map(|f| f.pat), pred);
+ self.pats_all(subpats.iter().map(|f| f.pat.clone()), pred);
self.add_node(pat.id, [pats_exit])
}
}
}
- fn pats_all<I: Iterator<@ast::Pat>>(&mut self,
+ fn pats_all<I: Iterator<Gc<ast::Pat>>>(&mut self,
pats: I,
pred: CFGIndex) -> CFGIndex {
//! Handles case where all of the patterns must match.
let mut pats = pats;
- pats.fold(pred, |pred, pat| self.pat(pat, pred))
+ pats.fold(pred, |pred, pat| self.pat(&*pat, pred))
}
fn pats_any(&mut self,
- pats: &[@ast::Pat],
+ pats: &[Gc<ast::Pat>],
pred: CFGIndex) -> CFGIndex {
//! Handles case where just one of the patterns must match.
if pats.len() == 1 {
- self.pat(pats[0], pred)
+ self.pat(&*pats[0], pred)
} else {
let collect = self.add_dummy_node([]);
for &pat in pats.iter() {
- let pat_exit = self.pat(pat, pred);
+ let pat_exit = self.pat(&*pat, pred);
self.add_contained_edge(pat_exit, collect);
}
collect
}
}
- fn expr(&mut self, expr: @ast::Expr, pred: CFGIndex) -> CFGIndex {
+ fn expr(&mut self, expr: Gc<ast::Expr>, pred: CFGIndex) -> CFGIndex {
match expr.node {
- ast::ExprBlock(blk) => {
- let blk_exit = self.block(blk, pred);
+ ast::ExprBlock(ref blk) => {
+ let blk_exit = self.block(&**blk, pred);
self.add_node(expr.id, [blk_exit])
}
- ast::ExprIf(cond, then, None) => {
+ ast::ExprIf(ref cond, ref then, None) => {
//
// [pred]
// |
// v 3 v 4
// [..expr..]
//
- let cond_exit = self.expr(cond, pred); // 1
- let then_exit = self.block(then, cond_exit); // 2
- self.add_node(expr.id, [cond_exit, then_exit]) // 3,4
+ let cond_exit = self.expr(cond.clone(), pred); // 1
+ let then_exit = self.block(&**then, cond_exit); // 2
+ self.add_node(expr.id, [cond_exit, then_exit]) // 3,4
}
- ast::ExprIf(cond, then, Some(otherwise)) => {
+ ast::ExprIf(ref cond, ref then, Some(ref otherwise)) => {
//
// [pred]
// |
// v 4 v 5
// [..expr..]
//
- let cond_exit = self.expr(cond, pred); // 1
- let then_exit = self.block(then, cond_exit); // 2
- let else_exit = self.expr(otherwise, cond_exit); // 3
- self.add_node(expr.id, [then_exit, else_exit]) // 4, 5
+ let cond_exit = self.expr(cond.clone(), pred); // 1
+ let then_exit = self.block(&**then, cond_exit); // 2
+ let else_exit = self.expr(otherwise.clone(), cond_exit); // 3
+ self.add_node(expr.id, [then_exit, else_exit]) // 4, 5
}
- ast::ExprWhile(cond, body) => {
+ ast::ExprWhile(ref cond, ref body) => {
//
// [pred]
// |
// may cause additional edges.
// Is the condition considered part of the loop?
- let loopback = self.add_dummy_node([pred]); // 1
- let cond_exit = self.expr(cond, loopback); // 2
- let expr_exit = self.add_node(expr.id, [cond_exit]); // 3
+ let loopback = self.add_dummy_node([pred]); // 1
+ let cond_exit = self.expr(cond.clone(), loopback); // 2
+ let expr_exit = self.add_node(expr.id, [cond_exit]); // 3
self.loop_scopes.push(LoopScope {
loop_id: expr.id,
continue_index: loopback,
break_index: expr_exit
});
- let body_exit = self.block(body, cond_exit); // 4
- self.add_contained_edge(body_exit, loopback); // 5
+ let body_exit = self.block(&**body, cond_exit); // 4
+ self.add_contained_edge(body_exit, loopback); // 5
expr_exit
}
ast::ExprForLoop(..) => fail!("non-desugared expr_for_loop"),
- ast::ExprLoop(body, _) => {
+ ast::ExprLoop(ref body, _) => {
//
// [pred]
// |
// Note that `break` and `loop` statements
// may cause additional edges.
- let loopback = self.add_dummy_node([pred]); // 1
- let expr_exit = self.add_node(expr.id, []); // 2
+ let loopback = self.add_dummy_node([pred]); // 1
+ let expr_exit = self.add_node(expr.id, []); // 2
self.loop_scopes.push(LoopScope {
loop_id: expr.id,
continue_index: loopback,
break_index: expr_exit,
});
- let body_exit = self.block(body, loopback); // 3
- self.add_contained_edge(body_exit, loopback); // 4
+ let body_exit = self.block(&**body, loopback); // 3
+ self.add_contained_edge(body_exit, loopback); // 4
self.loop_scopes.pop();
expr_exit
}
- ast::ExprMatch(discr, ref arms) => {
+ ast::ExprMatch(ref discr, ref arms) => {
//
// [pred]
// |
// v 5 v v
// [....expr....]
//
- let discr_exit = self.expr(discr, pred); // 1
+ let discr_exit = self.expr(discr.clone(), pred); // 1
let expr_exit = self.add_node(expr.id, []);
let mut guard_exit = discr_exit;
for arm in arms.iter() {
- guard_exit = self.opt_expr(arm.guard, guard_exit); // 2
+ guard_exit = self.opt_expr(arm.guard, guard_exit); // 2
let pats_exit = self.pats_any(arm.pats.as_slice(),
- guard_exit); // 3
- let body_exit = self.expr(arm.body, pats_exit); // 4
+ guard_exit); // 3
+ let body_exit = self.expr(arm.body.clone(), pats_exit); // 4
self.add_contained_edge(body_exit, expr_exit); // 5
}
expr_exit
}
- ast::ExprBinary(op, l, r) if ast_util::lazy_binop(op) => {
+ ast::ExprBinary(op, ref l, ref r) if ast_util::lazy_binop(op) => {
//
// [pred]
// |
// v 3 v 4
// [..exit..]
//
- let l_exit = self.expr(l, pred); // 1
- let r_exit = self.expr(r, l_exit); // 2
+ let l_exit = self.expr(l.clone(), pred); // 1
+ let r_exit = self.expr(r.clone(), l_exit); // 2
self.add_node(expr.id, [l_exit, r_exit]) // 3,4
}
- ast::ExprRet(v) => {
- let v_exit = self.opt_expr(v, pred);
+ ast::ExprRet(ref v) => {
+ let v_exit = self.opt_expr(v.clone(), pred);
let b = self.add_node(expr.id, [v_exit]);
self.add_returning_edge(expr, b);
self.add_node(ast::DUMMY_NODE_ID, [])
self.straightline(expr, pred, elems.as_slice())
}
- ast::ExprCall(func, ref args) => {
- self.call(expr, pred, func, args.as_slice())
+ ast::ExprCall(ref func, ref args) => {
+ self.call(expr, pred, func.clone(), args.as_slice())
}
ast::ExprMethodCall(_, _, ref args) => {
self.call(expr, pred, *args.get(0), args.slice_from(1))
}
- ast::ExprIndex(l, r) |
- ast::ExprBinary(_, l, r) if self.is_method_call(expr) => {
- self.call(expr, pred, l, [r])
+ ast::ExprIndex(ref l, ref r) |
+ ast::ExprBinary(_, ref l, ref r) if self.is_method_call(&*expr) => {
+ self.call(expr, pred, l.clone(), [r.clone()])
}
- ast::ExprUnary(_, e) if self.is_method_call(expr) => {
- self.call(expr, pred, e, [])
+ ast::ExprUnary(_, ref e) if self.is_method_call(&*expr) => {
+ self.call(expr, pred, e.clone(), [])
}
ast::ExprTup(ref exprs) => {
ast::ExprStruct(_, ref fields, base) => {
let base_exit = self.opt_expr(base, pred);
- let field_exprs: Vec<@ast::Expr> =
+ let field_exprs: Vec<Gc<ast::Expr>> =
fields.iter().map(|f| f.expr).collect();
self.straightline(expr, base_exit, field_exprs.as_slice())
}
}
fn call(&mut self,
- call_expr: @ast::Expr,
+ call_expr: Gc<ast::Expr>,
pred: CFGIndex,
- func_or_rcvr: @ast::Expr,
- args: &[@ast::Expr]) -> CFGIndex {
+ func_or_rcvr: Gc<ast::Expr>,
+ args: &[Gc<ast::Expr>]) -> CFGIndex {
let func_or_rcvr_exit = self.expr(func_or_rcvr, pred);
self.straightline(call_expr, func_or_rcvr_exit, args)
}
fn exprs(&mut self,
- exprs: &[@ast::Expr],
+ exprs: &[Gc<ast::Expr>],
pred: CFGIndex) -> CFGIndex {
//! Constructs graph for `exprs` evaluated in order
}
fn opt_expr(&mut self,
- opt_expr: Option<@ast::Expr>,
+ opt_expr: Option<Gc<ast::Expr>>,
pred: CFGIndex) -> CFGIndex {
//! Constructs graph for `opt_expr` evaluated, if Some
}
fn straightline(&mut self,
- expr: @ast::Expr,
+ expr: Gc<ast::Expr>,
pred: CFGIndex,
- subexprs: &[@ast::Expr]) -> CFGIndex {
+ subexprs: &[Gc<ast::Expr>]) -> CFGIndex {
//! Handles case of an expression that evaluates `subexprs` in order
let subexprs_exit = self.exprs(subexprs, pred);
}
fn add_exiting_edge(&mut self,
- from_expr: @ast::Expr,
+ from_expr: Gc<ast::Expr>,
from_index: CFGIndex,
to_loop: LoopScope,
to_index: CFGIndex) {
}
fn add_returning_edge(&mut self,
- _from_expr: @ast::Expr,
+ _from_expr: Gc<ast::Expr>,
from_index: CFGIndex) {
let mut data = CFGEdgeData {
exiting_scopes: vec!(),
}
fn find_scope(&self,
- expr: @ast::Expr,
+ expr: Gc<ast::Expr>,
label: Option<ast::Ident>) -> LoopScope {
match label {
None => {
fn check_item(v: &mut CheckCrateVisitor, it: &Item, _is_const: bool) {
match it.node {
ItemStatic(_, _, ex) => {
- v.visit_expr(ex, true);
+ v.visit_expr(&*ex, true);
check_item_recursion(&v.tcx.sess, &v.tcx.map, &v.tcx.def_map, it);
}
ItemEnum(ref enum_definition, _) => {
for var in (*enum_definition).variants.iter() {
for ex in var.node.disr_expr.iter() {
- v.visit_expr(*ex, true);
+ v.visit_expr(&**ex, true);
}
}
}
}
match p.node {
// Let through plain ~-string literals here
- PatLit(a) => if !is_str(a) { v.visit_expr(a, true); },
- PatRange(a, b) => {
- if !is_str(a) { v.visit_expr(a, true); }
- if !is_str(b) { v.visit_expr(b, true); }
+ PatLit(ref a) => if !is_str(&**a) { v.visit_expr(&**a, true); },
+ PatRange(ref a, ref b) => {
+ if !is_str(&**a) { v.visit_expr(&**a, true); }
+ if !is_str(&**b) { v.visit_expr(&**b, true); }
}
_ => visit::walk_pat(v, p, false)
}
match self.def_map.borrow().find(&e.id) {
Some(&DefStatic(def_id, _)) if
ast_util::is_local(def_id) => {
- self.visit_item(self.ast_map.expect_item(def_id.node), ());
+ self.visit_item(&*self.ast_map.expect_item(def_id.node), ());
}
_ => ()
}
fn visit_expr(&mut self, e: &ast::Expr, cx:Context) {
match e.node {
- ast::ExprWhile(e, b) => {
- self.visit_expr(e, cx);
- self.visit_block(b, Loop);
+ ast::ExprWhile(ref e, ref b) => {
+ self.visit_expr(&**e, cx);
+ self.visit_block(&**b, Loop);
}
- ast::ExprLoop(b, _) => {
- self.visit_block(b, Loop);
+ ast::ExprLoop(ref b, _) => {
+ self.visit_block(&**b, Loop);
}
- ast::ExprFnBlock(_, b) | ast::ExprProc(_, b) => {
- self.visit_block(b, Closure);
+ ast::ExprFnBlock(_, ref b) | ast::ExprProc(_, ref b) => {
+ self.visit_block(&**b, Closure);
}
ast::ExprBreak(_) => self.require_loop("break", cx, e.span),
ast::ExprAgain(_) => self.require_loop("continue", cx, e.span),
use util::ppaux::ty_to_str;
use std::cmp;
+use std::gc::Gc;
use std::iter;
use syntax::ast::*;
use syntax::ast_util::{is_unguarded, walk_pat};
match opt_def {
Some(DefStatic(did, false)) => {
let const_expr = lookup_const_by_id(cx.tcx, did).unwrap();
- match eval_const_expr(cx.tcx, const_expr) {
+ match eval_const_expr(cx.tcx, &*const_expr) {
const_float(f) if f.is_nan() => true,
_ => false
}
}
};
- walk_pat(*pat, |p| {
+ walk_pat(&**pat, |p| {
if pat_matches_nan(p) {
cx.tcx.sess.span_warn(p.span, "unmatchable NaN in pattern, \
use the is_nan method in a guard instead");
}
}
-fn raw_pat(p: @Pat) -> @Pat {
+fn raw_pat(p: Gc<Pat>) -> Gc<Pat> {
match p.node {
PatIdent(_, _, Some(s)) => { raw_pat(s) }
_ => { p }
cx.tcx.sess.span_err(sp, msg.as_slice());
}
-type matrix = Vec<Vec<@Pat> > ;
+type matrix = Vec<Vec<Gc<Pat>>>;
#[deriving(Clone)]
enum useful {
// Note: is_useful doesn't work on empty types, as the paper notes.
// So it assumes that v is non-empty.
-fn is_useful(cx: &MatchCheckCtxt, m: &matrix, v: &[@Pat]) -> useful {
+fn is_useful(cx: &MatchCheckCtxt, m: &matrix, v: &[Gc<Pat>]) -> useful {
if m.len() == 0u {
return useful_;
}
fn is_useful_specialized(cx: &MatchCheckCtxt,
m: &matrix,
- v: &[@Pat],
+ v: &[Gc<Pat>],
ctor: ctor,
arity: uint,
lty: ty::t)
}
}
-fn pat_ctor_id(cx: &MatchCheckCtxt, p: @Pat) -> Option<ctor> {
+fn pat_ctor_id(cx: &MatchCheckCtxt, p: Gc<Pat>) -> Option<ctor> {
let pat = raw_pat(p);
match pat.node {
PatWild | PatWildMulti => { None }
Some(DefVariant(_, id, _)) => Some(variant(id)),
Some(DefStatic(did, false)) => {
let const_expr = lookup_const_by_id(cx.tcx, did).unwrap();
- Some(val(eval_const_expr(cx.tcx, const_expr)))
+ Some(val(eval_const_expr(cx.tcx, &*const_expr)))
}
_ => None
}
}
- PatLit(expr) => { Some(val(eval_const_expr(cx.tcx, expr))) }
- PatRange(lo, hi) => {
- Some(range(eval_const_expr(cx.tcx, lo), eval_const_expr(cx.tcx, hi)))
+ PatLit(ref expr) => { Some(val(eval_const_expr(cx.tcx, &**expr))) }
+ PatRange(ref lo, ref hi) => {
+ Some(range(eval_const_expr(cx.tcx, &**lo), eval_const_expr(cx.tcx, &**hi)))
}
PatStruct(..) => {
match cx.tcx.def_map.borrow().find(&pat.id) {
}
}
-fn is_wild(cx: &MatchCheckCtxt, p: @Pat) -> bool {
+fn is_wild(cx: &MatchCheckCtxt, p: Gc<Pat>) -> bool {
let pat = raw_pat(p);
match pat.node {
PatWild | PatWildMulti => { true }
}
}
-fn wild() -> @Pat {
- @Pat {id: 0, node: PatWild, span: DUMMY_SP}
+fn wild() -> Gc<Pat> {
+ box(GC) Pat {id: 0, node: PatWild, span: DUMMY_SP}
}
-fn wild_multi() -> @Pat {
- @Pat {id: 0, node: PatWildMulti, span: DUMMY_SP}
+fn wild_multi() -> Gc<Pat> {
+ box(GC) Pat {id: 0, node: PatWildMulti, span: DUMMY_SP}
}
fn range_covered_by_constructor(ctor_id: &ctor, from: &const_val, to: &const_val) -> Option<bool> {
}
fn specialize(cx: &MatchCheckCtxt,
- r: &[@Pat],
+ r: &[Gc<Pat>],
ctor_id: &ctor,
arity: uint,
left_ty: ty::t)
- -> Option<Vec<@Pat> > {
+ -> Option<Vec<Gc<Pat>>> {
let &Pat{id: ref pat_id, node: ref n, span: ref pat_span} = &(*raw_pat(r[0]));
- let head: Option<Vec<@Pat>> = match n {
+ let head: Option<Vec<Gc<Pat>>> = match n {
&PatWild => {
Some(Vec::from_elem(arity, wild()))
}
}
Some(DefStatic(did, _)) => {
let const_expr = lookup_const_by_id(cx.tcx, did).unwrap();
- let e_v = eval_const_expr(cx.tcx, const_expr);
+ let e_v = eval_const_expr(cx.tcx, &*const_expr);
match range_covered_by_constructor(ctor_id, &e_v, &e_v) {
Some(true) => Some(vec!()),
Some(false) => None,
match def {
DefStatic(did, _) => {
let const_expr = lookup_const_by_id(cx.tcx, did).unwrap();
- let e_v = eval_const_expr(cx.tcx, const_expr);
+ let e_v = eval_const_expr(cx.tcx, &*const_expr);
match range_covered_by_constructor(ctor_id, &e_v, &e_v) {
Some(true) => Some(vec!()),
Some(false) => None,
Some(vec!(inner.clone()))
}
&PatLit(ref expr) => {
- let expr_value = eval_const_expr(cx.tcx, *expr);
+ let expr_value = eval_const_expr(cx.tcx, &**expr);
match range_covered_by_constructor(ctor_id, &expr_value, &expr_value) {
Some(true) => Some(vec!()),
Some(false) => None,
}
}
&PatRange(ref from, ref to) => {
- let from_value = eval_const_expr(cx.tcx, *from);
- let to_value = eval_const_expr(cx.tcx, *to);
+ let from_value = eval_const_expr(cx.tcx, &**from);
+ let to_value = eval_const_expr(cx.tcx, &**to);
match range_covered_by_constructor(ctor_id, &from_value, &to_value) {
Some(true) => Some(vec!()),
Some(false) => None,
head.map(|head| head.append(r.tail()))
}
-fn default(cx: &MatchCheckCtxt, r: &[@Pat]) -> Option<Vec<@Pat> > {
+fn default(cx: &MatchCheckCtxt, r: &[Gc<Pat>]) -> Option<Vec<Gc<Pat>>> {
if is_wild(cx, r[0]) {
Some(Vec::from_slice(r.tail()))
} else {
};
let mut spans = vec![];
- find_refutable(cx, loc.pat, &mut spans);
+ find_refutable(cx, &*loc.pat, &mut spans);
for span in spans.iter() {
cx.tcx.sess.span_err(*span,
visit::walk_fn(cx, kind, decl, body, sp, ());
for input in decl.inputs.iter() {
let mut spans = vec![];
- find_refutable(cx, input.pat, &mut spans);
+ find_refutable(cx, &*input.pat, &mut spans);
for span in spans.iter() {
cx.tcx.sess.span_err(*span,
}
match pat.node {
- PatBox(sub) | PatRegion(sub) | PatIdent(_, _, Some(sub)) => {
- find_refutable(cx, sub, spans)
+ PatBox(ref sub) | PatRegion(ref sub) | PatIdent(_, _, Some(ref sub)) => {
+ find_refutable(cx, &**sub, spans)
}
PatWild | PatWildMulti | PatIdent(_, _, None) => {}
PatLit(lit) => {
PatRange(_, _) => { this_pattern!() }
PatStruct(_, ref fields, _) => {
for f in fields.iter() {
- find_refutable(cx, f.pat, spans);
+ find_refutable(cx, &*f.pat, spans);
}
}
PatTup(ref elts) | PatEnum(_, Some(ref elts))=> {
for elt in elts.iter() {
- find_refutable(cx, *elt, spans)
+ find_refutable(cx, &**elt, spans)
}
}
PatEnum(_,_) => {}
fn check_legality_of_move_bindings(cx: &MatchCheckCtxt,
has_guard: bool,
- pats: &[@Pat]) {
+ pats: &[Gc<Pat>]) {
let tcx = cx.tcx;
let def_map = &tcx.def_map;
let mut by_ref_span = None;
for pat in pats.iter() {
- pat_bindings(def_map, *pat, |bm, _, span, _path| {
+ pat_bindings(def_map, &**pat, |bm, _, span, _path| {
match bm {
BindByRef(_) => {
by_ref_span = Some(span);
})
}
- let check_move: |&Pat, Option<@Pat>| = |p, sub| {
+ let check_move: |&Pat, Option<Gc<Pat>>| = |p, sub| {
// check legality of moving out of the enum
// x @ Foo(..) is legal, but x @ Foo(y) isn't.
- if sub.map_or(false, |p| pat_contains_bindings(def_map, p)) {
+ if sub.map_or(false, |p| pat_contains_bindings(def_map, &*p)) {
tcx.sess.span_err(
p.span,
"cannot bind by-move with sub-bindings");
};
for pat in pats.iter() {
- walk_pat(*pat, |p| {
- if pat_is_binding(def_map, p) {
+ walk_pat(&**pat, |p| {
+ if pat_is_binding(def_map, &*p) {
match p.node {
PatIdent(BindByValue(_), _, sub) => {
let pat_ty = ty::node_id_to_type(tcx, p.id);
fn visit_item(&mut self, i: &ast::Item, _is_const: bool) {
debug!("visit_item(item={})", pprust::item_to_str(i));
match i.node {
- ast::ItemStatic(_, mutability, expr) => {
+ ast::ItemStatic(_, mutability, ref expr) => {
match mutability {
ast::MutImmutable => {
- self.visit_expr(expr, true);
+ self.visit_expr(&**expr, true);
}
ast::MutMutable => {
- self.report_error(expr.span, safe_type_for_static_mut(self.tcx, expr));
+ let safe = safe_type_for_static_mut(self.tcx, &**expr);
+ self.report_error(expr.span, safe);
}
}
}
use syntax::{ast, ast_map, ast_util};
use std::rc::Rc;
+use std::gc::Gc;
//
// This pass classifies expressions by their constant-ness.
cs.fold(integral_const, |a, b| join(a, b))
}
-pub fn lookup_const(tcx: &ty::ctxt, e: &Expr) -> Option<@Expr> {
+pub fn lookup_const(tcx: &ty::ctxt, e: &Expr) -> Option<Gc<Expr>> {
let opt_def = tcx.def_map.borrow().find_copy(&e.id);
match opt_def {
Some(def::DefStatic(def_id, false)) => {
pub fn lookup_variant_by_id(tcx: &ty::ctxt,
enum_def: ast::DefId,
variant_def: ast::DefId)
- -> Option<@Expr> {
- fn variant_expr(variants: &[ast::P<ast::Variant>], id: ast::NodeId) -> Option<@Expr> {
+ -> Option<Gc<Expr>> {
+ fn variant_expr(variants: &[ast::P<ast::Variant>],
+ id: ast::NodeId) -> Option<Gc<Expr>> {
for variant in variants.iter() {
if variant.node.id == id {
return variant.node.disr_expr;
}
pub fn lookup_const_by_id(tcx: &ty::ctxt, def_id: ast::DefId)
- -> Option<@Expr> {
+ -> Option<Gc<Expr>> {
if ast_util::is_local(def_id) {
{
match tcx.map.find(def_id.node) {
None => {}
}
let cn = match e.node {
- ast::ExprLit(lit) => {
+ ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(..) | ast::LitFloat(..) => general_const,
_ => integral_const
}
}
- ast::ExprUnary(_, inner) | ast::ExprParen(inner) =>
- self.classify(inner),
+ ast::ExprUnary(_, ref inner) | ast::ExprParen(ref inner) =>
+ self.classify(&**inner),
- ast::ExprBinary(_, a, b) =>
- join(self.classify(a), self.classify(b)),
+ ast::ExprBinary(_, ref a, ref b) =>
+ join(self.classify(&**a), self.classify(&**b)),
ast::ExprTup(ref es) |
ast::ExprVec(ref es) =>
- join_all(es.iter().map(|e| self.classify(*e))),
+ join_all(es.iter().map(|e| self.classify(&**e))),
- ast::ExprVstore(e, vstore) => {
+ ast::ExprVstore(ref e, vstore) => {
match vstore {
- ast::ExprVstoreSlice => self.classify(e),
+ ast::ExprVstoreSlice => self.classify(&**e),
ast::ExprVstoreUniq |
ast::ExprVstoreMutSlice => non_const
}
}
ast::ExprStruct(_, ref fs, None) => {
- let cs = fs.iter().map(|f| self.classify(f.expr));
+ let cs = fs.iter().map(|f| self.classify(&*f.expr));
join_all(cs)
}
- ast::ExprCast(base, _) => {
+ ast::ExprCast(ref base, _) => {
let ty = ty::expr_ty(self.tcx, e);
- let base = self.classify(base);
+ let base = self.classify(&**base);
if ty::type_is_integral(ty) {
join(integral_const, base)
} else if ty::type_is_fp(ty) {
}
}
- ast::ExprField(base, _, _) => self.classify(base),
+ ast::ExprField(ref base, _, _) => self.classify(&**base),
- ast::ExprIndex(base, idx) =>
- join(self.classify(base), self.classify(idx)),
+ ast::ExprIndex(ref base, ref idx) =>
+ join(self.classify(&**base), self.classify(&**idx)),
- ast::ExprAddrOf(ast::MutImmutable, base) => self.classify(base),
+ ast::ExprAddrOf(ast::MutImmutable, ref base) =>
+ self.classify(&**base),
// FIXME: (#3728) we can probably do something CCI-ish
// surrounding nonlocal constants. But we don't yet.
fn lookup_constness(&self, e: &Expr) -> constness {
match lookup_const(self.tcx, e) {
Some(rhs) => {
- let ty = ty::expr_ty(self.tcx, rhs);
+ let ty = ty::expr_ty(self.tcx, &*rhs);
if ty::type_is_integral(ty) {
integral_const
} else {
-> Result<const_val, String> {
fn fromb(b: bool) -> Result<const_val, String> { Ok(const_int(b as i64)) }
match e.node {
- ExprUnary(UnNeg, inner) => {
- match eval_const_expr_partial(tcx, inner) {
+ ExprUnary(UnNeg, ref inner) => {
+ match eval_const_expr_partial(tcx, &**inner) {
Ok(const_float(f)) => Ok(const_float(-f)),
Ok(const_int(i)) => Ok(const_int(-i)),
Ok(const_uint(i)) => Ok(const_uint(-i)),
ref err => ((*err).clone())
}
}
- ExprUnary(UnNot, inner) => {
- match eval_const_expr_partial(tcx, inner) {
+ ExprUnary(UnNot, ref inner) => {
+ match eval_const_expr_partial(tcx, &**inner) {
Ok(const_int(i)) => Ok(const_int(!i)),
Ok(const_uint(i)) => Ok(const_uint(!i)),
Ok(const_bool(b)) => Ok(const_bool(!b)),
_ => Err("not on float or string".to_string())
}
}
- ExprBinary(op, a, b) => {
- match (eval_const_expr_partial(tcx, a),
- eval_const_expr_partial(tcx, b)) {
+ ExprBinary(op, ref a, ref b) => {
+ match (eval_const_expr_partial(tcx, &**a),
+ eval_const_expr_partial(tcx, &**b)) {
(Ok(const_float(a)), Ok(const_float(b))) => {
match op {
BiAdd => Ok(const_float(a + b)),
_ => Err("bad operands for binary".to_string())
}
}
- ExprCast(base, target_ty) => {
+ ExprCast(ref base, ref target_ty) => {
// This tends to get called w/o the type actually having been
// populated in the ctxt, which was causing things to blow up
// (#5900). Fall back to doing a limited lookup to get past it.
let ety = ty::expr_ty_opt(tcx.ty_ctxt(), e)
- .or_else(|| astconv::ast_ty_to_prim_ty(tcx.ty_ctxt(), target_ty))
+ .or_else(|| astconv::ast_ty_to_prim_ty(tcx.ty_ctxt(), &**target_ty))
.unwrap_or_else(|| {
tcx.ty_ctxt().sess.span_fatal(target_ty.span,
"target type not found for \
const cast")
});
- let base = eval_const_expr_partial(tcx, base);
+ let base = eval_const_expr_partial(tcx, &**base);
match base {
Err(_) => base,
Ok(val) => {
}
ExprPath(_) => {
match lookup_const(tcx.ty_ctxt(), e) {
- Some(actual_e) => eval_const_expr_partial(tcx.ty_ctxt(), actual_e),
+ Some(actual_e) => eval_const_expr_partial(tcx.ty_ctxt(), &*actual_e),
None => Err("non-constant path in constant expr".to_string())
}
}
- ExprLit(lit) => Ok(lit_to_const(lit)),
+ ExprLit(ref lit) => Ok(lit_to_const(&**lit)),
// If we have a vstore, just keep going; it has to be a string
- ExprVstore(e, _) => eval_const_expr_partial(tcx, e),
- ExprParen(e) => eval_const_expr_partial(tcx, e),
+ ExprVstore(ref e, _) => eval_const_expr_partial(tcx, &**e),
+ ExprParen(ref e) => eval_const_expr_partial(tcx, &**e),
ExprBlock(ref block) => {
match block.expr {
Some(ref expr) => eval_const_expr_partial(tcx, &**expr),
use middle::ty;
use middle::typeck;
use std::io;
-use std::string::String;
+use std::gc::Gc;
use std::uint;
use syntax::ast;
use syntax::ast_util;
self.merge_with_entry_set(blk.id, in_out);
- for &stmt in blk.stmts.iter() {
- self.walk_stmt(stmt, in_out, loop_scopes);
+ for stmt in blk.stmts.iter() {
+ self.walk_stmt(stmt.clone(), in_out, loop_scopes);
}
self.walk_opt_expr(blk.expr, in_out, loop_scopes);
}
fn walk_stmt(&mut self,
- stmt: @ast::Stmt,
+ stmt: Gc<ast::Stmt>,
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
match stmt.node {
- ast::StmtDecl(decl, _) => {
- self.walk_decl(decl, in_out, loop_scopes);
+ ast::StmtDecl(ref decl, _) => {
+ self.walk_decl(decl.clone(), in_out, loop_scopes);
}
- ast::StmtExpr(expr, _) | ast::StmtSemi(expr, _) => {
- self.walk_expr(expr, in_out, loop_scopes);
+ ast::StmtExpr(ref expr, _) | ast::StmtSemi(ref expr, _) => {
+ self.walk_expr(&**expr, in_out, loop_scopes);
}
ast::StmtMac(..) => {
}
fn walk_decl(&mut self,
- decl: @ast::Decl,
+ decl: Gc<ast::Decl>,
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
match decl.node {
- ast::DeclLocal(local) => {
+ ast::DeclLocal(ref local) => {
self.walk_opt_expr(local.init, in_out, loop_scopes);
self.walk_pat(local.pat, in_out, loop_scopes);
}
// v v
// ( succ )
//
- self.walk_expr(cond, in_out, loop_scopes);
+ self.walk_expr(&*cond, in_out, loop_scopes);
let mut then_bits = in_out.to_owned();
- self.walk_block(then, then_bits, loop_scopes);
+ self.walk_block(&*then, then_bits.as_mut_slice(), loop_scopes);
self.walk_opt_expr(els, in_out, loop_scopes);
- join_bits(&self.dfcx.oper, then_bits, in_out);
+ join_bits(&self.dfcx.oper, then_bits.as_slice(), in_out);
}
ast::ExprWhile(cond, blk) => {
// <--+ (break)
//
- self.walk_expr(cond, in_out, loop_scopes);
+ self.walk_expr(&*cond, in_out, loop_scopes);
let mut body_bits = in_out.to_owned();
loop_scopes.push(LoopScope {
loop_id: expr.id,
break_bits: Vec::from_slice(in_out)
});
- self.walk_block(blk, body_bits, loop_scopes);
- self.add_to_entry_set(expr.id, body_bits);
+ self.walk_block(&*blk, body_bits.as_mut_slice(), loop_scopes);
+ self.add_to_entry_set(expr.id, body_bits.as_slice());
let new_loop_scope = loop_scopes.pop().unwrap();
copy_bits(new_loop_scope.break_bits.as_slice(), in_out);
}
ast::ExprForLoop(..) => fail!("non-desugared expr_for_loop"),
- ast::ExprLoop(blk, _) => {
+ ast::ExprLoop(ref blk, _) => {
//
// (expr) <--+
// | |
loop_id: expr.id,
break_bits: Vec::from_slice(in_out)
});
- self.walk_block(blk, body_bits, loop_scopes);
- self.add_to_entry_set(expr.id, body_bits);
+ self.walk_block(&**blk, body_bits.as_mut_slice(), loop_scopes);
+ self.add_to_entry_set(expr.id, body_bits.as_slice());
let new_loop_scope = loop_scopes.pop().unwrap();
assert_eq!(new_loop_scope.loop_id, expr.id);
copy_bits(new_loop_scope.break_bits.as_slice(), in_out);
}
- ast::ExprMatch(discr, ref arms) => {
+ ast::ExprMatch(ref discr, ref arms) => {
//
// (discr)
// / | \
// ( succ )
//
//
- self.walk_expr(discr, in_out, loop_scopes);
+ self.walk_expr(&**discr, in_out, loop_scopes);
let mut guards = in_out.to_owned();
for arm in arms.iter() {
// in_out reflects the discr and all guards to date
- self.walk_opt_expr(arm.guard, guards, loop_scopes);
+ self.walk_opt_expr(arm.guard, guards.as_mut_slice(),
+ loop_scopes);
// determine the bits for the body and then union
// them into `in_out`, which reflects all bodies to date
let mut body = guards.to_owned();
self.walk_pat_alternatives(arm.pats.as_slice(),
- body,
+ body.as_mut_slice(),
loop_scopes);
- self.walk_expr(arm.body, body, loop_scopes);
- join_bits(&self.dfcx.oper, body, in_out);
+ self.walk_expr(&*arm.body, body.as_mut_slice(), loop_scopes);
+ join_bits(&self.dfcx.oper, body.as_slice(), in_out);
}
}
self.reset(in_out);
}
- ast::ExprAssign(l, r) |
- ast::ExprAssignOp(_, l, r) => {
- self.walk_expr(r, in_out, loop_scopes);
- self.walk_expr(l, in_out, loop_scopes);
+ ast::ExprAssign(ref l, ref r) |
+ ast::ExprAssignOp(_, ref l, ref r) => {
+ self.walk_expr(&**r, in_out, loop_scopes);
+ self.walk_expr(&**l, in_out, loop_scopes);
}
ast::ExprVec(ref exprs) => {
self.walk_exprs(exprs.as_slice(), in_out, loop_scopes)
}
- ast::ExprRepeat(l, r) => {
- self.walk_expr(l, in_out, loop_scopes);
- self.walk_expr(r, in_out, loop_scopes);
+ ast::ExprRepeat(ref l, ref r) => {
+ self.walk_expr(&**l, in_out, loop_scopes);
+ self.walk_expr(&**r, in_out, loop_scopes);
}
ast::ExprStruct(_, ref fields, with_expr) => {
for field in fields.iter() {
- self.walk_expr(field.expr, in_out, loop_scopes);
+ self.walk_expr(&*field.expr, in_out, loop_scopes);
}
self.walk_opt_expr(with_expr, in_out, loop_scopes);
}
- ast::ExprCall(f, ref args) => {
- self.walk_expr(f, in_out, loop_scopes);
+ ast::ExprCall(ref f, ref args) => {
+ self.walk_expr(&**f, in_out, loop_scopes);
self.walk_call(expr.id, args.as_slice(), in_out, loop_scopes);
}
self.walk_exprs(exprs.as_slice(), in_out, loop_scopes);
}
- ast::ExprBinary(op, l, r) if ast_util::lazy_binop(op) => {
- self.walk_expr(l, in_out, loop_scopes);
+ ast::ExprBinary(op, ref l, ref r) if ast_util::lazy_binop(op) => {
+ self.walk_expr(&**l, in_out, loop_scopes);
let temp = in_out.to_owned();
- self.walk_expr(r, in_out, loop_scopes);
- join_bits(&self.dfcx.oper, temp, in_out);
+ self.walk_expr(&**r, in_out, loop_scopes);
+ join_bits(&self.dfcx.oper, temp.as_slice(), in_out);
}
ast::ExprIndex(l, r) |
ast::ExprLit(..) |
ast::ExprPath(..) => {}
- ast::ExprAddrOf(_, e) |
- ast::ExprCast(e, _) |
- ast::ExprUnary(_, e) |
- ast::ExprParen(e) |
- ast::ExprVstore(e, _) |
- ast::ExprField(e, _, _) => {
- self.walk_expr(e, in_out, loop_scopes);
+ ast::ExprAddrOf(_, ref e) |
+ ast::ExprCast(ref e, _) |
+ ast::ExprUnary(_, ref e) |
+ ast::ExprParen(ref e) |
+ ast::ExprVstore(ref e, _) |
+ ast::ExprField(ref e, _, _) => {
+ self.walk_expr(&**e, in_out, loop_scopes);
}
- ast::ExprBox(s, e) => {
- self.walk_expr(s, in_out, loop_scopes);
- self.walk_expr(e, in_out, loop_scopes);
+ ast::ExprBox(ref s, ref e) => {
+ self.walk_expr(&**s, in_out, loop_scopes);
+ self.walk_expr(&**e, in_out, loop_scopes);
}
ast::ExprInlineAsm(ref inline_asm) => {
- for &(_, expr) in inline_asm.inputs.iter() {
- self.walk_expr(expr, in_out, loop_scopes);
+ for &(_, ref expr) in inline_asm.inputs.iter() {
+ self.walk_expr(&**expr, in_out, loop_scopes);
}
- for &(_, expr) in inline_asm.outputs.iter() {
- self.walk_expr(expr, in_out, loop_scopes);
+ for &(_, ref expr) in inline_asm.outputs.iter() {
+ self.walk_expr(&**expr, in_out, loop_scopes);
}
}
- ast::ExprBlock(blk) => {
- self.walk_block(blk, in_out, loop_scopes);
+ ast::ExprBlock(ref blk) => {
+ self.walk_block(&**blk, in_out, loop_scopes);
}
ast::ExprMac(..) => {
}
fn walk_exprs(&mut self,
- exprs: &[@ast::Expr],
+ exprs: &[Gc<ast::Expr>],
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
- for &expr in exprs.iter() {
- self.walk_expr(expr, in_out, loop_scopes);
+ for expr in exprs.iter() {
+ self.walk_expr(&**expr, in_out, loop_scopes);
}
}
fn walk_opt_expr(&mut self,
- opt_expr: Option<@ast::Expr>,
+ opt_expr: Option<Gc<ast::Expr>>,
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
- for &expr in opt_expr.iter() {
- self.walk_expr(expr, in_out, loop_scopes);
+ for expr in opt_expr.iter() {
+ self.walk_expr(&**expr, in_out, loop_scopes);
}
}
fn walk_call(&mut self,
call_id: ast::NodeId,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
self.walk_exprs(args, in_out, loop_scopes);
}
fn walk_pat(&mut self,
- pat: @ast::Pat,
+ pat: Gc<ast::Pat>,
in_out: &mut [uint],
_loop_scopes: &mut Vec<LoopScope> ) {
debug!("DataFlowContext::walk_pat(pat={}, in_out={})",
pat.repr(self.dfcx.tcx), bits_to_str(in_out));
- ast_util::walk_pat(pat, |p| {
+ ast_util::walk_pat(&*pat, |p| {
debug!(" p.id={} in_out={}", p.id, bits_to_str(in_out));
self.merge_with_entry_set(p.id, in_out);
self.dfcx.apply_gen_kill(p.id, in_out);
}
fn walk_pat_alternatives(&mut self,
- pats: &[@ast::Pat],
+ pats: &[Gc<ast::Pat>],
in_out: &mut [uint],
loop_scopes: &mut Vec<LoopScope> ) {
if pats.len() == 1 {
let initial_state = in_out.to_owned();
for &pat in pats.iter() {
let mut temp = initial_state.clone();
- self.walk_pat(pat, temp, loop_scopes);
- join_bits(&self.dfcx.oper, temp, in_out);
+ self.walk_pat(pat, temp.as_mut_slice(), loop_scopes);
+ join_bits(&self.dfcx.oper, temp.as_slice(), in_out);
}
}
}
});
self.live_symbols.extend(live_fields.map(|f| f.node.id));
- visit::walk_item(self, item, ());
+ visit::walk_item(self, &*item, ());
}
ast::ItemFn(..)
| ast::ItemTy(..)
| ast::ItemEnum(..)
| ast::ItemStatic(..) => {
- visit::walk_item(self, item, ());
+ visit::walk_item(self, &*item, ());
}
_ => ()
}
}
ast_map::NodeTraitMethod(trait_method) => {
- visit::walk_trait_method(self, trait_method, ());
+ visit::walk_trait_method(self, &*trait_method, ());
}
ast_map::NodeMethod(method) => {
- visit::walk_block(self, method.body, ());
+ visit::walk_block(self, &*method.body, ());
}
ast_map::NodeForeignItem(foreign_item) => {
- visit::walk_foreign_item(self, foreign_item, ());
+ visit::walk_foreign_item(self, &*foreign_item, ());
}
_ => ()
}
self.lookup_and_handle_method(expr.id, expr.span);
}
ast::ExprField(ref lhs, ref ident, _) => {
- self.handle_field_access(*lhs, ident);
+ self.handle_field_access(&**lhs, ident);
}
_ => ()
}
// Overwrite so that we don't warn the trait method itself.
fn visit_trait_method(&mut self, trait_method: &ast::TraitMethod, _: ()) {
match *trait_method {
- ast::Provided(method) => visit::walk_block(self, method.body, ()),
+ ast::Provided(ref method) => visit::walk_block(self, &*method.body, ()),
ast::Required(_) => ()
}
}
use syntax::ast;
use syntax::ast_util::local_def;
+use std::gc::Gc;
+
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Def {
DefFn(ast::DefId, ast::FnStyle),
DefBinding(ast::NodeId, ast::BindingMode),
DefUse(ast::DefId),
DefUpvar(ast::NodeId, // id of closed over var
- @Def, // closed over def
+ Gc<Def>, // closed over def
ast::NodeId, // expr node that creates the closure
ast::NodeId), // id for the block/body of the closure expr
}
}
- fn check_str_index(&mut self, e: @ast::Expr) {
+ fn check_str_index(&mut self, e: &ast::Expr) {
let base_type = match e.node {
ast::ExprIndex(base, _) => ty::node_id_to_type(self.tcx, base.id),
_ => return
_ => {}
}
}
- ast::ExprAssign(base, _) | ast::ExprAssignOp(_, base, _) => {
- self.check_str_index(base);
+ ast::ExprAssign(ref base, _) | ast::ExprAssignOp(_, ref base, _) => {
+ self.check_str_index(&**base);
}
- ast::ExprAddrOf(ast::MutMutable, base) => {
- self.check_str_index(base);
+ ast::ExprAddrOf(ast::MutMutable, ref base) => {
+ self.check_str_index(&**base);
}
ast::ExprInlineAsm(..) => {
self.require_unsafe(expr.span, "use of inline assembly")
use syntax::codemap::{Span};
use util::ppaux::Repr;
+use std::gc::Gc;
+
///////////////////////////////////////////////////////////////////////////
// The Delegate trait
ty::ReScope(body.id), // Args live only as long as the fn body.
arg_ty);
- self.walk_pat(arg_cmt, arg.pat);
+ self.walk_pat(arg_cmt, arg.pat.clone());
}
}
self.delegate.consume(consume_id, consume_span, cmt, mode);
}
- fn consume_exprs(&mut self, exprs: &Vec<@ast::Expr>) {
- for &expr in exprs.iter() {
- self.consume_expr(expr);
+ fn consume_exprs(&mut self, exprs: &Vec<Gc<ast::Expr>>) {
+ for expr in exprs.iter() {
+ self.consume_expr(&**expr);
}
}
self.delegate_consume(expr.id, expr.span, cmt);
match expr.node {
- ast::ExprParen(subexpr) => {
+ ast::ExprParen(ref subexpr) => {
// Argh but is ExprParen horrible. So, if we consume
// `(x)`, that generally is also consuming `x`, UNLESS
// there are adjustments on the `(x)` expression
if self.typer.adjustments().borrow().contains_key(&expr.id) {
self.walk_expr(expr);
} else {
- self.consume_expr(subexpr);
+ self.consume_expr(&**subexpr);
}
}
self.walk_adjustment(expr);
match expr.node {
- ast::ExprParen(subexpr) => {
- self.walk_expr(subexpr)
+ ast::ExprParen(ref subexpr) => {
+ self.walk_expr(&**subexpr)
}
ast::ExprPath(..) => { }
- ast::ExprUnary(ast::UnDeref, base) => { // *base
- if !self.walk_overloaded_operator(expr, base, []) {
- self.select_from_expr(base);
+ ast::ExprUnary(ast::UnDeref, ref base) => { // *base
+ if !self.walk_overloaded_operator(expr, &**base, []) {
+ self.select_from_expr(&**base);
}
}
- ast::ExprField(base, _, _) => { // base.f
- self.select_from_expr(base);
+ ast::ExprField(ref base, _, _) => { // base.f
+ self.select_from_expr(&**base);
}
- ast::ExprIndex(lhs, rhs) => { // lhs[rhs]
- if !self.walk_overloaded_operator(expr, lhs, [rhs]) {
- self.select_from_expr(lhs);
- self.consume_expr(rhs);
+ ast::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs]
+ if !self.walk_overloaded_operator(expr, &**lhs, [rhs.clone()]) {
+ self.select_from_expr(&**lhs);
+ self.consume_expr(&**rhs);
}
}
- ast::ExprCall(callee, ref args) => { // callee(args)
- self.walk_callee(expr, callee);
+ ast::ExprCall(ref callee, ref args) => { // callee(args)
+ self.walk_callee(expr, &**callee);
self.consume_exprs(args);
}
self.consume_exprs(args);
}
- ast::ExprStruct(_, ref fields, opt_with) => {
- self.walk_struct_expr(expr, fields, opt_with);
+ ast::ExprStruct(_, ref fields, ref opt_with) => {
+ self.walk_struct_expr(expr, fields, opt_with.clone());
}
ast::ExprTup(ref exprs) => {
self.consume_exprs(exprs);
}
- ast::ExprIf(cond_expr, then_blk, opt_else_expr) => {
- self.consume_expr(cond_expr);
- self.walk_block(then_blk);
+ ast::ExprIf(ref cond_expr, ref then_blk, ref opt_else_expr) => {
+ self.consume_expr(&**cond_expr);
+ self.walk_block(&**then_blk);
for else_expr in opt_else_expr.iter() {
- self.consume_expr(*else_expr);
+ self.consume_expr(&**else_expr);
}
}
- ast::ExprMatch(discr, ref arms) => {
+ ast::ExprMatch(ref discr, ref arms) => {
// treatment of the discriminant is handled while
// walking the arms:
- self.walk_expr(discr);
- let discr_cmt = return_if_err!(self.mc.cat_expr(discr));
+ self.walk_expr(&**discr);
+ let discr_cmt = return_if_err!(self.mc.cat_expr(&**discr));
for arm in arms.iter() {
self.walk_arm(discr_cmt.clone(), arm);
}
self.consume_exprs(exprs);
}
- ast::ExprAddrOf(m, base) => { // &base
+ ast::ExprAddrOf(m, ref base) => { // &base
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
let expr_ty = ty::expr_ty(self.tcx(), expr);
if !ty::type_is_bot(expr_ty) {
let r = ty::ty_region(self.tcx(), expr.span, expr_ty);
let bk = ty::BorrowKind::from_mutbl(m);
- self.borrow_expr(base, r, bk, AddrOf);
+ self.borrow_expr(&**base, r, bk, AddrOf);
} else {
- self.walk_expr(base);
+ self.walk_expr(&**base);
}
}
ast::ExprInlineAsm(ref ia) => {
- for &(_, input) in ia.inputs.iter() {
- self.consume_expr(input);
+ for &(_, ref input) in ia.inputs.iter() {
+ self.consume_expr(&**input);
}
- for &(_, output) in ia.outputs.iter() {
- self.mutate_expr(expr, output, JustWrite);
+ for &(_, ref output) in ia.outputs.iter() {
+ self.mutate_expr(expr, &**output, JustWrite);
}
}
ast::ExprAgain(..) |
ast::ExprLit(..) => {}
- ast::ExprLoop(blk, _) => {
- self.walk_block(blk);
+ ast::ExprLoop(ref blk, _) => {
+ self.walk_block(&**blk);
}
- ast::ExprWhile(cond_expr, blk) => {
- self.consume_expr(cond_expr);
- self.walk_block(blk);
+ ast::ExprWhile(ref cond_expr, ref blk) => {
+ self.consume_expr(&**cond_expr);
+ self.walk_block(&**blk);
}
ast::ExprForLoop(..) => fail!("non-desugared expr_for_loop"),
- ast::ExprUnary(_, lhs) => {
- if !self.walk_overloaded_operator(expr, lhs, []) {
- self.consume_expr(lhs);
+ ast::ExprUnary(_, ref lhs) => {
+ if !self.walk_overloaded_operator(expr, &**lhs, []) {
+ self.consume_expr(&**lhs);
}
}
- ast::ExprBinary(_, lhs, rhs) => {
- if !self.walk_overloaded_operator(expr, lhs, [rhs]) {
- self.consume_expr(lhs);
- self.consume_expr(rhs);
+ ast::ExprBinary(_, ref lhs, ref rhs) => {
+ if !self.walk_overloaded_operator(expr, &**lhs, [rhs.clone()]) {
+ self.consume_expr(&**lhs);
+ self.consume_expr(&**rhs);
}
}
- ast::ExprBlock(blk) => {
- self.walk_block(blk);
+ ast::ExprBlock(ref blk) => {
+ self.walk_block(&**blk);
}
ast::ExprRet(ref opt_expr) => {
for expr in opt_expr.iter() {
- self.consume_expr(*expr);
+ self.consume_expr(&**expr);
}
}
- ast::ExprAssign(lhs, rhs) => {
- self.mutate_expr(expr, lhs, JustWrite);
- self.consume_expr(rhs);
+ ast::ExprAssign(ref lhs, ref rhs) => {
+ self.mutate_expr(expr, &**lhs, JustWrite);
+ self.consume_expr(&**rhs);
}
- ast::ExprCast(base, _) => {
- self.consume_expr(base);
+ ast::ExprCast(ref base, _) => {
+ self.consume_expr(&**base);
}
- ast::ExprAssignOp(_, lhs, rhs) => {
+ ast::ExprAssignOp(_, ref lhs, ref rhs) => {
// This will have to change if/when we support
// overloaded operators for `+=` and so forth.
- self.mutate_expr(expr, lhs, WriteAndRead);
- self.consume_expr(rhs);
+ self.mutate_expr(expr, &**lhs, WriteAndRead);
+ self.consume_expr(&**rhs);
}
- ast::ExprRepeat(base, count) => {
- self.consume_expr(base);
- self.consume_expr(count);
+ ast::ExprRepeat(ref base, ref count) => {
+ self.consume_expr(&**base);
+ self.consume_expr(&**count);
}
ast::ExprFnBlock(..) |
self.walk_captures(expr)
}
- ast::ExprVstore(base, _) => {
- self.consume_expr(base);
+ ast::ExprVstore(ref base, _) => {
+ self.consume_expr(&**base);
}
- ast::ExprBox(place, base) => {
- self.consume_expr(place);
- self.consume_expr(base);
+ ast::ExprBox(ref place, ref base) => {
+ self.consume_expr(&**place);
+ self.consume_expr(&**base);
}
ast::ExprMac(..) => {
fn walk_stmt(&mut self, stmt: &ast::Stmt) {
match stmt.node {
- ast::StmtDecl(decl, _) => {
+ ast::StmtDecl(ref decl, _) => {
match decl.node {
- ast::DeclLocal(local) => {
- self.walk_local(local);
+ ast::DeclLocal(ref local) => {
+ self.walk_local(local.clone());
}
ast::DeclItem(_) => {
}
}
- ast::StmtExpr(expr, _) |
- ast::StmtSemi(expr, _) => {
- self.consume_expr(expr);
+ ast::StmtExpr(ref expr, _) |
+ ast::StmtSemi(ref expr, _) => {
+ self.consume_expr(&**expr);
}
ast::StmtMac(..) => {
}
}
- fn walk_local(&mut self, local: @ast::Local) {
+ fn walk_local(&mut self, local: Gc<ast::Local>) {
match local.init {
None => {
let delegate = &mut self.delegate;
- pat_util::pat_bindings(&self.typer.tcx().def_map, local.pat, |_, id, span, _| {
+ pat_util::pat_bindings(&self.typer.tcx().def_map, &*local.pat,
+ |_, id, span, _| {
delegate.decl_without_init(id, span);
})
}
- Some(expr) => {
+ Some(ref expr) => {
// Variable declarations with
// initializers are considered
// "assigns", which is handled by
// `walk_pat`:
- self.walk_expr(expr);
- let init_cmt = return_if_err!(self.mc.cat_expr(expr));
+ self.walk_expr(&**expr);
+ let init_cmt = return_if_err!(self.mc.cat_expr(&**expr));
self.walk_pat(init_cmt, local.pat);
}
}
debug!("walk_block(blk.id={:?})", blk.id);
for stmt in blk.stmts.iter() {
- self.walk_stmt(*stmt);
+ self.walk_stmt(&**stmt);
}
for tail_expr in blk.expr.iter() {
- self.consume_expr(*tail_expr);
+ self.consume_expr(&**tail_expr);
}
}
fn walk_struct_expr(&mut self,
_expr: &ast::Expr,
fields: &Vec<ast::Field>,
- opt_with: Option<@ast::Expr>) {
+ opt_with: Option<Gc<ast::Expr>>) {
// Consume the expressions supplying values for each field.
for field in fields.iter() {
- self.consume_expr(field.expr);
+ self.consume_expr(&*field.expr);
}
let with_expr = match opt_with {
- Some(w) => { w }
+ Some(ref w) => { w.clone() }
None => { return; }
};
- let with_cmt = return_if_err!(self.mc.cat_expr(with_expr));
+ let with_cmt = return_if_err!(self.mc.cat_expr(&*with_expr));
// Select just those fields of the `with`
// expression that will actually be used
// Consume those fields of the with expression that are needed.
for with_field in with_fields.iter() {
if !contains_field_named(with_field, fields) {
- let cmt_field = self.mc.cat_field(with_expr,
+ let cmt_field = self.mc.cat_field(&*with_expr,
with_cmt.clone(),
with_field.ident,
with_field.mt.ty);
fn walk_overloaded_operator(&mut self,
expr: &ast::Expr,
receiver: &ast::Expr,
- args: &[@ast::Expr])
+ args: &[Gc<ast::Expr>])
-> bool
{
if !self.typer.is_method_call(expr.id) {
let r = ty::ReScope(expr.id);
let bk = ty::ImmBorrow;
- for &arg in args.iter() {
- self.borrow_expr(arg, r, bk, OverloadedOperator);
+ for arg in args.iter() {
+ self.borrow_expr(&**arg, r, bk, OverloadedOperator);
}
return true;
}
}
for guard in arm.guard.iter() {
- self.consume_expr(*guard);
+ self.consume_expr(&**guard);
}
- self.consume_expr(arm.body);
+ self.consume_expr(&*arm.body);
}
- fn walk_pat(&mut self, cmt_discr: mc::cmt, pat: @ast::Pat) {
+ fn walk_pat(&mut self, cmt_discr: mc::cmt, pat: Gc<ast::Pat>) {
debug!("walk_pat cmt_discr={} pat={}", cmt_discr.repr(self.tcx()),
pat.repr(self.tcx()));
let mc = &self.mc;
let tcx = typer.tcx();
let def_map = &self.typer.tcx().def_map;
let delegate = &mut self.delegate;
- return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
+ return_if_err!(mc.cat_pattern(cmt_discr, &*pat, |mc, cmt_pat, pat| {
if pat_util::pat_is_binding(def_map, pat) {
let tcx = typer.tcx();
// matched.
let (slice_cmt, slice_mutbl, slice_r) = {
- match mc.cat_slice_pattern(cmt_pat, slice_pat) {
+ match mc.cat_slice_pattern(cmt_pat, &*slice_pat) {
Ok(v) => v,
Err(()) => {
tcx.sess.span_bug(slice_pat.span,
fn check_item(cx: &mut Context, item: &Item) {
if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
- ItemImpl(_, Some(ref trait_ref), self_type, _) => {
- check_impl_of_trait(cx, item, trait_ref, self_type);
+ ItemImpl(_, Some(ref trait_ref), ref self_type, _) => {
+ check_impl_of_trait(cx, item, trait_ref, &**self_type);
}
_ => {}
}
}
match e.node {
- ExprUnary(UnBox, interior) => {
- let interior_type = ty::expr_ty(cx.tcx, interior);
+ ExprUnary(UnBox, ref interior) => {
+ let interior_type = ty::expr_ty(cx.tcx, &**interior);
let _ = check_static(cx.tcx, interior_type, interior.span);
}
- ExprCast(source, _) => {
- let source_ty = ty::expr_ty(cx.tcx, source);
+ ExprCast(ref source, _) => {
+ let source_ty = ty::expr_ty(cx.tcx, &**source);
let target_ty = ty::expr_ty(cx.tcx, e);
check_trait_cast(cx, source_ty, target_ty, source.span);
}
- ExprRepeat(element, count_expr) => {
- let count = ty::eval_repeat_count(cx.tcx, count_expr);
+ ExprRepeat(ref element, ref count_expr) => {
+ let count = ty::eval_repeat_count(cx.tcx, &**count_expr);
if count > 1 {
- let element_ty = ty::expr_ty(cx.tcx, element);
+ let element_ty = ty::expr_ty(cx.tcx, &**element);
check_copy(cx, element_ty, element.span,
"repeated element will be copied");
}
use std::i64;
use std::i8;
use std::rc::Rc;
+use std::gc::Gc;
use std::to_str::ToStr;
use std::u16;
use std::u32;
UnusedMustUse,
UnusedResult,
- DeprecatedOwnedVector,
-
Warnings,
RawPointerDeriving,
default: Allow,
}),
- ("deprecated_owned_vector",
- LintSpec {
- lint: DeprecatedOwnedVector,
- desc: "use of a `~[T]` vector",
- default: Allow,
- }),
-
("raw_pointer_deriving",
LintSpec {
lint: RawPointerDeriving,
/// Return true if that's the case. Otherwise return false.
pub fn each_lint(sess: &session::Session,
attrs: &[ast::Attribute],
- f: |@ast::MetaItem, Level, InternedString| -> bool)
+ f: |Gc<ast::MetaItem>, Level, InternedString| -> bool)
-> bool {
let xs = [Allow, Warn, Deny, Forbid];
for &level in xs.iter() {
fn check_unused_casts(cx: &Context, e: &ast::Expr) {
return match e.node {
ast::ExprCast(expr, ty) => {
- let t_t = ast_ty_to_ty(cx, &infer::new_infer_ctxt(cx.tcx), ty);
- if ty::get(ty::expr_ty(cx.tcx, expr)).sty == ty::get(t_t).sty {
+ let t_t = ast_ty_to_ty(cx, &infer::new_infer_ctxt(cx.tcx), &*ty);
+ if ty::get(ty::expr_ty(cx.tcx, &*expr)).sty == ty::get(t_t).sty {
cx.span_lint(UnnecessaryTypecast, ty.span,
"unnecessary type cast");
}
}
},
_ => {
- let t = ty::expr_ty(cx.tcx, ex);
+ let t = ty::expr_ty(cx.tcx, &*ex);
match ty::get(t).sty {
ty::ty_uint(_) => {
cx.span_lint(UnsignedNegate, e.span,
}
},
ast::ExprBinary(binop, l, r) => {
- if is_comparison(binop) && !check_limits(cx.tcx, binop, l, r) {
+ if is_comparison(binop) && !check_limits(cx.tcx, binop, &*l, &*r) {
cx.span_lint(TypeLimits, e.span,
"comparison is useless due to type limits");
}
_ => ()
}
}
- ast::TyPtr(ref mt) => { check_ty(cx, mt.ty) }
+ ast::TyPtr(ref mt) => { check_ty(cx, &*mt.ty) }
_ => {}
}
}
fn check_foreign_fn(cx: &Context, decl: &ast::FnDecl) {
for input in decl.inputs.iter() {
- check_ty(cx, input.ty);
+ check_ty(cx, &*input.ty);
}
- check_ty(cx, decl.output)
+ check_ty(cx, &*decl.output)
}
match it.node {
ast::ItemForeignMod(ref nmod) if nmod.abi != abi::RustIntrinsic => {
for ni in nmod.items.iter() {
match ni.node {
- ast::ForeignItemFn(decl, _) => check_foreign_fn(cx, decl),
- ast::ForeignItemStatic(t, _) => check_ty(cx, t)
+ ast::ForeignItemFn(decl, _) => check_foreign_fn(cx, &*decl),
+ ast::ForeignItemStatic(t, _) => check_ty(cx, &*t)
}
}
}
match item.node {
ast::ItemStruct(..) | ast::ItemEnum(..) => {
let mut visitor = RawPtrDerivingVisitor { cx: cx };
- visit::walk_item(&mut visitor, item, ());
+ visit::walk_item(&mut visitor, &*item, ());
}
_ => {}
}
ast::StmtSemi(expr, _) => expr,
_ => return
};
- let t = ty::expr_ty(cx.tcx, expr);
+ let t = ty::expr_ty(cx.tcx, &*expr);
match ty::get(t).sty {
ty::ty_nil | ty::ty_bot | ty::ty_bool => return,
_ => {}
_ => {}
}
- let t = ty::expr_ty(cx.tcx, expr);
+ let t = ty::expr_ty(cx.tcx, &*expr);
let mut warned = false;
match ty::get(t).sty {
ty::ty_struct(did, _) |
}
}
-fn check_deprecated_owned_vector(cx: &Context, e: &ast::Expr) {
- let t = ty::expr_ty(cx.tcx, e);
- match ty::get(t).sty {
- ty::ty_uniq(t) => match ty::get(t).sty {
- ty::ty_vec(_, None) => {
- cx.span_lint(DeprecatedOwnedVector, e.span,
- "use of deprecated `~[]` vector; replaced by `std::vec::Vec`")
- }
- _ => {}
- },
- _ => {}
- }
-}
-
fn check_item_non_camel_case_types(cx: &Context, it: &ast::Item) {
fn is_camel_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
ast::ExprAssignOp(_, _, value) => (value, "assigned value"),
_ => return
};
- check_unnecessary_parens_core(cx, value, msg);
+ check_unnecessary_parens_core(cx, &*value, msg);
}
fn check_unnecessary_parens_stmt(cx: &Context, s: &ast::Stmt) {
},
_ => return
};
- check_unnecessary_parens_core(cx, value, msg);
+ check_unnecessary_parens_core(cx, &*value, msg);
}
fn check_unused_unsafe(cx: &Context, e: &ast::Expr) {
}
}
-fn check_unused_mut_pat(cx: &Context, pats: &[@ast::Pat]) {
+fn check_unused_mut_pat(cx: &Context, pats: &[Gc<ast::Pat>]) {
// collect all mutable pattern and group their NodeIDs by their Identifier to
// avoid false warnings in match arms with multiple patterns
let mut mutables = HashMap::new();
for &p in pats.iter() {
- pat_util::pat_bindings(&cx.tcx.def_map, p, |mode, id, _, path| {
+ pat_util::pat_bindings(&cx.tcx.def_map, &*p, |mode, id, _, path| {
match mode {
ast::BindByValue(ast::MutMutable) => {
if path.segments.len() != 1 {
check_type_limits(self, e);
check_unused_casts(self, e);
- check_deprecated_owned_vector(self, e);
visit::walk_expr(self, e, ());
}
use middle::ty;
use util::nodemap::NodeMap;
-use std::mem::transmute;
use std::fmt;
+use std::gc::Gc;
use std::io;
+use std::mem::transmute;
use std::rc::Rc;
use std::str;
use std::uint;
for arg in decl.inputs.iter() {
pat_util::pat_bindings(&ir.tcx.def_map,
- arg.pat,
+ &*arg.pat,
|_bm, arg_id, _x, path| {
debug!("adding argument {}", arg_id);
let ident = ast_util::path_to_ident(path);
}
fn visit_local(ir: &mut IrMaps, local: &Local) {
- pat_util::pat_bindings(&ir.tcx.def_map, local.pat, |_, p_id, sp, path| {
+ pat_util::pat_bindings(&ir.tcx.def_map, &*local.pat, |_, p_id, sp, path| {
debug!("adding local variable {}", p_id);
let name = ast_util::path_to_ident(path);
ir.add_live_node_for_node(p_id, VarDefNode(sp));
fn visit_arm(ir: &mut IrMaps, arm: &Arm) {
for pat in arm.pats.iter() {
- pat_util::pat_bindings(&ir.tcx.def_map, *pat, |bm, p_id, sp, path| {
+ pat_util::pat_bindings(&ir.tcx.def_map, &**pat, |bm, p_id, sp, path| {
debug!("adding local variable {} from match with bm {:?}",
p_id, bm);
let name = ast_util::path_to_ident(path);
}
fn arm_pats_bindings(&mut self,
- pats: &[@Pat],
+ pats: &[Gc<Pat>],
f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) {
// only consider the first pattern; any later patterns must have
// the same bindings, and we also consider the first pattern to be
// the "authoritative" set of ids
if !pats.is_empty() {
- self.pat_bindings(pats[0], f)
+ self.pat_bindings(&*pats[0], f)
}
}
- fn define_bindings_in_pat(&mut self, pat: @Pat, succ: LiveNode)
+ fn define_bindings_in_pat(&mut self, pat: Gc<Pat>, succ: LiveNode)
-> LiveNode {
self.define_bindings_in_arm_pats([pat], succ)
}
- fn define_bindings_in_arm_pats(&mut self, pats: &[@Pat], succ: LiveNode)
+ fn define_bindings_in_arm_pats(&mut self, pats: &[Gc<Pat>], succ: LiveNode)
-> LiveNode {
let mut succ = succ;
self.arm_pats_bindings(pats, |this, ln, var, _sp, _id| {
-> LiveNode {
let succ = self.propagate_through_opt_expr(blk.expr, succ);
blk.stmts.iter().rev().fold(succ, |succ, stmt| {
- self.propagate_through_stmt(*stmt, succ)
+ self.propagate_through_stmt(&**stmt, succ)
})
}
fn propagate_through_stmt(&mut self, stmt: &Stmt, succ: LiveNode)
-> LiveNode {
match stmt.node {
- StmtDecl(decl, _) => {
- self.propagate_through_decl(decl, succ)
+ StmtDecl(ref decl, _) => {
+ self.propagate_through_decl(&**decl, succ)
}
- StmtExpr(expr, _) | StmtSemi(expr, _) => {
- self.propagate_through_expr(expr, succ)
+ StmtExpr(ref expr, _) | StmtSemi(ref expr, _) => {
+ self.propagate_through_expr(&**expr, succ)
}
StmtMac(..) => {
-> LiveNode {
match decl.node {
DeclLocal(ref local) => {
- self.propagate_through_local(*local, succ)
+ self.propagate_through_local(&**local, succ)
}
DeclItem(_) => succ,
}
self.define_bindings_in_pat(local.pat, succ)
}
- fn propagate_through_exprs(&mut self, exprs: &[@Expr], succ: LiveNode)
+ fn propagate_through_exprs(&mut self, exprs: &[Gc<Expr>], succ: LiveNode)
-> LiveNode {
exprs.iter().rev().fold(succ, |succ, expr| {
- self.propagate_through_expr(*expr, succ)
+ self.propagate_through_expr(&**expr, succ)
})
}
fn propagate_through_opt_expr(&mut self,
- opt_expr: Option<@Expr>,
+ opt_expr: Option<Gc<Expr>>,
succ: LiveNode)
-> LiveNode {
opt_expr.iter().fold(succ, |succ, expr| {
- self.propagate_through_expr(*expr, succ)
+ self.propagate_through_expr(&**expr, succ)
})
}
self.access_path(expr, succ, ACC_READ | ACC_USE)
}
- ExprField(e, _, _) => {
- self.propagate_through_expr(e, succ)
+ ExprField(ref e, _, _) => {
+ self.propagate_through_expr(&**e, succ)
}
- ExprFnBlock(_, blk) | ExprProc(_, blk) => {
+ ExprFnBlock(_, ref blk) | ExprProc(_, ref blk) => {
debug!("{} is an ExprFnBlock or ExprProc", expr_to_str(expr));
/*
})
}
- ExprIf(cond, then, els) => {
+ ExprIf(ref cond, ref then, ref els) => {
//
// (cond)
// |
// v v
// ( succ )
//
- let else_ln = self.propagate_through_opt_expr(els, succ);
- let then_ln = self.propagate_through_block(then, succ);
+ let else_ln = self.propagate_through_opt_expr(els.clone(), succ);
+ let then_ln = self.propagate_through_block(&**then, succ);
let ln = self.live_node(expr.id, expr.span);
self.init_from_succ(ln, else_ln);
self.merge_from_succ(ln, then_ln, false);
- self.propagate_through_expr(cond, ln)
+ self.propagate_through_expr(&**cond, ln)
}
- ExprWhile(cond, blk) => {
- self.propagate_through_loop(expr, Some(cond), blk, succ)
+ ExprWhile(ref cond, ref blk) => {
+ self.propagate_through_loop(expr, Some(cond.clone()), &**blk, succ)
}
ExprForLoop(..) => fail!("non-desugared expr_for_loop"),
// Note that labels have been resolved, so we don't need to look
// at the label ident
- ExprLoop(blk, _) => {
- self.propagate_through_loop(expr, None, blk, succ)
+ ExprLoop(ref blk, _) => {
+ self.propagate_through_loop(expr, None, &**blk, succ)
}
- ExprMatch(e, ref arms) => {
+ ExprMatch(ref e, ref arms) => {
//
// (e)
// |
let mut first_merge = true;
for arm in arms.iter() {
let body_succ =
- self.propagate_through_expr(arm.body, succ);
+ self.propagate_through_expr(&*arm.body, succ);
let guard_succ =
self.propagate_through_opt_expr(arm.guard, body_succ);
let arm_succ =
self.merge_from_succ(ln, arm_succ, first_merge);
first_merge = false;
};
- self.propagate_through_expr(e, ln)
+ self.propagate_through_expr(&**e, ln)
}
ExprRet(o_e) => {
}
}
- ExprAssign(l, r) => {
+ ExprAssign(ref l, ref r) => {
// see comment on lvalues in
// propagate_through_lvalue_components()
- let succ = self.write_lvalue(l, succ, ACC_WRITE);
- let succ = self.propagate_through_lvalue_components(l, succ);
- self.propagate_through_expr(r, succ)
+ let succ = self.write_lvalue(&**l, succ, ACC_WRITE);
+ let succ = self.propagate_through_lvalue_components(&**l, succ);
+ self.propagate_through_expr(&**r, succ)
}
- ExprAssignOp(_, l, r) => {
+ ExprAssignOp(_, ref l, ref r) => {
// see comment on lvalues in
// propagate_through_lvalue_components()
- let succ = self.write_lvalue(l, succ, ACC_WRITE|ACC_READ);
- let succ = self.propagate_through_expr(r, succ);
- self.propagate_through_lvalue_components(l, succ)
+ let succ = self.write_lvalue(&**l, succ, ACC_WRITE|ACC_READ);
+ let succ = self.propagate_through_expr(&**r, succ);
+ self.propagate_through_lvalue_components(&**l, succ)
}
// Uninteresting cases: just propagate in rev exec order
- ExprVstore(expr, _) => {
- self.propagate_through_expr(expr, succ)
+ ExprVstore(ref expr, _) => {
+ self.propagate_through_expr(&**expr, succ)
}
ExprVec(ref exprs) => {
self.propagate_through_exprs(exprs.as_slice(), succ)
}
- ExprRepeat(element, count) => {
- let succ = self.propagate_through_expr(count, succ);
- self.propagate_through_expr(element, succ)
+ ExprRepeat(ref element, ref count) => {
+ let succ = self.propagate_through_expr(&**count, succ);
+ self.propagate_through_expr(&**element, succ)
}
- ExprStruct(_, ref fields, with_expr) => {
- let succ = self.propagate_through_opt_expr(with_expr, succ);
+ ExprStruct(_, ref fields, ref with_expr) => {
+ let succ = self.propagate_through_opt_expr(with_expr.clone(), succ);
fields.iter().rev().fold(succ, |succ, field| {
- self.propagate_through_expr(field.expr, succ)
+ self.propagate_through_expr(&*field.expr, succ)
})
}
- ExprCall(f, ref args) => {
+ ExprCall(ref f, ref args) => {
// calling a fn with bot return type means that the fn
// will fail, and hence the successors can be ignored
let is_bot = !self.ir.tcx.is_method_call(expr.id) && {
- let t_ret = ty::ty_fn_ret(ty::expr_ty(self.ir.tcx, f));
+ let t_ret = ty::ty_fn_ret(ty::expr_ty(self.ir.tcx, &**f));
ty::type_is_bot(t_ret)
};
let succ = if is_bot {
succ
};
let succ = self.propagate_through_exprs(args.as_slice(), succ);
- self.propagate_through_expr(f, succ)
+ self.propagate_through_expr(&**f, succ)
}
ExprMethodCall(_, _, ref args) => {
self.propagate_through_exprs(exprs.as_slice(), succ)
}
- ExprBinary(op, l, r) if ast_util::lazy_binop(op) => {
- let r_succ = self.propagate_through_expr(r, succ);
+ ExprBinary(op, ref l, ref r) if ast_util::lazy_binop(op) => {
+ let r_succ = self.propagate_through_expr(&**r, succ);
let ln = self.live_node(expr.id, expr.span);
self.init_from_succ(ln, succ);
self.merge_from_succ(ln, r_succ, false);
- self.propagate_through_expr(l, ln)
+ self.propagate_through_expr(&**l, ln)
}
- ExprIndex(l, r) |
- ExprBinary(_, l, r) |
- ExprBox(l, r) => {
- self.propagate_through_exprs([l, r], succ)
+ ExprIndex(ref l, ref r) |
+ ExprBinary(_, ref l, ref r) |
+ ExprBox(ref l, ref r) => {
+ self.propagate_through_exprs([l.clone(), r.clone()], succ)
}
- ExprAddrOf(_, e) |
- ExprCast(e, _) |
- ExprUnary(_, e) |
- ExprParen(e) => {
- self.propagate_through_expr(e, succ)
+ ExprAddrOf(_, ref e) |
+ ExprCast(ref e, _) |
+ ExprUnary(_, ref e) |
+ ExprParen(ref e) => {
+ self.propagate_through_expr(&**e, succ)
}
ExprInlineAsm(ref ia) => {
- let succ = ia.outputs.iter().rev().fold(succ, |succ, &(_, expr)| {
+ let succ = ia.outputs.iter().rev().fold(succ, |succ, &(_, ref expr)| {
// see comment on lvalues in
// propagate_through_lvalue_components()
- let succ = self.write_lvalue(expr, succ, ACC_WRITE);
- self.propagate_through_lvalue_components(expr, succ)
+ let succ = self.write_lvalue(&**expr, succ, ACC_WRITE);
+ self.propagate_through_lvalue_components(&**expr, succ)
});
// Inputs are executed first. Propagate last because of rev order
- ia.inputs.iter().rev().fold(succ, |succ, &(_, expr)| {
- self.propagate_through_expr(expr, succ)
+ ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| {
+ self.propagate_through_expr(&**expr, succ)
})
}
succ
}
- ExprBlock(blk) => {
- self.propagate_through_block(blk, succ)
+ ExprBlock(ref blk) => {
+ self.propagate_through_block(&**blk, succ)
}
ExprMac(..) => {
match expr.node {
ExprPath(_) => succ,
- ExprField(e, _, _) => self.propagate_through_expr(e, succ),
+ ExprField(ref e, _, _) => self.propagate_through_expr(&**e, succ),
_ => self.propagate_through_expr(expr, succ)
}
}
fn propagate_through_loop(&mut self,
expr: &Expr,
- cond: Option<@Expr>,
+ cond: Option<Gc<Expr>>,
body: &Block,
succ: LiveNode)
-> LiveNode {
fn check_local(this: &mut Liveness, local: &Local) {
match local.init {
Some(_) => {
- this.warn_about_unused_or_dead_vars_in_pat(local.pat);
+ this.warn_about_unused_or_dead_vars_in_pat(&*local.pat);
},
None => {
- this.pat_bindings(local.pat, |this, ln, var, sp, id| {
+ this.pat_bindings(&*local.pat, |this, ln, var, sp, id| {
this.warn_about_unused(sp, id, ln, var);
})
}
fn check_expr(this: &mut Liveness, expr: &Expr) {
match expr.node {
- ExprAssign(l, r) => {
- this.check_lvalue(l);
- this.visit_expr(r, ());
+ ExprAssign(ref l, ref r) => {
+ this.check_lvalue(&**l);
+ this.visit_expr(&**r, ());
visit::walk_expr(this, expr, ());
}
- ExprAssignOp(_, l, _) => {
- this.check_lvalue(l);
+ ExprAssignOp(_, ref l, _) => {
+ this.check_lvalue(&**l);
visit::walk_expr(this, expr, ());
}
ExprInlineAsm(ref ia) => {
- for &(_, input) in ia.inputs.iter() {
- this.visit_expr(input, ());
+ for &(_, ref input) in ia.inputs.iter() {
+ this.visit_expr(&**input, ());
}
// Output operands must be lvalues
- for &(_, out) in ia.outputs.iter() {
- this.check_lvalue(out);
- this.visit_expr(out, ());
+ for &(_, ref out) in ia.outputs.iter() {
+ this.check_lvalue(&**out);
+ this.visit_expr(&**out, ());
}
visit::walk_expr(this, expr, ());
let ends_with_stmt = match body.expr {
None if body.stmts.len() > 0 =>
match body.stmts.last().unwrap().node {
- StmtSemi(e, _) => {
- let t_stmt = ty::expr_ty(self.ir.tcx, e);
+ StmtSemi(ref e, _) => {
+ let t_stmt = ty::expr_ty(self.ir.tcx, &**e);
ty::get(t_stmt).sty == ty::get(t_ret).sty
},
_ => false
fn warn_about_unused_args(&self, decl: &FnDecl, entry_ln: LiveNode) {
for arg in decl.inputs.iter() {
pat_util::pat_bindings(&self.ir.tcx.def_map,
- arg.pat,
+ &*arg.pat,
|_bm, p_id, sp, path| {
let var = self.variable(p_id, sp);
// Ignore unused self.
let expr_ty = if_ok!(self.expr_ty(expr));
match expr.node {
- ast::ExprUnary(ast::UnDeref, e_base) => {
- let base_cmt = if_ok!(self.cat_expr(e_base));
+ ast::ExprUnary(ast::UnDeref, ref e_base) => {
+ let base_cmt = if_ok!(self.cat_expr(&**e_base));
Ok(self.cat_deref(expr, base_cmt, 0))
}
- ast::ExprField(base, f_name, _) => {
- let base_cmt = if_ok!(self.cat_expr(base));
+ ast::ExprField(ref base, f_name, _) => {
+ let base_cmt = if_ok!(self.cat_expr(&**base));
Ok(self.cat_field(expr, base_cmt, f_name, expr_ty))
}
- ast::ExprIndex(base, _) => {
+ ast::ExprIndex(ref base, _) => {
if self.typer.is_method_call(expr.id) {
return Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty));
}
- let base_cmt = if_ok!(self.cat_expr(base));
+ let base_cmt = if_ok!(self.cat_expr(&**base));
Ok(self.cat_index(expr, base_cmt, 0))
}
self.cat_def(expr.id, expr.span, expr_ty, def)
}
- ast::ExprParen(e) => {
- self.cat_expr(e)
+ ast::ExprParen(ref e) => {
+ self.cat_expr(&**e)
}
ast::ExprAddrOf(..) | ast::ExprCall(..) |
}
};
- for (i, &subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(subpat)); // see (*2)
+ for (i, subpat) in subpats.iter().enumerate() {
+ let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
let subcmt =
self.cat_imm_interior(
pat, downcast_cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(subcmt, subpat, |x,y,z| op(x,y,z)));
+ if_ok!(self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z)));
}
}
Some(&def::DefFn(..)) |
Some(&def::DefStruct(..)) => {
- for (i, &subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(subpat)); // see (*2)
+ for (i, subpat) in subpats.iter().enumerate() {
+ let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
let cmt_field =
self.cat_imm_interior(
pat, cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(cmt_field, subpat, |x,y,z| op(x,y,z)));
+ if_ok!(self.cat_pattern(cmt_field, &**subpat,
+ |x,y,z| op(x,y,z)));
}
}
Some(&def::DefStatic(..)) => {
- for &subpat in subpats.iter() {
- if_ok!(self.cat_pattern(cmt.clone(), subpat, |x,y,z| op(x,y,z)));
+ for subpat in subpats.iter() {
+ if_ok!(self.cat_pattern(cmt.clone(), &**subpat, |x,y,z| op(x,y,z)));
}
}
_ => {
}
}
- ast::PatIdent(_, _, Some(subpat)) => {
- if_ok!(self.cat_pattern(cmt, subpat, op));
+ ast::PatIdent(_, _, Some(ref subpat)) => {
+ if_ok!(self.cat_pattern(cmt, &**subpat, op));
}
ast::PatIdent(_, _, None) => {
ast::PatStruct(_, ref field_pats, _) => {
// {f1: p1, ..., fN: pN}
for fp in field_pats.iter() {
- let field_ty = if_ok!(self.pat_ty(fp.pat)); // see (*2)
+ let field_ty = if_ok!(self.pat_ty(&*fp.pat)); // see (*2)
let cmt_field = self.cat_field(pat, cmt.clone(), fp.ident, field_ty);
- if_ok!(self.cat_pattern(cmt_field, fp.pat, |x,y,z| op(x,y,z)));
+ if_ok!(self.cat_pattern(cmt_field, &*fp.pat, |x,y,z| op(x,y,z)));
}
}
ast::PatTup(ref subpats) => {
// (p1, ..., pN)
- for (i, &subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(subpat)); // see (*2)
+ for (i, subpat) in subpats.iter().enumerate() {
+ let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
let subcmt =
self.cat_imm_interior(
pat, cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(subcmt, subpat, |x,y,z| op(x,y,z)));
+ if_ok!(self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z)));
}
}
- ast::PatBox(subpat) | ast::PatRegion(subpat) => {
+ ast::PatBox(ref subpat) | ast::PatRegion(ref subpat) => {
// @p1, ~p1
let subcmt = self.cat_deref(pat, cmt, 0);
- if_ok!(self.cat_pattern(subcmt, subpat, op));
+ if_ok!(self.cat_pattern(subcmt, &**subpat, op));
}
ast::PatVec(ref before, slice, ref after) => {
let elt_cmt = self.cat_index(pat, cmt, 0);
- for &before_pat in before.iter() {
- if_ok!(self.cat_pattern(elt_cmt.clone(), before_pat, |x,y,z| op(x,y,z)));
+ for before_pat in before.iter() {
+ if_ok!(self.cat_pattern(elt_cmt.clone(), &**before_pat,
+ |x,y,z| op(x,y,z)));
}
- for &slice_pat in slice.iter() {
- let slice_ty = if_ok!(self.pat_ty(slice_pat));
+ for slice_pat in slice.iter() {
+ let slice_ty = if_ok!(self.pat_ty(&**slice_pat));
let slice_cmt = self.cat_rvalue_node(pat.id(), pat.span(), slice_ty);
- if_ok!(self.cat_pattern(slice_cmt, slice_pat, |x,y,z| op(x,y,z)));
+ if_ok!(self.cat_pattern(slice_cmt, &**slice_pat, |x,y,z| op(x,y,z)));
}
- for &after_pat in after.iter() {
- if_ok!(self.cat_pattern(elt_cmt.clone(), after_pat, |x,y,z| op(x,y,z)));
+ for after_pat in after.iter() {
+ if_ok!(self.cat_pattern(elt_cmt.clone(), &**after_pat, |x,y,z| op(x,y,z)));
}
}
//! outside their scopes. This pass will also generate a set of exported items
//! which are available for use externally when compiled as a library.
+use std::gc::Gc;
use std::mem::replace;
use metadata::csearch;
fn visit_expr(&mut self, expr: &ast::Expr, _: ()) {
match expr.node {
- ast::ExprField(base, ident, _) => {
- match ty::get(ty::expr_ty_adjusted(self.tcx, base)).sty {
+ ast::ExprField(ref base, ident, _) => {
+ match ty::get(ty::expr_ty_adjusted(self.tcx, &**base)).sty {
ty::ty_struct(id, _) => {
self.check_field(expr.span, id, NamedField(ident));
}
tcx.sess.span_err(sp, "visibility has no effect inside functions");
}
}
- let check_struct = |def: &@ast::StructDef| {
+ let check_struct = |def: &Gc<ast::StructDef>| {
for f in def.fields.iter() {
match f.node.kind {
ast::NamedField(_, p) => check_inherited(tcx, f.span, p),
at_outer_type: true,
outer_type_is_public_path: false,
};
- visitor.visit_ty(self_, ());
+ visitor.visit_ty(&*self_, ());
self_contains_private = visitor.contains_private;
self_is_public_path = visitor.outer_type_is_public_path;
}
match *trait_ref {
None => {
for method in methods.iter() {
- visit::walk_method_helper(self, *method, ())
+ visit::walk_method_helper(self, &**method, ())
}
}
Some(ref tr) => {
if method.explicit_self.node == ast::SelfStatic &&
self.exported_items.contains(&method.id) {
found_pub_static = true;
- visit::walk_method_helper(self, *method, ());
+ visit::walk_method_helper(self, &**method, ());
}
}
if found_pub_static {
{
match tcx.map.find(impl_src.node) {
Some(ast_map::NodeItem(item)) => {
- item_might_be_inlined(item)
+ item_might_be_inlined(&*item)
}
Some(..) | None => {
tcx.sess.span_bug(method.span, "impl did is not an item")
match self.tcx.map.find(node_id) {
Some(ast_map::NodeItem(item)) => {
match item.node {
- ast::ItemFn(..) => item_might_be_inlined(item),
+ ast::ItemFn(..) => item_might_be_inlined(&*item),
_ => false,
}
}
match *node {
ast_map::NodeItem(item) => {
match item.node {
- ast::ItemFn(_, _, _, _, search_block) => {
- if item_might_be_inlined(item) {
- visit::walk_block(self, search_block, ())
+ ast::ItemFn(_, _, _, _, ref search_block) => {
+ if item_might_be_inlined(&*item) {
+ visit::walk_block(self, &**search_block, ())
}
}
// Statics with insignificant addresses are not reachable
// because they're inlined specially into all other crates.
- ast::ItemStatic(_, _, init) => {
+ ast::ItemStatic(_, _, ref init) => {
if attr::contains_name(item.attrs.as_slice(),
"address_insignificant") {
self.reachable_symbols.remove(&search_item);
}
- visit::walk_expr(self, init, ());
+ visit::walk_expr(self, &**init, ());
}
// These are normal, nothing reachable about these
// Keep going, nothing to get exported
}
ast::Provided(ref method) => {
- visit::walk_block(self, method.body, ())
+ visit::walk_block(self, &*method.body, ())
}
}
}
ast_map::NodeMethod(method) => {
let did = self.tcx.map.get_parent_did(search_item);
- if method_might_be_inlined(self.tcx, method, did) {
- visit::walk_block(self, method.body, ())
+ if method_might_be_inlined(self.tcx, &*method, did) {
+ visit::walk_block(self, &*method.body, ())
}
}
// Nothing to recurse on for these
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
+use std::gc::Gc;
use syntax::codemap::Span;
use syntax::{ast, visit};
use syntax::visit::{Visitor, FnKind};
// FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST.
match local.init {
- Some(expr) => {
- record_rvalue_scope_if_borrow_expr(visitor, expr, blk_id);
+ Some(ref expr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &**expr, blk_id);
- if is_binding_pat(local.pat) || is_borrowed_ty(local.ty) {
- record_rvalue_scope(visitor, expr, blk_id);
+ if is_binding_pat(&*local.pat) || is_borrowed_ty(&*local.ty) {
+ record_rvalue_scope(visitor, &**expr, blk_id);
}
}
ast::PatIdent(ast::BindByRef(_), _, _) => true,
ast::PatStruct(_, ref field_pats, _) => {
- field_pats.iter().any(|fp| is_binding_pat(fp.pat))
+ field_pats.iter().any(|fp| is_binding_pat(&*fp.pat))
}
ast::PatVec(ref pats1, ref pats2, ref pats3) => {
- pats1.iter().any(|&p| is_binding_pat(p)) ||
- pats2.iter().any(|&p| is_binding_pat(p)) ||
- pats3.iter().any(|&p| is_binding_pat(p))
+ pats1.iter().any(|p| is_binding_pat(&**p)) ||
+ pats2.iter().any(|p| is_binding_pat(&**p)) ||
+ pats3.iter().any(|p| is_binding_pat(&**p))
}
ast::PatEnum(_, Some(ref subpats)) |
ast::PatTup(ref subpats) => {
- subpats.iter().any(|&p| is_binding_pat(p))
+ subpats.iter().any(|p| is_binding_pat(&**p))
}
- ast::PatBox(subpat) => {
- is_binding_pat(subpat)
+ ast::PatBox(ref subpat) => {
+ is_binding_pat(&**subpat)
}
_ => false,
*/
match expr.node {
- ast::ExprAddrOf(_, subexpr) => {
- record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
- record_rvalue_scope(visitor, subexpr, blk_id);
+ ast::ExprAddrOf(_, ref subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id);
+ record_rvalue_scope(visitor, &**subexpr, blk_id);
}
ast::ExprStruct(_, ref fields, _) => {
for field in fields.iter() {
record_rvalue_scope_if_borrow_expr(
- visitor, field.expr, blk_id);
+ visitor, &*field.expr, blk_id);
}
}
- ast::ExprVstore(subexpr, _) => {
+ ast::ExprVstore(ref subexpr, _) => {
visitor.region_maps.record_rvalue_scope(subexpr.id, blk_id);
- record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
+ record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id);
}
ast::ExprVec(ref subexprs) |
ast::ExprTup(ref subexprs) => {
- for &subexpr in subexprs.iter() {
+ for subexpr in subexprs.iter() {
record_rvalue_scope_if_borrow_expr(
- visitor, subexpr, blk_id);
+ visitor, &**subexpr, blk_id);
}
}
- ast::ExprUnary(ast::UnUniq, subexpr) => {
- record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
+ ast::ExprUnary(ast::UnUniq, ref subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id);
}
- ast::ExprCast(subexpr, _) |
- ast::ExprParen(subexpr) => {
- record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id)
+ ast::ExprCast(ref subexpr, _) |
+ ast::ExprParen(ref subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id)
}
ast::ExprBlock(ref block) => {
match block.expr {
- Some(subexpr) => {
+ Some(ref subexpr) => {
record_rvalue_scope_if_borrow_expr(
- visitor, subexpr, blk_id);
+ visitor, &**subexpr, blk_id);
}
None => { }
}
ast::ExprField(ref subexpr, _, _) |
ast::ExprIndex(ref subexpr, _) |
ast::ExprParen(ref subexpr) => {
- let subexpr: &'a @Expr = subexpr; // FIXME(#11586)
+ let subexpr: &'a Gc<Expr> = subexpr; // FIXME(#11586)
expr = &**subexpr;
}
_ => {
use std::collections::{HashMap, HashSet};
use std::cell::{Cell, RefCell};
+use std::gc::Gc;
use std::mem::replace;
use std::rc::{Rc, Weak};
-use std::string::String;
use std::uint;
// Definition mapping
name_bindings.define_type
(DefTy(local_def(item.id)), sp, is_public);
- for &variant in (*enum_definition).variants.iter() {
+ for variant in (*enum_definition).variants.iter() {
self.build_reduced_graph_for_variant(
- variant,
+ &**variant,
local_def(item.id),
parent.clone(),
is_public);
FunctionRibKind(function_id, body_id) => {
if !is_ty_param {
def = DefUpvar(def.def_id().node,
- @def,
+ box(GC) def,
function_id,
body_id);
}
// resolve the discriminator expr
// as a constant
self.with_constant_rib(|this| {
- this.resolve_expr(*dis_expr);
+ this.resolve_expr(&**dis_expr);
});
}
}
}
ItemImpl(ref generics,
- ref implemented_traits,
- self_type,
- ref methods) => {
+ ref implemented_traits,
+ ref self_type,
+ ref methods) => {
self.resolve_implementation(item.id,
generics,
implemented_traits,
- self_type,
+ &**self_type,
methods.as_slice());
}
&ty_m.generics.ty_params);
for argument in ty_m.decl.inputs.iter() {
- this.resolve_type(argument.ty);
+ this.resolve_type(&*argument.ty);
}
- this.resolve_type(ty_m.decl.output);
+ this.resolve_type(&*ty_m.decl.output);
});
}
- ast::Provided(m) => {
+ ast::Provided(ref m) => {
this.resolve_method(MethodRibKind(item.id,
Provided(m.id)),
- m,
+ &**m,
generics.ty_params.len())
}
}
generics, foreign_item.id, 0,
ItemRibKind),
|this| visit::walk_foreign_item(this,
- *foreign_item,
+ &**foreign_item,
()));
}
ForeignItemStatic(..) => {
visit::walk_foreign_item(this,
- *foreign_item,
+ &**foreign_item,
());
}
}
}
Some(declaration) => {
for argument in declaration.inputs.iter() {
- this.resolve_pattern(argument.pat,
+ this.resolve_pattern(&*argument.pat,
ArgumentIrrefutableMode,
None);
- this.resolve_type(argument.ty);
+ this.resolve_type(&*argument.ty);
debug!("(resolving function) recorded argument");
}
- this.resolve_type(declaration.output);
+ this.resolve_type(&*declaration.output);
}
}
// Resolve the function body.
- this.resolve_block(block);
+ this.resolve_block(&*block);
debug!("(resolving function) leaving function");
});
self.resolve_type_parameter_bound(type_parameter.id, bound);
}
match type_parameter.default {
- Some(ty) => self.resolve_type(ty),
+ Some(ref ty) => self.resolve_type(&**ty),
None => {}
}
}
}
UnboxedFnTyParamBound(ref unboxed_function) => {
for argument in unboxed_function.decl.inputs.iter() {
- self.resolve_type(argument.ty);
+ self.resolve_type(&*argument.ty);
}
- self.resolve_type(unboxed_function.decl.output);
+ self.resolve_type(&*unboxed_function.decl.output);
}
StaticRegionTyParamBound | OtherRegionTyParamBound(_) => {}
}
// Resolve fields.
for field in fields.iter() {
- this.resolve_type(field.node.ty);
+ this.resolve_type(&*field.node.ty);
}
});
}
generics: &Generics,
opt_trait_reference: &Option<TraitRef>,
self_type: &Ty,
- methods: &[@Method]) {
+ methods: &[Gc<Method>]) {
// If applicable, create a rib for the type parameters.
let outer_type_parameter_count = generics.ty_params.len();
self.with_type_parameter_rib(HasTypeParameters(generics,
for method in methods.iter() {
// We also need a new scope for the method-specific type parameters.
this.resolve_method(MethodRibKind(id, Provided(method.id)),
- *method,
+ &**method,
outer_type_parameter_count);
}
});
fn resolve_local(&mut self, local: &Local) {
// Resolve the type.
- self.resolve_type(local.ty);
+ self.resolve_type(&*local.ty);
// Resolve the initializer, if necessary.
match local.init {
None => {
// Nothing to do.
}
- Some(initializer) => {
- self.resolve_expr(initializer);
+ Some(ref initializer) => {
+ self.resolve_expr(&**initializer);
}
}
// Resolve the pattern.
- self.resolve_pattern(local.pat, LocalIrrefutableMode, None);
+ self.resolve_pattern(&*local.pat, LocalIrrefutableMode, None);
}
// build a map from pattern identifiers to binding-info's.
if arm.pats.len() == 0 {
return
}
- let map_0 = self.binding_mode_map(*arm.pats.get(0));
+ let map_0 = self.binding_mode_map(&**arm.pats.get(0));
for (i, p) in arm.pats.iter().enumerate() {
- let map_i = self.binding_mode_map(*p);
+ let map_i = self.binding_mode_map(&**p);
for (&key, &binding_0) in map_0.iter() {
match map_i.find(&key) {
let mut bindings_list = HashMap::new();
for pattern in arm.pats.iter() {
- self.resolve_pattern(*pattern,
+ self.resolve_pattern(&**pattern,
RefutableMode,
Some(&mut bindings_list));
}
self.check_consistent_bindings(arm);
visit::walk_expr_opt(self, arm.guard, ());
- self.resolve_expr(arm.body);
+ self.resolve_expr(&*arm.body);
self.value_ribs.borrow_mut().pop();
}
}
// Check the types in the path pattern.
- for &ty in path.segments
+ for ty in path.segments
.iter()
.flat_map(|seg| seg.types.iter()) {
- self.resolve_type(ty);
+ self.resolve_type(&**ty);
}
}
}
// Check the types in the path pattern.
- for &ty in path.segments
+ for ty in path.segments
.iter()
.flat_map(|s| s.types.iter()) {
- self.resolve_type(ty);
+ self.resolve_type(&**ty);
}
}
}
// Check the types in the path pattern.
- for &ty in path.segments
+ for ty in path.segments
.iter()
.flat_map(|s| s.types.iter()) {
- self.resolve_type(ty);
+ self.resolve_type(&**ty);
}
}
- PatLit(expr) => {
- self.resolve_expr(expr);
+ PatLit(ref expr) => {
+ self.resolve_expr(&**expr);
}
- PatRange(first_expr, last_expr) => {
- self.resolve_expr(first_expr);
- self.resolve_expr(last_expr);
+ PatRange(ref first_expr, ref last_expr) => {
+ self.resolve_expr(&**first_expr);
+ self.resolve_expr(&**last_expr);
}
PatStruct(ref path, _, _) => {
namespace: Namespace,
check_ribs: bool) -> Option<(Def, LastPrivate)> {
// First, resolve the types.
- for &ty in path.segments.iter().flat_map(|s| s.types.iter()) {
- self.resolve_type(ty);
+ for ty in path.segments.iter().flat_map(|s| s.types.iter()) {
+ self.resolve_type(&**ty);
}
if path.global {
-> Option<(Path, NodeId, FallbackChecks)> {
match t.node {
TyPath(ref path, _, node_id) => Some((path.clone(), node_id, allow)),
- TyPtr(mut_ty) => extract_path_and_node_id(mut_ty.ty, OnlyTraitAndStatics),
- TyRptr(_, mut_ty) => extract_path_and_node_id(mut_ty.ty, allow),
+ TyPtr(mut_ty) => extract_path_and_node_id(&*mut_ty.ty, OnlyTraitAndStatics),
+ TyRptr(_, mut_ty) => extract_path_and_node_id(&*mut_ty.ty, allow),
// This doesn't handle the remaining `Ty` variants as they are not
// that commonly the self_type, it might be interesting to provide
// support for those in future.
use std::collections::HashMap;
use std::cell::Cell;
use std::rc::Rc;
+use std::gc::Gc;
use syntax::ast;
use syntax::ast::Ident;
use syntax::ast_util::path_to_ident;
// expression.
enum Lit {
UnitLikeStructLit(ast::NodeId), // the node ID of the pattern
- ExprLit(@ast::Expr),
+ ExprLit(Gc<ast::Expr>),
ConstLit(ast::DefId), // the def ID of the constant
}
enum Opt {
lit(Lit),
var(ty::Disr, Rc<adt::Repr>),
- range(@ast::Expr, @ast::Expr),
+ range(Gc<ast::Expr>, Gc<ast::Expr>),
vec_len(/* length */ uint, VecLenOpt, /*range of matches*/(uint, uint))
}
-fn lit_to_expr(tcx: &ty::ctxt, a: &Lit) -> @ast::Expr {
+fn lit_to_expr(tcx: &ty::ctxt, a: &Lit) -> Gc<ast::Expr> {
match *a {
ExprLit(existing_a_expr) => existing_a_expr,
ConstLit(a_const) => const_eval::lookup_const_by_id(tcx, a_const).unwrap(),
(&lit(a), &lit(b)) => {
let a_expr = lit_to_expr(tcx, &a);
let b_expr = lit_to_expr(tcx, &b);
- match const_eval::compare_lit_exprs(tcx, a_expr, b_expr) {
+ match const_eval::compare_lit_exprs(tcx, &*a_expr, &*b_expr) {
Some(val1) => val1 == 0,
None => fail!("compare_list_exprs: type mismatch"),
}
}
- (&range(a1, a2), &range(b1, b2)) => {
- let m1 = const_eval::compare_lit_exprs(tcx, a1, b1);
- let m2 = const_eval::compare_lit_exprs(tcx, a2, b2);
+ (&range(ref a1, ref a2), &range(ref b1, ref b2)) => {
+ let m1 = const_eval::compare_lit_exprs(tcx, &**a1, &**b1);
+ let m2 = const_eval::compare_lit_exprs(tcx, &**a2, &**b2);
match (m1, m2) {
(Some(val1), Some(val2)) => (val1 == 0 && val2 == 0),
_ => fail!("compare_list_exprs: type mismatch"),
let ccx = bcx.ccx();
let mut bcx = bcx;
match *o {
- lit(ExprLit(lit_expr)) => {
- let lit_datum = unpack_datum!(bcx, expr::trans(bcx, lit_expr));
+ lit(ExprLit(ref lit_expr)) => {
+ let lit_datum = unpack_datum!(bcx, expr::trans(bcx, &**lit_expr));
let lit_datum = lit_datum.assert_rvalue(bcx); // literals are rvalues
let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
return single_result(Result::new(bcx, lit_datum.val));
var(disr_val, ref repr) => {
return adt::trans_case(bcx, &**repr, disr_val);
}
- range(l1, l2) => {
- let (l1, _) = consts::const_expr(ccx, l1, true);
- let (l2, _) = consts::const_expr(ccx, l2, true);
+ range(ref l1, ref l2) => {
+ let (l1, _) = consts::const_expr(ccx, &**l1, true);
+ let (l2, _) = consts::const_expr(ccx, &**l2, true);
return range_result(Result::new(bcx, l1), Result::new(bcx, l2));
}
vec_len(n, vec_len_eq, _) => {
* these pointers are stored in llmatch variables just before executing `data` arm.
*/
struct Match<'a, 'b> {
- pats: Vec<@ast::Pat>,
+ pats: Vec<Gc<ast::Pat>>,
data: &'a ArmData<'a, 'b>,
bound_ptrs: Vec<(Ident, ValueRef)>
}
}).collect()
}
-fn assert_is_binding_or_wild(bcx: &Block, p: @ast::Pat) {
- if !pat_is_binding_or_wild(&bcx.tcx().def_map, p) {
+fn assert_is_binding_or_wild(bcx: &Block, p: Gc<ast::Pat>) {
+ if !pat_is_binding_or_wild(&bcx.tcx().def_map, &*p) {
bcx.sess().span_bug(
p.span,
format!("expected an identifier pattern but found p: {}",
}
}
-type enter_pat<'a> = |@ast::Pat|: 'a -> Option<Vec<@ast::Pat>>;
+type enter_pat<'a> = |Gc<ast::Pat>|: 'a -> Option<Vec<Gc<ast::Pat>>>;
fn enter_match<'a, 'b>(
bcx: &'b Block<'b>,
let mut bound_ptrs = br.bound_ptrs.clone();
match this.node {
ast::PatIdent(_, ref path, None) => {
- if pat_is_binding(dm, this) {
+ if pat_is_binding(dm, &*this) {
bound_ptrs.push((path_to_ident(path), val));
}
}
let matches = enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatWild | ast::PatWildMulti => Some(Vec::new()),
- ast::PatIdent(_, _, None) if pat_is_binding(dm, p) => Some(Vec::new()),
+ ast::PatIdent(_, _, None) if pat_is_binding(dm, &*p) => Some(Vec::new()),
_ => None
}
});
let _indenter = indenter();
let tcx = bcx.tcx();
- let dummy = @ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
+ let dummy = box(GC) ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
let mut i = 0;
enter_match(bcx, &tcx.def_map, m, col, val, |p| {
let answer = match p.node {
ast::PatEnum(..) |
- ast::PatIdent(_, _, None) if pat_is_const(&tcx.def_map, p) => {
+ ast::PatIdent(_, _, None) if pat_is_const(&tcx.def_map, &*p) => {
let const_def = tcx.def_map.borrow().get_copy(&p.id);
let const_def_id = const_def.def_id();
if opt_eq(tcx, &lit(ConstLit(const_def_id)), opt) {
}
}
ast::PatIdent(_, _, None)
- if pat_is_variant_or_struct(&tcx.def_map, p) => {
+ if pat_is_variant_or_struct(&tcx.def_map, &*p) => {
if opt_eq(tcx, &variant_opt(bcx, p.id), opt) {
Some(Vec::new())
} else {
bcx.val_to_str(val));
let _indenter = indenter();
- let dummy = @ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
+ let dummy = box(GC) ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatStruct(_, ref fpats, _) => {
bcx.val_to_str(val));
let _indenter = indenter();
- let dummy = @ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
+ let dummy = box(GC) ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatTup(ref elts) => {
bcx.val_to_str(val));
let _indenter = indenter();
- let dummy = @ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
+ let dummy = box(GC) ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatEnum(_, Some(ref elts)) => {
bcx.val_to_str(val));
let _indenter = indenter();
- let dummy = @ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
+ let dummy = box(GC) ast::Pat {id: 0, node: ast::PatWild, span: DUMMY_SP};
enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatBox(sub) => {
bcx.val_to_str(val));
let _indenter = indenter();
- let dummy = @ast::Pat { id: 0, node: ast::PatWild, span: DUMMY_SP };
+ let dummy = box(GC) ast::Pat { id: 0, node: ast::PatWild, span: DUMMY_SP };
enter_match(bcx, dm, m, col, val, |p| {
match p.node {
ast::PatRegion(sub) => {
fn score(p: &ast::Pat) -> uint {
match p.node {
ast::PatLit(_) | ast::PatEnum(_, _) | ast::PatRange(_, _) => 1u,
- ast::PatIdent(_, _, Some(p)) => score(p),
+ ast::PatIdent(_, _, Some(ref p)) => score(&**p),
_ => 0u
}
}
let mut scores = Vec::from_elem(m[0].pats.len(), 0u);
for br in m.iter() {
- for (i, p) in br.pats.iter().enumerate() {
- *scores.get_mut(i) += score(*p);
+ for (i, ref p) in br.pats.iter().enumerate() {
+ *scores.get_mut(i) += score(&***p);
}
}
let mut max_score = 0u;
Store(bcx, *value_ptr, llmatch);
}
match data.arm.guard {
- Some(guard_expr) => {
+ Some(ref guard_expr) => {
bcx = compile_guard(bcx,
- guard_expr,
+ &**guard_expr,
m[0].data,
m.slice(1, m.len()),
vals,
trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
}
-fn create_bindings_map(bcx: &Block, pat: @ast::Pat) -> BindingsMap {
+fn create_bindings_map(bcx: &Block, pat: Gc<ast::Pat>) -> BindingsMap {
// Create the bindings map, which is a mapping from each binding name
// to an alloca() that will be the value for that local variable.
// Note that we use the names because each binding will have many ids
let ccx = bcx.ccx();
let tcx = bcx.tcx();
let mut bindings_map = HashMap::new();
- pat_bindings(&tcx.def_map, pat, |bm, p_id, span, path| {
+ pat_bindings(&tcx.def_map, &*pat, |bm, p_id, span, path| {
let ident = path_to_ident(path);
let variable_ty = node_id_type(bcx, p_id);
let llvariable_ty = type_of::type_of(ccx, variable_ty);
let cleanup_scope = fcx.push_custom_cleanup_scope();
bcx = insert_lllocals(bcx, &arm_data.bindings_map,
cleanup::CustomScope(cleanup_scope));
- bcx = expr::trans_into(bcx, arm_data.arm.body, dest);
+ bcx = expr::trans_into(bcx, &*arm_data.arm.body, dest);
bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
arm_cxs.push(bcx);
}
//
// In such cases, the more general path is unsafe, because
// it assumes it is matching against a valid value.
- match simple_identifier(pat) {
+ match simple_identifier(&*pat) {
Some(path) => {
let var_scope = cleanup::var_scope(tcx, local.id);
return mk_binding_alloca(
bcx, pat.id, path, BindLocal, var_scope, (),
- |(), bcx, v, _| expr::trans_into(bcx, init_expr,
+ |(), bcx, v, _| expr::trans_into(bcx, &*init_expr,
expr::SaveIn(v)));
}
// General path.
let init_datum =
- unpack_datum!(bcx, expr::trans_to_lvalue(bcx, init_expr, "let"));
- if ty::type_is_bot(expr_ty(bcx, init_expr)) {
+ unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &*init_expr, "let"));
+ if ty::type_is_bot(expr_ty(bcx, &*init_expr)) {
create_dummy_locals(bcx, pat)
} else {
if bcx.sess().asm_comments() {
};
fn create_dummy_locals<'a>(mut bcx: &'a Block<'a>,
- pat: @ast::Pat)
+ pat: Gc<ast::Pat>)
-> &'a Block<'a> {
// create dummy memory for the variables if we have no
// value to store into them immediately
let tcx = bcx.tcx();
- pat_bindings(&tcx.def_map, pat, |_, p_id, _, path| {
+ pat_bindings(&tcx.def_map, &*pat, |_, p_id, _, path| {
let scope = cleanup::var_scope(tcx, p_id);
bcx = mk_binding_alloca(
bcx, p_id, path, BindLocal, scope, (),
}
pub fn store_arg<'a>(mut bcx: &'a Block<'a>,
- pat: @ast::Pat,
+ pat: Gc<ast::Pat>,
arg: Datum<Rvalue>,
arg_scope: cleanup::ScopeId)
-> &'a Block<'a> {
let _icx = push_ctxt("match::store_arg");
- match simple_identifier(pat) {
+ match simple_identifier(&*pat) {
Some(path) => {
// Generate nicer LLVM for the common case of fn a pattern
// like `x: T`
fn bind_irrefutable_pat<'a>(
bcx: &'a Block<'a>,
- pat: @ast::Pat,
+ pat: Gc<ast::Pat>,
val: ValueRef,
binding_mode: IrrefutablePatternBindingMode,
cleanup_scope: cleanup::ScopeId)
let ccx = bcx.ccx();
match pat.node {
ast::PatIdent(pat_binding_mode, ref path, inner) => {
- if pat_is_binding(&tcx.def_map, pat) {
+ if pat_is_binding(&tcx.def_map, &*pat) {
// Allocate the stack slot where the value of this
// binding will live and place it into the appropriate
// map.
let temp_scope = fcx.push_custom_cleanup_scope();
// Prepare the output operands
- let outputs = ia.outputs.iter().map(|&(ref c, out)| {
+ let outputs = ia.outputs.iter().map(|&(ref c, ref out)| {
constraints.push((*c).clone());
- let out_datum = unpack_datum!(bcx, expr::trans(bcx, out));
+ let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
out_datum.val
}).collect::<Vec<_>>();
// Now the input operands
- let inputs = ia.inputs.iter().map(|&(ref c, input)| {
+ let inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
- let in_datum = unpack_datum!(bcx, expr::trans(bcx, input));
+ let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
- expr_ty(bcx, input),
+ expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use std::{i8, i16, i32, i64};
+use std::gc::Gc;
use syntax::abi::{X86, X86_64, Arm, Mips, Rust, RustIntrinsic};
use syntax::ast_util::{local_def, is_local};
use syntax::attr::AttrMetaMethods;
if ignore_lhs(bcx, local) {
// Handle let _ = e; just like e;
match local.init {
- Some(init) => {
- return controlflow::trans_stmt_semi(bcx, init)
+ Some(ref init) => {
+ return controlflow::trans_stmt_semi(bcx, &**init)
}
None => { return bcx; }
}
fn trans_enum_def(ccx: &CrateContext, enum_definition: &ast::EnumDef,
sp: Span, id: ast::NodeId, vi: &[Rc<ty::VariantInfo>],
i: &mut uint) {
- for &variant in enum_definition.variants.iter() {
+ for variant in enum_definition.variants.iter() {
let disr_val = vi[*i].disr_val;
*i += 1;
match variant.node.kind {
ast::TupleVariantKind(ref args) if args.len() > 0 => {
let llfn = get_item_val(ccx, variant.node.id);
- trans_enum_variant(ccx, id, variant, args.as_slice(),
+ trans_enum_variant(ccx, id, &**variant, args.as_slice(),
disr_val, ¶m_substs::empty(), llfn);
}
ast::TupleVariantKind(_) => {
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
let _icx = push_ctxt("trans_item");
match item.node {
- ast::ItemFn(decl, _fn_style, abi, ref generics, body) => {
+ ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
if abi != Rust {
let llfndecl = get_item_val(ccx, item.id);
foreign::trans_rust_fn_with_foreign_abi(
- ccx, decl, body, item.attrs.as_slice(), llfndecl, item.id);
+ ccx, &**decl, &**body, item.attrs.as_slice(), llfndecl, item.id);
} else if !generics.is_type_parameterized() {
let llfn = get_item_val(ccx, item.id);
trans_fn(ccx,
- decl,
- body,
+ &**decl,
+ &**body,
llfn,
¶m_substs::empty(),
item.id,
// Be sure to travel more than just one layer deep to catch nested
// items in blocks and such.
let mut v = TransItemVisitor{ ccx: ccx };
- v.visit_block(body, ());
+ v.visit_block(&**body, ());
}
}
ast::ItemImpl(ref generics, _, _, ref ms) => {
trans_enum_def(ccx, enum_definition, item.span, item.id, vi.as_slice(), &mut i);
}
}
- ast::ItemStatic(_, m, expr) => {
+ ast::ItemStatic(_, m, ref expr) => {
// Recurse on the expression to catch items in blocks
let mut v = TransItemVisitor{ ccx: ccx };
- v.visit_expr(expr, ());
+ v.visit_expr(&**expr, ());
consts::trans_const(ccx, m, item.id);
// Do static_assert checking. It can't really be done much earlier
// because we need to get the value of the bool out of LLVM
}
}
-pub fn trans_struct_def(ccx: &CrateContext, struct_def: @ast::StructDef) {
+pub fn trans_struct_def(ccx: &CrateContext, struct_def: Gc<ast::StructDef>) {
// If this is a tuple-like struct, translate the constructor.
match struct_def.ctor_id {
// We only need to translate a constructor if there are fields;
pub fn trans_mod(ccx: &CrateContext, m: &ast::Mod) {
let _icx = push_ctxt("trans_mod");
for item in m.items.iter() {
- trans_item(ccx, *item);
+ trans_item(ccx, &**item);
}
}
let sym = exported_name(ccx, id, ty, i.attrs.as_slice());
let v = match i.node {
- ast::ItemStatic(_, _, expr) => {
+ ast::ItemStatic(_, _, ref expr) => {
// If this static came from an external crate, then
// we need to get the symbol from csearch instead of
// using the current crate's name/version
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
- let (v, inlineable) = consts::const_expr(ccx, expr, is_local);
+ let (v, inlineable) = consts::const_expr(ccx, &**expr, is_local);
ccx.const_values.borrow_mut().insert(id, v);
let mut inlineable = inlineable;
get_item_val()");
}
ast::Provided(m) => {
- register_method(ccx, id, m)
+ register_method(ccx, id, &*m)
}
}
}
ast_map::NodeMethod(m) => {
- register_method(ccx, id, m)
+ register_method(ccx, id, &*m)
}
ast_map::NodeForeignItem(ni) => {
ast::ForeignItemFn(..) => {
let abi = ccx.tcx.map.get_foreign_abi(id);
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
- let name = foreign::link_name(ni);
+ let name = foreign::link_name(&*ni);
foreign::register_foreign_item_fn(ccx, abi, ty,
name.get().as_slice(),
Some(ni.span))
}
ast::ForeignItemStatic(..) => {
- foreign::register_static(ccx, ni)
+ foreign::register_static(ccx, &*ni)
}
}
}
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
- use sync::one::{Once, ONCE_INIT};
+ use std::sync::{Once, ONCE_INIT};
static mut INIT: Once = ONCE_INIT;
static mut POISONED: bool = false;
INIT.doit(|| {
use synabi = syntax::abi;
use syntax::ast_map;
+use std::gc::Gc;
+
pub struct MethodData {
pub llfn: ValueRef,
pub llself: ValueRef,
let mut llargs = Vec::new();
let arg_tys = match args {
- ArgExprs(a) => a.iter().map(|x| expr_ty(bcx, *x)).collect(),
+ ArgExprs(a) => a.iter().map(|x| expr_ty(bcx, &**x)).collect(),
_ => fail!("expected arg exprs.")
};
bcx = trans_args(bcx, args, callee_ty, &mut llargs,
pub enum CallArgs<'a> {
// Supply value of arguments as a list of expressions that must be
// translated. This is used in the common case of `foo(bar, qux)`.
- ArgExprs(&'a [@ast::Expr]),
+ ArgExprs(&'a [Gc<ast::Expr>]),
// Supply value of arguments as a list of LLVM value refs; frequently
// used with lang items and so forth, when the argument is an internal
match args {
ArgExprs(arg_exprs) => {
let num_formal_args = arg_tys.len();
- for (i, &arg_expr) in arg_exprs.iter().enumerate() {
+ for (i, arg_expr) in arg_exprs.iter().enumerate() {
if i == 0 && ignore_self {
continue;
}
let arg_ty = if i >= num_formal_args {
assert!(variadic);
- expr_ty_adjusted(cx, arg_expr)
+ expr_ty_adjusted(cx, &**arg_expr)
} else {
*arg_tys.get(i)
};
- let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_expr));
+ let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &**arg_expr));
llargs.push(unpack_result!(bcx, {
trans_arg_datum(bcx, arg_ty, arg_datum,
arg_cleanup_scope,
use util::ppaux::{Repr, ty_to_str};
use std::c_str::ToCStr;
+use std::gc::Gc;
use std::vec;
-use std::vec::Vec;
use libc::c_uint;
use syntax::{ast, ast_util};
}
fn const_vec(cx: &CrateContext, e: &ast::Expr,
- es: &[@ast::Expr], is_local: bool) -> (ValueRef, Type, bool) {
+ es: &[Gc<ast::Expr>], is_local: bool) -> (ValueRef, Type, bool) {
let vec_ty = ty::expr_ty(cx.tcx(), e);
let unit_ty = ty::sequence_element_type(cx.tcx(), vec_ty);
let llunitty = type_of::type_of(cx, unit_ty);
- let (vs, inlineable) = vec::unzip(es.iter().map(|e| const_expr(cx, *e, is_local)));
+ let (vs, inlineable) = vec::unzip(es.iter().map(|e| const_expr(cx, &**e, is_local)));
// If the vector contains enums, an LLVM array won't work.
let v = if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
C_struct(cx, vs.as_slice(), false)
// if it's assigned to a static.
fn const_expr_unadjusted(cx: &CrateContext, e: &ast::Expr,
is_local: bool) -> (ValueRef, bool) {
- let map_list = |exprs: &[@ast::Expr]| {
- exprs.iter().map(|&e| const_expr(cx, e, is_local))
+ let map_list = |exprs: &[Gc<ast::Expr>]| {
+ exprs.iter().map(|e| const_expr(cx, &**e, is_local))
.fold((Vec::new(), true),
|(l, all_inlineable), (val, inlineable)| {
(l.append_one(val), all_inlineable && inlineable)
unsafe {
let _icx = push_ctxt("const_expr");
return match e.node {
- ast::ExprLit(lit) => {
- (consts::const_lit(cx, e, (*lit).clone()), true)
+ ast::ExprLit(ref lit) => {
+ (consts::const_lit(cx, e, (**lit).clone()), true)
}
- ast::ExprBinary(b, e1, e2) => {
- let (te1, _) = const_expr(cx, e1, is_local);
- let (te2, _) = const_expr(cx, e2, is_local);
+ ast::ExprBinary(b, ref e1, ref e2) => {
+ let (te1, _) = const_expr(cx, &**e1, is_local);
+ let (te2, _) = const_expr(cx, &**e2, is_local);
let te2 = base::cast_shift_const_rhs(b, te1, te2);
/* Neither type is bottom, and we expect them to be unified
* already, so the following is safe. */
- let ty = ty::expr_ty(cx.tcx(), e1);
+ let ty = ty::expr_ty(cx.tcx(), &**e1);
let is_float = ty::type_is_fp(ty);
let signed = ty::type_is_signed(ty);
return (match b {
},
}, true)
},
- ast::ExprUnary(u, e) => {
- let (te, _) = const_expr(cx, e, is_local);
- let ty = ty::expr_ty(cx.tcx(), e);
+ ast::ExprUnary(u, ref e) => {
+ let (te, _) = const_expr(cx, &**e, is_local);
+ let ty = ty::expr_ty(cx.tcx(), &**e);
let is_float = ty::type_is_fp(ty);
return (match u {
ast::UnBox | ast::UnUniq | ast::UnDeref => {
}
}, true)
}
- ast::ExprField(base, field, _) => {
- let bt = ty::expr_ty_adjusted(cx.tcx(), base);
+ ast::ExprField(ref base, field, _) => {
+ let bt = ty::expr_ty_adjusted(cx.tcx(), &**base);
let brepr = adt::represent_type(cx, bt);
- let (bv, inlineable) = const_expr(cx, base, is_local);
+ let (bv, inlineable) = const_expr(cx, &**base, is_local);
expr::with_field_tys(cx.tcx(), bt, None, |discr, field_tys| {
let ix = ty::field_idx_strict(cx.tcx(), field.name, field_tys);
(adt::const_get_field(cx, &*brepr, bv, discr, ix), inlineable)
})
}
- ast::ExprIndex(base, index) => {
- let bt = ty::expr_ty_adjusted(cx.tcx(), base);
- let (bv, inlineable) = const_expr(cx, base, is_local);
- let iv = match const_eval::eval_const_expr(cx.tcx(), index) {
+ ast::ExprIndex(ref base, ref index) => {
+ let bt = ty::expr_ty_adjusted(cx.tcx(), &**base);
+ let (bv, inlineable) = const_expr(cx, &**base, is_local);
+ let iv = match const_eval::eval_const_expr(cx.tcx(), &**index) {
const_eval::const_int(i) => i as u64,
const_eval::const_uint(u) => u,
_ => cx.sess().span_bug(index.span,
}
(const_get_elt(cx, arr, [iv as c_uint]), inlineable)
}
- ast::ExprCast(base, _) => {
+ ast::ExprCast(ref base, _) => {
let ety = ty::expr_ty(cx.tcx(), e);
let llty = type_of::type_of(cx, ety);
- let basety = ty::expr_ty(cx.tcx(), base);
- let (v, inlineable) = const_expr(cx, base, is_local);
+ let basety = ty::expr_ty(cx.tcx(), &**base);
+ let (v, inlineable) = const_expr(cx, &**base, is_local);
return (match (expr::cast_type_kind(basety),
expr::cast_type_kind(ety)) {
}
}, inlineable)
}
- ast::ExprAddrOf(ast::MutImmutable, sub) => {
- let (e, _) = const_expr(cx, sub, is_local);
+ ast::ExprAddrOf(ast::MutImmutable, ref sub) => {
+ let (e, _) = const_expr(cx, &**sub, is_local);
(const_addr_of(cx, e), false)
}
ast::ExprTup(ref es) => {
let tcx = cx.tcx();
let base_val = match *base_opt {
- Some(base) => Some(const_expr(cx, base, is_local)),
+ Some(ref base) => Some(const_expr(cx, &**base, is_local)),
None => None
};
let (cs, inlineable) = vec::unzip(field_tys.iter().enumerate()
.map(|(ix, &field_ty)| {
match fs.iter().find(|f| field_ty.ident.name == f.ident.node.name) {
- Some(f) => const_expr(cx, (*f).expr, is_local),
+ Some(ref f) => const_expr(cx, &*f.expr, is_local),
None => {
match base_val {
Some((bv, inlineable)) => {
is_local);
(v, inlineable)
}
- ast::ExprVstore(sub, store @ ast::ExprVstoreSlice) |
- ast::ExprVstore(sub, store @ ast::ExprVstoreMutSlice) => {
+ ast::ExprVstore(ref sub, store @ ast::ExprVstoreSlice) |
+ ast::ExprVstore(ref sub, store @ ast::ExprVstoreMutSlice) => {
match sub.node {
ast::ExprLit(ref lit) => {
match lit.node {
- ast::LitStr(..) => { const_expr(cx, sub, is_local) }
+ ast::LitStr(..) => { const_expr(cx, &**sub, is_local) }
_ => { cx.sess().span_bug(e.span, "bad const-slice lit") }
}
}
_ => cx.sess().span_bug(e.span, "bad const-slice expr")
}
}
- ast::ExprRepeat(elem, count) => {
+ ast::ExprRepeat(ref elem, ref count) => {
let vec_ty = ty::expr_ty(cx.tcx(), e);
let unit_ty = ty::sequence_element_type(cx.tcx(), vec_ty);
let llunitty = type_of::type_of(cx, unit_ty);
- let n = match const_eval::eval_const_expr(cx.tcx(), count) {
+ let n = match const_eval::eval_const_expr(cx.tcx(), &**count) {
const_eval::const_int(i) => i as uint,
const_eval::const_uint(i) => i as uint,
_ => cx.sess().span_bug(count.span, "count must be integral const expression.")
};
- let vs = Vec::from_elem(n, const_expr(cx, elem, is_local).val0());
+ let vs = Vec::from_elem(n, const_expr(cx, &**elem, is_local).val0());
let v = if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
C_struct(cx, vs.as_slice(), false)
} else {
_ => cx.sess().span_bug(e.span, "expected a struct or variant def")
}
}
- ast::ExprParen(e) => { const_expr(cx, e, is_local) }
+ ast::ExprParen(ref e) => { const_expr(cx, &**e, is_local) }
ast::ExprBlock(ref block) => {
match block.expr {
Some(ref expr) => const_expr(cx, &**expr, is_local),
use syntax::parse::token;
use syntax::visit::Visitor;
+use std::gc::Gc;
+
pub fn trans_stmt<'a>(cx: &'a Block<'a>,
s: &ast::Stmt)
-> &'a Block<'a> {
fcx.push_ast_cleanup_scope(id);
match s.node {
- ast::StmtExpr(e, _) | ast::StmtSemi(e, _) => {
- bcx = trans_stmt_semi(bcx, e);
+ ast::StmtExpr(ref e, _) | ast::StmtSemi(ref e, _) => {
+ bcx = trans_stmt_semi(bcx, &**e);
}
ast::StmtDecl(d, _) => {
match d.node {
ast::DeclLocal(ref local) => {
- bcx = init_local(bcx, *local);
+ bcx = init_local(bcx, &**local);
if cx.sess().opts.debuginfo == FullDebugInfo {
- debuginfo::create_local_var_metadata(bcx, *local);
+ debuginfo::create_local_var_metadata(bcx, &**local);
}
}
- ast::DeclItem(i) => trans_item(cx.fcx.ccx, i)
+ ast::DeclItem(ref i) => trans_item(cx.fcx.ccx, &**i)
}
}
ast::StmtMac(..) => cx.tcx().sess.bug("unexpanded macro")
fcx.push_ast_cleanup_scope(b.id);
for s in b.stmts.iter() {
- bcx = trans_stmt(bcx, *s);
+ bcx = trans_stmt(bcx, &**s);
}
if dest != expr::Ignore {
}
match b.expr {
- Some(e) => {
- bcx = expr::trans_into(bcx, e, dest);
+ Some(ref e) => {
+ bcx = expr::trans_into(bcx, &**e, dest);
}
None => {
assert!(dest == expr::Ignore || bcx.unreachable.get());
if_id: ast::NodeId,
cond: &ast::Expr,
thn: ast::P<ast::Block>,
- els: Option<@ast::Expr>,
+ els: Option<Gc<ast::Expr>>,
dest: expr::Dest)
-> &'a Block<'a> {
debug!("trans_if(bcx={}, if_id={}, cond={}, thn={:?}, dest={})",
match els {
Some(elexpr) => {
let mut trans = TransItemVisitor { ccx: bcx.fcx.ccx };
- trans.visit_expr(elexpr, ());
+ trans.visit_expr(&*elexpr, ());
}
None => {}
}
// if true { .. } [else { .. }]
- bcx = trans_block(bcx, thn, dest);
+ bcx = trans_block(bcx, &*thn, dest);
debuginfo::clear_source_location(bcx.fcx);
} else {
let mut trans = TransItemVisitor { ccx: bcx.fcx.ccx } ;
- trans.visit_block(thn, ());
+ trans.visit_block(&*thn, ());
match els {
// if false { .. } else { .. }
Some(elexpr) => {
- bcx = expr::trans_into(bcx, elexpr, dest);
+ bcx = expr::trans_into(bcx, &*elexpr, dest);
debuginfo::clear_source_location(bcx.fcx);
}
let name = format!("then-block-{}-", thn.id);
let then_bcx_in = bcx.fcx.new_id_block(name.as_slice(), thn.id);
- let then_bcx_out = trans_block(then_bcx_in, thn, dest);
+ let then_bcx_out = trans_block(then_bcx_in, &*thn, dest);
debuginfo::clear_source_location(bcx.fcx);
let next_bcx;
match els {
Some(elexpr) => {
let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
- let else_bcx_out = expr::trans_into(else_bcx_in, elexpr, dest);
+ let else_bcx_out = expr::trans_into(else_bcx_in, &*elexpr, dest);
next_bcx = bcx.fcx.join_blocks(if_id,
[then_bcx_out, else_bcx_out]);
CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb);
}
pub fn trans_ret<'a>(bcx: &'a Block<'a>,
- e: Option<@ast::Expr>)
+ e: Option<Gc<ast::Expr>>)
-> &'a Block<'a> {
let _icx = push_ctxt("trans_ret");
let fcx = bcx.fcx;
};
match e {
Some(x) => {
- bcx = expr::trans_into(bcx, x, dest);
+ bcx = expr::trans_into(bcx, &*x, dest);
}
_ => {}
}
use middle::pat_util;
use util::ppaux;
+use libc::{c_uint, c_ulonglong, c_longlong};
use std::c_str::{CString, ToCStr};
use std::cell::{Cell, RefCell};
-use std::rc::{Rc, Weak};
use std::collections::HashMap;
use std::collections::HashSet;
-use libc::{c_uint, c_ulonglong, c_longlong};
+use std::gc::Gc;
use std::ptr;
-use std::string::String;
+use std::rc::{Rc, Weak};
use std::sync::atomics;
use syntax::codemap::{Span, Pos};
use syntax::{abi, ast, codemap, ast_util, ast_map};
let cx = bcx.ccx();
let def_map = &cx.tcx.def_map;
- pat_util::pat_bindings(def_map, local.pat, |_, node_id, span, path_ref| {
+ pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path_ref| {
let var_ident = ast_util::path_to_ident(path_ref);
let datum = match bcx.fcx.lllocals.borrow().find_copy(&node_id) {
let def_map = &cx.tcx.def_map;
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
- pat_util::pat_bindings(def_map, arg.pat, |_, node_id, span, path_ref| {
+ pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path_ref| {
let llarg = match bcx.fcx.llargs.borrow().find_copy(&node_id) {
Some(v) => v,
None => {
let file_metadata = file_metadata(cx, loc.file.name.as_slice());
let function_type_metadata = unsafe {
- let fn_signature = get_function_signature(cx, fn_ast_id, fn_decl, param_substs, span);
+ let fn_signature = get_function_signature(cx, fn_ast_id, &*fn_decl, param_substs, span);
llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
};
let arg_pats = fn_decl.inputs.iter().map(|arg_ref| arg_ref.pat).collect::<Vec<_>>();
populate_scope_map(cx,
arg_pats.as_slice(),
- top_level_block,
+ &*top_level_block,
fn_metadata,
&mut *fn_debug_context.scope_map.borrow_mut());
// descriptors where necessary. These artificial scopes allow GDB to correctly handle name
// shadowing.
fn populate_scope_map(cx: &CrateContext,
- arg_pats: &[@ast::Pat],
+ arg_pats: &[Gc<ast::Pat>],
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
// Push argument identifiers onto the stack so arguments integrate nicely with variable
// shadowing.
for &arg_pat in arg_pats.iter() {
- pat_util::pat_bindings(def_map, arg_pat, |_, _, _, path_ref| {
+ pat_util::pat_bindings(def_map, &*arg_pat, |_, _, _, path_ref| {
let ident = ast_util::path_to_ident(path_ref);
scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata, ident: Some(ident) });
})
// The interesting things here are statements and the concluding expression.
for statement in block.stmts.iter() {
- scope_map.insert(ast_util::stmt_id(*statement),
+ scope_map.insert(ast_util::stmt_id(&**statement),
scope_stack.last().unwrap().scope_metadata);
match statement.node {
- ast::StmtDecl(decl, _) => walk_decl(cx, decl, scope_stack, scope_map),
- ast::StmtExpr(exp, _) |
- ast::StmtSemi(exp, _) => walk_expr(cx, exp, scope_stack, scope_map),
+ ast::StmtDecl(ref decl, _) =>
+ walk_decl(cx, &**decl, scope_stack, scope_map),
+ ast::StmtExpr(ref exp, _) |
+ ast::StmtSemi(ref exp, _) =>
+ walk_expr(cx, &**exp, scope_stack, scope_map),
ast::StmtMac(..) => () // ignore macros (which should be expanded anyway)
}
}
for exp in block.expr.iter() {
- walk_expr(cx, *exp, scope_stack, scope_map);
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
walk_pattern(cx, local.pat, scope_stack, scope_map);
for exp in local.init.iter() {
- walk_expr(cx, *exp, scope_stack, scope_map);
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
_ => ()
}
fn walk_pattern(cx: &CrateContext,
- pat: @ast::Pat,
+ pat: Gc<ast::Pat>,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
// Check if this is a binding. If so we need to put it on the scope stack and maybe
// introduce an artificial scope
- if pat_util::pat_is_binding(def_map, pat) {
+ if pat_util::pat_is_binding(def_map, &*pat) {
let ident = ast_util::path_to_ident(path_ref);
ast::PatTup(ref sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- for &sub_pat in sub_pats.iter() {
- walk_pattern(cx, sub_pat, scope_stack, scope_map);
+ for sub_pat in sub_pats.iter() {
+ walk_pattern(cx, sub_pat.clone(), scope_stack, scope_map);
}
}
- ast::PatBox(sub_pat) | ast::PatRegion(sub_pat) => {
+ ast::PatBox(ref sub_pat) | ast::PatRegion(ref sub_pat) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_pattern(cx, sub_pat, scope_stack, scope_map);
+ walk_pattern(cx, sub_pat.clone(), scope_stack, scope_map);
}
- ast::PatLit(exp) => {
+ ast::PatLit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, exp, scope_stack, scope_map);
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
- ast::PatRange(exp1, exp2) => {
+ ast::PatRange(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, exp1, scope_stack, scope_map);
- walk_expr(cx, exp2, scope_stack, scope_map);
+ walk_expr(cx, &**exp1, scope_stack, scope_map);
+ walk_expr(cx, &**exp2, scope_stack, scope_map);
}
ast::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
ast::ExprAgain(_) |
ast::ExprPath(_) => {}
- ast::ExprVstore(sub_exp, _) |
- ast::ExprCast(sub_exp, _) |
- ast::ExprAddrOf(_, sub_exp) |
- ast::ExprField(sub_exp, _, _) |
- ast::ExprParen(sub_exp) => walk_expr(cx, sub_exp, scope_stack, scope_map),
+ ast::ExprVstore(ref sub_exp, _) |
+ ast::ExprCast(ref sub_exp, _) |
+ ast::ExprAddrOf(_, ref sub_exp) |
+ ast::ExprField(ref sub_exp, _, _) |
+ ast::ExprParen(ref sub_exp) =>
+ walk_expr(cx, &**sub_exp, scope_stack, scope_map),
- ast::ExprBox(place, sub_expr) => {
- walk_expr(cx, place, scope_stack, scope_map);
- walk_expr(cx, sub_expr, scope_stack, scope_map);
+ ast::ExprBox(ref place, ref sub_expr) => {
+ walk_expr(cx, &**place, scope_stack, scope_map);
+ walk_expr(cx, &**sub_expr, scope_stack, scope_map);
}
ast::ExprRet(exp_opt) => match exp_opt {
- Some(sub_exp) => walk_expr(cx, sub_exp, scope_stack, scope_map),
+ Some(sub_exp) => walk_expr(cx, &*sub_exp, scope_stack, scope_map),
None => ()
},
- ast::ExprUnary(_, sub_exp) => {
- walk_expr(cx, sub_exp, scope_stack, scope_map);
+ ast::ExprUnary(_, ref sub_exp) => {
+ walk_expr(cx, &**sub_exp, scope_stack, scope_map);
}
- ast::ExprAssignOp(_, lhs, rhs) |
- ast::ExprIndex(lhs, rhs) |
- ast::ExprBinary(_, lhs, rhs) => {
- walk_expr(cx, lhs, scope_stack, scope_map);
- walk_expr(cx, rhs, scope_stack, scope_map);
+ ast::ExprAssignOp(_, ref lhs, ref rhs) |
+ ast::ExprIndex(ref lhs, ref rhs) |
+ ast::ExprBinary(_, ref lhs, ref rhs) => {
+ walk_expr(cx, &**lhs, scope_stack, scope_map);
+ walk_expr(cx, &**rhs, scope_stack, scope_map);
}
ast::ExprVec(ref init_expressions) |
ast::ExprTup(ref init_expressions) => {
for ie in init_expressions.iter() {
- walk_expr(cx, *ie, scope_stack, scope_map);
+ walk_expr(cx, &**ie, scope_stack, scope_map);
}
}
- ast::ExprAssign(sub_exp1, sub_exp2) |
- ast::ExprRepeat(sub_exp1, sub_exp2) => {
- walk_expr(cx, sub_exp1, scope_stack, scope_map);
- walk_expr(cx, sub_exp2, scope_stack, scope_map);
+ ast::ExprAssign(ref sub_exp1, ref sub_exp2) |
+ ast::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
+ walk_expr(cx, &**sub_exp1, scope_stack, scope_map);
+ walk_expr(cx, &**sub_exp2, scope_stack, scope_map);
}
- ast::ExprIf(cond_exp, then_block, ref opt_else_exp) => {
- walk_expr(cx, cond_exp, scope_stack, scope_map);
+ ast::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
+ walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
then_block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
- walk_block(cx, then_block, scope_stack, scope_map);
+ walk_block(cx, &**then_block, scope_stack, scope_map);
});
match *opt_else_exp {
- Some(else_exp) => walk_expr(cx, else_exp, scope_stack, scope_map),
+ Some(ref else_exp) =>
+ walk_expr(cx, &**else_exp, scope_stack, scope_map),
_ => ()
}
}
- ast::ExprWhile(cond_exp, loop_body) => {
- walk_expr(cx, cond_exp, scope_stack, scope_map);
+ ast::ExprWhile(ref cond_exp, ref loop_body) => {
+ walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
loop_body.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
- walk_block(cx, loop_body, scope_stack, scope_map);
+ walk_block(cx, &**loop_body, scope_stack, scope_map);
})
}
Found unexpanded macro.");
}
- ast::ExprLoop(block, _) |
- ast::ExprBlock(block) => {
+ ast::ExprLoop(ref block, _) |
+ ast::ExprBlock(ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
- walk_block(cx, block, scope_stack, scope_map);
+ walk_block(cx, &**block, scope_stack, scope_map);
})
}
- ast::ExprFnBlock(decl, block) |
- ast::ExprProc(decl, block) => {
+ ast::ExprFnBlock(ref decl, ref block) |
+ ast::ExprProc(ref decl, ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
- for &ast::Arg { pat: pattern, .. } in decl.inputs.iter() {
- walk_pattern(cx, pattern, scope_stack, scope_map);
+ for &ast::Arg { pat: ref pattern, .. } in decl.inputs.iter() {
+ walk_pattern(cx, pattern.clone(), scope_stack, scope_map);
}
- walk_block(cx, block, scope_stack, scope_map);
+ walk_block(cx, &**block, scope_stack, scope_map);
})
}
- ast::ExprCall(fn_exp, ref args) => {
- walk_expr(cx, fn_exp, scope_stack, scope_map);
+ ast::ExprCall(ref fn_exp, ref args) => {
+ walk_expr(cx, &**fn_exp, scope_stack, scope_map);
for arg_exp in args.iter() {
- walk_expr(cx, *arg_exp, scope_stack, scope_map);
+ walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
ast::ExprMethodCall(_, _, ref args) => {
for arg_exp in args.iter() {
- walk_expr(cx, *arg_exp, scope_stack, scope_map);
+ walk_expr(cx, &**arg_exp, scope_stack, scope_map);
}
}
- ast::ExprMatch(discriminant_exp, ref arms) => {
- walk_expr(cx, discriminant_exp, scope_stack, scope_map);
+ ast::ExprMatch(ref discriminant_exp, ref arms) => {
+ walk_expr(cx, &**discriminant_exp, scope_stack, scope_map);
// for each arm we have to first walk the pattern as these might introduce new
// artificial scopes. It should be sufficient to walk only one pattern per arm, as
}
for guard_exp in arm_ref.guard.iter() {
- walk_expr(cx, *guard_exp, scope_stack, scope_map)
+ walk_expr(cx, &**guard_exp, scope_stack, scope_map)
}
- walk_expr(cx, arm_ref.body, scope_stack, scope_map);
+ walk_expr(cx, &*arm_ref.body, scope_stack, scope_map);
})
}
}
ast::ExprStruct(_, ref fields, ref base_exp) => {
- for &ast::Field { expr: exp, .. } in fields.iter() {
- walk_expr(cx, exp, scope_stack, scope_map);
+ for &ast::Field { expr: ref exp, .. } in fields.iter() {
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
match *base_exp {
- Some(exp) => walk_expr(cx, exp, scope_stack, scope_map),
+ Some(ref exp) => walk_expr(cx, &**exp, scope_stack, scope_map),
None => ()
}
}
ast::ExprInlineAsm(ast::InlineAsm { inputs: ref inputs,
outputs: ref outputs,
.. }) => {
- // inputs, outputs: ~[(String, @expr)]
- for &(_, exp) in inputs.iter() {
- walk_expr(cx, exp, scope_stack, scope_map);
+ // inputs, outputs: ~[(String, Gc<expr>)]
+ for &(_, ref exp) in inputs.iter() {
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
- for &(_, exp) in outputs.iter() {
- walk_expr(cx, exp, scope_stack, scope_map);
+ for &(_, ref exp) in outputs.iter() {
+ walk_expr(cx, &**exp, scope_stack, scope_map);
}
}
}
use syntax::codemap;
use syntax::print::pprust::{expr_to_str};
+use std::gc::Gc;
+
// Destinations
// These are passed around by the code generating functions to track the
let _icx = push_ctxt("trans_datum_unadjusted");
match expr.node {
- ast::ExprParen(e) => {
- trans(bcx, e)
+ ast::ExprParen(ref e) => {
+ trans(bcx, &**e)
}
ast::ExprPath(_) => {
trans_def(bcx, expr, bcx.def(expr.id))
}
- ast::ExprField(base, ident, _) => {
- trans_rec_field(bcx, base, ident)
+ ast::ExprField(ref base, ident, _) => {
+ trans_rec_field(bcx, &**base, ident)
}
- ast::ExprIndex(base, idx) => {
- trans_index(bcx, expr, base, idx)
+ ast::ExprIndex(ref base, ref idx) => {
+ trans_index(bcx, expr, &**base, &**idx)
}
- ast::ExprVstore(contents, ast::ExprVstoreUniq) => {
+ ast::ExprVstore(ref contents, ast::ExprVstoreUniq) => {
fcx.push_ast_cleanup_scope(contents.id);
let datum = unpack_datum!(
- bcx, tvec::trans_uniq_vstore(bcx, expr, contents));
+ bcx, tvec::trans_uniq_vstore(bcx, expr, &**contents));
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, contents.id);
DatumBlock::new(bcx, datum)
}
- ast::ExprBox(_, contents) => {
- // Special case for `box T`. (The other case, for GC, is handled
- // in `trans_rvalue_dps_unadjusted`.)
+ ast::ExprBox(_, ref contents) => {
+ // Special case for `Box<T>` and `Gc<T>`
let box_ty = expr_ty(bcx, expr);
- let contents_ty = expr_ty(bcx, contents);
- trans_uniq_expr(bcx, box_ty, contents, contents_ty)
+ let contents_ty = expr_ty(bcx, &**contents);
+ match ty::get(box_ty).sty {
+ ty::ty_uniq(..) => {
+ trans_uniq_expr(bcx, box_ty, &**contents, contents_ty)
+ }
+ ty::ty_box(..) => {
+ trans_managed_expr(bcx, box_ty, &**contents, contents_ty)
+ }
+ _ => bcx.sess().span_bug(expr.span,
+ "expected unique or managed box")
+ }
}
- ast::ExprLit(lit) => trans_immediate_lit(bcx, expr, (*lit).clone()),
- ast::ExprBinary(op, lhs, rhs) => {
- trans_binary(bcx, expr, op, lhs, rhs)
+ ast::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, (**lit).clone()),
+ ast::ExprBinary(op, ref lhs, ref rhs) => {
+ trans_binary(bcx, expr, op, &**lhs, &**rhs)
}
- ast::ExprUnary(op, x) => {
- trans_unary(bcx, expr, op, x)
+ ast::ExprUnary(op, ref x) => {
+ trans_unary(bcx, expr, op, &**x)
}
- ast::ExprAddrOf(_, x) => {
- trans_addr_of(bcx, expr, x)
+ ast::ExprAddrOf(_, ref x) => {
+ trans_addr_of(bcx, expr, &**x)
}
- ast::ExprCast(val, _) => {
+ ast::ExprCast(ref val, _) => {
// Datum output mode means this is a scalar cast:
- trans_imm_cast(bcx, val, expr.id)
+ trans_imm_cast(bcx, &**val, expr.id)
}
_ => {
bcx.tcx().sess.span_bug(
}
match expr.node {
- ast::ExprParen(e) => {
- trans_into(bcx, e, Ignore)
+ ast::ExprParen(ref e) => {
+ trans_into(bcx, &**e, Ignore)
}
ast::ExprBreak(label_opt) => {
controlflow::trans_break(bcx, expr.id, label_opt)
ast::ExprRet(ex) => {
controlflow::trans_ret(bcx, ex)
}
- ast::ExprWhile(cond, body) => {
- controlflow::trans_while(bcx, expr.id, cond, body)
+ ast::ExprWhile(ref cond, ref body) => {
+ controlflow::trans_while(bcx, expr.id, &**cond, &**body)
}
- ast::ExprLoop(body, _) => {
- controlflow::trans_loop(bcx, expr.id, body)
+ ast::ExprLoop(ref body, _) => {
+ controlflow::trans_loop(bcx, expr.id, &**body)
}
- ast::ExprAssign(dst, src) => {
- let src_datum = unpack_datum!(bcx, trans(bcx, src));
- let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign"));
+ ast::ExprAssign(ref dst, ref src) => {
+ let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
+ let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
if ty::type_needs_drop(bcx.tcx(), dst_datum.ty) {
// If there are destructors involved, make sure we
src_datum.store_to(bcx, dst_datum.val)
}
}
- ast::ExprAssignOp(op, dst, src) => {
- trans_assign_op(bcx, expr, op, dst, src)
+ ast::ExprAssignOp(op, ref dst, ref src) => {
+ trans_assign_op(bcx, expr, op, &**dst, src.clone())
}
ast::ExprInlineAsm(ref a) => {
asm::trans_inline_asm(bcx, a)
let fcx = bcx.fcx;
match expr.node {
- ast::ExprParen(e) => {
- trans_into(bcx, e, dest)
+ ast::ExprParen(ref e) => {
+ trans_into(bcx, &**e, dest)
}
ast::ExprPath(_) => {
trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
}
- ast::ExprIf(cond, thn, els) => {
- controlflow::trans_if(bcx, expr.id, cond, thn, els, dest)
+ ast::ExprIf(ref cond, ref thn, els) => {
+ controlflow::trans_if(bcx, expr.id, &**cond, thn.clone(), els, dest)
}
- ast::ExprMatch(discr, ref arms) => {
- _match::trans_match(bcx, expr, discr, arms.as_slice(), dest)
+ ast::ExprMatch(ref discr, ref arms) => {
+ _match::trans_match(bcx, expr, &**discr, arms.as_slice(), dest)
}
- ast::ExprBlock(blk) => {
- controlflow::trans_block(bcx, blk, dest)
+ ast::ExprBlock(ref blk) => {
+ controlflow::trans_block(bcx, &**blk, dest)
}
ast::ExprStruct(_, ref fields, base) => {
trans_rec_or_struct(bcx,
}
ast::ExprTup(ref args) => {
let repr = adt::represent_type(bcx.ccx(), expr_ty(bcx, expr));
- let numbered_fields: Vec<(uint, @ast::Expr)> =
+ let numbered_fields: Vec<(uint, Gc<ast::Expr>)> =
args.iter().enumerate().map(|(i, arg)| (i, *arg)).collect();
trans_adt(bcx, &*repr, 0, numbered_fields.as_slice(), None, dest)
}
}
}
}
- ast::ExprVstore(contents, ast::ExprVstoreSlice) |
- ast::ExprVstore(contents, ast::ExprVstoreMutSlice) => {
+ ast::ExprVstore(ref contents, ast::ExprVstoreSlice) |
+ ast::ExprVstore(ref contents, ast::ExprVstoreMutSlice) => {
fcx.push_ast_cleanup_scope(contents.id);
- bcx = tvec::trans_slice_vstore(bcx, expr, contents, dest);
+ bcx = tvec::trans_slice_vstore(bcx, expr, &**contents, dest);
fcx.pop_and_trans_ast_cleanup_scope(bcx, contents.id)
}
ast::ExprVec(..) | ast::ExprRepeat(..) => {
tvec::trans_fixed_vstore(bcx, expr, expr, dest)
}
- ast::ExprFnBlock(decl, body) |
- ast::ExprProc(decl, body) => {
+ ast::ExprFnBlock(ref decl, ref body) |
+ ast::ExprProc(ref decl, ref body) => {
let expr_ty = expr_ty(bcx, expr);
let store = ty::ty_closure_store(expr_ty);
debug!("translating block function {} with type {}",
expr_to_str(expr), expr_ty.repr(tcx));
- closure::trans_expr_fn(bcx, store, decl, body, expr.id, dest)
+ closure::trans_expr_fn(bcx, store, &**decl, &**body, expr.id, dest)
}
- ast::ExprCall(f, ref args) => {
+ ast::ExprCall(ref f, ref args) => {
if bcx.tcx().is_method_call(expr.id) {
- let callee_datum = unpack_datum!(bcx, trans(bcx, f));
+ let callee_datum = unpack_datum!(bcx, trans(bcx, &**f));
trans_overloaded_call(bcx,
expr,
callee_datum,
} else {
callee::trans_call(bcx,
expr,
- f,
+ &**f,
callee::ArgExprs(args.as_slice()),
dest)
}
ast::ExprMethodCall(_, _, ref args) => {
callee::trans_method_call(bcx,
expr,
- *args.get(0),
+ &**args.get(0),
callee::ArgExprs(args.as_slice()),
dest)
}
- ast::ExprBinary(_, lhs, rhs) => {
+ ast::ExprBinary(_, ref lhs, ref rhs) => {
// if not overloaded, would be RvalueDatumExpr
- let lhs = unpack_datum!(bcx, trans(bcx, lhs));
- let rhs_datum = unpack_datum!(bcx, trans(bcx, rhs));
+ let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
+ let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
Some((rhs_datum, rhs.id)), Some(dest)).bcx
}
- ast::ExprUnary(_, subexpr) => {
+ ast::ExprUnary(_, ref subexpr) => {
// if not overloaded, would be RvalueDatumExpr
- let arg = unpack_datum!(bcx, trans(bcx, subexpr));
+ let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
arg, None, Some(dest)).bcx
}
- ast::ExprIndex(base, idx) => {
+ ast::ExprIndex(ref base, ref idx) => {
// if not overloaded, would be RvalueDatumExpr
- let base = unpack_datum!(bcx, trans(bcx, base));
- let idx_datum = unpack_datum!(bcx, trans(bcx, idx));
+ let base = unpack_datum!(bcx, trans(bcx, &**base));
+ let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
Some((idx_datum, idx.id)), Some(dest)).bcx
}
- ast::ExprCast(val, _) => {
+ ast::ExprCast(ref val, _) => {
// DPS output mode means this is a trait cast:
match ty::get(node_id_type(bcx, expr.id)).sty {
ty::ty_trait(..) => {
- let datum = unpack_datum!(bcx, trans(bcx, val));
+ let datum = unpack_datum!(bcx, trans(bcx, &**val));
meth::trans_trait_cast(bcx, datum, expr.id, dest)
}
_ => {
}
}
}
- ast::ExprAssignOp(op, dst, src) => {
- trans_assign_op(bcx, expr, op, dst, src)
- }
- ast::ExprBox(_, contents) => {
- // Special case for `Gc<T>` for now. The other case, for unique
- // pointers, is handled in `trans_rvalue_datum_unadjusted`.
- trans_gc(bcx, expr, contents, dest)
+ ast::ExprAssignOp(op, ref dst, ref src) => {
+ trans_assign_op(bcx, expr, op, &**dst, src.clone())
}
_ => {
bcx.tcx().sess.span_bug(
fn trans_rec_or_struct<'a>(
bcx: &'a Block<'a>,
fields: &[ast::Field],
- base: Option<@ast::Expr>,
+ base: Option<Gc<ast::Expr>>,
expr_span: codemap::Span,
id: ast::NodeId,
dest: Dest)
*/
struct StructBaseInfo {
/// The base expression; will be evaluated after all explicit fields.
- expr: @ast::Expr,
+ expr: Gc<ast::Expr>,
/// The indices of fields to copy paired with their types.
fields: Vec<(uint, ty::t)> }
bcx: &'a Block<'a>,
repr: &adt::Repr,
discr: ty::Disr,
- fields: &[(uint, @ast::Expr)],
+ fields: &[(uint, Gc<ast::Expr>)],
optbase: Option<StructBaseInfo>,
dest: Dest)
-> &'a Block<'a> {
let mut bcx = bcx;
let addr = match dest {
Ignore => {
- for &(_i, e) in fields.iter() {
- bcx = trans_into(bcx, e, Ignore);
+ for &(_i, ref e) in fields.iter() {
+ bcx = trans_into(bcx, &**e, Ignore);
}
for sbi in optbase.iter() {
// FIXME #7261: this moves entire base, not just certain fields
- bcx = trans_into(bcx, sbi.expr, Ignore);
+ bcx = trans_into(bcx, &*sbi.expr, Ignore);
}
return bcx;
}
adt::trans_start_init(bcx, repr, addr, discr);
- for &(i, e) in fields.iter() {
+ for &(i, ref e) in fields.iter() {
let dest = adt::trans_field_ptr(bcx, repr, addr, discr, i);
- let e_ty = expr_ty_adjusted(bcx, e);
- bcx = trans_into(bcx, e, SaveIn(dest));
+ let e_ty = expr_ty_adjusted(bcx, &**e);
+ bcx = trans_into(bcx, &**e, SaveIn(dest));
fcx.schedule_drop_mem(cleanup::CustomScope(custom_cleanup_scope),
dest, e_ty);
}
for base in optbase.iter() {
// FIXME #6573: is it sound to use the destination's repr on the base?
// And, would it ever be reasonable to be here with discr != 0?
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base.expr, "base"));
+ let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
for &(i, t) in base.fields.iter() {
let datum = base_datum.get_element(
t,
return immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock();
}
-fn trans_gc<'a>(mut bcx: &'a Block<'a>,
- expr: &ast::Expr,
- contents: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
- let contents_ty = expr_ty(bcx, contents);
- let box_ty = ty::mk_box(bcx.tcx(), contents_ty);
-
- let contents_datum = unpack_datum!(bcx, trans_managed_expr(bcx,
- box_ty,
- contents,
- contents_ty));
-
- match dest {
- Ignore => bcx,
- SaveIn(addr) => {
- let expr_ty = expr_ty(bcx, expr);
- let repr = adt::represent_type(bcx.ccx(), expr_ty);
- adt::trans_start_init(bcx, &*repr, addr, 0);
- let field_dest = adt::trans_field_ptr(bcx, &*repr, addr, 0, 0);
- contents_datum.store_to(bcx, field_dest)
- }
- }
-}
-
// Important to get types for both lhs and rhs, because one might be _|_
// and the other not.
fn trans_eager_binop<'a>(
mut bcx: &'a Block<'a>,
expr: &ast::Expr,
callee: Datum<Expr>,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
dest: Option<Dest>)
-> &'a Block<'a> {
// Evaluate and tuple the arguments.
let tuple_type = ty::mk_tup(bcx.tcx(),
args.iter()
- .map(|e| expr_ty(bcx, *e))
+ .map(|e| expr_ty(bcx, &**e))
.collect());
let repr = adt::represent_type(bcx.ccx(), tuple_type);
- let numbered_fields: Vec<(uint, @ast::Expr)> =
+ let numbered_fields: Vec<(uint, Gc<ast::Expr>)> =
args.iter().enumerate().map(|(i, arg)| (i, *arg)).collect();
let argument_scope = bcx.fcx.push_custom_cleanup_scope();
let tuple_datum =
expr: &ast::Expr,
op: ast::BinOp,
dst: &ast::Expr,
- src: @ast::Expr)
+ src: Gc<ast::Expr>)
-> &'a Block<'a> {
let _icx = push_ctxt("trans_assign_op");
let mut bcx = bcx;
let dst = Load(bcx, dst_datum.val);
// Evaluate RHS
- let rhs_datum = unpack_datum!(bcx, trans(bcx, src));
+ let rhs_datum = unpack_datum!(bcx, trans(bcx, &*src));
let rhs_ty = rhs_datum.ty;
let rhs = rhs_datum.to_llscalarish(bcx);
pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &ast::ForeignMod) {
let _icx = push_ctxt("foreign::trans_foreign_mod");
- for &foreign_item in foreign_mod.items.iter() {
- let lname = link_name(foreign_item);
+ for foreign_item in foreign_mod.items.iter() {
+ let lname = link_name(&**foreign_item);
match foreign_item.node {
ast::ForeignItemFn(..) => {
ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
- trans_item(ccx, item);
+ trans_item(ccx, &*item);
// We're bringing an external global into this crate, but we don't
// want to create two copies of the global. If we do this, then if
_ => ccx.sess().bug("maybe_instantiate_inline: item has a \
non-enum, non-struct parent")
}
- trans_item(ccx, item);
+ trans_item(ccx, &*item);
local_def(my_id)
}
csearch::found_parent(_, _) => {
if num_type_params == 0 {
let llfn = get_item_val(ccx, mth.id);
- trans_fn(ccx, mth.decl, mth.body, llfn,
+ trans_fn(ccx, &*mth.decl, &*mth.body, llfn,
¶m_substs::empty(), mth.id, []);
}
local_def(mth.id)
use util::ppaux::Repr;
use std::c_str::ToCStr;
+use std::gc::Gc;
use syntax::abi::Rust;
use syntax::parse::token;
use syntax::{ast, ast_map, visit};
*/
pub fn trans_impl(ccx: &CrateContext,
name: ast::Ident,
- methods: &[@ast::Method],
+ methods: &[Gc<ast::Method>],
generics: &ast::Generics,
id: ast::NodeId) {
let _icx = push_ctxt("meth::trans_impl");
if !generics.ty_params.is_empty() {
let mut v = TransItemVisitor{ ccx: ccx };
for method in methods.iter() {
- visit::walk_method_helper(&mut v, *method, ());
+ visit::walk_method_helper(&mut v, &**method, ());
}
return;
}
for method in methods.iter() {
if method.generics.ty_params.len() == 0u {
let llfn = get_item_val(ccx, method.id);
- trans_fn(ccx, method.decl, method.body,
+ trans_fn(ccx, &*method.decl, &*method.body,
llfn, ¶m_substs::empty(), method.id, []);
} else {
let mut v = TransItemVisitor{ ccx: ccx };
- visit::walk_method_helper(&mut v, *method, ());
+ visit::walk_method_helper(&mut v, &**method, ());
}
}
}
ast_map::NodeItem(i) => {
match *i {
ast::Item {
- node: ast::ItemFn(decl, _, _, _, body),
+ node: ast::ItemFn(ref decl, _, _, _, ref body),
..
} => {
let d = mk_lldecl();
set_llvm_fn_attrs(i.attrs.as_slice(), d);
- trans_fn(ccx, decl, body, d, &psubsts, fn_id.node, []);
+ trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
d
}
_ => {
}
}
ast_map::NodeForeignItem(i) => {
- let simple = intrinsic::get_simple_intrinsic(ccx, i);
+ let simple = intrinsic::get_simple_intrinsic(ccx, &*i);
match simple {
Some(decl) => decl,
None => {
let d = mk_lldecl();
- intrinsic::trans_intrinsic(ccx, d, i, &psubsts, ref_id);
+ intrinsic::trans_intrinsic(ccx, d, &*i, &psubsts, ref_id);
d
}
}
ast::TupleVariantKind(ref args) => {
trans_enum_variant(ccx,
parent,
- v,
+ &*v,
args.as_slice(),
this_tv.disr_val,
&psubsts,
ast_map::NodeMethod(mth) => {
let d = mk_lldecl();
set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx, mth.decl, mth.body, d, &psubsts, mth.id, []);
+ trans_fn(ccx, &*mth.decl, &*mth.body, d, &psubsts, mth.id, []);
d
}
ast_map::NodeTraitMethod(method) => {
ast::Provided(mth) => {
let d = mk_lldecl();
set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx, mth.decl, mth.body, d, &psubsts, mth.id, []);
+ trans_fn(ccx, &*mth.decl, &*mth.body, d, &psubsts, mth.id, []);
d
}
_ => {
match dest {
Ignore => {
for element in elements.iter() {
- bcx = expr::trans_into(bcx, *element, Ignore);
+ bcx = expr::trans_into(bcx, &**element, Ignore);
}
}
let lleltptr = GEPi(bcx, lldest, [i]);
debug!("writing index {:?} with lleltptr={:?}",
i, bcx.val_to_str(lleltptr));
- bcx = expr::trans_into(bcx, *element,
+ bcx = expr::trans_into(bcx, &**element,
SaveIn(lleltptr));
fcx.schedule_drop_mem(
cleanup::CustomScope(temp_scope),
}
return bcx;
}
- ast::ExprRepeat(element, count_expr) => {
+ ast::ExprRepeat(ref element, ref count_expr) => {
match dest {
Ignore => {
- return expr::trans_into(bcx, element, Ignore);
+ return expr::trans_into(bcx, &**element, Ignore);
}
SaveIn(lldest) => {
- let count = ty::eval_repeat_count(bcx.tcx(), count_expr);
+ let count = ty::eval_repeat_count(bcx.tcx(), &**count_expr);
if count == 0 {
return bcx;
}
// this can only happen as a result of OOM. So we just skip out on the
// cleanup since things would *probably* be broken at that point anyways.
- let elem = unpack_datum!(bcx, expr::trans(bcx, element));
+ let elem = unpack_datum!(bcx, expr::trans(bcx, &**element));
assert!(!ty::type_moves_by_default(bcx.tcx(), elem.ty));
let bcx = iter_vec_loop(bcx, lldest, vt,
}
},
ast::ExprVec(ref es) => es.len(),
- ast::ExprRepeat(_, count_expr) => {
- ty::eval_repeat_count(bcx.tcx(), count_expr)
+ ast::ExprRepeat(_, ref count_expr) => {
+ ty::eval_repeat_count(bcx.tcx(), &**count_expr)
}
_ => bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content")
use middle::const_eval;
use middle::def;
use middle::dependency_format;
-use middle::lang_items::{ExchangeHeapLangItem, OpaqueStructLangItem};
+use middle::lang_items::OpaqueStructLangItem;
use middle::lang_items::{TyDescStructLangItem, TyVisitorTraitLangItem};
use middle::freevars;
use middle::resolve;
use std::fmt::Show;
use std::fmt;
use std::hash::{Hash, sip, Writer};
+use std::gc::Gc;
use std::iter::AdditiveIterator;
use std::mem;
use std::ops;
/// These two caches are used by const_eval when decoding external statics
/// and variants that are found.
- pub extern_const_statics: RefCell<DefIdMap<Option<@ast::Expr>>>,
- pub extern_const_variants: RefCell<DefIdMap<Option<@ast::Expr>>>,
+ pub extern_const_statics: RefCell<DefIdMap<Option<Gc<ast::Expr>>>>,
+ pub extern_const_variants: RefCell<DefIdMap<Option<Gc<ast::Expr>>>>,
pub method_map: typeck::MethodMap,
pub vtable_map: typeck::vtable_map,
}
ast::ExprBox(place, _) => {
- // Special case `Box<T>` for now:
+ // Special case `Box<T>`/`Gc<T>` for now:
let definition = match tcx.def_map.borrow().find(&place.id) {
Some(&def) => def,
None => fail!("no def for place"),
};
let def_id = definition.def_id();
- match tcx.lang_items.items.get(ExchangeHeapLangItem as uint) {
- &Some(item_def_id) if def_id == item_def_id => {
- RvalueDatumExpr
- }
- &Some(_) | &None => RvalueDpsExpr,
+ if tcx.lang_items.exchange_heap() == Some(def_id) ||
+ tcx.lang_items.managed_heap() == Some(def_id) {
+ RvalueDatumExpr
+ } else {
+ RvalueDpsExpr
}
}
- ast::ExprParen(e) => expr_kind(tcx, e),
+ ast::ExprParen(ref e) => expr_kind(tcx, &**e),
ast::ExprMac(..) => {
tcx.sess.span_bug(
}
ty_enum(id, _) => format!("enum {}", item_path_str(cx, id)),
- ty_box(_) => "@-ptr".to_string(),
+ ty_box(_) => "Gc-ptr".to_string(),
ty_uniq(_) => "box".to_string(),
ty_vec(_, _) => "vector".to_string(),
ty_ptr(_) => "*-ptr".to_string(),
};
match variant.node.disr_expr {
- Some(e) => match const_eval::eval_const_expr_partial(cx, e) {
+ Some(ref e) => match const_eval::eval_const_expr_partial(cx, &**e) {
Ok(const_eval::const_int(val)) => {
discriminant = val as Disr
}
};
last_discriminant = Some(discriminant);
- Rc::new(VariantInfo::from_ast_variant(cx, variant,
+ Rc::new(VariantInfo::from_ast_variant(cx, &*variant,
discriminant))
}).collect())
}
}
let tps = path.segments.iter().flat_map(|s| s.types.iter())
- .map(|&a_t| ast_ty_to_ty(this, rscope, a_t))
+ .map(|a_t| ast_ty_to_ty(this, rscope, &**a_t))
.collect();
let mut substs = subst::Substs {
supplied to `Box<T>`");
Some(ty::mk_err())
}
+ def::DefTy(did) | def::DefStruct(did)
+ if Some(did) == this.tcx().lang_items.gc() => {
+ if path.segments
+ .iter()
+ .flat_map(|s| s.types.iter())
+ .count() > 1 {
+ this.tcx()
+ .sess
+ .span_err(path.span,
+ "`Gc` has only one type parameter")
+ }
+
+ for inner_ast_type in path.segments
+ .iter()
+ .flat_map(|s| s.types.iter()) {
+ let mt = ast::MutTy {
+ ty: *inner_ast_type,
+ mutbl: ast::MutImmutable,
+ };
+ return Some(mk_pointer(this,
+ rscope,
+ &mt,
+ Box,
+ |typ| {
+ match ty::get(typ).sty {
+ ty::ty_str => {
+ this.tcx()
+ .sess
+ .span_err(path.span,
+ "`Gc<str>` is not a type");
+ ty::mk_err()
+ }
+ ty::ty_vec(_, None) => {
+ this.tcx()
+ .sess
+ .span_err(path.span,
+ "`Gc<[T]>` is not a type");
+ ty::mk_err()
+ }
+ _ => ty::mk_box(this.tcx(), typ),
+ }
+ }))
+ }
+ this.tcx().sess.span_bug(path.span,
+ "not enough type parameters \
+ supplied to `Gc<T>`")
+ }
_ => None
}
}
.inputs
.iter()
.map(|input| {
- ast_ty_to_ty(this, rscope, input.ty)
+ ast_ty_to_ty(this, rscope, &*input.ty)
}).collect::<Vec<_>>();
let input_tuple = ty::mk_tup(this.tcx(), input_types);
let output_type = ast_ty_to_ty(this,
rscope,
- unboxed_function.decl.output);
+ &*unboxed_function.decl.output);
let substs = subst::Substs {
self_ty: None,
tps: vec!(input_tuple, output_type),
debug!("mk_pointer(ptr_ty={:?})", ptr_ty);
match a_seq_ty.ty.node {
- ast::TyVec(ty) => {
- let mut mt = ast_ty_to_mt(this, rscope, ty);
+ ast::TyVec(ref ty) => {
+ let mut mt = ast_ty_to_mt(this, rscope, &**ty);
if a_seq_ty.mutbl == ast::MutMutable {
mt.mutbl = ast::MutMutable;
}
substs
} = trait_ref_for_unboxed_function(this,
rscope,
- *unboxed_function);
+ &**unboxed_function);
return ty::mk_trait(this.tcx(),
def_id,
substs,
_ => {}
}
- constr(ast_ty_to_ty(this, rscope, a_seq_ty.ty))
+ constr(ast_ty_to_ty(this, rscope, &*a_seq_ty.ty))
}
// Parses the programmer's textual representation of a type into our
ast::TyVec(ty) => {
tcx.sess.span_err(ast_ty.span, "bare `[]` is not a type");
// return /something/ so they can at least get more errors
- let vec_ty = ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, ty), None);
+ let vec_ty = ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, &*ty), None);
ty::mk_uniq(tcx, vec_ty)
}
ast::TyPtr(ref mt) => {
ty::mk_ptr(tcx, ty::mt {
- ty: ast_ty_to_ty(this, rscope, mt.ty),
+ ty: ast_ty_to_ty(this, rscope, &*mt.ty),
mutbl: mt.mutbl
})
}
}
ast::TyTup(ref fields) => {
let flds = fields.iter()
- .map(|&t| ast_ty_to_ty(this, rscope, t))
+ .map(|t| ast_ty_to_ty(this, rscope, &**t))
.collect();
ty::mk_tup(tcx, flds)
}
"variadic function must have C calling convention");
}
ty::mk_bare_fn(tcx, ty_of_bare_fn(this, ast_ty.id, bf.fn_style,
- bf.abi, bf.decl))
+ bf.abi, &*bf.decl))
}
ast::TyClosure(ref f, ref region) => {
f.onceness,
bounds,
store,
- f.decl,
+ &*f.decl,
None);
ty::mk_closure(tcx, fn_decl)
}
f.onceness,
bounds,
ty::UniqTraitStore,
- f.decl,
+ &*f.decl,
None);
ty::mk_closure(tcx, fn_decl)
}
}
}
ast::TyFixedLengthVec(ty, e) => {
- match const_eval::eval_const_expr_partial(tcx, e) {
+ match const_eval::eval_const_expr_partial(tcx, &*e) {
Ok(ref r) => {
match *r {
const_eval::const_int(i) =>
- ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, ty),
+ ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, &*ty),
Some(i as uint)),
const_eval::const_uint(i) =>
- ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, ty),
+ ty::mk_vec(tcx, ast_ty_to_mt(this, rscope, &*ty),
Some(i as uint)),
_ => {
tcx.sess.span_fatal(
match a.ty.node {
ast::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
ast::TyInfer => this.ty_infer(a.ty.span),
- _ => ast_ty_to_ty(this, rscope, a.ty),
+ _ => ast_ty_to_ty(this, rscope, &*a.ty),
}
}
let output_ty = match decl.output.node {
ast::TyInfer => this.ty_infer(decl.output.span),
- _ => ast_ty_to_ty(this, &rb, decl.output)
+ _ => ast_ty_to_ty(this, &rb, &*decl.output)
};
return ty::BareFnTy {
let output_ty = match decl.output.node {
ast::TyInfer if expected_ret_ty.is_some() => expected_ret_ty.unwrap(),
ast::TyInfer => this.ty_infer(decl.output.span),
- _ => ast_ty_to_ty(this, &rb, decl.output)
+ _ => ast_ty_to_ty(this, &rb, &*decl.output)
};
ty::ClosureTy {
use middle::typeck::require_same_types;
use std::collections::{HashMap, HashSet};
+use std::gc::Gc;
use syntax::ast;
use syntax::ast_util;
use syntax::parse::token;
for arm in arms.iter() {
let mut pcx = pat_ctxt {
fcx: fcx,
- map: pat_id_map(&tcx.def_map, *arm.pats.get(0)),
+ map: pat_id_map(&tcx.def_map, &**arm.pats.get(0)),
};
- for p in arm.pats.iter() { check_pat(&mut pcx, *p, discrim_ty);}
+ for p in arm.pats.iter() { check_pat(&mut pcx, &**p, discrim_ty);}
}
// The result of the match is the common supertype of all the
let mut guard_err = false;
let mut guard_bot = false;
match arm.guard {
- Some(e) => {
- check_expr_has_type(fcx, e, ty::mk_bool());
- let e_ty = fcx.expr_ty(e);
+ Some(ref e) => {
+ check_expr_has_type(fcx, &**e, ty::mk_bool());
+ let e_ty = fcx.expr_ty(&**e);
if ty::type_is_error(e_ty) {
guard_err = true;
}
},
None => ()
}
- check_expr(fcx, arm.body);
+ check_expr(fcx, &*arm.body);
let bty = fcx.node_ty(arm.body.id);
saw_err = saw_err || ty::type_is_error(bty);
if guard_err {
}
pub fn check_pat_variant(pcx: &pat_ctxt, pat: &ast::Pat, path: &ast::Path,
- subpats: &Option<Vec<@ast::Pat>>, expected: ty::t) {
+ subpats: &Option<Vec<Gc<ast::Pat>>>, expected: ty::t) {
// Typecheck the path.
let fcx = pcx.fcx;
if !error_happened {
for pats in subpats.iter() {
for (subpat, arg_ty) in pats.iter().zip(arg_types.iter()) {
- check_pat(pcx, *subpat, *arg_ty);
+ check_pat(pcx, &**subpat, *arg_ty);
}
}
}
if error_happened {
for pats in subpats.iter() {
for pat in pats.iter() {
- check_pat(pcx, *pat, ty::mk_err());
+ check_pat(pcx, &**pat, ty::mk_err());
}
}
}
class_id,
class_field.id,
substitutions);
- check_pat(pcx, field.pat, field_type);
+ check_pat(pcx, &*field.pat, field_type);
found_fields.insert(index);
}
None => {
// Check the pattern anyway, so that attempts to look
// up its type won't fail
- check_pat(pcx, field.pat, ty::mk_err());
+ check_pat(pcx, &*field.pat, ty::mk_err());
tcx.sess.span_err(span,
format!("struct `{}` does not have a field named `{}`",
ty::item_path_str(tcx, class_id),
ast::PatWild | ast::PatWildMulti => {
fcx.write_ty(pat.id, expected);
}
- ast::PatLit(lt) => {
- check_expr_has_type(fcx, lt, expected);
- fcx.write_ty(pat.id, fcx.expr_ty(lt));
+ ast::PatLit(ref lt) => {
+ check_expr_has_type(fcx, &**lt, expected);
+ fcx.write_ty(pat.id, fcx.expr_ty(&**lt));
}
- ast::PatRange(begin, end) => {
- check_expr_has_type(fcx, begin, expected);
- check_expr_has_type(fcx, end, expected);
+ ast::PatRange(ref begin, ref end) => {
+ check_expr_has_type(fcx, &**begin, expected);
+ check_expr_has_type(fcx, &**end, expected);
let b_ty =
- fcx.infcx().resolve_type_vars_if_possible(fcx.expr_ty(begin));
+ fcx.infcx().resolve_type_vars_if_possible(fcx.expr_ty(&**begin));
let e_ty =
- fcx.infcx().resolve_type_vars_if_possible(fcx.expr_ty(end));
+ fcx.infcx().resolve_type_vars_if_possible(fcx.expr_ty(&**end));
debug!("pat_range beginning type: {:?}", b_ty);
debug!("pat_range ending type: {:?}", e_ty);
if !require_same_types(
tcx.sess.span_err(pat.span,
"only char and numeric types are allowed in range");
} else {
- match valid_range_bounds(fcx.ccx, begin, end) {
+ match valid_range_bounds(fcx.ccx, &**begin, &**end) {
Some(false) => {
tcx.sess.span_err(begin.span,
"lower range bound must be less than upper");
debug!("(checking match) writing type for pat id {}", pat.id);
match sub {
- Some(p) => check_pat(pcx, p, expected),
+ Some(ref p) => check_pat(pcx, &**p, expected),
_ => ()
}
}
match *s {
ty::ty_tup(ref ex_elts) if e_count == ex_elts.len() => {
for (i, elt) in elts.iter().enumerate() {
- check_pat(pcx, *elt, *ex_elts.get(i));
+ check_pat(pcx, &**elt, *ex_elts.get(i));
}
fcx.write_ty(pat.id, expected);
}
_ => {
for elt in elts.iter() {
- check_pat(pcx, *elt, ty::mk_err());
+ check_pat(pcx, &**elt, ty::mk_err());
}
// use terr_tuple_size if both types are tuples
let type_error = match *s {
}
}
}
- ast::PatBox(inner) => {
- check_pointer_pat(pcx, Send, inner, pat.id, pat.span, expected);
+ ast::PatBox(ref inner) => {
+ check_pointer_pat(pcx, Send, &**inner, pat.id, pat.span, expected);
}
- ast::PatRegion(inner) => {
- check_pointer_pat(pcx, Borrowed, inner, pat.id, pat.span, expected);
+ ast::PatRegion(ref inner) => {
+ check_pointer_pat(pcx, Borrowed, &**inner, pat.id, pat.span, expected);
}
ast::PatVec(ref before, slice, ref after) => {
let default_region_var =
infer::PatternRegion(pat.span));
let check_err = || {
- for &elt in before.iter() {
- check_pat(pcx, elt, ty::mk_err());
+ for elt in before.iter() {
+ check_pat(pcx, &**elt, ty::mk_err());
}
- for &elt in slice.iter() {
- check_pat(pcx, elt, ty::mk_err());
+ for elt in slice.iter() {
+ check_pat(pcx, &**elt, ty::mk_err());
}
- for &elt in after.iter() {
- check_pat(pcx, elt, ty::mk_err());
+ for elt in after.iter() {
+ check_pat(pcx, &**elt, ty::mk_err());
}
// See [Note-Type-error-reporting] in middle/typeck/infer/mod.rs
fcx.infcx().type_error_message_str_with_expected(
}
};
for elt in before.iter() {
- check_pat(pcx, *elt, elt_type);
+ check_pat(pcx, &**elt, elt_type);
}
match slice {
- Some(slice_pat) => {
+ Some(ref slice_pat) => {
let slice_ty = ty::mk_slice(tcx,
region_var,
ty::mt {ty: elt_type, mutbl: mutbl});
- check_pat(pcx, slice_pat, slice_ty);
+ check_pat(pcx, &**slice_pat, slice_ty);
}
None => ()
}
for elt in after.iter() {
- check_pat(pcx, *elt, elt_type);
+ check_pat(pcx, &**elt, elt_type);
}
fcx.write_ty(pat.id, expected);
}
}
}
-// Helper function to check @, box and & patterns
+// Helper function to check Gc, box and & patterns
pub fn check_pointer_pat(pcx: &pat_ctxt,
pointer_kind: PointerKind,
inner: &ast::Pat,
use middle::const_eval;
use middle::def;
-use middle::lang_items::{ExchangeHeapLangItem, GcLangItem};
-use middle::lang_items::{ManagedHeapLangItem};
use middle::lint::UnreachableCode;
use middle::pat_util::pat_id_map;
use middle::pat_util;
use std::collections::HashMap;
use std::mem::replace;
use std::rc::Rc;
-use std::vec::Vec;
+use std::gc::Gc;
use syntax::abi;
use syntax::ast::{Provided, Required};
use syntax::ast;
fn visit_local(&mut self, local: &ast::Local, _: ()) {
let o_ty = match local.ty.node {
ast::TyInfer => None,
- _ => Some(self.fcx.to_ty(local.ty))
+ _ => Some(self.fcx.to_ty(&*local.ty))
};
self.assign(local.id, o_ty);
debug!("Local variable {} is assigned type {}",
- self.fcx.pat_to_str(local.pat),
+ self.fcx.pat_to_str(&*local.pat),
self.fcx.infcx().ty_to_str(
self.fcx.inh.locals.borrow().get_copy(&local.id)));
visit::walk_local(self, local, ());
for (arg_ty, input) in arg_tys.iter().zip(decl.inputs.iter()) {
// Create type variables for each argument.
pat_util::pat_bindings(&tcx.def_map,
- input.pat,
+ &*input.pat,
|_bm, pat_id, _sp, _path| {
visit.assign(pat_id, None);
});
// Check the pattern.
let pcx = pat_ctxt {
fcx: &fcx,
- map: pat_id_map(&tcx.def_map, input.pat),
+ map: pat_id_map(&tcx.def_map, &*input.pat),
};
- _match::check_pat(&pcx, input.pat, *arg_ty);
+ _match::check_pat(&pcx, &*input.pat, *arg_ty);
}
visit.visit_block(body, ());
// We unify the tail expr's type with the
// function result type, if there is a tail expr.
match body.expr {
- Some(tail_expr) => {
+ Some(ref tail_expr) => {
// Special case: we print a special error if there appears
// to be do-block/for-loop confusion
demand::suptype_with_fn(&fcx, tail_expr.span, false,
- fcx.ret_ty, fcx.expr_ty(tail_expr),
+ fcx.ret_ty, fcx.expr_ty(&**tail_expr),
|sp, e, a, s| {
fcx.report_mismatched_return_types(sp, e, a, s);
});
enum_definition.variants.as_slice());
}
ast::ItemStruct(..) => {
- check_fields_sized(ccx.tcx, ccx.tcx.map.expect_struct(it.id));
+ check_fields_sized(ccx.tcx, &*ccx.tcx.map.expect_struct(it.id));
}
_ => {}
}
let _indenter = indenter();
match it.node {
- ast::ItemStatic(_, _, e) => check_const(ccx, it.span, e, it.id),
+ ast::ItemStatic(_, _, ref e) => check_const(ccx, it.span, &**e, it.id),
ast::ItemEnum(ref enum_definition, _) => {
check_enum_variants(ccx,
it.span,
enum_definition.variants.as_slice(),
it.id);
}
- ast::ItemFn(decl, _, _, _, body) => {
+ ast::ItemFn(ref decl, _, _, _, ref body) => {
let fn_tpt = ty::lookup_item_type(ccx.tcx, ast_util::local_def(it.id));
let param_env = ty::construct_parameter_environment(
fn_tpt.generics.region_param_defs.as_slice(),
body.id);
- check_bare_fn(ccx, decl, body, it.id, fn_tpt.ty, param_env);
+ check_bare_fn(ccx, &**decl, &**body, it.id, fn_tpt.ty, param_env);
}
ast::ItemImpl(_, ref opt_trait_ref, _, ref ms) => {
debug!("ItemImpl {} with id {}", token::get_ident(it.ident), it.id);
let impl_tpt = ty::lookup_item_type(ccx.tcx, ast_util::local_def(it.id));
for m in ms.iter() {
- check_method_body(ccx, &impl_tpt.generics, None, *m);
+ check_method_body(ccx, &impl_tpt.generics, None, &**m);
}
match *opt_trait_ref {
}
Provided(m) => {
check_method_body(ccx, &trait_def.generics,
- Some(trait_def.trait_ref.clone()), m);
+ Some(trait_def.trait_ref.clone()), &*m);
}
}
}
ast::ItemForeignMod(ref m) => {
if m.abi == abi::RustIntrinsic {
for item in m.items.iter() {
- check_intrinsic_type(ccx, *item);
+ check_intrinsic_type(ccx, &**item);
}
} else {
for item in m.items.iter() {
let fty = ty::node_id_to_type(ccx.tcx, method.id);
- check_bare_fn(ccx, method.decl, method.body, method.id, fty, param_env);
+ check_bare_fn(ccx, &*method.decl, &*method.body, method.id, fty, param_env);
}
fn check_impl_methods_against_trait(ccx: &CrateCtxt,
impl_generics: &ty::Generics,
ast_trait_ref: &ast::TraitRef,
impl_trait_ref: &ty::TraitRef,
- impl_methods: &[@ast::Method]) {
+ impl_methods: &[Gc<ast::Method>]) {
// Locate trait methods
let tcx = ccx.tcx;
let trait_methods = ty::trait_methods(tcx, impl_trait_ref.def_id);
/// Attempts to resolve a call expression as an overloaded call.
fn try_overloaded_call(fcx: &FnCtxt,
call_expression: &ast::Expr,
- callee: @ast::Expr,
+ callee: Gc<ast::Expr>,
callee_type: ty::t,
- args: &[@ast::Expr])
+ args: &[Gc<ast::Expr>])
-> bool {
// Try `FnOnce`, then `FnMut`, then `Fn`.
for &(maybe_function_trait, method_name) in [
sp: Span,
method_fn_ty: ty::t,
callee_expr: &ast::Expr,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
deref_args: DerefArgs,
tuple_arguments: TupleArgumentsFlag)
-> ty::t {
sp: Span,
fn_inputs: &[ty::t],
callee_expr: &ast::Expr,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
deref_args: DerefArgs,
variadic: bool,
tuple_arguments: TupleArgumentsFlag) {
DontDerefArgs => {}
}
- check_expr_coercable_to_type(fcx, *arg, formal_ty);
+ check_expr_coercable_to_type(fcx, &**arg, formal_ty);
}
}
// arguments which we skipped above.
if variadic {
for arg in args.iter().skip(expected_arg_count) {
- check_expr(fcx, *arg);
+ check_expr(fcx, &**arg);
// There are a few types which get autopromoted when passed via varargs
// in C but we just error out instead and require explicit casts.
- let arg_ty = structurally_resolved_type(fcx, arg.span, fcx.expr_ty(*arg));
+ let arg_ty = structurally_resolved_type(fcx, arg.span,
+ fcx.expr_ty(&**arg));
match ty::get(arg_ty).sty {
ty::ty_float(ast::TyF32) => {
fcx.type_error_message(arg.span,
fn check_call(fcx: &FnCtxt,
call_expr: &ast::Expr,
f: &ast::Expr,
- args: &[@ast::Expr]) {
+ args: &[Gc<ast::Expr>]) {
// Store the type of `f` as the type of the callee
let fn_ty = fcx.expr_ty(f);
fn check_method_call(fcx: &FnCtxt,
expr: &ast::Expr,
method_name: ast::SpannedIdent,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
tps: &[ast::P<ast::Ty>]) {
- let rcvr = args[0];
+ let rcvr = args[0].clone();
// We can't know if we need &mut self before we look up the method,
// so treat the receiver as mutable just in case - only explicit
// overloaded dereferences care about the distinction.
- check_expr_with_lvalue_pref(fcx, rcvr, PreferMutLvalue);
+ check_expr_with_lvalue_pref(fcx, &*rcvr, PreferMutLvalue);
// no need to check for bot/err -- callee does that
let expr_t = structurally_resolved_type(fcx,
expr.span,
- fcx.expr_ty(rcvr));
+ fcx.expr_ty(&*rcvr));
- let tps = tps.iter().map(|&ast_ty| fcx.to_ty(ast_ty)).collect::<Vec<_>>();
- let fn_ty = match method::lookup(fcx, expr, rcvr,
+ let tps = tps.iter().map(|ast_ty| fcx.to_ty(&**ast_ty)).collect::<Vec<_>>();
+ let fn_ty = match method::lookup(fcx, expr, &*rcvr,
method_name.node.name,
expr_t, tps.as_slice(),
DontDerefArgs,
// Check for potential static matches (missing self parameters)
method::lookup(fcx,
expr,
- rcvr,
+ &*rcvr,
method_name.node.name,
expr_t,
tps.as_slice(),
fn check_then_else(fcx: &FnCtxt,
cond_expr: &ast::Expr,
then_blk: &ast::Block,
- opt_else_expr: Option<@ast::Expr>,
+ opt_else_expr: Option<Gc<ast::Expr>>,
id: ast::NodeId,
sp: Span,
expected: Option<ty::t>) {
check_expr_has_type(fcx, cond_expr, ty::mk_bool());
let branches_ty = match opt_else_expr {
- Some(else_expr) => {
+ Some(ref else_expr) => {
check_block_with_expected(fcx, then_blk, expected);
let then_ty = fcx.node_ty(then_blk.id);
- check_expr_with_opt_hint(fcx, else_expr, expected);
- let else_ty = fcx.expr_ty(else_expr);
+ check_expr_with_opt_hint(fcx, &**else_expr, expected);
+ let else_ty = fcx.expr_ty(&**else_expr);
infer::common_supertype(fcx.infcx(),
infer::IfExpression(sp),
true,
self_t: ty::t,
opname: ast::Name,
trait_did: Option<ast::DefId>,
- args: &[@ast::Expr],
+ args: &[Gc<ast::Expr>],
autoderef_receiver: AutoderefReceiverFlag,
unbound_method: ||) -> ty::t {
let method = match trait_did {
fn check_binop(fcx: &FnCtxt,
expr: &ast::Expr,
op: ast::BinOp,
- lhs: @ast::Expr,
- rhs: @ast::Expr,
+ lhs: Gc<ast::Expr>,
+ rhs: Gc<ast::Expr>,
is_binop_assignment: IsBinopAssignment) {
let tcx = fcx.ccx.tcx;
BinopAssignment => PreferMutLvalue,
SimpleBinop => NoPreference
};
- check_expr_with_lvalue_pref(fcx, lhs, lvalue_pref);
+ check_expr_with_lvalue_pref(fcx, &*lhs, lvalue_pref);
// Callee does bot / err checking
let lhs_t = structurally_resolved_type(fcx, lhs.span,
- fcx.expr_ty(lhs));
+ fcx.expr_ty(&*lhs));
if ty::type_is_integral(lhs_t) && ast_util::is_shift_binop(op) {
// Shift is a special case: rhs can be any integral type
- check_expr(fcx, rhs);
- let rhs_t = fcx.expr_ty(rhs);
+ check_expr(fcx, &*rhs);
+ let rhs_t = fcx.expr_ty(&*rhs);
require_integral(fcx, rhs.span, rhs_t);
fcx.write_ty(expr.id, lhs_t);
return;
if ty::is_binopable(tcx, lhs_t, op) {
let tvar = fcx.infcx().next_ty_var();
demand::suptype(fcx, expr.span, tvar, lhs_t);
- check_expr_has_type(fcx, rhs, tvar);
+ check_expr_has_type(fcx, &*rhs, tvar);
let result_t = match op {
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiLe | ast::BiGe |
},
lhs_t,
None);
- check_expr(fcx, rhs);
+ check_expr(fcx, &*rhs);
ty::mk_err()
};
fn check_user_binop(fcx: &FnCtxt,
ex: &ast::Expr,
- lhs_expr: @ast::Expr,
+ lhs_expr: Gc<ast::Expr>,
lhs_resolved_t: ty::t,
op: ast::BinOp,
- rhs: @ast::Expr) -> ty::t {
+ rhs: Gc<ast::Expr>) -> ty::t {
let tcx = fcx.ccx.tcx;
let lang = &tcx.lang_items;
let (name, trait_did) = match op {
ast::BiEq => ("eq", lang.eq_trait()),
ast::BiNe => ("ne", lang.eq_trait()),
ast::BiAnd | ast::BiOr => {
- check_expr(fcx, rhs);
+ check_expr(fcx, &*rhs);
return ty::mk_err();
}
};
mname: &str,
trait_did: Option<ast::DefId>,
ex: &ast::Expr,
- rhs_expr: @ast::Expr,
+ rhs_expr: Gc<ast::Expr>,
rhs_t: ty::t) -> ty::t {
lookup_op_method(fcx, ex, rhs_t, token::intern(mname),
trait_did, [rhs_expr], DontAutoderefReceiver, || {
};
check_fn(fcx.ccx, inherited_style, &fty_sig,
- decl, id, body, fcx.inh);
+ &*decl, id, &*body, fcx.inh);
}
None => {}
}
- let tps: Vec<ty::t> = tys.iter().map(|&ty| fcx.to_ty(ty)).collect();
+ let tps: Vec<ty::t> = tys.iter().map(|ty| fcx.to_ty(&**ty)).collect();
match method::lookup(fcx,
expr,
base,
// an error, so we can continue typechecking
check_expr_coercable_to_type(
fcx,
- field.expr,
+ &*field.expr,
expected_field_type);
}
span: codemap::Span,
class_id: ast::DefId,
fields: &[ast::Field],
- base_expr: Option<@ast::Expr>) {
+ base_expr: Option<Gc<ast::Expr>>) {
let tcx = fcx.ccx.tcx;
// Look up the number of type parameters and the raw type, and
match base_expr {
None => {}
Some(base_expr) => {
- check_expr_has_type(fcx, base_expr, struct_type);
+ check_expr_has_type(fcx, &*base_expr, struct_type);
if ty::type_is_bot(fcx.node_ty(base_expr.id)) {
struct_type = ty::mk_bot();
}
let mut any_bot = false;
let t: ty::t = fcx.infcx().next_ty_var();
for e in args.iter() {
- check_expr_has_type(fcx, *e, t);
- let arg_t = fcx.expr_ty(*e);
+ check_expr_has_type(fcx, &**e, t);
+ let arg_t = fcx.expr_ty(&**e);
if ty::type_is_error(arg_t) {
any_error = true;
}
} else if any_bot {
ty::mk_bot()
} else {
- ast_expr_vstore_to_ty(fcx, ev, vst, ||
+ ast_expr_vstore_to_ty(fcx, &*ev, vst, ||
ty::mt{ ty: ty::mk_vec(tcx,
ty::mt {ty: t, mutbl: mutability},
None),
mutbl: mutability })
}
}
- ast::ExprRepeat(element, count_expr) => {
- check_expr_with_hint(fcx, count_expr, ty::mk_uint());
- let _ = ty::eval_repeat_count(fcx, count_expr);
+ ast::ExprRepeat(ref element, ref count_expr) => {
+ check_expr_with_hint(fcx, &**count_expr, ty::mk_uint());
+ let _ = ty::eval_repeat_count(fcx, &**count_expr);
let mutability = match vst {
ast::ExprVstoreMutSlice => ast::MutMutable,
_ => ast::MutImmutable,
};
let t = fcx.infcx().next_ty_var();
- check_expr_has_type(fcx, element, t);
- let arg_t = fcx.expr_ty(element);
+ check_expr_has_type(fcx, &**element, t);
+ let arg_t = fcx.expr_ty(&**element);
if ty::type_is_error(arg_t) {
ty::mk_err()
} else if ty::type_is_bot(arg_t) {
ty::mk_bot()
} else {
- ast_expr_vstore_to_ty(fcx, ev, vst, ||
+ ast_expr_vstore_to_ty(fcx, &*ev, vst, ||
ty::mt{ ty: ty::mk_vec(tcx,
ty::mt {ty: t, mutbl: mutability},
None),
fcx.write_ty(id, typ);
}
- ast::ExprBox(place, subexpr) => {
- check_expr(fcx, place);
- check_expr(fcx, subexpr);
+ ast::ExprBox(ref place, ref subexpr) => {
+ check_expr(fcx, &**place);
+ check_expr(fcx, &**subexpr);
let mut checked = false;
match place.node {
// places: the exchange heap and the managed heap.
let definition = lookup_def(fcx, path.span, place.id);
let def_id = definition.def_id();
- match tcx.lang_items
- .items
- .get(ExchangeHeapLangItem as uint) {
- &Some(item_def_id) if def_id == item_def_id => {
- fcx.write_ty(id, ty::mk_uniq(tcx,
- fcx.expr_ty(subexpr)));
- checked = true
- }
- &Some(_) | &None => {}
- }
- if !checked {
- match tcx.lang_items
- .items
- .get(ManagedHeapLangItem as uint) {
- &Some(item_def_id) if def_id == item_def_id => {
- // Assign the magic `Gc<T>` struct.
- let gc_struct_id =
- match tcx.lang_items
- .require(GcLangItem) {
- Ok(id) => id,
- Err(msg) => {
- tcx.sess.span_err(expr.span,
- msg.as_slice());
- ast::DefId {
- krate: ast::CRATE_NODE_ID,
- node: ast::DUMMY_NODE_ID,
- }
- }
- };
- let regions =
- subst::NonerasedRegions(Vec::new());
- let sty = ty::mk_struct(tcx,
- gc_struct_id,
- subst::Substs {
- self_ty: None,
- tps: vec!(
- fcx.expr_ty(
- subexpr)
- ),
- regions: regions,
- });
- fcx.write_ty(id, sty);
- checked = true
- }
- &Some(_) | &None => {}
- }
+ if tcx.lang_items.exchange_heap() == Some(def_id) {
+ fcx.write_ty(id, ty::mk_uniq(tcx,
+ fcx.expr_ty(&**subexpr)));
+ checked = true
+ } else if tcx.lang_items.managed_heap() == Some(def_id) {
+ fcx.write_ty(id, ty::mk_box(tcx,
+ fcx.expr_ty(&**subexpr)));
+ checked = true
}
}
_ => {}
}
}
- ast::ExprLit(lit) => {
- let typ = check_lit(fcx, lit);
+ ast::ExprLit(ref lit) => {
+ let typ = check_lit(fcx, &**lit);
fcx.write_ty(id, typ);
}
- ast::ExprBinary(op, lhs, rhs) => {
- check_binop(fcx, expr, op, lhs, rhs, SimpleBinop);
+ ast::ExprBinary(op, ref lhs, ref rhs) => {
+ check_binop(fcx, expr, op, lhs.clone(), rhs.clone(), SimpleBinop);
- let lhs_ty = fcx.expr_ty(lhs);
- let rhs_ty = fcx.expr_ty(rhs);
+ let lhs_ty = fcx.expr_ty(&**lhs);
+ let rhs_ty = fcx.expr_ty(&**rhs);
if ty::type_is_error(lhs_ty) ||
ty::type_is_error(rhs_ty) {
fcx.write_error(id);
fcx.write_bot(id);
}
}
- ast::ExprAssignOp(op, lhs, rhs) => {
- check_binop(fcx, expr, op, lhs, rhs, BinopAssignment);
+ ast::ExprAssignOp(op, ref lhs, ref rhs) => {
+ check_binop(fcx, expr, op, lhs.clone(), rhs.clone(), BinopAssignment);
- let lhs_t = fcx.expr_ty(lhs);
+ let lhs_t = fcx.expr_ty(&**lhs);
let result_t = fcx.expr_ty(expr);
demand::suptype(fcx, expr.span, result_t, lhs_t);
let tcx = fcx.tcx();
- if !ty::expr_is_lval(tcx, lhs) {
+ if !ty::expr_is_lval(tcx, &**lhs) {
tcx.sess.span_err(lhs.span, "illegal left-hand side expression");
}
fcx.write_nil(expr.id);
}
}
- ast::ExprUnary(unop, oprnd) => {
+ ast::ExprUnary(unop, ref oprnd) => {
let exp_inner = unpack_expected(fcx, expected, |sty| {
match unop {
ast::UnBox | ast::UnUniq => match *sty {
ast::UnDeref => lvalue_pref,
_ => NoPreference
};
- check_expr_with_opt_hint_and_lvalue_pref(fcx, oprnd, exp_inner, lvalue_pref);
- let mut oprnd_t = fcx.expr_ty(oprnd);
+ check_expr_with_opt_hint_and_lvalue_pref(fcx, &**oprnd, exp_inner, lvalue_pref);
+ let mut oprnd_t = fcx.expr_ty(&**oprnd);
if !ty::type_is_error(oprnd_t) && !ty::type_is_bot(oprnd_t) {
match unop {
ast::UnBox => {
Some(mt) => mt.ty,
None => match try_overloaded_deref(fcx, expr.span,
Some(MethodCall::expr(expr.id)),
- Some(&*oprnd), oprnd_t, lvalue_pref) {
+ Some(&**oprnd), oprnd_t, lvalue_pref) {
Some(mt) => mt.ty,
None => {
let is_newtype = match ty::get(oprnd_t).sty {
ty::get(oprnd_t).sty == ty::ty_bool) {
oprnd_t = check_user_unop(fcx, "!", "not",
tcx.lang_items.not_trait(),
- expr, oprnd, oprnd_t);
+ expr, oprnd.clone(), oprnd_t);
}
}
ast::UnNeg => {
ty::type_is_fp(oprnd_t)) {
oprnd_t = check_user_unop(fcx, "-", "neg",
tcx.lang_items.neg_trait(),
- expr, oprnd, oprnd_t);
+ expr, oprnd.clone(), oprnd_t);
}
}
}
}
fcx.write_ty(id, oprnd_t);
}
- ast::ExprAddrOf(mutbl, oprnd) => {
+ ast::ExprAddrOf(mutbl, ref oprnd) => {
let hint = unpack_expected(
fcx, expected,
|sty| match *sty { ty::ty_rptr(_, ref mt) => Some(mt.ty),
ast::MutMutable => PreferMutLvalue,
ast::MutImmutable => NoPreference
};
- check_expr_with_opt_hint_and_lvalue_pref(fcx, oprnd, hint, lvalue_pref);
+ check_expr_with_opt_hint_and_lvalue_pref(fcx, &**oprnd, hint, lvalue_pref);
// Note: at this point, we cannot say what the best lifetime
// is to use for resulting pointer. We want to use the
let region = fcx.infcx().next_region_var(
infer::AddrOfRegion(expr.span));
- let tm = ty::mt { ty: fcx.expr_ty(oprnd), mutbl: mutbl };
+ let tm = ty::mt { ty: fcx.expr_ty(&**oprnd), mutbl: mutbl };
let oprnd_t = if ty::type_is_error(tm.ty) {
ty::mk_err()
} else if ty::type_is_bot(tm.ty) {
instantiate_path(fcx, pth, tpt, defn, expr.span, expr.id);
}
ast::ExprInlineAsm(ref ia) => {
- for &(_, input) in ia.inputs.iter() {
- check_expr(fcx, input);
+ for &(_, ref input) in ia.inputs.iter() {
+ check_expr(fcx, &**input);
}
- for &(_, out) in ia.outputs.iter() {
- check_expr(fcx, out);
+ for &(_, ref out) in ia.outputs.iter() {
+ check_expr(fcx, &**out);
}
fcx.write_nil(id);
}
ast::ExprMac(_) => tcx.sess.bug("unexpanded macro"),
ast::ExprBreak(_) => { fcx.write_bot(id); }
ast::ExprAgain(_) => { fcx.write_bot(id); }
- ast::ExprRet(expr_opt) => {
+ ast::ExprRet(ref expr_opt) => {
let ret_ty = fcx.ret_ty;
- match expr_opt {
+ match *expr_opt {
None => match fcx.mk_eqty(false, infer::Misc(expr.span),
ret_ty, ty::mk_nil()) {
Ok(_) => { /* fall through */ }
"`return;` in function returning non-nil");
}
},
- Some(e) => {
- check_expr_has_type(fcx, e, ret_ty);
+ Some(ref e) => {
+ check_expr_has_type(fcx, &**e, ret_ty);
}
}
fcx.write_bot(id);
}
- ast::ExprParen(a) => {
- check_expr_with_opt_hint_and_lvalue_pref(fcx, a, expected, lvalue_pref);
- fcx.write_ty(id, fcx.expr_ty(a));
+ ast::ExprParen(ref a) => {
+ check_expr_with_opt_hint_and_lvalue_pref(fcx, &**a, expected, lvalue_pref);
+ fcx.write_ty(id, fcx.expr_ty(&**a));
}
- ast::ExprAssign(lhs, rhs) => {
- check_expr_with_lvalue_pref(fcx, lhs, PreferMutLvalue);
+ ast::ExprAssign(ref lhs, ref rhs) => {
+ check_expr_with_lvalue_pref(fcx, &**lhs, PreferMutLvalue);
let tcx = fcx.tcx();
- if !ty::expr_is_lval(tcx, lhs) {
+ if !ty::expr_is_lval(tcx, &**lhs) {
tcx.sess.span_err(lhs.span, "illegal left-hand side expression");
}
- let lhs_ty = fcx.expr_ty(lhs);
- check_expr_has_type(fcx, rhs, lhs_ty);
- let rhs_ty = fcx.expr_ty(rhs);
+ let lhs_ty = fcx.expr_ty(&**lhs);
+ check_expr_has_type(fcx, &**rhs, lhs_ty);
+ let rhs_ty = fcx.expr_ty(&**rhs);
if ty::type_is_error(lhs_ty) || ty::type_is_error(rhs_ty) {
fcx.write_error(id);
fcx.write_nil(id);
}
}
- ast::ExprIf(cond, then_blk, opt_else_expr) => {
- check_then_else(fcx, cond, then_blk, opt_else_expr,
+ ast::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => {
+ check_then_else(fcx, &**cond, &**then_blk, opt_else_expr.clone(),
id, expr.span, expected);
}
- ast::ExprWhile(cond, body) => {
- check_expr_has_type(fcx, cond, ty::mk_bool());
- check_block_no_value(fcx, body);
- let cond_ty = fcx.expr_ty(cond);
+ ast::ExprWhile(ref cond, ref body) => {
+ check_expr_has_type(fcx, &**cond, ty::mk_bool());
+ check_block_no_value(fcx, &**body);
+ let cond_ty = fcx.expr_ty(&**cond);
let body_ty = fcx.node_ty(body.id);
if ty::type_is_error(cond_ty) || ty::type_is_error(body_ty) {
fcx.write_error(id);
}
ast::ExprForLoop(..) =>
fail!("non-desugared expr_for_loop"),
- ast::ExprLoop(body, _) => {
- check_block_no_value(fcx, (body));
- if !may_break(tcx, expr.id, body) {
+ ast::ExprLoop(ref body, _) => {
+ check_block_no_value(fcx, &**body);
+ if !may_break(tcx, expr.id, body.clone()) {
fcx.write_bot(id);
}
else {
fcx.write_nil(id);
}
}
- ast::ExprMatch(discrim, ref arms) => {
- _match::check_match(fcx, expr, discrim, arms.as_slice());
+ ast::ExprMatch(ref discrim, ref arms) => {
+ _match::check_match(fcx, expr, &**discrim, arms.as_slice());
}
- ast::ExprFnBlock(decl, body) => {
+ ast::ExprFnBlock(ref decl, ref body) => {
let region = astconv::opt_ast_region_to_region(fcx,
fcx.infcx(),
expr.span,
check_expr_fn(fcx,
expr,
ty::RegionTraitStore(region, ast::MutMutable),
- decl,
- body,
+ &**decl,
+ body.clone(),
expected);
}
- ast::ExprProc(decl, body) => {
+ ast::ExprProc(ref decl, ref body) => {
check_expr_fn(fcx,
expr,
ty::UniqTraitStore,
- decl,
- body,
+ &**decl,
+ body.clone(),
expected);
}
- ast::ExprBlock(b) => {
- check_block_with_expected(fcx, b, expected);
+ ast::ExprBlock(ref b) => {
+ check_block_with_expected(fcx, &**b, expected);
fcx.write_ty(id, fcx.node_ty(b.id));
}
- ast::ExprCall(f, ref args) => {
+ ast::ExprCall(ref f, ref args) => {
// Index expressions need to be handled separately, to inform them
// that they appear in call position.
- check_expr(fcx, f);
- let f_ty = fcx.expr_ty(f);
+ check_expr(fcx, &**f);
+ let f_ty = fcx.expr_ty(&**f);
- if !try_overloaded_call(fcx, expr, f, f_ty, args.as_slice()) {
- check_call(fcx, expr, f, args.as_slice());
+ if !try_overloaded_call(fcx, expr, f.clone(), f_ty, args.as_slice()) {
+ check_call(fcx, expr, &**f, args.as_slice());
let (args_bot, args_err) = args.iter().fold((false, false),
|(rest_bot, rest_err), a| {
// is this not working?
- let a_ty = fcx.expr_ty(*a);
+ let a_ty = fcx.expr_ty(&**a);
(rest_bot || ty::type_is_bot(a_ty),
rest_err || ty::type_is_error(a_ty))});
if ty::type_is_error(f_ty) || args_err {
}
ast::ExprMethodCall(ident, ref tps, ref args) => {
check_method_call(fcx, expr, ident, args.as_slice(), tps.as_slice());
- let mut arg_tys = args.iter().map(|a| fcx.expr_ty(*a));
+ let mut arg_tys = args.iter().map(|a| fcx.expr_ty(&**a));
let (args_bot, args_err) = arg_tys.fold((false, false),
|(rest_bot, rest_err), a| {
(rest_bot || ty::type_is_bot(a),
fcx.write_bot(id);
}
}
- ast::ExprCast(e, t) => {
- check_expr(fcx, e);
- let t_1 = fcx.to_ty(t);
- let t_e = fcx.expr_ty(e);
+ ast::ExprCast(ref e, ref t) => {
+ check_expr(fcx, &**e);
+ let t_1 = fcx.to_ty(&**t);
+ let t_e = fcx.expr_ty(&**e);
debug!("t_1={}", fcx.infcx().ty_to_str(t_1));
debug!("t_e={}", fcx.infcx().ty_to_str(t_e));
/* this case is allowed */
}
_ => {
- demand::coerce(fcx, e.span, t_1, e);
+ demand::coerce(fcx, e.span, t_1, &**e);
}
}
} else if !(type_is_scalar(fcx,expr.span,t_e)
ast::ExprVec(ref args) => {
let t: ty::t = fcx.infcx().next_ty_var();
for e in args.iter() {
- check_expr_has_type(fcx, *e, t);
+ check_expr_has_type(fcx, &**e, t);
}
let typ = ty::mk_vec(tcx, ty::mt {ty: t, mutbl: ast::MutImmutable},
Some(args.len()));
fcx.write_ty(id, typ);
}
- ast::ExprRepeat(element, count_expr) => {
- check_expr_with_hint(fcx, count_expr, ty::mk_uint());
- let count = ty::eval_repeat_count(fcx, count_expr);
+ ast::ExprRepeat(ref element, ref count_expr) => {
+ check_expr_with_hint(fcx, &**count_expr, ty::mk_uint());
+ let count = ty::eval_repeat_count(fcx, &**count_expr);
let t: ty::t = fcx.infcx().next_ty_var();
- check_expr_has_type(fcx, element, t);
- let element_ty = fcx.expr_ty(element);
+ check_expr_has_type(fcx, &**element, t);
+ let element_ty = fcx.expr_ty(&**element);
if ty::type_is_error(element_ty) {
fcx.write_error(id);
}
Some(ref fs) if i < fs.len() => Some(*fs.get(i)),
_ => None
};
- check_expr_with_opt_hint(fcx, *e, opt_hint);
- let t = fcx.expr_ty(*e);
+ check_expr_with_opt_hint(fcx, &**e, opt_hint);
+ let t = fcx.expr_ty(&**e);
err_field = err_field || ty::type_is_error(t);
bot_field = bot_field || ty::type_is_bot(t);
t
}
}
}
- ast::ExprField(base, field, ref tys) => {
- check_field(fcx, expr, lvalue_pref, base, field.name, tys.as_slice());
+ ast::ExprField(ref base, ref field, ref tys) => {
+ check_field(fcx, expr, lvalue_pref, &**base, field.name, tys.as_slice());
}
- ast::ExprIndex(base, idx) => {
- check_expr_with_lvalue_pref(fcx, base, lvalue_pref);
- check_expr(fcx, idx);
- let raw_base_t = fcx.expr_ty(base);
- let idx_t = fcx.expr_ty(idx);
+ ast::ExprIndex(ref base, ref idx) => {
+ check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
+ check_expr(fcx, &**idx);
+ let raw_base_t = fcx.expr_ty(&**base);
+ let idx_t = fcx.expr_ty(&**idx);
if ty::type_is_error(raw_base_t) || ty::type_is_bot(raw_base_t) {
fcx.write_ty(id, raw_base_t);
} else if ty::type_is_error(idx_t) || ty::type_is_bot(idx_t) {
lvalue_pref, |base_t, _| ty::index(base_t));
match field_ty {
Some(mt) => {
- check_expr_has_type(fcx, idx, ty::mk_uint());
+ check_expr_has_type(fcx, &**idx, ty::mk_uint());
fcx.write_ty(id, mt.ty);
fcx.write_autoderef_adjustment(base.id, autoderefs);
}
resolved,
token::intern("index"),
tcx.lang_items.index_trait(),
- [base, idx],
+ [base.clone(), idx.clone()],
AutoderefReceiver,
|| {
fcx.type_error_message(expr.span,
fcx.write_ty(local.id, t);
match local.init {
- Some(init) => {
- check_decl_initializer(fcx, local.id, init);
- let init_ty = fcx.expr_ty(init);
+ Some(ref init) => {
+ check_decl_initializer(fcx, local.id, &**init);
+ let init_ty = fcx.expr_ty(&**init);
if ty::type_is_error(init_ty) || ty::type_is_bot(init_ty) {
fcx.write_ty(local.id, init_ty);
}
let pcx = pat_ctxt {
fcx: fcx,
- map: pat_id_map(&tcx.def_map, local.pat),
+ map: pat_id_map(&tcx.def_map, &*local.pat),
};
- _match::check_pat(&pcx, local.pat, t);
+ _match::check_pat(&pcx, &*local.pat, t);
let pat_ty = fcx.node_ty(local.pat.id);
if ty::type_is_error(pat_ty) || ty::type_is_bot(pat_ty) {
fcx.write_ty(local.id, pat_ty);
node_id = id;
match decl.node {
ast::DeclLocal(ref l) => {
- check_decl_local(fcx, *l);
+ check_decl_local(fcx, &**l);
let l_t = fcx.node_ty(l.id);
saw_bot = saw_bot || ty::type_is_bot(l_t);
saw_err = saw_err || ty::type_is_error(l_t);
ast::DeclItem(_) => {/* ignore for now */ }
}
}
- ast::StmtExpr(expr, id) => {
+ ast::StmtExpr(ref expr, id) => {
node_id = id;
// Check with expected type of ()
- check_expr_has_type(fcx, expr, ty::mk_nil());
- let expr_ty = fcx.expr_ty(expr);
+ check_expr_has_type(fcx, &**expr, ty::mk_nil());
+ let expr_ty = fcx.expr_ty(&**expr);
saw_bot = saw_bot || ty::type_is_bot(expr_ty);
saw_err = saw_err || ty::type_is_error(expr_ty);
}
- ast::StmtSemi(expr, id) => {
+ ast::StmtSemi(ref expr, id) => {
node_id = id;
- check_expr(fcx, expr);
- let expr_ty = fcx.expr_ty(expr);
+ check_expr(fcx, &**expr);
+ let expr_ty = fcx.expr_ty(&**expr);
saw_bot |= ty::type_is_bot(expr_ty);
saw_err |= ty::type_is_error(expr_ty);
}
let mut any_bot = false;
let mut any_err = false;
for s in blk.stmts.iter() {
- check_stmt(fcx, *s);
- let s_id = ast_util::stmt_id(*s);
+ check_stmt(fcx, &**s);
+ let s_id = ast_util::stmt_id(&**s);
let s_ty = fcx.node_ty(s_id);
if last_was_bot && !warned && match s.node {
ast::StmtDecl(decl, _) => {
e.span,
"unreachable expression".to_string());
}
- check_expr_with_opt_hint(fcx, e, expected);
- let ety = fcx.expr_ty(e);
+ check_expr_with_opt_hint(fcx, &*e, expected);
+ let ety = fcx.expr_ty(&*e);
fcx.write_ty(blk.id, ety);
if any_err {
fcx.write_error(blk.id);
}
}
},
- ast::StructVariantKind(struct_def) => check_fields_sized(ccx.tcx, struct_def),
+ ast::StructVariantKind(struct_def) => check_fields_sized(ccx.tcx, &*struct_def),
_ => {}
}
}
match v.node.disr_expr {
Some(e) => {
- debug!("disr expr, checking {}", pprust::expr_to_str(e));
+ debug!("disr expr, checking {}", pprust::expr_to_str(&*e));
let inh = blank_inherited_fields(ccx);
let fcx = blank_fn_ctxt(ccx, &inh, rty, e.id);
let declty = ty::mk_int_var(ccx.tcx, fcx.infcx().next_int_var_id());
- check_const_with_ty(&fcx, e.span, e, declty);
+ check_const_with_ty(&fcx, e.span, &*e, declty);
// check_expr (from check_const pass) doesn't guarantee
// that the expression is in a form that eval_const_expr can
// handle, so we may still get an internal compiler error
- match const_eval::eval_const_expr_partial(ccx.tcx, e) {
+ match const_eval::eval_const_expr_partial(ccx.tcx, &*e) {
Ok(const_eval::const_int(val)) => current_disr_val = val as Disr,
Ok(const_eval::const_uint(val)) => current_disr_val = val as Disr,
Ok(_) => {
}
disr_vals.push(current_disr_val);
- let variant_info = Rc::new(VariantInfo::from_ast_variant(ccx.tcx, v,
+ let variant_info = Rc::new(VariantInfo::from_ast_variant(ccx.tcx, &*v,
current_disr_val));
prev_disr_val = Some(current_disr_val);
let mut pushed = false;
for (i, ty) in pth.segments.iter()
.flat_map(|segment| segment.types.iter())
- .map(|&ast_type| fcx.to_ty(ast_type))
+ .map(|ast_type| fcx.to_ty(&**ast_type))
.enumerate() {
match self_parameter_index {
Some(index) if index == i => {
pub fn may_break(cx: &ty::ctxt, id: ast::NodeId, b: ast::P<ast::Block>) -> bool {
// First: is there an unlabeled break immediately
// inside the loop?
- (loop_query(b, |e| {
+ (loop_query(&*b, |e| {
match *e {
ast::ExprBreak(_) => true,
_ => false
use syntax::visit::Visitor;
use std::cell::RefCell;
+use std::gc::Gc;
// If mem categorization results in an error, it's because the type
// check failed (or will fail, when the error is uncovered and
fn visit_arm(rcx: &mut Rcx, arm: &ast::Arm) {
// see above
- for &p in arm.pats.iter() {
- constrain_bindings_in_pat(p, rcx);
+ for p in arm.pats.iter() {
+ constrain_bindings_in_pat(&**p, rcx);
}
visit::walk_arm(rcx, arm, ());
fn visit_local(rcx: &mut Rcx, l: &ast::Local) {
// see above
- constrain_bindings_in_pat(l.pat, rcx);
+ constrain_bindings_in_pat(&*l.pat, rcx);
link_local(rcx, l);
visit::walk_local(rcx, l, ());
}
}
match expr.node {
- ast::ExprCall(callee, ref args) => {
+ ast::ExprCall(ref callee, ref args) => {
if !has_method_map {
- constrain_callee(rcx, callee.id, expr, callee);
+ constrain_callee(rcx, callee.id, expr, &**callee);
constrain_call(rcx,
Some(callee.id),
expr,
visit::walk_expr(rcx, expr, ());
}
- ast::ExprAssign(lhs, _) => {
- adjust_borrow_kind_for_assignment_lhs(rcx, lhs);
+ ast::ExprAssign(ref lhs, _) => {
+ adjust_borrow_kind_for_assignment_lhs(rcx, &**lhs);
visit::walk_expr(rcx, expr, ());
}
- ast::ExprAssignOp(_, lhs, rhs) => {
+ ast::ExprAssignOp(_, ref lhs, ref rhs) => {
if has_method_map {
- constrain_call(rcx, None, expr, Some(lhs), [rhs], true);
+ constrain_call(rcx, None, expr, Some(lhs.clone()),
+ [rhs.clone()], true);
}
- adjust_borrow_kind_for_assignment_lhs(rcx, lhs);
+ adjust_borrow_kind_for_assignment_lhs(rcx, &**lhs);
visit::walk_expr(rcx, expr, ());
}
- ast::ExprIndex(lhs, rhs) |
- ast::ExprBinary(_, lhs, rhs) if has_method_map => {
+ ast::ExprIndex(ref lhs, ref rhs) |
+ ast::ExprBinary(_, ref lhs, ref rhs) if has_method_map => {
// As `expr_method_call`, but the call is via an
// overloaded op. Note that we (sadly) currently use an
// implicit "by ref" sort of passing style here. This
// should be converted to an adjustment!
- constrain_call(rcx, None, expr, Some(lhs), [rhs], true);
+ constrain_call(rcx, None, expr, Some(lhs.clone()),
+ [rhs.clone()], true);
visit::walk_expr(rcx, expr, ());
}
- ast::ExprUnary(_, lhs) if has_method_map => {
+ ast::ExprUnary(_, ref lhs) if has_method_map => {
// As above.
- constrain_call(rcx, None, expr, Some(lhs), [], true);
+ constrain_call(rcx, None, expr, Some(lhs.clone()), [], true);
visit::walk_expr(rcx, expr, ());
}
- ast::ExprUnary(ast::UnDeref, base) => {
+ ast::ExprUnary(ast::UnDeref, ref base) => {
// For *a, the lifetime of a must enclose the deref
let method_call = MethodCall::expr(expr.id);
let base_ty = match rcx.fcx.inh.method_map.borrow().find(&method_call) {
Some(method) => {
- constrain_call(rcx, None, expr, Some(base), [], true);
+ constrain_call(rcx, None, expr, Some(base.clone()), [], true);
ty::ty_fn_ret(method.ty)
}
None => rcx.resolve_node_type(base.id)
visit::walk_expr(rcx, expr, ());
}
- ast::ExprIndex(vec_expr, _) => {
+ ast::ExprIndex(ref vec_expr, _) => {
// For a[b], the lifetime of a must enclose the deref
- let vec_type = rcx.resolve_expr_type_adjusted(vec_expr);
+ let vec_type = rcx.resolve_expr_type_adjusted(&**vec_expr);
constrain_index(rcx, expr, vec_type);
visit::walk_expr(rcx, expr, ());
}
- ast::ExprCast(source, _) => {
+ ast::ExprCast(ref source, _) => {
// Determine if we are casting `source` to a trait
// instance. If so, we have to be sure that the type of
// the source obeys the trait's region bound.
ty::ty_trait(box ty::TyTrait {
store: ty::RegionTraitStore(trait_region, _), ..
}) => {
- let source_ty = rcx.resolve_expr_type_adjusted(source);
+ let source_ty = rcx.resolve_expr_type_adjusted(&**source);
constrain_regions_in_type(
rcx,
trait_region,
visit::walk_expr(rcx, expr, ());
}
- ast::ExprAddrOf(m, base) => {
- link_addr_of(rcx, expr, m, base);
+ ast::ExprAddrOf(m, ref base) => {
+ link_addr_of(rcx, expr, m, &**base);
// Require that when you write a `&expr` expression, the
// resulting pointer has a lifetime that encompasses the
visit::walk_expr(rcx, expr, ());
}
- ast::ExprMatch(discr, ref arms) => {
- link_match(rcx, discr, arms.as_slice());
+ ast::ExprMatch(ref discr, ref arms) => {
+ link_match(rcx, &**discr, arms.as_slice());
visit::walk_expr(rcx, expr, ());
}
check_expr_fn_block(rcx, expr, &**body);
}
- ast::ExprLoop(body, _) => {
+ ast::ExprLoop(ref body, _) => {
let repeating_scope = rcx.set_repeating_scope(body.id);
visit::walk_expr(rcx, expr, ());
rcx.set_repeating_scope(repeating_scope);
}
- ast::ExprWhile(cond, body) => {
+ ast::ExprWhile(ref cond, ref body) => {
let repeating_scope = rcx.set_repeating_scope(cond.id);
- rcx.visit_expr(cond, ());
+ rcx.visit_expr(&**cond, ());
rcx.set_repeating_scope(body.id);
- rcx.visit_block(body, ());
+ rcx.visit_block(&**body, ());
rcx.set_repeating_scope(repeating_scope);
}
// operator
fn_expr_id: Option<ast::NodeId>,
call_expr: &ast::Expr,
- receiver: Option<@ast::Expr>,
- arg_exprs: &[@ast::Expr],
+ receiver: Option<Gc<ast::Expr>>,
+ arg_exprs: &[Gc<ast::Expr>],
implicitly_ref_args: bool) {
//! Invoked on every call site (i.e., normal calls, method calls,
//! and overloaded operators). Constrains the regions which appear
let callee_scope = call_expr.id;
let callee_region = ty::ReScope(callee_scope);
- for &arg_expr in arg_exprs.iter() {
+ for arg_expr in arg_exprs.iter() {
debug!("Argument");
// ensure that any regions appearing in the argument type are
// result. modes are going away and the "DerefArgs" code
// should be ported to use adjustments
if implicitly_ref_args {
- link_by_ref(rcx, arg_expr, callee_scope);
+ link_by_ref(rcx, &**arg_expr, callee_scope);
}
}
// as loop above, but for receiver
- for &r in receiver.iter() {
+ for r in receiver.iter() {
debug!("Receiver");
constrain_regions_in_type_of_node(
rcx, r.id, callee_region, infer::CallRcvr(r.span));
if implicitly_ref_args {
- link_by_ref(rcx, r, callee_scope);
+ link_by_ref(rcx, &**r, callee_scope);
}
}
debug!("regionck::for_local()");
let init_expr = match local.init {
None => { return; }
- Some(expr) => expr,
+ Some(ref expr) => expr,
};
let mc = mc::MemCategorizationContext::new(rcx);
- let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
- link_pattern(rcx, mc, discr_cmt, local.pat);
+ let discr_cmt = ignore_err!(mc.cat_expr(&**init_expr));
+ link_pattern(rcx, mc, discr_cmt, &*local.pat);
}
fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
let discr_cmt = ignore_err!(mc.cat_expr(discr));
debug!("discr_cmt={}", discr_cmt.repr(rcx.tcx()));
for arm in arms.iter() {
- for &root_pat in arm.pats.iter() {
- link_pattern(rcx, mc, discr_cmt.clone(), root_pat);
+ for root_pat in arm.pats.iter() {
+ link_pattern(rcx, mc, discr_cmt.clone(), &**root_pat);
}
}
}
}
// `[_, ..slice, _]` pattern
- ast::PatVec(_, Some(slice_pat), _) => {
- match mc.cat_slice_pattern(sub_cmt, slice_pat) {
+ ast::PatVec(_, Some(ref slice_pat), _) => {
+ match mc.cat_slice_pattern(sub_cmt, &**slice_pat) {
Ok((slice_cmt, slice_mutbl, slice_r)) => {
link_region(rcx, sub_pat.span, slice_r,
ty::BorrowKind::from_mutbl(slice_mutbl),
None => {}
}
}
- ast::ExprCast(src, _) => {
+ ast::ExprCast(ref src, _) => {
debug!("vtable resolution on expr {}", ex.repr(fcx.tcx()));
let target_ty = fcx.expr_ty(ex);
- resolve_object_cast(src, target_ty);
+ resolve_object_cast(&**src, target_ty);
}
_ => ()
}
let mut wbcx = WritebackCx::new(fcx);
wbcx.visit_block(blk, ());
for arg in decl.inputs.iter() {
- wbcx.visit_pat(arg.pat, ());
+ wbcx.visit_pat(&*arg.pat, ());
// Privacy needs the type for the whole pattern, not just each binding
- if !pat_util::pat_is_binding(&fcx.tcx().def_map, arg.pat) {
+ if !pat_util::pat_is_binding(&fcx.tcx().def_map, &*arg.pat) {
wbcx.visit_node_id(ResolvingPattern(arg.pat.span),
arg.pat.id);
}
_ => {}
}
}
+ ty_box(..) => {
+ match tcx.lang_items.gc() {
+ Some(did) if did.krate == ast::LOCAL_CRATE => {
+ found_nominal = true;
+ }
+ _ => {}
+ }
+ }
_ => { }
}
// Then visit the module items.
visit::walk_mod(self, module_, ());
}
- ItemImpl(_, None, ast_ty, _) => {
- if !self.cc.ast_type_is_defined_in_local_crate(ast_ty) {
+ ItemImpl(_, None, ref ast_ty, _) => {
+ if !self.cc.ast_type_is_defined_in_local_crate(&**ast_ty) {
// This is an error.
let session = &self.cc.crate_context.tcx.sess;
session.span_err(item.span,
use std::collections::{HashMap, HashSet};
use std::rc::Rc;
+use std::gc::Gc;
+
use syntax::abi;
use syntax::ast::{StaticRegionTyParamBound, OtherRegionTyParamBound};
use syntax::ast::{TraitTyParamBound, UnboxedFnTyParamBound};
}
match self.tcx.map.find(id.node) {
- Some(ast_map::NodeItem(item)) => ty_of_item(self, item),
+ Some(ast_map::NodeItem(item)) => ty_of_item(self, &*item),
Some(ast_map::NodeForeignItem(foreign_item)) => {
let abi = self.tcx.map.get_foreign_abi(id.node);
- ty_of_foreign_item(self, foreign_item, abi)
+ ty_of_foreign_item(self, &*foreign_item, abi)
}
x => {
self.tcx.sess.bug(format!("unexpected sort of node \
let result_ty = match variant.node.kind {
ast::TupleVariantKind(ref args) if args.len() > 0 => {
let rs = ExplicitRscope;
- let input_tys: Vec<_> = args.iter().map(|va| ccx.to_ty(&rs, va.ty)).collect();
+ let input_tys: Vec<_> = args.iter().map(|va| ccx.to_ty(&rs, &*va.ty)).collect();
ty::mk_ctor_fn(tcx, scope, input_tys.as_slice(), enum_ty)
}
ty: enum_ty
};
- convert_struct(ccx, struct_def, tpt, variant.node.id);
+ convert_struct(ccx, &*struct_def, tpt, variant.node.id);
let input_tys: Vec<_> = struct_def.fields.iter().map(
|f| ty::node_id_to_type(ccx.tcx, f.node.id)).collect();
ty_method_of_trait_method(
ccx, trait_id, &trait_ty_generics,
&m.id, &m.ident, &m.explicit_self,
- &m.generics, &m.fn_style, m.decl)
+ &m.generics, &m.fn_style, &*m.decl)
}
&ast::Provided(ref m) => {
ty_method_of_trait_method(
ccx, trait_id, &trait_ty_generics,
&m.id, &m.ident, &m.explicit_self,
- &m.generics, &m.fn_style, m.decl)
+ &m.generics, &m.fn_style, &*m.decl)
}
});
struct_generics: &ty::Generics,
v: &ast::StructField,
origin: ast::DefId) -> ty::field_ty {
- let tt = ccx.to_ty(&ExplicitRscope, v.node.ty);
+ let tt = ccx.to_ty(&ExplicitRscope, &*v.node.ty);
write_ty_to_tcx(ccx.tcx, v.node.id, tt);
/* add the field to the tcache */
ccx.tcx.tcache.borrow_mut().insert(local_def(v.node.id),
fn convert_methods(ccx: &CrateCtxt,
container: MethodContainer,
- ms: &[@ast::Method],
+ ms: &[Gc<ast::Method>],
untransformed_rcvr_ty: ty::t,
rcvr_ty_generics: &ty::Generics,
rcvr_ast_generics: &ast::Generics,
num_rcvr_ty_params);
let mty = Rc::new(ty_of_method(ccx,
container,
- *m,
+ &**m,
untransformed_rcvr_ty,
rcvr_ast_generics,
rcvr_visibility));
{
let fty = astconv::ty_of_method(ccx, m.id, m.fn_style,
untransformed_rcvr_ty,
- m.explicit_self, m.decl);
+ m.explicit_self, &*m.decl);
// if the method specifies a visibility, use that, otherwise
// inherit the visibility from the impl (so `foo` in `pub impl
},
ast::ItemImpl(ref generics, ref opt_trait_ref, selfty, ref ms) => {
let ty_generics = ty_generics_for_type(ccx, generics);
- let selfty = ccx.to_ty(&ExplicitRscope, selfty);
+ let selfty = ccx.to_ty(&ExplicitRscope, &*selfty);
write_ty_to_tcx(tcx, it.id, selfty);
tcx.tcache.borrow_mut().insert(local_def(it.id),
// Write the super-struct type, if it exists.
match struct_def.super_struct {
Some(ty) => {
- let supserty = ccx.to_ty(&ExplicitRscope, ty);
+ let supserty = ccx.to_ty(&ExplicitRscope, &*ty);
write_ty_to_tcx(tcx, it.id, supserty);
},
_ => {},
}
- convert_struct(ccx, struct_def, tpt, it.id);
+ convert_struct(ccx, &*struct_def, tpt, it.id);
},
ast::ItemTy(_, ref generics) => {
ensure_no_ty_param_bounds(ccx, it.span, generics, "type");
}
match ccx.tcx.map.get(trait_id.node) {
- ast_map::NodeItem(item) => trait_def_of_item(ccx, item),
+ ast_map::NodeItem(item) => trait_def_of_item(ccx, &*item),
_ => {
ccx.tcx.sess.bug(format!("get_trait_def({}): not an item",
trait_id.node).as_slice())
}
match it.node {
ast::ItemStatic(t, _, _) => {
- let typ = ccx.to_ty(&ExplicitRscope, t);
+ let typ = ccx.to_ty(&ExplicitRscope, &*t);
let tpt = no_params(typ);
tcx.tcache.borrow_mut().insert(local_def(it.id), tpt.clone());
it.id,
fn_style,
abi,
- decl);
+ &*decl);
let tpt = ty_param_bounds_and_ty {
generics: ty_generics,
ty: ty::mk_bare_fn(ccx.tcx, tofd)
}
let tpt = {
- let ty = ccx.to_ty(&ExplicitRscope, t);
+ let ty = ccx.to_ty(&ExplicitRscope, &*t);
ty_param_bounds_and_ty {
generics: ty_generics_for_type(ccx, generics),
ty: ty
match it.node {
ast::ForeignItemFn(fn_decl, ref generics) => {
ty_of_foreign_fn_decl(ccx,
- fn_decl,
+ &*fn_decl,
local_def(it.id),
generics,
abi)
type_param_defs: Rc::new(Vec::new()),
region_param_defs: Rc::new(Vec::new()),
},
- ty: ast_ty_to_ty(ccx, &ExplicitRscope, t)
+ ty: ast_ty_to_ty(ccx, &ExplicitRscope, &*t)
}
}
}
param.ident,
param.span));
let default = param.default.map(|path| {
- let ty = ast_ty_to_ty(ccx, &ExplicitRscope, path);
+ let ty = ast_ty_to_ty(ccx, &ExplicitRscope, &*path);
let cur_idx = param_ty.idx;
ty::walk_ty(ty, |t| {
.map(|a| ty_of_arg(ccx, &rb, a, None))
.collect();
- let output_ty = ast_ty_to_ty(ccx, &rb, decl.output);
+ let output_ty = ast_ty_to_ty(ccx, &rb, &*decl.output);
let t_fn = ty::mk_bare_fn(
ccx.tcx,
}
ref other => other.clone()
};
- @ast::Ty { id: from.id, node: new_node, span: from.span }
+ box(GC) ast::Ty { id: from.id, node: new_node, span: from.span }
}
let new_ty_node = match to.node {
}
_ => fail!("expect ast::TyRptr or ast::TyPath")
};
- let new_ty = @ast::Ty {
+ let new_ty = box(GC) ast::Ty {
id: to.id,
node: new_ty_node,
span: to.span
// `ty::VariantInfo::from_ast_variant()` ourselves
// here, mainly so as to mask the differences between
// struct-like enums and so forth.
- for &ast_variant in enum_definition.variants.iter() {
+ for ast_variant in enum_definition.variants.iter() {
let variant =
ty::VariantInfo::from_ast_variant(tcx,
- ast_variant,
+ &**ast_variant,
/*discriminant*/ 0);
- for &arg_ty in variant.args.iter() {
- self.add_constraints_from_ty(arg_ty, self.covariant);
+ for arg_ty in variant.args.iter() {
+ self.add_constraints_from_ty(*arg_ty, self.covariant);
}
}
}
p: p,
flag: false,
};
- visit::walk_block(&mut v, b, ());
+ visit::walk_block(&mut v, &*b, ());
return v.flag;
}
use middle::typeck;
use std::rc::Rc;
-use std::string::String;
+use std::gc::Gc;
use syntax::abi;
use syntax::ast_map;
use syntax::codemap::{Span, Pos};
}
}
-impl<T:Repr> Repr for @T {
+impl<T:Repr + 'static> Repr for Gc<T> {
fn repr(&self, tcx: &ctxt) -> String {
(&**self).repr(tcx)
}
use std::rc::Rc;
use std::u32;
+use std::gc::Gc;
use core;
use doctree;
}
}
-impl<T: Clean<U>, U> Clean<U> for @T {
+impl<T: Clean<U>, U> Clean<U> for Gc<T> {
fn clean(&self) -> U {
(**self).clean()
}
_ => None,
}
}
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@ast::MetaItem]> { None }
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<ast::MetaItem>]> { None }
}
impl<'a> attr::AttrMetaMethods for &'a Attribute {
fn name(&self) -> InternedString { (**self).name() }
fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@ast::MetaItem]> { None }
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<ast::MetaItem>]> { None }
}
#[deriving(Clone, Encodable, Decodable)]
impl Clean<Argument> for ast::Arg {
fn clean(&self) -> Argument {
Argument {
- name: name_from_pat(self.pat),
+ name: name_from_pat(&*self.pat),
type_: (self.ty.clean()),
id: self.id
}
remaining,
b.clone());
let path = syntax::codemap::dummy_spanned(path);
- ret.push(convert(&ast::ViewItemUse(@path)));
+ ret.push(convert(&ast::ViewItemUse(box(GC) path)));
}
}
ast::ViewPathSimple(_, _, id) => {
PatStruct(..) => fail!("tried to get argument name from pat_struct, \
which is not allowed in function arguments"),
PatTup(..) => "(tuple arg NYI)".to_string(),
- PatBox(p) => name_from_pat(p),
- PatRegion(p) => name_from_pat(p),
+ PatBox(p) => name_from_pat(&*p),
+ PatRegion(p) => name_from_pat(&*p),
PatLit(..) => {
warn!("tried to get argument name from PatLit, \
which is silly in function arguments");
let mut cfg = build_configuration(&sess);
for cfg_ in cfgs.move_iter() {
let cfg_ = token::intern_and_get_ident(cfg_.as_slice());
- cfg.push(@dummy_spanned(ast::MetaWord(cfg_)));
+ cfg.push(box(GC) dummy_spanned(ast::MetaWord(cfg_)));
}
let krate = phase_1_parse_input(&sess, cfg, &input);
pub fn run_core(libs: HashSet<Path>, cfgs: Vec<String>, path: &Path)
-> (clean::Crate, CrateAnalysis) {
let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs);
- let ctxt = @ctxt;
+ let ctxt = box(GC) ctxt;
super::ctxtkey.replace(Some(ctxt));
let krate = {
- let mut v = RustdocVisitor::new(ctxt, Some(&analysis));
+ let mut v = RustdocVisitor::new(&*ctxt, Some(&analysis));
v.visit(&ctxt.krate);
v.clean()
};
use syntax::ast;
use syntax::ast::{Ident, NodeId};
+use std::gc::Gc;
+
pub struct Module {
pub name: Option<Ident>,
pub attrs: Vec<ast::Attribute>,
pub struct Static {
pub type_: ast::P<ast::Ty>,
pub mutability: ast::Mutability,
- pub expr: @ast::Expr,
+ pub expr: Gc<ast::Expr>,
pub name: Ident,
pub attrs: Vec<ast::Attribute>,
pub vis: ast::Visibility,
pub generics: ast::Generics,
pub trait_: Option<ast::TraitRef>,
pub for_: ast::P<ast::Ty>,
- pub methods: Vec<@ast::Method>,
+ pub methods: Vec<Gc<ast::Method>>,
pub attrs: Vec<ast::Attribute>,
pub where: Span,
pub vis: ast::Visibility,
use std::io;
use std::str;
use std::string::String;
+use std::sync::Arc;
-use sync::Arc;
use serialize::json::ToJson;
use syntax::ast;
use syntax::ast_util;
extern crate libc;
extern crate rustc;
extern crate serialize;
-extern crate sync;
extern crate syntax;
extern crate testing = "test";
extern crate time;
use std::io;
use std::io::{File, MemWriter};
use std::str;
+use std::gc::Gc;
use serialize::{json, Decodable, Encodable};
// reexported from `clean` so it can be easily updated with the mod itself
"unindent-comments",
];
-local_data_key!(pub ctxtkey: @core::DocContext)
+local_data_key!(pub ctxtkey: Gc<core::DocContext>)
local_data_key!(pub analysiskey: core::CrateAnalysis)
type Output = (clean::Crate, Vec<plugins::PluginJson> );
let mut cfg = config::build_configuration(&sess);
cfg.extend(cfgs.move_iter().map(|cfg_| {
let cfg_ = token::intern_and_get_ident(cfg_.as_slice());
- @dummy_spanned(ast::MetaWord(cfg_))
+ box(GC) dummy_spanned(ast::MetaWord(cfg_))
}));
let krate = driver::phase_1_parse_input(&sess, cfg, &input);
let (krate, _) = driver::phase_2_configure_and_expand(&sess, krate,
&from_str("rustdoc-test").unwrap());
- let ctx = @core::DocContext {
+ let ctx = box(GC) core::DocContext {
krate: krate,
maybe_typed: core::NotTyped(sess),
src: input_path,
};
super::ctxtkey.replace(Some(ctx));
- let mut v = RustdocVisitor::new(ctx, None);
+ let mut v = RustdocVisitor::new(&*ctx, None);
v.visit(&ctx.krate);
let krate = v.clean();
let (krate, _) = passes::unindent_comments(krate);
use syntax::attr::AttrMetaMethods;
use syntax::codemap::Span;
+use std::gc::Gc;
+
use core;
use doctree::*;
self.module.is_crate = true;
}
- pub fn visit_struct_def(&mut self, item: &ast::Item, sd: @ast::StructDef,
+ pub fn visit_struct_def(&mut self, item: &ast::Item, sd: Gc<ast::StructDef>,
generics: &ast::Generics) -> Struct {
debug!("Visiting struct");
- let struct_type = struct_type_from_def(sd);
+ let struct_type = struct_type_from_def(&*sd);
Struct {
id: item.id,
struct_type: struct_type,
om.vis = vis;
om.id = id;
for i in m.items.iter() {
- self.visit_item(*i, &mut om);
+ self.visit_item(&**i, &mut om);
}
om
}
om.view_items.push(item);
}
- fn visit_view_path(&mut self, path: @ast::ViewPath,
+ fn visit_view_path(&mut self, path: Gc<ast::ViewPath>,
om: &mut Module,
- please_inline: bool) -> Option<@ast::ViewPath> {
+ please_inline: bool) -> Option<Gc<ast::ViewPath>> {
match path.node {
ast::ViewPathSimple(_, _, id) => {
if self.resolve_id(id, false, om, please_inline) { return None }
}
if mine.len() == 0 { return None }
- return Some(@::syntax::codemap::Spanned {
+ return Some(box(GC) ::syntax::codemap::Spanned {
node: ast::ViewPathList(p.clone(), mine, b.clone()),
span: path.span,
})
self.visit_view_item(vi, om);
}
for i in m.items.iter() {
- self.visit_item(*i, om);
+ self.visit_item(&**i, om);
}
}
_ => { fail!("glob not mapped to a module"); }
}
} else {
- self.visit_item(it, om);
+ self.visit_item(&*it, om);
}
true
}
om.enums.push(self.visit_enum_def(item, ed, gen)),
ast::ItemStruct(sd, ref gen) =>
om.structs.push(self.visit_struct_def(item, sd, gen)),
- ast::ItemFn(fd, ref pur, ref abi, ref gen, _) =>
- om.fns.push(self.visit_fn(item, fd, pur, abi, gen)),
+ ast::ItemFn(ref fd, ref pur, ref abi, ref gen, _) =>
+ om.fns.push(self.visit_fn(item, &**fd, pur, abi, gen)),
ast::ItemTy(ty, ref gen) => {
let t = Typedef {
ty: ty,
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
-#![feature(macro_rules, phase, globs, thread_local, managed_boxes, asm)]
+#![feature(macro_rules, phase, globs, thread_local, managed_boxes, asm,
+ linkage)]
#![no_std]
#![experimental]
pub mod args;
pub mod bookkeeping;
+pub mod c_str;
pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod rtio;
pub mod stack;
pub mod task;
+pub mod thread;
pub mod unwind;
-pub mod c_str;
/// The interface to the current runtime.
///
```rust
local_data_key!(key_int: int)
-local_data_key!(key_vector: ~[int])
+local_data_key!(key_vector: Vec<int>)
key_int.replace(Some(3));
assert_eq!(*key_int.get().unwrap(), 3);
-key_vector.replace(Some(~[4]));
-assert_eq!(*key_vector.get().unwrap(), ~[4]);
+key_vector.replace(Some(vec![4]));
+assert_eq!(*key_vector.get().unwrap(), vec![4]);
```
*/
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Native OS-thread management
+//!
+//! This module contains the bindings necessary for managing OS-level
+//! threads. These functions operate outside of the rust runtime, creating
+//! threads which are not used for scheduling in any way.
+
+#![allow(non_camel_case_types)]
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::mem;
+use core::uint;
+use libc;
+
+use stack;
+
+// Signature of the raw entry point handed to the OS thread-creation
+// primitive (CreateThread / pthread_create below).
+type StartFn = extern "C" fn(*libc::c_void) -> imp::rust_thread_return;
+
+/// This struct represents a native thread's state. This is used to join on an
+/// existing thread created in the join-able state.
+pub struct Thread<T> {
+ native: imp::rust_thread, // OS-level handle (HANDLE / pthread_t)
+ joined: bool, // whether the OS thread has already been joined
+ packet: Box<Option<T>>, // heap slot the spawned proc writes its result into
+}
+
+// Default stack size for new threads: 1 MiB.
+static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
+
+// This is the starting point of rust os threads. The first thing we do
+// is make sure that we don't trigger __morestack (also why this has a
+// no_split_stack annotation), and then we extract the main function
+// and invoke it.
+#[no_split_stack]
+extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return {
+ unsafe {
+ // Register maximal stack bounds so stack-limit checks don't trip on
+ // this OS-provided stack (see the __morestack note above).
+ stack::record_stack_bounds(0, uint::MAX);
+ // Reclaim ownership of the boxed proc smuggled through the raw
+ // pointer by imp::create, then run it.
+ let f: Box<proc()> = mem::transmute(main);
+ (*f)();
+ mem::transmute(0 as imp::rust_thread_return)
+ }
+}
+
+// There are two impl blocks b/c if T were specified at the top then it's just a
+// pain to specify a type parameter on Thread::spawn (which doesn't need the
+// type parameter).
+impl Thread<()> {
+
+ /// Starts execution of a new OS thread.
+ ///
+ /// This function will not wait for the thread to join, but a handle to the
+ /// thread will be returned.
+ ///
+ /// Note that the handle returned is used to acquire the return value of the
+ /// procedure `main`. The `join` function will wait for the thread to finish
+ /// and return the value that `main` generated.
+ ///
+ /// Also note that the `Thread` returned will *always* wait for the thread
+ /// to finish executing. This means that even if `join` is not explicitly
+ /// called, when the `Thread` falls out of scope its destructor will block
+ /// waiting for the OS thread.
+ pub fn start<T: Send>(main: proc():Send -> T) -> Thread<T> {
+ Thread::start_stack(DEFAULT_STACK_SIZE, main)
+ }
+
+ /// Performs the same functionality as `start`, but specifies an explicit
+ /// stack size for the new thread.
+ pub fn start_stack<T: Send>(stack: uint, main: proc():Send -> T) -> Thread<T> {
+
+ // We need the address of the packet to fill in to be stable so when
+ // `main` fills it in it's still valid, so allocate an extra box to do
+ // so.
+ let packet = box None;
+ // Read the heap pointer out of the Box without moving it; the
+ // transmute reinterprets &Box<_> as a pointer-to-pointer.
+ let packet2: *mut Option<T> = unsafe {
+ *mem::transmute::<&Box<Option<T>>, **mut Option<T>>(&packet)
+ };
+ // The spawned proc writes its result through the raw pointer;
+ // `packet` (owned by the returned Thread) keeps the slot alive
+ // until join/drop.
+ let main = proc() unsafe { *packet2 = Some(main()); };
+ let native = unsafe { imp::create(stack, box main) };
+
+ Thread {
+ native: native,
+ joined: false,
+ packet: packet,
+ }
+ }
+
+ /// This will spawn a new thread, but it will not wait for the thread to
+ /// finish, nor is it possible to wait for the thread to finish.
+ ///
+ /// This corresponds to creating threads in the 'detached' state on unix
+ /// systems. Note that platforms may not keep the main program alive even if
+ /// there are detached threads still running around.
+ pub fn spawn(main: proc():Send) {
+ Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
+ }
+
+ /// Performs the same functionality as `spawn`, but explicitly specifies a
+ /// stack size for the new thread.
+ pub fn spawn_stack(stack: uint, main: proc():Send) {
+ unsafe {
+ let handle = imp::create(stack, box main);
+ // Detach immediately: the handle is released and the thread can
+ // never be joined.
+ imp::detach(handle);
+ }
+ }
+
+ /// Relinquishes the CPU slot that this OS-thread is currently using,
+ /// allowing another thread to run for a while.
+ pub fn yield_now() {
+ unsafe { imp::yield_now(); }
+ }
+}
+
+impl<T: Send> Thread<T> {
+ /// Wait for this thread to finish, returning the result of the thread's
+ /// calculation.
+ pub fn join(mut self) -> T {
+ assert!(!self.joined);
+ unsafe { imp::join(self.native) };
+ // Mark as joined so the destructor does not join a second time.
+ self.joined = true;
+ // The thread has exited, so it must have filled in the packet.
+ assert!(self.packet.is_some());
+ self.packet.take_unwrap()
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Thread<T> {
+ fn drop(&mut self) {
+ // This is required for correctness. If this is not done then the thread
+ // would fill in a return box which no longer exists.
+ //
+ // Joining here blocks until the OS thread exits, which is why a
+ // Thread handle always waits for its thread (see Thread::start docs).
+ if !self.joined {
+ unsafe { imp::join(self.native) };
+ }
+ }
+}
+
+#[cfg(windows)]
+mod imp {
+ use core::prelude::*;
+
+ use alloc::owned::Box;
+ use core::cmp;
+ use core::mem;
+ use core::ptr;
+ use libc;
+ use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
+ LPVOID, DWORD, LPDWORD, HANDLE};
+ use stack::RED_ZONE;
+
+ pub type rust_thread = HANDLE;
+ pub type rust_thread_return = DWORD;
+
+ /// Creates a new joinable OS thread running `p`; fails (without leaking
+ /// `p`) if the underlying `CreateThread` call does.
+ pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+ // Ownership of the boxed proc passes through the raw pointer and is
+ // reclaimed in thread_start (or below on failure).
+ let arg: *mut libc::c_void = mem::transmute(p);
+ // FIXME On UNIX, we guard against stack sizes that are too small but
+ // that's because pthreads enforces that stacks are at least
+ // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
+ // just that below a certain threshold you can't do anything useful.
+ // That threshold is application and architecture-specific, however.
+ // For now, the only requirement is that it's big enough to hold the
+ // red zone. Round up to the next 64 kB because that's what the NT
+ // kernel does, might as well make it explicit. With the current
+ // 20 kB red zone, that makes for a 64 kB minimum stack.
+ // NOTE(review): `-0xfffe - 1` equals `!0xfffe`, not `!0xffff`, so
+ // this mask keeps bit 0 and does not round to an exact 64 kB
+ // boundary as intended above — confirm.
+ let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
+ let ret = CreateThread(ptr::mut_null(), stack_size as libc::size_t,
+ super::thread_start, arg, 0, ptr::mut_null());
+
+ if ret as uint == 0 {
+ // be sure to not leak the closure
+ let _p: Box<proc():Send> = mem::transmute(arg);
+ fail!("failed to spawn native thread: {}", ret);
+ }
+ return ret;
+ }
+
+ pub unsafe fn join(native: rust_thread) {
+ use libc::consts::os::extra::INFINITE;
+ WaitForSingleObject(native, INFINITE);
+ }
+
+ pub unsafe fn detach(native: rust_thread) {
+ assert!(libc::CloseHandle(native) != 0);
+ }
+
+ pub unsafe fn yield_now() {
+ // This function will return 0 if there are no other threads to execute,
+ // but this also means that the yield was useless so this isn't really a
+ // case that needs to be worried about.
+ SwitchToThread();
+ }
+
+ #[allow(non_snake_case_functions)]
+ extern "system" {
+ fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
+ dwStackSize: SIZE_T,
+ lpStartAddress: super::StartFn,
+ lpParameter: LPVOID,
+ dwCreationFlags: DWORD,
+ lpThreadId: LPDWORD) -> HANDLE;
+ fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
+ fn SwitchToThread() -> BOOL;
+ }
+}
+
+#[cfg(unix)]
+mod imp {
+ use core::prelude::*;
+
+ use alloc::owned::Box;
+ use core::cmp;
+ use core::mem;
+ use core::ptr;
+ use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
+ use libc;
+
+ use stack::RED_ZONE;
+
+ pub type rust_thread = libc::pthread_t;
+ pub type rust_thread_return = *u8;
+
+ /// Creates a new joinable pthread running `p`; fails (without leaking
+ /// `p`) if `pthread_create` returns an error.
+ pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
+ let mut native: libc::pthread_t = mem::zeroed();
+ let mut attr: libc::pthread_attr_t = mem::zeroed();
+ assert_eq!(pthread_attr_init(&mut attr), 0);
+ assert_eq!(pthread_attr_setdetachstate(&mut attr,
+ PTHREAD_CREATE_JOINABLE), 0);
+
+ // Reserve room for the red zone, the runtime's stack of last resort.
+ let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as uint);
+ match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
+ 0 => {
+ },
+ libc::EINVAL => {
+ // EINVAL means |stack_size| is either too small or not a
+ // multiple of the system page size. Because it's definitely
+ // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+ // Round up to the nearest page and try again.
+ // (`-(page_size as int - 1) as uint - 1` is `!(page_size - 1)`,
+ // the usual power-of-two round-up mask.)
+ let page_size = libc::sysconf(libc::_SC_PAGESIZE) as uint;
+ let stack_size = (stack_size + page_size - 1) &
+ (-(page_size as int - 1) as uint - 1);
+ assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
+ },
+ errno => {
+ // This cannot really happen.
+ fail!("pthread_attr_setstacksize() error: {}", errno);
+ },
+ };
+
+ // Ownership of the boxed proc passes through the raw pointer and is
+ // reclaimed in thread_start (or below on failure).
+ let arg: *libc::c_void = mem::transmute(p);
+ let ret = pthread_create(&mut native, &attr, super::thread_start, arg);
+ assert_eq!(pthread_attr_destroy(&mut attr), 0);
+
+ if ret != 0 {
+ // be sure to not leak the closure
+ let _p: Box<proc():Send> = mem::transmute(arg);
+ fail!("failed to spawn native thread: {}", ret);
+ }
+ native
+ }
+
+ pub unsafe fn join(native: rust_thread) {
+ assert_eq!(pthread_join(native, ptr::null()), 0);
+ }
+
+ pub unsafe fn detach(native: rust_thread) {
+ assert_eq!(pthread_detach(native), 0);
+ }
+
+ pub unsafe fn yield_now() { assert_eq!(sched_yield(), 0); }
+
+ // glibc >= 2.15 has a __pthread_get_minstack() function that returns
+ // PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
+ // storage. We need that information to avoid blowing up when a small stack
+ // is created in an application with big thread-local storage requirements.
+ // See #6233 for rationale and details.
+ //
+ // Link weakly to the symbol for compatibility with older versions of glibc.
+ // Assumes that we've been dynamically linked to libpthread but that is
+ // currently always the case. Note that you need to check that the symbol
+ // is non-null before calling it!
+ #[cfg(target_os = "linux")]
+ fn min_stack_size(attr: *libc::pthread_attr_t) -> libc::size_t {
+ type F = unsafe extern "C" fn(*libc::pthread_attr_t) -> libc::size_t;
+ extern {
+ #[linkage = "extern_weak"]
+ static __pthread_get_minstack: *();
+ }
+ if __pthread_get_minstack.is_null() {
+ PTHREAD_STACK_MIN
+ } else {
+ unsafe { mem::transmute::<*(), F>(__pthread_get_minstack)(attr) }
+ }
+ }
+
+ // __pthread_get_minstack() is marked as weak but extern_weak linkage is
+ // not supported on OS X, hence this kludge...
+ #[cfg(not(target_os = "linux"))]
+ fn min_stack_size(_: *libc::pthread_attr_t) -> libc::size_t {
+ PTHREAD_STACK_MIN
+ }
+
+ extern {
+ fn pthread_create(native: *mut libc::pthread_t,
+ attr: *libc::pthread_attr_t,
+ f: super::StartFn,
+ value: *libc::c_void) -> libc::c_int;
+ fn pthread_join(native: libc::pthread_t,
+ value: **libc::c_void) -> libc::c_int;
+ fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+ fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int;
+ fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
+ stack_size: libc::size_t) -> libc::c_int;
+ fn pthread_attr_setdetachstate(attr: *mut libc::pthread_attr_t,
+ state: libc::c_int) -> libc::c_int;
+ fn pthread_detach(thread: libc::pthread_t) -> libc::c_int;
+ fn sched_yield() -> libc::c_int;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Thread;
+
+ // Basic spawn/join round-trip.
+ #[test]
+ fn smoke() { Thread::start(proc (){}).join(); }
+
+ // The value computed by the proc is returned from `join`.
+ #[test]
+ fn data() { assert_eq!(Thread::start(proc () { 1 }).join(), 1); }
+
+ // Detached threads can be spawned without producing a handle.
+ #[test]
+ fn detached() { Thread::spawn(proc () {}) }
+
+ // Tiny requested stack sizes are rounded up rather than rejected.
+ #[test]
+ fn small_stacks() {
+ assert_eq!(42, Thread::start_stack(0, proc () 42).join());
+ assert_eq!(42, Thread::start_stack(1, proc () 42).join());
+ }
+}
+
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
-#![deny(deprecated_owned_vector)]
use std::char;
use std::cmp;
```
Two wrapper functions are provided to encode a Encodable object
-into a string (String) or buffer (~[u8]): `str_encode(&m)` and `buffer_encode(&m)`.
+into a string (String) or buffer (Vec<u8>): `str_encode(&m)` and `buffer_encode(&m)`.
```rust
use serialize::json;
fn to_json(&self) -> Json { List(self.iter().map(|elt| elt.to_json()).collect()) }
}
-impl<A:ToJson> ToJson for ~[A] {
- fn to_json(&self) -> Json { List(self.iter().map(|elt| elt.to_json()).collect()) }
-}
-
impl<A:ToJson> ToJson for Vec<A> {
fn to_json(&self) -> Json { List(self.iter().map(|elt| elt.to_json()).collect()) }
}
let _hm: HashMap<uint, bool> = Decodable::decode(&mut decoder).unwrap();
}
- fn assert_stream_equal(src: &str, expected: ~[(JsonEvent, ~[StackElement])]) {
+ fn assert_stream_equal(src: &str,
+ expected: Vec<(JsonEvent, Vec<StackElement>)>) {
let mut parser = Parser::new(src.chars());
let mut i = 0;
loop {
Some(e) => e,
None => { break; }
};
- let (ref expected_evt, ref expected_stack) = expected[i];
+ let (ref expected_evt, ref expected_stack) = *expected.get(i);
if !parser.stack().is_equal_to(expected_stack.as_slice()) {
fail!("Parser stack is not equal to {}", expected_stack);
}
}
}
#[test]
+ #[ignore(cfg(target_word_size = "32"))] // FIXME(#14064)
fn test_streaming_parser() {
assert_stream_equal(
r#"{ "foo":"bar", "array" : [0, 1, 2,3 ,4,5], "idents":[null,true,false]}"#,
- ~[
- (ObjectStart, ~[]),
- (StringValue("bar".to_string()), ~[Key("foo")]),
- (ListStart, ~[Key("array")]),
- (NumberValue(0.0), ~[Key("array"), Index(0)]),
- (NumberValue(1.0), ~[Key("array"), Index(1)]),
- (NumberValue(2.0), ~[Key("array"), Index(2)]),
- (NumberValue(3.0), ~[Key("array"), Index(3)]),
- (NumberValue(4.0), ~[Key("array"), Index(4)]),
- (NumberValue(5.0), ~[Key("array"), Index(5)]),
- (ListEnd, ~[Key("array")]),
- (ListStart, ~[Key("idents")]),
- (NullValue, ~[Key("idents"), Index(0)]),
- (BooleanValue(true), ~[Key("idents"), Index(1)]),
- (BooleanValue(false), ~[Key("idents"), Index(2)]),
- (ListEnd, ~[Key("idents")]),
- (ObjectEnd, ~[]),
+ vec![
+ (ObjectStart, vec![]),
+ (StringValue("bar".to_string()), vec![Key("foo")]),
+ (ListStart, vec![Key("array")]),
+ (NumberValue(0.0), vec![Key("array"), Index(0)]),
+ (NumberValue(1.0), vec![Key("array"), Index(1)]),
+ (NumberValue(2.0), vec![Key("array"), Index(2)]),
+ (NumberValue(3.0), vec![Key("array"), Index(3)]),
+ (NumberValue(4.0), vec![Key("array"), Index(4)]),
+ (NumberValue(5.0), vec![Key("array"), Index(5)]),
+ (ListEnd, vec![Key("array")]),
+ (ListStart, vec![Key("idents")]),
+ (NullValue, vec![Key("idents"), Index(0)]),
+ (BooleanValue(true), vec![Key("idents"), Index(1)]),
+ (BooleanValue(false), vec![Key("idents"), Index(2)]),
+ (ListEnd, vec![Key("idents")]),
+ (ObjectEnd, vec![]),
]
);
}
assert_stream_equal(
"{}",
- box [(ObjectStart, box []), (ObjectEnd, box [])]
+ vec![(ObjectStart, vec![]), (ObjectEnd, vec![])]
);
assert_stream_equal(
"{\"a\": 3}",
- box [
- (ObjectStart, box []),
- (NumberValue(3.0), box [Key("a")]),
- (ObjectEnd, box []),
+ vec![
+ (ObjectStart, vec![]),
+ (NumberValue(3.0), vec![Key("a")]),
+ (ObjectEnd, vec![]),
]
);
assert_stream_equal(
"{ \"a\": null, \"b\" : true }",
- box [
- (ObjectStart, box []),
- (NullValue, box [Key("a")]),
- (BooleanValue(true), box [Key("b")]),
- (ObjectEnd, box []),
+ vec![
+ (ObjectStart, vec![]),
+ (NullValue, vec![Key("a")]),
+ (BooleanValue(true), vec![Key("b")]),
+ (ObjectEnd, vec![]),
]
);
assert_stream_equal(
"{\"a\" : 1.0 ,\"b\": [ true ]}",
- box [
- (ObjectStart, box []),
- (NumberValue(1.0), box [Key("a")]),
- (ListStart, box [Key("b")]),
- (BooleanValue(true),box [Key("b"), Index(0)]),
- (ListEnd, box [Key("b")]),
- (ObjectEnd, box []),
+ vec![
+ (ObjectStart, vec![]),
+ (NumberValue(1.0), vec![Key("a")]),
+ (ListStart, vec![Key("b")]),
+ (BooleanValue(true),vec![Key("b"), Index(0)]),
+ (ListEnd, vec![Key("b")]),
+ (ObjectEnd, vec![]),
]
);
assert_stream_equal(
{ "c": {"d": null} }
]
}"#,
- ~[
- (ObjectStart, ~[]),
- (NumberValue(1.0), ~[Key("a")]),
- (ListStart, ~[Key("b")]),
- (BooleanValue(true), ~[Key("b"), Index(0)]),
- (StringValue("foo\nbar".to_string()), ~[Key("b"), Index(1)]),
- (ObjectStart, ~[Key("b"), Index(2)]),
- (ObjectStart, ~[Key("b"), Index(2), Key("c")]),
- (NullValue, ~[Key("b"), Index(2), Key("c"), Key("d")]),
- (ObjectEnd, ~[Key("b"), Index(2), Key("c")]),
- (ObjectEnd, ~[Key("b"), Index(2)]),
- (ListEnd, ~[Key("b")]),
- (ObjectEnd, ~[]),
+ vec![
+ (ObjectStart, vec![]),
+ (NumberValue(1.0), vec![Key("a")]),
+ (ListStart, vec![Key("b")]),
+ (BooleanValue(true), vec![Key("b"), Index(0)]),
+ (StringValue("foo\nbar".to_string()), vec![Key("b"), Index(1)]),
+ (ObjectStart, vec![Key("b"), Index(2)]),
+ (ObjectStart, vec![Key("b"), Index(2), Key("c")]),
+ (NullValue, vec![Key("b"), Index(2), Key("c"), Key("d")]),
+ (ObjectEnd, vec![Key("b"), Index(2), Key("c")]),
+ (ObjectEnd, vec![Key("b"), Index(2)]),
+ (ListEnd, vec![Key("b")]),
+ (ObjectEnd, vec![]),
]
);
}
fn test_read_list_streaming() {
assert_stream_equal(
"[]",
- box [
- (ListStart, box []),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[ ]",
- box [
- (ListStart, box []),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[true]",
- box [
- (ListStart, box []),
- (BooleanValue(true), box [Index(0)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (BooleanValue(true), vec![Index(0)]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[ false ]",
- box [
- (ListStart, box []),
- (BooleanValue(false), box [Index(0)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (BooleanValue(false), vec![Index(0)]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[null]",
- box [
- (ListStart, box []),
- (NullValue, box [Index(0)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (NullValue, vec![Index(0)]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[3, 1]",
- box [
- (ListStart, box []),
- (NumberValue(3.0), box [Index(0)]),
- (NumberValue(1.0), box [Index(1)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (NumberValue(3.0), vec![Index(0)]),
+ (NumberValue(1.0), vec![Index(1)]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"\n[3, 2]\n",
- box [
- (ListStart, box []),
- (NumberValue(3.0), box [Index(0)]),
- (NumberValue(2.0), box [Index(1)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (NumberValue(3.0), vec![Index(0)]),
+ (NumberValue(2.0), vec![Index(1)]),
+ (ListEnd, vec![]),
]
);
assert_stream_equal(
"[2, [4, 1]]",
- box [
- (ListStart, box []),
- (NumberValue(2.0), box [Index(0)]),
- (ListStart, box [Index(1)]),
- (NumberValue(4.0), box [Index(1), Index(0)]),
- (NumberValue(1.0), box [Index(1), Index(1)]),
- (ListEnd, box [Index(1)]),
- (ListEnd, box []),
+ vec![
+ (ListStart, vec![]),
+ (NumberValue(2.0), vec![Index(0)]),
+ (ListStart, vec![Index(1)]),
+ (NumberValue(4.0), vec![Index(1), Index(0)]),
+ (NumberValue(1.0), vec![Index(1), Index(1)]),
+ (ListEnd, vec![Index(1)]),
+ (ListEnd, vec![]),
]
);
assert_eq!((1, 2, 3).to_json(), list3);
assert_eq!([1, 2].to_json(), list2);
assert_eq!((&[1, 2, 3]).to_json(), list3);
- assert_eq!((~[1, 2]).to_json(), list2);
+ assert_eq!((vec![1, 2]).to_json(), list2);
assert_eq!(vec!(1, 2, 3).to_json(), list3);
let mut tree_map = TreeMap::new();
tree_map.insert("a".to_string(), 1);
use std::path;
use std::rc::Rc;
+use std::gc::Gc;
pub trait Encoder<E> {
// Primitive types:
}
}
-impl<E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for @T {
+impl<E, S:Encoder<E>,T:'static + Encodable<S, E>> Encodable<S, E> for Gc<T> {
fn encode(&self, s: &mut S) -> Result<(), E> {
(**self).encode(s)
}
}
}
-impl<E, D:Decoder<E>,T:Decodable<D, E> + 'static> Decodable<D, E> for @T {
- fn decode(d: &mut D) -> Result<@T, E> {
- Ok(@try!(Decodable::decode(d)))
+impl<E, D:Decoder<E>,T:Decodable<D, E> + 'static> Decodable<D, E> for Gc<T> {
+ fn decode(d: &mut D) -> Result<Gc<T>, E> {
+ Ok(box(GC) try!(Decodable::decode(d)))
}
}
}
}
-impl<E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for ~[T] {
- fn encode(&self, s: &mut S) -> Result<(), E> {
- s.emit_seq(self.len(), |s| {
- for (i, e) in self.iter().enumerate() {
- try!(s.emit_seq_elt(i, |s| e.encode(s)))
- }
- Ok(())
- })
- }
-}
-
-impl<E, D:Decoder<E>,T:Decodable<D, E>> Decodable<D, E> for ~[T] {
- fn decode(d: &mut D) -> Result<~[T], E> {
- use std::vec::FromVec;
-
- d.read_seq(|d, len| {
- let mut v: Vec<T> = Vec::with_capacity(len);
- for i in range(0, len) {
- v.push(try!(d.read_seq_elt(i, |d| Decodable::decode(d))));
- }
- let k: ~[T] = FromVec::from_vec(v);
- Ok(k)
- })
- }
-}
-
impl<E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for Vec<T> {
fn encode(&self, s: &mut S) -> Result<(), E> {
s.emit_seq(self.len(), |s| {
unsafe fn into_ascii_nocheck(self) -> Vec<Ascii>;
}
-impl OwnedAsciiCast for ~[u8] {
- #[inline]
- fn is_ascii(&self) -> bool {
- self.as_slice().is_ascii()
- }
-
- #[inline]
- unsafe fn into_ascii_nocheck(self) -> Vec<Ascii> {
- mem::transmute(Vec::from_slice(self.as_slice()))
- }
-}
-
impl OwnedAsciiCast for String {
#[inline]
fn is_ascii(&self) -> bool {
}
}
-impl IntoStr for ~[Ascii] {
- #[inline]
- fn into_str(self) -> String {
- let vector: Vec<Ascii> = self.as_slice().iter().map(|x| *x).collect();
- vector.into_str()
- }
-}
-
impl IntoStr for Vec<Ascii> {
#[inline]
fn into_str(self) -> String {
let test = &[40u8, 32u8, 59u8];
assert_eq!(test.to_ascii(), v2ascii!([40, 32, 59]));
assert_eq!("( ;".to_ascii(), v2ascii!([40, 32, 59]));
- let v = box [40u8, 32u8, 59u8];
- assert_eq!(v.to_ascii(), v2ascii!([40, 32, 59]));
+ let v = vec![40u8, 32u8, 59u8];
+ assert_eq!(v.as_slice().to_ascii(), v2ascii!([40, 32, 59]));
assert_eq!("( ;".to_string().as_slice().to_ascii(), v2ascii!([40, 32, 59]));
assert_eq!("abCDef&?#".to_ascii().to_lower().into_str(), "abcdef&?#".to_string());
#[test]
fn test_owned_ascii_vec() {
assert_eq!(("( ;".to_string()).into_ascii(), vec2ascii![40, 32, 59]);
- assert_eq!((box [40u8, 32u8, 59u8]).into_ascii(), vec2ascii![40, 32, 59]);
+ assert_eq!((vec![40u8, 32u8, 59u8]).into_ascii(), vec2ascii![40, 32, 59]);
}
#[test]
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Communication primitives for concurrent tasks
-//!
-//! Rust makes it very difficult to share data among tasks to prevent race
-//! conditions and to improve parallelism, but there is often a need for
-//! communication between concurrent tasks. The primitives defined in this
-//! module are the building blocks for synchronization in rust.
-//!
-//! This module provides message-based communication over channels, concretely
-//! defined among three types:
-//!
-//! * `Sender`
-//! * `SyncSender`
-//! * `Receiver`
-//!
-//! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
-//! senders are clone-able such that many tasks can send simultaneously to one
-//! receiver. These channels are *task blocking*, not *thread blocking*. This
-//! means that if one task is blocked on a channel, other tasks can continue to
-//! make progress.
-//!
-//! Rust channels come in one of two flavors:
-//!
-//! 1. An asynchronous, infinitely buffered channel. The `channel()` function
-//! will return a `(Sender, Receiver)` tuple where all sends will be
-//! **asynchronous** (they never block). The channel conceptually has an
-//! infinite buffer.
-//!
-//! 2. A synchronous, bounded channel. The `sync_channel()` function will return
-//! a `(SyncSender, Receiver)` tuple where the storage for pending messages
-//! is a pre-allocated buffer of a fixed size. All sends will be
-//! **synchronous** by blocking until there is buffer space available. Note
-//! that a bound of 0 is allowed, causing the channel to become a
-//! "rendezvous" channel where each sender atomically hands off a message to
-//! a receiver.
-//!
-//! ## Failure Propagation
-//!
-//! In addition to being a core primitive for communicating in rust, channels
-//! are the points at which failure is propagated among tasks. Whenever the one
-//! half of channel is closed, the other half will have its next operation
-//! `fail!`. The purpose of this is to allow propagation of failure among tasks
-//! that are linked to one another via channels.
-//!
-//! There are methods on both of senders and receivers to perform their
-//! respective operations without failing, however.
-//!
-//! ## Runtime Requirements
-//!
-//! The channel types defined in this module generally have very few runtime
-//! requirements in order to operate. The major requirement they have is for a
-//! local rust `Task` to be available if any *blocking* operation is performed.
-//!
-//! If a local `Task` is not available (for example an FFI callback), then the
-//! `send` operation is safe on a `Sender` (as well as a `send_opt`) as well as
-//! the `try_send` method on a `SyncSender`, but no other operations are
-//! guaranteed to be safe.
-//!
-//! Additionally, channels can interoperate between runtimes. If one task in a
-//! program is running on libnative and another is running on libgreen, they can
-//! still communicate with one another using channels.
-//!
-//! # Example
-//!
-//! Simple usage:
-//!
-//! ```
-//! // Create a simple streaming channel
-//! let (tx, rx) = channel();
-//! spawn(proc() {
-//! tx.send(10);
-//! });
-//! assert_eq!(rx.recv(), 10);
-//! ```
-//!
-//! Shared usage:
-//!
-//! ```
-//! // Create a shared channel which can be sent along from many tasks
-//! let (tx, rx) = channel();
-//! for i in range(0, 10) {
-//! let tx = tx.clone();
-//! spawn(proc() {
-//! tx.send(i);
-//! })
-//! }
-//!
-//! for _ in range(0, 10) {
-//! let j = rx.recv();
-//! assert!(0 <= j && j < 10);
-//! }
-//! ```
-//!
-//! Propagating failure:
-//!
-//! ```should_fail
-//! // The call to recv() will fail!() because the channel has already hung
-//! // up (or been deallocated)
-//! let (tx, rx) = channel::<int>();
-//! drop(tx);
-//! rx.recv();
-//! ```
-//!
-//! Synchronous channels:
-//!
-//! ```
-//! let (tx, rx) = sync_channel(0);
-//! spawn(proc() {
-//! // This will wait for the parent task to start receiving
-//! tx.send(53);
-//! });
-//! rx.recv();
-//! ```
-
-// A description of how Rust's channel implementation works
-//
-// Channels are supposed to be the basic building block for all other
-// concurrent primitives that are used in Rust. As a result, the channel type
-// needs to be highly optimized, flexible, and broad enough for use everywhere.
-//
-// The choice of implementation of all channels is to be built on lock-free data
-// structures. The channels themselves are then consequently also lock-free data
-// structures. As always with lock-free code, this is a very "here be dragons"
-// territory, especially because I'm unaware of any academic papers which have
-// gone into great length about channels of these flavors.
-//
-// ## Flavors of channels
-//
-// From the perspective of a consumer of this library, there is only one flavor
-// of channel. This channel can be used as a stream and cloned to allow multiple
-// senders. Under the hood, however, there are actually three flavors of
-// channels in play.
-//
-// * Oneshots - these channels are highly optimized for the one-send use case.
-// They contain as few atomics as possible and involve one and
-// exactly one allocation.
-// * Streams - these channels are optimized for the non-shared use case. They
-// use a different concurrent queue which is more tailored for this
-// use case. The initial allocation of this flavor of channel is not
-// optimized.
-// * Shared - this is the most general form of channel that this module offers,
-// a channel with multiple senders. This type is as optimized as it
-// can be, but the previous two types mentioned are much faster for
-// their use-cases.
-//
-// ## Concurrent queues
-//
-// The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
-// recv() obviously blocks. This means that under the hood there must be some
-// shared and concurrent queue holding all of the actual data.
-//
-// With two flavors of channels, two flavors of queues are also used. We have
-// chosen to use queues from a well-known author which are abbreviated as SPSC
-// and MPSC (single producer, single consumer and multiple producer, single
-// consumer). SPSC queues are used for streams while MPSC queues are used for
-// shared channels.
-//
-// ### SPSC optimizations
-//
-// The SPSC queue found online is essentially a linked list of nodes where one
-// half of the nodes are the "queue of data" and the other half of nodes are a
-// cache of unused nodes. The unused nodes are used such that an allocation is
-// not required on every push() and a free doesn't need to happen on every
-// pop().
-//
-// As found online, however, the cache of nodes is of an infinite size. This
-// means that if a channel at one point in its life had 50k items in the queue,
-// then the queue will always have the capacity for 50k items. I believed that
-// this was an unnecessary limitation of the implementation, so I have altered
-// the queue to optionally have a bound on the cache size.
-//
-// By default, streams will have an unbounded SPSC queue with a small-ish cache
-// size. The hope is that the cache is still large enough to have very fast
-// send() operations while not too large such that millions of channels can
-// coexist at once.
-//
-// ### MPSC optimizations
-//
-// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
-// a linked list under the hood to earn its unboundedness, but I have not put
-// forth much effort into having a cache of nodes similar to the SPSC queue.
-//
-// For now, I believe that this is "ok" because shared channels are not the most
-// common type, but soon we may wish to revisit this queue choice and determine
-// another candidate for backend storage of shared channels.
-//
-// ## Overview of the Implementation
-//
-// Now that there's a little background on the concurrent queues used, it's
-// worth going into much more detail about the channels themselves. The basic
-// pseudocode for a send/recv are:
-//
-//
-// send(t) recv()
-// queue.push(t) return if queue.pop()
-// if increment() == -1 deschedule {
-// wakeup() if decrement() > 0
-// cancel_deschedule()
-// }
-// queue.pop()
-//
-// As mentioned before, there are no locks in this implementation, only atomic
-// instructions are used.
-//
-// ### The internal atomic counter
-//
-// Every channel has a shared counter with each half to keep track of the size
-// of the queue. This counter is used to abort descheduling by the receiver and
-// to know when to wake up on the sending side.
-//
-// As seen in the pseudocode, senders will increment this count and receivers
-// will decrement the count. The theory behind this is that if a sender sees a
-// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
-// then it doesn't need to block.
-//
-// The recv() method has a beginning call to pop(), and if successful, it needs
-// to decrement the count. It is a crucial implementation detail that this
-// decrement does *not* happen to the shared counter. If this were the case,
-// then it would be possible for the counter to be very negative when there were
-// no receivers waiting, in which case the senders would have to determine when
-// it was actually appropriate to wake up a receiver.
-//
-// Instead, the "steal count" is kept track of separately (not atomically
-// because it's only used by receivers), and then the decrement() call when
-// descheduling will lump in all of the recent steals into one large decrement.
-//
-// The implication of this is that if a sender sees a -1 count, then there's
-// guaranteed to be a waiter waiting!
-//
-// ## Native Implementation
-//
-// A major goal of these channels is to work seamlessly on and off the runtime.
-// All of the previous race conditions have been worded in terms of
-// scheduler-isms (which is obviously not available without the runtime).
-//
-// For now, native usage of channels (off the runtime) will fall back onto
-// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
-// is still entirely lock-free, the "deschedule" blocks above are surrounded by
-// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
-// condition variable.
-//
-// ## Select
-//
-// Being able to support selection over channels has greatly influenced this
-// design, and not only does selection need to work inside the runtime, but also
-// outside the runtime.
-//
-// The implementation is fairly straightforward. The goal of select() is not to
-// return some data, but only to return which channel can receive data without
-// blocking. The implementation is essentially the entire blocking procedure
-// followed by an increment as soon as its woken up. The cancellation procedure
-// involves an increment and swapping out of to_wake to acquire ownership of the
-// task to unblock.
-//
-// Sadly this current implementation requires multiple allocations, so I have
-// seen the throughput of select() be much worse than it should be. I do not
-// believe that there is anything fundamental which needs to change about these
-// channels, however, in order to support a more efficient select().
-//
-// # Conclusion
-//
-// And now that you've seen all the races that I found and attempted to fix,
-// here's the code for you to find some more!
-
-use alloc::arc::Arc;
-
-use cell::Cell;
-use clone::Clone;
-use iter::Iterator;
-use kinds::Send;
-use kinds::marker;
-use mem;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use result::{Ok, Err, Result};
-use rt::local::Local;
-use rt::task::{Task, BlockedTask};
-use ty::Unsafe;
-
-pub use comm::select::{Select, Handle};
-
-macro_rules! test (
- { fn $name:ident() $b:block $(#[$a:meta])*} => (
- mod $name {
- #![allow(unused_imports)]
-
- use native;
- use comm::*;
- use prelude::*;
- use super::*;
- use super::super::*;
- use owned::Box;
- use task;
-
- fn f() $b
-
- $(#[$a])* #[test] fn uv() { f() }
- $(#[$a])* #[test] fn native() {
- use native;
- let (tx, rx) = channel();
- native::task::spawn(proc() { tx.send(f()) });
- rx.recv();
- }
- }
- )
-)
-
-mod select;
-mod oneshot;
-mod stream;
-mod shared;
-mod sync;
-
-// Use a power of 2 to allow LLVM to optimize to something that's not a
-// division, this is hit pretty regularly.
-static RESCHED_FREQ: int = 256;
-
-/// The receiving-half of Rust's channel type. This half can only be owned by
-/// one task
-pub struct Receiver<T> {
- inner: Unsafe<Flavor<T>>,
- receives: Cell<uint>,
- // can't share in an arc
- marker: marker::NoShare,
-}
-
-/// An iterator over messages on a receiver, this iterator will block
-/// whenever `next` is called, waiting for a new message, and `None` will be
-/// returned when the corresponding channel has hung up.
-pub struct Messages<'a, T> {
- rx: &'a Receiver<T>
-}
-
-/// The sending-half of Rust's asynchronous channel type. This half can only be
-/// owned by one task, but it can be cloned to send to other tasks.
-pub struct Sender<T> {
- inner: Unsafe<Flavor<T>>,
- sends: Cell<uint>,
- // can't share in an arc
- marker: marker::NoShare,
-}
-
-/// The sending-half of Rust's synchronous channel type. This half can only be
-/// owned by one task, but it can be cloned to send to other tasks.
-pub struct SyncSender<T> {
- inner: Arc<Unsafe<sync::Packet<T>>>,
- // can't share in an arc
- marker: marker::NoShare,
-}
-
-/// This enumeration is the list of the possible reasons that try_recv could not
-/// return data when called.
-#[deriving(PartialEq, Clone, Show)]
-pub enum TryRecvError {
- /// This channel is currently empty, but the sender(s) have not yet
- /// disconnected, so data may yet become available.
- Empty,
- /// This channel's sending half has become disconnected, and there will
- /// never be any more data received on this channel
- Disconnected,
-}
-
-/// This enumeration is the list of the possible error outcomes for the
-/// `SyncSender::try_send` method.
-#[deriving(PartialEq, Clone, Show)]
-pub enum TrySendError<T> {
- /// The data could not be sent on the channel because it would require that
- /// the callee block to send the data.
- ///
- /// If this is a buffered channel, then the buffer is full at this time. If
- /// this is not a buffered channel, then there is no receiver available to
- /// acquire the data.
- Full(T),
- /// This channel's receiving half has disconnected, so the data could not be
- /// sent. The data is returned back to the callee in this case.
- RecvDisconnected(T),
-}
-
-enum Flavor<T> {
- Oneshot(Arc<Unsafe<oneshot::Packet<T>>>),
- Stream(Arc<Unsafe<stream::Packet<T>>>),
- Shared(Arc<Unsafe<shared::Packet<T>>>),
- Sync(Arc<Unsafe<sync::Packet<T>>>),
-}
-
-#[doc(hidden)]
-trait UnsafeFlavor<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>>;
- unsafe fn mut_inner<'a>(&'a self) -> &'a mut Flavor<T> {
- &mut *self.inner_unsafe().get()
- }
- unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
- &*self.inner_unsafe().get()
- }
-}
-impl<T> UnsafeFlavor<T> for Sender<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>> {
- &self.inner
- }
-}
-impl<T> UnsafeFlavor<T> for Receiver<T> {
- fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>> {
- &self.inner
- }
-}
-
-/// Creates a new asynchronous channel, returning the sender/receiver halves.
-///
-/// All data sent on the sender will become available on the receiver, and no
-/// send will block the calling task (this channel has an "infinite buffer").
-///
-/// # Example
-///
-/// ```
-/// let (tx, rx) = channel();
-///
-/// // Spawn off an expensive computation
-/// spawn(proc() {
-/// # fn expensive_computation() {}
-/// tx.send(expensive_computation());
-/// });
-///
-/// // Do some useful work for awhile
-///
-/// // Let's see what that answer was
-/// println!("{}", rx.recv());
-/// ```
-pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
- let a = Arc::new(Unsafe::new(oneshot::Packet::new()));
- (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
-}
-
-/// Creates a new synchronous, bounded channel.
-///
-/// Like asynchronous channels, the `Receiver` will block until a message
-/// becomes available. These channels differ greatly in the semantics of the
-/// sender from asynchronous channels, however.
-///
-/// This channel has an internal buffer on which messages will be queued. When
-/// the internal buffer becomes full, future sends will *block* waiting for the
-/// buffer to open up. Note that a buffer size of 0 is valid, in which case this
-/// becomes "rendezvous channel" where each send will not return until a recv
-/// is paired with it.
-///
-/// As with asynchronous channels, all senders will fail in `send` if the
-/// `Receiver` has been destroyed.
-///
-/// # Example
-///
-/// ```
-/// let (tx, rx) = sync_channel(1);
-///
-/// // this returns immediately
-/// tx.send(1);
-///
-/// spawn(proc() {
-/// // this will block until the previous message has been received
-/// tx.send(2);
-/// });
-///
-/// assert_eq!(rx.recv(), 1);
-/// assert_eq!(rx.recv(), 2);
-/// ```
-pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
- let a = Arc::new(Unsafe::new(sync::Packet::new(bound)));
- (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Sender
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> Sender<T> {
- fn new(inner: Flavor<T>) -> Sender<T> {
- Sender { inner: Unsafe::new(inner), sends: Cell::new(0), marker: marker::NoShare }
- }
-
- /// Sends a value along this channel to be received by the corresponding
- /// receiver.
- ///
- /// Rust channels are infinitely buffered so this method will never block.
- ///
- /// # Failure
- ///
- /// This function will fail if the other end of the channel has hung up.
- /// This means that if the corresponding receiver has fallen out of scope,
- /// this function will trigger a fail message saying that a message is
- /// being sent on a closed channel.
- ///
- /// Note that if this function does *not* fail, it does not mean that the
- /// data will be successfully received. All sends are placed into a queue,
- /// so it is possible for a send to succeed (the other end is alive), but
- /// then the other end could immediately disconnect.
- ///
- /// The purpose of this functionality is to propagate failure among tasks.
- /// If failure is not desired, then consider using the `send_opt` method
- pub fn send(&self, t: T) {
- if self.send_opt(t).is_err() {
- fail!("sending on a closed channel");
- }
- }
-
- /// Attempts to send a value on this channel, returning it back if it could
- /// not be sent.
- ///
- /// A successful send occurs when it is determined that the other end of
- /// the channel has not hung up already. An unsuccessful send would be one
- /// where the corresponding receiver has already been deallocated. Note
- /// that a return value of `Err` means that the data will never be
- /// received, but a return value of `Ok` does *not* mean that the data
- /// will be received. It is possible for the corresponding receiver to
- /// hang up immediately after this function returns `Ok`.
- ///
- /// Like `send`, this method will never block.
- ///
- /// # Failure
- ///
- /// This method will never fail, it will return the message back to the
- /// caller if the other end is disconnected
- ///
- /// # Example
- ///
- /// ```
- /// let (tx, rx) = channel();
- ///
- /// // This send is always successful
- /// assert_eq!(tx.send_opt(1), Ok(()));
- ///
- /// // This send will fail because the receiver is gone
- /// drop(rx);
- /// assert_eq!(tx.send_opt(1), Err(1));
- /// ```
- pub fn send_opt(&self, t: T) -> Result<(), T> {
- // In order to prevent starvation of other tasks in situations where
- // a task sends repeatedly without ever receiving, we occasionally
- // yield instead of doing a send immediately.
- //
- // Don't unconditionally attempt to yield because the TLS overhead can
- // be a bit much, and also use `try_take` instead of `take` because
- // there's no reason that this send shouldn't be usable off the
- // runtime.
- let cnt = self.sends.get() + 1;
- self.sends.set(cnt);
- if cnt % (RESCHED_FREQ as uint) == 0 {
- let task: Option<Box<Task>> = Local::try_take();
- task.map(|t| t.maybe_yield());
- }
-
- let (new_inner, ret) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- unsafe {
- let p = p.get();
- if !(*p).sent() {
- return (*p).send(t);
- } else {
- let a = Arc::new(Unsafe::new(stream::Packet::new()));
- match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
- oneshot::UpSuccess => {
- let ret = (*a.get()).send(t);
- (a, ret)
- }
- oneshot::UpDisconnected => (a, Err(t)),
- oneshot::UpWoke(task) => {
- // This send cannot fail because the task is
- // asleep (we're looking at it), so the receiver
- // can't go away.
- (*a.get()).send(t).ok().unwrap();
- task.wake().map(|t| t.reawaken());
- (a, Ok(()))
- }
- }
- }
- }
- }
- Stream(ref p) => return unsafe { (*p.get()).send(t) },
- Shared(ref p) => return unsafe { (*p.get()).send(t) },
- Sync(..) => unreachable!(),
- };
-
- unsafe {
- let tmp = Sender::new(Stream(new_inner));
- mem::swap(self.mut_inner(), tmp.mut_inner());
- }
- return ret;
- }
-}
-
-impl<T: Send> Clone for Sender<T> {
- fn clone(&self) -> Sender<T> {
- let (packet, sleeper) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- let a = Arc::new(Unsafe::new(shared::Packet::new()));
- unsafe {
- (*a.get()).postinit_lock();
- match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
- oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
- oneshot::UpWoke(task) => (a, Some(task))
- }
- }
- }
- Stream(ref p) => {
- let a = Arc::new(Unsafe::new(shared::Packet::new()));
- unsafe {
- (*a.get()).postinit_lock();
- match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
- stream::UpSuccess | stream::UpDisconnected => (a, None),
- stream::UpWoke(task) => (a, Some(task)),
- }
- }
- }
- Shared(ref p) => {
- unsafe { (*p.get()).clone_chan(); }
- return Sender::new(Shared(p.clone()));
- }
- Sync(..) => unreachable!(),
- };
-
- unsafe {
- (*packet.get()).inherit_blocker(sleeper);
-
- let tmp = Sender::new(Shared(packet.clone()));
- mem::swap(self.mut_inner(), tmp.mut_inner());
- }
- Sender::new(Shared(packet))
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Sender<T> {
- fn drop(&mut self) {
- match *unsafe { self.mut_inner() } {
- Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
- Sync(..) => unreachable!(),
- }
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// SyncSender
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> SyncSender<T> {
- fn new(inner: Arc<Unsafe<sync::Packet<T>>>) -> SyncSender<T> {
- SyncSender { inner: inner, marker: marker::NoShare }
- }
-
- /// Sends a value on this synchronous channel.
- ///
- /// This function will *block* until space in the internal buffer becomes
- /// available or a receiver is available to hand off the message to.
- ///
- /// Note that a successful send does *not* guarantee that the receiver will
- /// ever see the data if there is a buffer on this channel. Messages may be
- /// enqueued in the internal buffer for the receiver to receive at a later
- /// time. If the buffer size is 0, however, it can be guaranteed that the
- /// receiver has indeed received the data if this function returns success.
- ///
- /// # Failure
- ///
- /// Similarly to `Sender::send`, this function will fail if the
- /// corresponding `Receiver` for this channel has disconnected. This
- /// behavior is used to propagate failure among tasks.
- ///
- /// If failure is not desired, you can achieve the same semantics with the
- /// `SyncSender::send_opt` method which will not fail if the receiver
- /// disconnects.
- pub fn send(&self, t: T) {
- if self.send_opt(t).is_err() {
- fail!("sending on a closed channel");
- }
- }
-
- /// Send a value on a channel, returning it back if the receiver
- /// disconnected
- ///
- /// This method will *block* to send the value `t` on the channel, but if
- /// the value could not be sent due to the receiver disconnecting, the value
- /// is returned back to the callee. This function is similar to `try_send`,
- /// except that it will block if the channel is currently full.
- ///
- /// # Failure
- ///
- /// This function cannot fail.
- pub fn send_opt(&self, t: T) -> Result<(), T> {
- unsafe { (*self.inner.get()).send(t) }
- }
-
- /// Attempts to send a value on this channel without blocking.
- ///
- /// This method differs from `send_opt` by returning immediately if the
- /// channel's buffer is full or no receiver is waiting to acquire some
- /// data. Compared with `send_opt`, this function has two failure cases
- /// instead of one (one for disconnection, one for a full buffer).
- ///
- /// See `SyncSender::send` for notes about guarantees of whether the
- /// receiver has received the data or not if this function is successful.
- ///
- /// # Failure
- ///
- /// This function cannot fail
- pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
- unsafe { (*self.inner.get()).try_send(t) }
- }
-}
-
-impl<T: Send> Clone for SyncSender<T> {
- fn clone(&self) -> SyncSender<T> {
- unsafe { (*self.inner.get()).clone_chan(); }
- return SyncSender::new(self.inner.clone());
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for SyncSender<T> {
- fn drop(&mut self) {
- unsafe { (*self.inner.get()).drop_chan(); }
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Receiver
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T: Send> Receiver<T> {
- fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: Unsafe::new(inner), receives: Cell::new(0), marker: marker::NoShare }
- }
-
- /// Blocks waiting for a value on this receiver
- ///
- /// This function will block if necessary to wait for a corresponding send
- /// on the channel from its paired `Sender` structure. This receiver will
- /// be woken up when data is ready, and the data will be returned.
- ///
- /// # Failure
- ///
- /// Similar to channels, this method will trigger a task failure if the
- /// other end of the channel has hung up (been deallocated). The purpose of
- /// this is to propagate failure among tasks.
- ///
- /// If failure is not desired, then there are two options:
- ///
- /// * If blocking is still desired, the `recv_opt` method will return `None`
- /// when the other end hangs up
- ///
- /// * If blocking is not desired, then the `try_recv` method will attempt to
- /// peek at a value on this receiver.
- pub fn recv(&self) -> T {
- match self.recv_opt() {
- Ok(t) => t,
- Err(()) => fail!("receiving on a closed channel"),
- }
- }
-
- /// Attempts to return a pending value on this receiver without blocking
- ///
- /// This method will never block the caller in order to wait for data to
- /// become available. Instead, this will always return immediately with a
- /// possible option of pending data on the channel.
- ///
- /// This is useful for a flavor of "optimistic check" before deciding to
- /// block on a receiver.
- ///
- /// This function cannot fail.
- pub fn try_recv(&self) -> Result<T, TryRecvError> {
- // If a thread is spinning in try_recv, we should take the opportunity
- // to reschedule things occasionally. See notes above in scheduling on
- // sends for why this doesn't always hit TLS, and also for why this uses
- // `try_take` instead of `take`.
- let cnt = self.receives.get() + 1;
- self.receives.set(cnt);
- if cnt % (RESCHED_FREQ as uint) == 0 {
- let task: Option<Box<Task>> = Local::try_take();
- task.map(|t| t.maybe_yield());
- }
-
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(oneshot::Empty) => return Err(Empty),
- Err(oneshot::Disconnected) => return Err(Disconnected),
- Err(oneshot::Upgraded(rx)) => rx,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(stream::Empty) => return Err(Empty),
- Err(stream::Disconnected) => return Err(Disconnected),
- Err(stream::Upgraded(rx)) => rx,
- }
- }
- Shared(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(shared::Empty) => return Err(Empty),
- Err(shared::Disconnected) => return Err(Disconnected),
- }
- }
- Sync(ref p) => {
- match unsafe { (*p.get()).try_recv() } {
- Ok(t) => return Ok(t),
- Err(sync::Empty) => return Err(Empty),
- Err(sync::Disconnected) => return Err(Disconnected),
- }
- }
- };
- unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
- }
- }
- }
-
- /// Attempt to wait for a value on this receiver, but does not fail if the
- /// corresponding channel has hung up.
- ///
- /// This implementation of iterators for ports will always block if there is
- /// not data available on the receiver, but it will not fail in the case
- /// that the channel has been deallocated.
- ///
- /// In other words, this function has the same semantics as the `recv`
- /// method except for the failure aspect.
- ///
- /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
- /// the value found on the receiver is returned.
- pub fn recv_opt(&self) -> Result<T, ()> {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
- Err(oneshot::Empty) => return unreachable!(),
- Err(oneshot::Disconnected) => return Err(()),
- Err(oneshot::Upgraded(rx)) => rx,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
- Err(stream::Empty) => return unreachable!(),
- Err(stream::Disconnected) => return Err(()),
- Err(stream::Upgraded(rx)) => rx,
- }
- }
- Shared(ref p) => {
- match unsafe { (*p.get()).recv() } {
- Ok(t) => return Ok(t),
- Err(shared::Empty) => return unreachable!(),
- Err(shared::Disconnected) => return Err(()),
- }
- }
- Sync(ref p) => return unsafe { (*p.get()).recv() }
- };
- unsafe {
- mem::swap(self.mut_inner(), new_port.mut_inner());
- }
- }
- }
-
- /// Returns an iterator which will block waiting for messages, but never
- /// `fail!`. It will return `None` when the channel has hung up.
- pub fn iter<'a>(&'a self) -> Messages<'a, T> {
- Messages { rx: self }
- }
-}
-
-impl<T: Send> select::Packet for Receiver<T> {
- fn can_recv(&self) -> bool {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).can_recv() } {
- Ok(ret) => return ret,
- Err(upgrade) => upgrade,
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).can_recv() } {
- Ok(ret) => return ret,
- Err(upgrade) => upgrade,
- }
- }
- Shared(ref p) => {
- return unsafe { (*p.get()).can_recv() };
- }
- Sync(ref p) => {
- return unsafe { (*p.get()).can_recv() };
- }
- };
- unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
- }
- }
- }
-
- fn start_selection(&self, mut task: BlockedTask) -> Result<(), BlockedTask>{
- loop {
- let (t, new_port) = match *unsafe { self.inner() } {
- Oneshot(ref p) => {
- match unsafe { (*p.get()).start_selection(task) } {
- oneshot::SelSuccess => return Ok(()),
- oneshot::SelCanceled(task) => return Err(task),
- oneshot::SelUpgraded(t, rx) => (t, rx),
- }
- }
- Stream(ref p) => {
- match unsafe { (*p.get()).start_selection(task) } {
- stream::SelSuccess => return Ok(()),
- stream::SelCanceled(task) => return Err(task),
- stream::SelUpgraded(t, rx) => (t, rx),
- }
- }
- Shared(ref p) => {
- return unsafe { (*p.get()).start_selection(task) };
- }
- Sync(ref p) => {
- return unsafe { (*p.get()).start_selection(task) };
- }
- };
- task = t;
- unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
- }
- }
- }
-
- fn abort_selection(&self) -> bool {
- let mut was_upgrade = false;
- loop {
- let result = match *unsafe { self.inner() } {
- Oneshot(ref p) => unsafe { (*p.get()).abort_selection() },
- Stream(ref p) => unsafe {
- (*p.get()).abort_selection(was_upgrade)
- },
- Shared(ref p) => return unsafe {
- (*p.get()).abort_selection(was_upgrade)
- },
- Sync(ref p) => return unsafe {
- (*p.get()).abort_selection()
- },
- };
- let new_port = match result { Ok(b) => return b, Err(p) => p };
- was_upgrade = true;
- unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
- }
- }
- }
-}
-
-impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
- fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Receiver<T> {
- fn drop(&mut self) {
- match *unsafe { self.mut_inner() } {
- Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
- Sync(ref mut p) => unsafe { (*p.get()).drop_port(); },
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use prelude::*;
-
- use native;
- use os;
- use super::*;
-
- pub fn stress_factor() -> uint {
- match os::getenv("RUST_TEST_STRESS") {
- Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
- None => 1,
- }
- }
-
- test!(fn smoke() {
- let (tx, rx) = channel();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn drop_full() {
- let (tx, _rx) = channel();
- tx.send(box 1);
- })
-
- test!(fn drop_full_shared() {
- let (tx, _rx) = channel();
- drop(tx.clone());
- drop(tx.clone());
- tx.send(box 1);
- })
-
- test!(fn smoke_shared() {
- let (tx, rx) = channel();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- let tx = tx.clone();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_threads() {
- let (tx, rx) = channel();
- spawn(proc() {
- tx.send(1);
- });
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_port_gone() {
- let (tx, rx) = channel();
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone() {
- let (tx, rx) = channel();
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone2() {
- let (tx, rx) = channel();
- drop(rx);
- let tx2 = tx.clone();
- drop(tx);
- tx2.send(1);
- } #[should_fail])
-
- test!(fn port_gone_concurrent() {
- let (tx, rx) = channel();
- spawn(proc() {
- rx.recv();
- });
- loop { tx.send(1) }
- } #[should_fail])
-
- test!(fn port_gone_concurrent_shared() {
- let (tx, rx) = channel();
- let tx2 = tx.clone();
- spawn(proc() {
- rx.recv();
- });
- loop {
- tx.send(1);
- tx2.send(1);
- }
- } #[should_fail])
-
- test!(fn smoke_chan_gone() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- rx.recv();
- } #[should_fail])
-
- test!(fn smoke_chan_gone_shared() {
- let (tx, rx) = channel::<()>();
- let tx2 = tx.clone();
- drop(tx);
- drop(tx2);
- rx.recv();
- } #[should_fail])
-
- test!(fn chan_gone_concurrent() {
- let (tx, rx) = channel();
- spawn(proc() {
- tx.send(1);
- tx.send(1);
- });
- loop { rx.recv(); }
- } #[should_fail])
-
- test!(fn stress() {
- let (tx, rx) = channel();
- spawn(proc() {
- for _ in range(0, 10000) { tx.send(1); }
- });
- for _ in range(0, 10000) {
- assert_eq!(rx.recv(), 1);
- }
- })
-
- test!(fn stress_shared() {
- static AMT: uint = 10000;
- static NTHREADS: uint = 8;
- let (tx, rx) = channel::<int>();
- let (dtx, drx) = channel::<()>();
-
- spawn(proc() {
- for _ in range(0, AMT * NTHREADS) {
- assert_eq!(rx.recv(), 1);
- }
- match rx.try_recv() {
- Ok(..) => fail!(),
- _ => {}
- }
- dtx.send(());
- });
-
- for _ in range(0, NTHREADS) {
- let tx = tx.clone();
- spawn(proc() {
- for _ in range(0, AMT) { tx.send(1); }
- });
- }
- drop(tx);
- drx.recv();
- })
-
- #[test]
- fn send_from_outside_runtime() {
- let (tx1, rx1) = channel::<()>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
- let tx4 = tx3.clone();
- spawn(proc() {
- tx1.send(());
- for _ in range(0, 40) {
- assert_eq!(rx2.recv(), 1);
- }
- tx3.send(());
- });
- rx1.recv();
- native::task::spawn(proc() {
- for _ in range(0, 40) {
- tx2.send(1);
- }
- tx4.send(());
- });
- rx3.recv();
- rx3.recv();
- }
-
- #[test]
- fn recv_from_outside_runtime() {
- let (tx, rx) = channel::<int>();
- let (dtx, drx) = channel();
- native::task::spawn(proc() {
- for _ in range(0, 40) {
- assert_eq!(rx.recv(), 1);
- }
- dtx.send(());
- });
- for _ in range(0, 40) {
- tx.send(1);
- }
- drx.recv();
- }
-
- #[test]
- fn no_runtime() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
- let tx4 = tx3.clone();
- native::task::spawn(proc() {
- assert_eq!(rx1.recv(), 1);
- tx2.send(2);
- tx4.send(());
- });
- native::task::spawn(proc() {
- tx1.send(1);
- assert_eq!(rx2.recv(), 2);
- tx3.send(());
- });
- rx3.recv();
- rx3.recv();
- }
-
- test!(fn oneshot_single_thread_close_port_first() {
- // Simple test of closing without sending
- let (_tx, rx) = channel::<int>();
- drop(rx);
- })
-
- test!(fn oneshot_single_thread_close_chan_first() {
- // Simple test of closing without sending
- let (tx, _rx) = channel::<int>();
- drop(tx);
- })
-
- test!(fn oneshot_single_thread_send_port_close() {
- // Testing that the sender cleans up the payload if receiver is closed
- let (tx, rx) = channel::<Box<int>>();
- drop(rx);
- tx.send(box 0);
- } #[should_fail])
-
- test!(fn oneshot_single_thread_recv_chan_close() {
- // Receiving on a closed chan will fail
- let res = task::try(proc() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- rx.recv();
- });
- // What is our res?
- assert!(res.is_err());
- })
-
- test!(fn oneshot_single_thread_send_then_recv() {
- let (tx, rx) = channel::<Box<int>>();
- tx.send(box 10);
- assert!(rx.recv() == box 10);
- })
-
- test!(fn oneshot_single_thread_try_send_open() {
- let (tx, rx) = channel::<int>();
- assert!(tx.send_opt(10).is_ok());
- assert!(rx.recv() == 10);
- })
-
- test!(fn oneshot_single_thread_try_send_closed() {
- let (tx, rx) = channel::<int>();
- drop(rx);
- assert!(tx.send_opt(10).is_err());
- })
-
- test!(fn oneshot_single_thread_try_recv_open() {
- let (tx, rx) = channel::<int>();
- tx.send(10);
- assert!(rx.recv_opt() == Ok(10));
- })
-
- test!(fn oneshot_single_thread_try_recv_closed() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- assert!(rx.recv_opt() == Err(()));
- })
-
- test!(fn oneshot_single_thread_peek_data() {
- let (tx, rx) = channel::<int>();
- assert_eq!(rx.try_recv(), Err(Empty))
- tx.send(10);
- assert_eq!(rx.try_recv(), Ok(10));
- })
-
- test!(fn oneshot_single_thread_peek_close() {
- let (tx, rx) = channel::<int>();
- drop(tx);
- assert_eq!(rx.try_recv(), Err(Disconnected));
- assert_eq!(rx.try_recv(), Err(Disconnected));
- })
-
- test!(fn oneshot_single_thread_peek_open() {
- let (_tx, rx) = channel::<int>();
- assert_eq!(rx.try_recv(), Err(Empty));
- })
-
- test!(fn oneshot_multi_task_recv_then_send() {
- let (tx, rx) = channel::<Box<int>>();
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
-
- tx.send(box 10);
- })
-
- test!(fn oneshot_multi_task_recv_then_close() {
- let (tx, rx) = channel::<Box<int>>();
- spawn(proc() {
- drop(tx);
- });
- let res = task::try(proc() {
- assert!(rx.recv() == box 10);
- });
- assert!(res.is_err());
- })
-
- test!(fn oneshot_multi_thread_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- drop(rx);
- });
- drop(tx);
- }
- })
-
- test!(fn oneshot_multi_thread_send_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- drop(rx);
- });
- let _ = task::try(proc() {
- tx.send(1);
- });
- }
- })
-
- test!(fn oneshot_multi_thread_recv_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel::<int>();
- spawn(proc() {
- let res = task::try(proc() {
- rx.recv();
- });
- assert!(res.is_err());
- });
- spawn(proc() {
- spawn(proc() {
- drop(tx);
- });
- });
- }
- })
-
- test!(fn oneshot_multi_thread_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel();
- spawn(proc() {
- tx.send(box 10);
- });
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
- }
- })
-
- test!(fn stream_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = channel();
-
- send(tx, 0);
- recv(rx, 0);
-
- fn send(tx: Sender<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- tx.send(box i);
- send(tx, i + 1);
- });
- }
-
- fn recv(rx: Receiver<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- assert!(rx.recv() == box i);
- recv(rx, i + 1);
- });
- }
- }
- })
-
- test!(fn recv_a_lot() {
- // Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = channel();
- for _ in range(0, 10000) { tx.send(()); }
- for _ in range(0, 10000) { rx.recv(); }
- })
-
- test!(fn shared_chan_stress() {
- let (tx, rx) = channel();
- let total = stress_factor() + 100;
- for _ in range(0, total) {
- let tx = tx.clone();
- spawn(proc() {
- tx.send(());
- });
- }
-
- for _ in range(0, total) {
- rx.recv();
- }
- })
-
- test!(fn test_nested_recv_iter() {
- let (tx, rx) = channel::<int>();
- let (total_tx, total_rx) = channel::<int>();
-
- spawn(proc() {
- let mut acc = 0;
- for x in rx.iter() {
- acc += x;
- }
- total_tx.send(acc);
- });
-
- tx.send(3);
- tx.send(1);
- tx.send(2);
- drop(tx);
- assert_eq!(total_rx.recv(), 6);
- })
-
- test!(fn test_recv_iter_break() {
- let (tx, rx) = channel::<int>();
- let (count_tx, count_rx) = channel();
-
- spawn(proc() {
- let mut count = 0;
- for x in rx.iter() {
- if count >= 3 {
- break;
- } else {
- count += x;
- }
- }
- count_tx.send(count);
- });
-
- tx.send(2);
- tx.send(2);
- tx.send(2);
- let _ = tx.send_opt(2);
- drop(tx);
- assert_eq!(count_rx.recv(), 4);
- })
-
- test!(fn try_recv_states() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<()>();
- let (tx3, rx3) = channel::<()>();
- spawn(proc() {
- rx2.recv();
- tx1.send(1);
- tx3.send(());
- rx2.recv();
- drop(tx1);
- tx3.send(());
- });
-
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Ok(1));
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Err(Disconnected));
- })
-
- // This bug used to end up in a livelock inside of the Receiver destructor
- // because the internal state of the Shared packet was corrupted
- test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
- let (tx, rx) = channel();
- let (tx2, rx2) = channel();
- spawn(proc() {
- rx.recv(); // wait on a oneshot
- drop(rx); // destroy a shared
- tx2.send(());
- });
- // make sure the other task has gone to sleep
- for _ in range(0, 5000) { task::deschedule(); }
-
- // upgrade to a shared chan and send a message
- let t = tx.clone();
- drop(tx);
- t.send(());
-
- // wait for the child task to exit before we exit
- rx2.recv();
- })
-
- test!(fn sends_off_the_runtime() {
- use rt::thread::Thread;
-
- let (tx, rx) = channel();
- let t = Thread::start(proc() {
- for _ in range(0, 1000) {
- tx.send(());
- }
- });
- for _ in range(0, 1000) {
- rx.recv();
- }
- t.join();
- })
-
- test!(fn try_recvs_off_the_runtime() {
- use rt::thread::Thread;
-
- let (tx, rx) = channel();
- let (cdone, pdone) = channel();
- let t = Thread::start(proc() {
- let mut hits = 0;
- while hits < 10 {
- match rx.try_recv() {
- Ok(()) => { hits += 1; }
- Err(Empty) => { Thread::yield_now(); }
- Err(Disconnected) => return,
- }
- }
- cdone.send(());
- });
- for _ in range(0, 10) {
- tx.send(());
- }
- t.join();
- pdone.recv();
- })
-}
-
-#[cfg(test)]
-mod sync_tests {
- use prelude::*;
- use os;
-
- pub fn stress_factor() -> uint {
- match os::getenv("RUST_TEST_STRESS") {
- Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
- None => 1,
- }
- }
-
- test!(fn smoke() {
- let (tx, rx) = sync_channel(1);
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn drop_full() {
- let (tx, _rx) = sync_channel(1);
- tx.send(box 1);
- })
-
- test!(fn smoke_shared() {
- let (tx, rx) = sync_channel(1);
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- let tx = tx.clone();
- tx.send(1);
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_threads() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- tx.send(1);
- });
- assert_eq!(rx.recv(), 1);
- })
-
- test!(fn smoke_port_gone() {
- let (tx, rx) = sync_channel(0);
- drop(rx);
- tx.send(1);
- } #[should_fail])
-
- test!(fn smoke_shared_port_gone2() {
- let (tx, rx) = sync_channel(0);
- drop(rx);
- let tx2 = tx.clone();
- drop(tx);
- tx2.send(1);
- } #[should_fail])
-
- test!(fn port_gone_concurrent() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- rx.recv();
- });
- loop { tx.send(1) }
- } #[should_fail])
-
- test!(fn port_gone_concurrent_shared() {
- let (tx, rx) = sync_channel(0);
- let tx2 = tx.clone();
- spawn(proc() {
- rx.recv();
- });
- loop {
- tx.send(1);
- tx2.send(1);
- }
- } #[should_fail])
-
- test!(fn smoke_chan_gone() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- rx.recv();
- } #[should_fail])
-
- test!(fn smoke_chan_gone_shared() {
- let (tx, rx) = sync_channel::<()>(0);
- let tx2 = tx.clone();
- drop(tx);
- drop(tx2);
- rx.recv();
- } #[should_fail])
-
- test!(fn chan_gone_concurrent() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- tx.send(1);
- tx.send(1);
- });
- loop { rx.recv(); }
- } #[should_fail])
-
- test!(fn stress() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- for _ in range(0, 10000) { tx.send(1); }
- });
- for _ in range(0, 10000) {
- assert_eq!(rx.recv(), 1);
- }
- })
-
- test!(fn stress_shared() {
- static AMT: uint = 1000;
- static NTHREADS: uint = 8;
- let (tx, rx) = sync_channel::<int>(0);
- let (dtx, drx) = sync_channel::<()>(0);
-
- spawn(proc() {
- for _ in range(0, AMT * NTHREADS) {
- assert_eq!(rx.recv(), 1);
- }
- match rx.try_recv() {
- Ok(..) => fail!(),
- _ => {}
- }
- dtx.send(());
- });
-
- for _ in range(0, NTHREADS) {
- let tx = tx.clone();
- spawn(proc() {
- for _ in range(0, AMT) { tx.send(1); }
- });
- }
- drop(tx);
- drx.recv();
- })
-
- test!(fn oneshot_single_thread_close_port_first() {
- // Simple test of closing without sending
- let (_tx, rx) = sync_channel::<int>(0);
- drop(rx);
- })
-
- test!(fn oneshot_single_thread_close_chan_first() {
- // Simple test of closing without sending
- let (tx, _rx) = sync_channel::<int>(0);
- drop(tx);
- })
-
- test!(fn oneshot_single_thread_send_port_close() {
- // Testing that the sender cleans up the payload if receiver is closed
- let (tx, rx) = sync_channel::<Box<int>>(0);
- drop(rx);
- tx.send(box 0);
- } #[should_fail])
-
- test!(fn oneshot_single_thread_recv_chan_close() {
- // Receiving on a closed chan will fail
- let res = task::try(proc() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- rx.recv();
- });
- // What is our res?
- assert!(res.is_err());
- })
-
- test!(fn oneshot_single_thread_send_then_recv() {
- let (tx, rx) = sync_channel::<Box<int>>(1);
- tx.send(box 10);
- assert!(rx.recv() == box 10);
- })
-
- test!(fn oneshot_single_thread_try_send_open() {
- let (tx, rx) = sync_channel::<int>(1);
- assert_eq!(tx.try_send(10), Ok(()));
- assert!(rx.recv() == 10);
- })
-
- test!(fn oneshot_single_thread_try_send_closed() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(rx);
- assert_eq!(tx.try_send(10), Err(RecvDisconnected(10)));
- })
-
- test!(fn oneshot_single_thread_try_send_closed2() {
- let (tx, _rx) = sync_channel::<int>(0);
- assert_eq!(tx.try_send(10), Err(Full(10)));
- })
-
- test!(fn oneshot_single_thread_try_recv_open() {
- let (tx, rx) = sync_channel::<int>(1);
- tx.send(10);
- assert!(rx.recv_opt() == Ok(10));
- })
-
- test!(fn oneshot_single_thread_try_recv_closed() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- assert!(rx.recv_opt() == Err(()));
- })
-
- test!(fn oneshot_single_thread_peek_data() {
- let (tx, rx) = sync_channel::<int>(1);
- assert_eq!(rx.try_recv(), Err(Empty))
- tx.send(10);
- assert_eq!(rx.try_recv(), Ok(10));
- })
-
- test!(fn oneshot_single_thread_peek_close() {
- let (tx, rx) = sync_channel::<int>(0);
- drop(tx);
- assert_eq!(rx.try_recv(), Err(Disconnected));
- assert_eq!(rx.try_recv(), Err(Disconnected));
- })
-
- test!(fn oneshot_single_thread_peek_open() {
- let (_tx, rx) = sync_channel::<int>(0);
- assert_eq!(rx.try_recv(), Err(Empty));
- })
-
- test!(fn oneshot_multi_task_recv_then_send() {
- let (tx, rx) = sync_channel::<Box<int>>(0);
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
-
- tx.send(box 10);
- })
-
- test!(fn oneshot_multi_task_recv_then_close() {
- let (tx, rx) = sync_channel::<Box<int>>(0);
- spawn(proc() {
- drop(tx);
- });
- let res = task::try(proc() {
- assert!(rx.recv() == box 10);
- });
- assert!(res.is_err());
- })
-
- test!(fn oneshot_multi_thread_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- drop(rx);
- });
- drop(tx);
- }
- })
-
- test!(fn oneshot_multi_thread_send_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- drop(rx);
- });
- let _ = task::try(proc() {
- tx.send(1);
- });
- }
- })
-
- test!(fn oneshot_multi_thread_recv_close_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel::<int>(0);
- spawn(proc() {
- let res = task::try(proc() {
- rx.recv();
- });
- assert!(res.is_err());
- });
- spawn(proc() {
- spawn(proc() {
- drop(tx);
- });
- });
- }
- })
-
- test!(fn oneshot_multi_thread_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- tx.send(box 10);
- });
- spawn(proc() {
- assert!(rx.recv() == box 10);
- });
- }
- })
-
- test!(fn stream_send_recv_stress() {
- for _ in range(0, stress_factor()) {
- let (tx, rx) = sync_channel(0);
-
- send(tx, 0);
- recv(rx, 0);
-
- fn send(tx: SyncSender<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- tx.send(box i);
- send(tx, i + 1);
- });
- }
-
- fn recv(rx: Receiver<Box<int>>, i: int) {
- if i == 10 { return }
-
- spawn(proc() {
- assert!(rx.recv() == box i);
- recv(rx, i + 1);
- });
- }
- }
- })
-
- test!(fn recv_a_lot() {
- // Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = sync_channel(10000);
- for _ in range(0, 10000) { tx.send(()); }
- for _ in range(0, 10000) { rx.recv(); }
- })
-
- test!(fn shared_chan_stress() {
- let (tx, rx) = sync_channel(0);
- let total = stress_factor() + 100;
- for _ in range(0, total) {
- let tx = tx.clone();
- spawn(proc() {
- tx.send(());
- });
- }
-
- for _ in range(0, total) {
- rx.recv();
- }
- })
-
- test!(fn test_nested_recv_iter() {
- let (tx, rx) = sync_channel::<int>(0);
- let (total_tx, total_rx) = sync_channel::<int>(0);
-
- spawn(proc() {
- let mut acc = 0;
- for x in rx.iter() {
- acc += x;
- }
- total_tx.send(acc);
- });
-
- tx.send(3);
- tx.send(1);
- tx.send(2);
- drop(tx);
- assert_eq!(total_rx.recv(), 6);
- })
-
- test!(fn test_recv_iter_break() {
- let (tx, rx) = sync_channel::<int>(0);
- let (count_tx, count_rx) = sync_channel(0);
-
- spawn(proc() {
- let mut count = 0;
- for x in rx.iter() {
- if count >= 3 {
- break;
- } else {
- count += x;
- }
- }
- count_tx.send(count);
- });
-
- tx.send(2);
- tx.send(2);
- tx.send(2);
- let _ = tx.try_send(2);
- drop(tx);
- assert_eq!(count_rx.recv(), 4);
- })
-
- test!(fn try_recv_states() {
- let (tx1, rx1) = sync_channel::<int>(1);
- let (tx2, rx2) = sync_channel::<()>(1);
- let (tx3, rx3) = sync_channel::<()>(1);
- spawn(proc() {
- rx2.recv();
- tx1.send(1);
- tx3.send(());
- rx2.recv();
- drop(tx1);
- tx3.send(());
- });
-
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Ok(1));
- assert_eq!(rx1.try_recv(), Err(Empty));
- tx2.send(());
- rx3.recv();
- assert_eq!(rx1.try_recv(), Err(Disconnected));
- })
-
- // This bug used to end up in a livelock inside of the Receiver destructor
- // because the internal state of the Shared packet was corrupted
- test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
- let (tx, rx) = sync_channel(0);
- let (tx2, rx2) = sync_channel(0);
- spawn(proc() {
- rx.recv(); // wait on a oneshot
- drop(rx); // destroy a shared
- tx2.send(());
- });
- // make sure the other task has gone to sleep
- for _ in range(0, 5000) { task::deschedule(); }
-
- // upgrade to a shared chan and send a message
- let t = tx.clone();
- drop(tx);
- t.send(());
-
- // wait for the child task to exit before we exit
- rx2.recv();
- })
-
- test!(fn try_recvs_off_the_runtime() {
- use std::rt::thread::Thread;
-
- let (tx, rx) = sync_channel(0);
- let (cdone, pdone) = channel();
- let t = Thread::start(proc() {
- let mut hits = 0;
- while hits < 10 {
- match rx.try_recv() {
- Ok(()) => { hits += 1; }
- Err(Empty) => { Thread::yield_now(); }
- Err(Disconnected) => return,
- }
- }
- cdone.send(());
- });
- for _ in range(0, 10) {
- tx.send(());
- }
- t.join();
- pdone.recv();
- })
-
- test!(fn send_opt1() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() { rx.recv(); });
- assert_eq!(tx.send_opt(1), Ok(()));
- })
-
- test!(fn send_opt2() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() { drop(rx); });
- assert_eq!(tx.send_opt(1), Err(1));
- })
-
- test!(fn send_opt3() {
- let (tx, rx) = sync_channel(1);
- assert_eq!(tx.send_opt(1), Ok(()));
- spawn(proc() { drop(rx); });
- assert_eq!(tx.send_opt(1), Err(1));
- })
-
- test!(fn send_opt4() {
- let (tx, rx) = sync_channel(0);
- let tx2 = tx.clone();
- let (done, donerx) = channel();
- let done2 = done.clone();
- spawn(proc() {
- assert_eq!(tx.send_opt(1), Err(1));
- done.send(());
- });
- spawn(proc() {
- assert_eq!(tx2.send_opt(2), Err(2));
- done2.send(());
- });
- drop(rx);
- donerx.recv();
- donerx.recv();
- })
-
- test!(fn try_send1() {
- let (tx, _rx) = sync_channel(0);
- assert_eq!(tx.try_send(1), Err(Full(1)));
- })
-
- test!(fn try_send2() {
- let (tx, _rx) = sync_channel(1);
- assert_eq!(tx.try_send(1), Ok(()));
- assert_eq!(tx.try_send(1), Err(Full(1)));
- })
-
- test!(fn try_send3() {
- let (tx, rx) = sync_channel(1);
- assert_eq!(tx.try_send(1), Ok(()));
- drop(rx);
- assert_eq!(tx.try_send(1), Err(RecvDisconnected(1)));
- })
-
- test!(fn try_send4() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- for _ in range(0, 1000) { task::deschedule(); }
- assert_eq!(tx.try_send(1), Ok(()));
- });
- assert_eq!(rx.recv(), 1);
- } #[ignore(reason = "flaky on libnative")])
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Oneshot channels/ports
-///
-/// This is the initial flavor of channels/ports used for comm module. This is
-/// an optimization for the one-use case of a channel. The major optimization of
-/// this type is to have one and exactly one allocation when the chan/port pair
-/// is created.
-///
-/// Another possible optimization would be to not use an Arc box because
-/// in theory we know when the shared packet can be deallocated (no real need
-/// for the atomic reference counting), but I was having trouble how to destroy
-/// the data early in a drop of a Port.
-///
-/// # Implementation
-///
-/// Oneshots are implemented around one atomic uint variable. This variable
-/// indicates both the state of the port/chan but also contains any tasks
-/// blocked on the port. All atomic operations happen on this one word.
-///
-/// In order to upgrade a oneshot channel, an upgrade is considered a disconnect
-/// on behalf of the channel side of things (it can be mentally thought of as
-/// consuming the port). This upgrade is then also stored in the shared packet.
-/// The one caveat to consider is that when a port sees a disconnected channel
-/// it must check for data because there is no "data plus upgrade" state.
-
-use comm::Receiver;
-use kinds::Send;
-use mem;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use result::{Result, Ok, Err};
-use rt::local::Local;
-use rt::task::{Task, BlockedTask};
-use sync::atomics;
-
-// Various states you can find a port in.
-static EMPTY: uint = 0;
-static DATA: uint = 1;
-static DISCONNECTED: uint = 2;
-
-pub struct Packet<T> {
- // Internal state of the chan/port pair (stores the blocked task as well)
- state: atomics::AtomicUint,
- // One-shot data slot location
- data: Option<T>,
- // when used for the second time, a oneshot channel must be upgraded, and
- // this contains the slot for the upgrade
- upgrade: MyUpgrade<T>,
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(BlockedTask),
-}
-
-pub enum SelectionResult<T> {
- SelCanceled(BlockedTask),
- SelUpgraded(BlockedTask, Receiver<T>),
- SelSuccess,
-}
-
-enum MyUpgrade<T> {
- NothingSent,
- SendUsed,
- GoUp(Receiver<T>),
-}
-
-impl<T: Send> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- data: None,
- upgrade: NothingSent,
- state: atomics::AtomicUint::new(EMPTY),
- }
- }
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // Sanity check
- match self.upgrade {
- NothingSent => {}
- _ => fail!("sending on a oneshot that's already sent on "),
- }
- assert!(self.data.is_none());
- self.data = Some(t);
- self.upgrade = SendUsed;
-
- match self.state.swap(DATA, atomics::SeqCst) {
- // Sent the data, no one was waiting
- EMPTY => Ok(()),
-
- // Couldn't send the data, the port hung up first. Return the data
- // back up the stack.
- DISCONNECTED => {
- Err(self.data.take_unwrap())
- }
-
- // Not possible, these are one-use channels
- DATA => unreachable!(),
-
- // Anything else means that there was a task waiting on the other
- // end. We leave the 'DATA' state inside so it'll pick it up on the
- // other end.
- n => unsafe {
- let t = BlockedTask::cast_from_uint(n);
- t.wake().map(|t| t.reawaken());
- Ok(())
- }
- }
- }
-
- // Just tests whether this channel has been sent on or not, this is only
- // safe to use from the sender.
- pub fn sent(&self) -> bool {
- match self.upgrade {
- NothingSent => false,
- _ => true,
- }
- }
-
- pub fn recv(&mut self) -> Result<T, Failure<T>> {
- // Attempt to not block the task (it's a little expensive). If it looks
- // like we're not empty, then immediately go through to `try_recv`.
- if self.state.load(atomics::SeqCst) == EMPTY {
- let t: Box<Task> = Local::take();
- t.deschedule(1, |task| {
- let n = unsafe { task.cast_to_uint() };
- match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
- // Nothing on the channel, we legitimately block
- EMPTY => Ok(()),
-
- // If there's data or it's a disconnected channel, then we
- // failed the cmpxchg, so we just wake ourselves back up
- DATA | DISCONNECTED => {
- unsafe { Err(BlockedTask::cast_from_uint(n)) }
- }
-
- // Only one thread is allowed to sleep on this port
- _ => unreachable!()
- }
- });
- }
-
- self.try_recv()
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
- match self.state.load(atomics::SeqCst) {
- EMPTY => Err(Empty),
-
- // We saw some data on the channel, but the channel can be used
- // again to send us an upgrade. As a result, we need to re-insert
- // into the channel that there's no data available (otherwise we'll
- // just see DATA next time). This is done as a cmpxchg because if
- // the state changes under our feet we'd rather just see that state
- // change.
- DATA => {
- self.state.compare_and_swap(DATA, EMPTY, atomics::SeqCst);
- match self.data.take() {
- Some(data) => Ok(data),
- None => unreachable!(),
- }
- }
-
- // There's no guarantee that we receive before an upgrade happens,
- // and an upgrade flags the channel as disconnected, so when we see
- // this we first need to check if there's data available and *then*
- // we go through and process the upgrade.
- DISCONNECTED => {
- match self.data.take() {
- Some(data) => Ok(data),
- None => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- SendUsed | NothingSent => Err(Disconnected),
- GoUp(upgrade) => Err(Upgraded(upgrade))
- }
- }
- }
- }
- _ => unreachable!()
- }
- }
-
- // Returns whether the upgrade was completed. If the upgrade wasn't
- // completed, then the port couldn't get sent to the other half (it will
- // never receive it).
- pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
- let prev = match self.upgrade {
- NothingSent => NothingSent,
- SendUsed => SendUsed,
- _ => fail!("upgrading again"),
- };
- self.upgrade = GoUp(up);
-
- match self.state.swap(DISCONNECTED, atomics::SeqCst) {
- // If the channel is empty or has data on it, then we're good to go.
- // Senders will check the data before the upgrade (in case we
- // plastered over the DATA state).
- DATA | EMPTY => UpSuccess,
-
- // If the other end is already disconnected, then we failed the
- // upgrade. Be sure to trash the port we were given.
- DISCONNECTED => { self.upgrade = prev; UpDisconnected }
-
- // If someone's waiting, we gotta wake them up
- n => UpWoke(unsafe { BlockedTask::cast_from_uint(n) })
- }
- }
-
- pub fn drop_chan(&mut self) {
- match self.state.swap(DISCONNECTED, atomics::SeqCst) {
- DATA | DISCONNECTED | EMPTY => {}
-
- // If someone's waiting, we gotta wake them up
- n => unsafe {
- let t = BlockedTask::cast_from_uint(n);
- t.wake().map(|t| t.reawaken());
- }
- }
- }
-
- pub fn drop_port(&mut self) {
- match self.state.swap(DISCONNECTED, atomics::SeqCst) {
- // An empty channel has nothing to do, and a remotely disconnected
- // channel also has nothing to do b/c we're about to run the drop
- // glue
- DISCONNECTED | EMPTY => {}
-
- // There's data on the channel, so make sure we destroy it promptly.
- // This is why not using an arc is a little difficult (need the box
- // to stay valid while we take the data).
- DATA => { self.data.take_unwrap(); }
-
- // We're the only ones that can block on this port
- _ => unreachable!()
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // If Ok, the value is whether this port has data, if Err, then the upgraded
- // port needs to be checked instead of this one.
- pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
- match self.state.load(atomics::SeqCst) {
- EMPTY => Ok(false), // Welp, we tried
- DATA => Ok(true), // we have some un-acquired data
- DISCONNECTED if self.data.is_some() => Ok(true), // we have data
- DISCONNECTED => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- // The other end sent us an upgrade, so we need to
- // propagate upwards whether the upgrade can receive
- // data
- GoUp(upgrade) => Err(upgrade),
-
- // If the other end disconnected without sending an
- // upgrade, then we have data to receive (the channel is
- // disconnected).
- up => { self.upgrade = up; Ok(true) }
- }
- }
- _ => unreachable!(), // we're the "one blocker"
- }
- }
-
- // Attempts to start selection on this port. This can either succeed, fail
- // because there is data, or fail because there is an upgrade pending.
- pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
- let n = unsafe { task.cast_to_uint() };
- match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
- EMPTY => SelSuccess,
- DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
- DISCONNECTED if self.data.is_some() => {
- SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
- }
- DISCONNECTED => {
- match mem::replace(&mut self.upgrade, SendUsed) {
- // The other end sent us an upgrade, so we need to
- // propagate upwards whether the upgrade can receive
- // data
- GoUp(upgrade) => {
- SelUpgraded(unsafe { BlockedTask::cast_from_uint(n) },
- upgrade)
- }
-
- // If the other end disconnected without sending an
- // upgrade, then we have data to receive (the channel is
- // disconnected).
- up => {
- self.upgrade = up;
- SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
- }
- }
- }
- _ => unreachable!(), // we're the "one blocker"
- }
- }
-
- // Remove a previous selecting task from this port. This ensures that the
- // blocked task will no longer be visible to any other threads.
- //
- // The return value indicates whether there's data on this port.
- pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
- let state = match self.state.load(atomics::SeqCst) {
- // Each of these states means that no further activity will happen
- // with regard to abortion selection
- s @ EMPTY |
- s @ DATA |
- s @ DISCONNECTED => s,
-
- // If we've got a blocked task, then use an atomic to gain ownership
- // of it (may fail)
- n => self.state.compare_and_swap(n, EMPTY, atomics::SeqCst)
- };
-
- // Now that we've got ownership of our state, figure out what to do
- // about it.
- match state {
- EMPTY => unreachable!(),
- // our task used for select was stolen
- DATA => Ok(true),
-
- // If the other end has hung up, then we have complete ownership
- // of the port. First, check if there was data waiting for us. This
- // is possible if the other end sent something and then hung up.
- //
- // We then need to check to see if there was an upgrade requested,
- // and if so, the upgraded port needs to have its selection aborted.
- DISCONNECTED => {
- if self.data.is_some() {
- Ok(true)
- } else {
- match mem::replace(&mut self.upgrade, SendUsed) {
- GoUp(port) => Err(port),
- _ => Ok(true),
- }
- }
- }
-
- // We woke ourselves up from select. Assert that the task should be
- // trashed and returne that we don't have any data.
- n => {
- let t = unsafe { BlockedTask::cast_from_uint(n) };
- t.trash();
- Ok(false)
- }
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.state.load(atomics::SeqCst), DISCONNECTED);
- }
-}
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Selection over an array of receivers
-//!
-//! This module contains the implementation machinery necessary for selecting
-//! over a number of receivers. One large goal of this module is to provide an
-//! efficient interface to selecting over any receiver of any type.
-//!
-//! This is achieved through an architecture of a "receiver set" in which
-//! receivers are added to a set and then the entire set is waited on at once.
-//! The set can be waited on multiple times to prevent re-adding each receiver
-//! to the set.
-//!
-//! Usage of this module is currently encouraged to go through the use of the
-//! `select!` macro. This macro allows naturally binding of variables to the
-//! received values of receivers in a much more natural syntax then usage of the
-//! `Select` structure directly.
-//!
-//! # Example
-//!
-//! ```rust
-//! let (tx1, rx1) = channel();
-//! let (tx2, rx2) = channel();
-//!
-//! tx1.send(1);
-//! tx2.send(2);
-//!
-//! select! {
-//! val = rx1.recv() => {
-//! assert_eq!(val, 1);
-//! },
-//! val = rx2.recv() => {
-//! assert_eq!(val, 2);
-//! }
-//! }
-//! ```
-
-#![allow(dead_code)]
-
-use cell::Cell;
-use iter::Iterator;
-use kinds::Send;
-use kinds::marker;
-use mem;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use ptr::RawPtr;
-use result::{Ok, Err, Result};
-use rt::local::Local;
-use rt::task::{Task, BlockedTask};
-use super::Receiver;
-use uint;
-
-/// The "receiver set" of the select interface. This structure is used to manage
-/// a set of receivers which are being selected over.
-pub struct Select {
- head: *mut Handle<'static, ()>,
- tail: *mut Handle<'static, ()>,
- next_id: Cell<uint>,
- marker1: marker::NoSend,
-}
-
-/// A handle to a receiver which is currently a member of a `Select` set of
-/// receivers. This handle is used to keep the receiver in the set as well as
-/// interact with the underlying receiver.
-pub struct Handle<'rx, T> {
- /// The ID of this handle, used to compare against the return value of
- /// `Select::wait()`
- id: uint,
- selector: &'rx Select,
- next: *mut Handle<'static, ()>,
- prev: *mut Handle<'static, ()>,
- added: bool,
- packet: &'rx Packet,
-
- // due to our fun transmutes, we be sure to place this at the end. (nothing
- // previous relies on T)
- rx: &'rx Receiver<T>,
-}
-
-struct Packets { cur: *mut Handle<'static, ()> }
-
-#[doc(hidden)]
-pub trait Packet {
- fn can_recv(&self) -> bool;
- fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>;
- fn abort_selection(&self) -> bool;
-}
-
-impl Select {
- /// Creates a new selection structure. This set is initially empty and
- /// `wait` will fail!() if called.
- ///
- /// Usage of this struct directly can sometimes be burdensome, and usage is
- /// rather much easier through the `select!` macro.
- pub fn new() -> Select {
- Select {
- marker1: marker::NoSend,
- head: 0 as *mut Handle<'static, ()>,
- tail: 0 as *mut Handle<'static, ()>,
- next_id: Cell::new(1),
- }
- }
-
- /// Creates a new handle into this receiver set for a new receiver. Note
- /// that this does *not* add the receiver to the receiver set, for that you
- /// must call the `add` method on the handle itself.
- pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> {
- let id = self.next_id.get();
- self.next_id.set(id + 1);
- Handle {
- id: id,
- selector: self,
- next: 0 as *mut Handle<'static, ()>,
- prev: 0 as *mut Handle<'static, ()>,
- added: false,
- rx: rx,
- packet: rx,
- }
- }
-
- /// Waits for an event on this receiver set. The returned value is *not* an
- /// index, but rather an id. This id can be queried against any active
- /// `Handle` structures (each one has an `id` method). The handle with
- /// the matching `id` will have some sort of event available on it. The
- /// event could either be that data is available or the corresponding
- /// channel has been closed.
- pub fn wait(&self) -> uint {
- self.wait2(false)
- }
-
- /// Helper method for skipping the preflight checks during testing
- fn wait2(&self, do_preflight_checks: bool) -> uint {
- // Note that this is currently an inefficient implementation. We in
- // theory have knowledge about all receivers in the set ahead of time,
- // so this method shouldn't really have to iterate over all of them yet
- // again. The idea with this "receiver set" interface is to get the
- // interface right this time around, and later this implementation can
- // be optimized.
- //
- // This implementation can be summarized by:
- //
- // fn select(receivers) {
- // if any receiver ready { return ready index }
- // deschedule {
- // block on all receivers
- // }
- // unblock on all receivers
- // return ready index
- // }
- //
- // Most notably, the iterations over all of the receivers shouldn't be
- // necessary.
- unsafe {
- let mut amt = 0;
- for p in self.iter() {
- amt += 1;
- if do_preflight_checks && (*p).packet.can_recv() {
- return (*p).id;
- }
- }
- assert!(amt > 0);
-
- let mut ready_index = amt;
- let mut ready_id = uint::MAX;
- let mut iter = self.iter().enumerate();
-
- // Acquire a number of blocking contexts, and block on each one
- // sequentially until one fails. If one fails, then abort
- // immediately so we can go unblock on all the other receivers.
- let task: Box<Task> = Local::take();
- task.deschedule(amt, |task| {
- // Prepare for the block
- let (i, handle) = iter.next().unwrap();
- match (*handle).packet.start_selection(task) {
- Ok(()) => Ok(()),
- Err(task) => {
- ready_index = i;
- ready_id = (*handle).id;
- Err(task)
- }
- }
- });
-
- // Abort the selection process on each receiver. If the abort
- // process returns `true`, then that means that the receiver is
- // ready to receive some data. Note that this also means that the
- // receiver may have yet to have fully read the `to_wake` field and
- // woken us up (although the wakeup is guaranteed to fail).
- //
- // This situation happens in the window of where a sender invokes
- // increment(), sees -1, and then decides to wake up the task. After
- // all this is done, the sending thread will set `selecting` to
- // `false`. Until this is done, we cannot return. If we were to
- // return, then a sender could wake up a receiver which has gone
- // back to sleep after this call to `select`.
- //
- // Note that it is a "fairly small window" in which an increment()
- // views that it should wake a thread up until the `selecting` bit
- // is set to false. For now, the implementation currently just spins
- // in a yield loop. This is very distasteful, but this
- // implementation is already nowhere near what it should ideally be.
- // A rewrite should focus on avoiding a yield loop, and for now this
- // implementation is tying us over to a more efficient "don't
- // iterate over everything every time" implementation.
- for handle in self.iter().take(ready_index) {
- if (*handle).packet.abort_selection() {
- ready_id = (*handle).id;
- }
- }
-
- assert!(ready_id != uint::MAX);
- return ready_id;
- }
- }
-
- fn iter(&self) -> Packets { Packets { cur: self.head } }
-}
-
-impl<'rx, T: Send> Handle<'rx, T> {
- /// Retrieve the id of this handle.
- #[inline]
- pub fn id(&self) -> uint { self.id }
-
- /// Receive a value on the underlying receiver. Has the same semantics as
- /// `Receiver.recv`
- pub fn recv(&mut self) -> T { self.rx.recv() }
- /// Block to receive a value on the underlying receiver, returning `Some` on
- /// success or `None` if the channel disconnects. This function has the same
- /// semantics as `Receiver.recv_opt`
- pub fn recv_opt(&mut self) -> Result<T, ()> { self.rx.recv_opt() }
-
- /// Adds this handle to the receiver set that the handle was created from. This
- /// method can be called multiple times, but it has no effect if `add` was
- /// called previously.
- ///
- /// This method is unsafe because it requires that the `Handle` is not moved
- /// while it is added to the `Select` set.
- pub unsafe fn add(&mut self) {
- if self.added { return }
- let selector: &mut Select = mem::transmute(&*self.selector);
- let me: *mut Handle<'static, ()> = mem::transmute(&*self);
-
- if selector.head.is_null() {
- selector.head = me;
- selector.tail = me;
- } else {
- (*me).prev = selector.tail;
- assert!((*me).next.is_null());
- (*selector.tail).next = me;
- selector.tail = me;
- }
- self.added = true;
- }
-
- /// Removes this handle from the `Select` set. This method is unsafe because
- /// it has no guarantee that the `Handle` was not moved since `add` was
- /// called.
- pub unsafe fn remove(&mut self) {
- if !self.added { return }
-
- let selector: &mut Select = mem::transmute(&*self.selector);
- let me: *mut Handle<'static, ()> = mem::transmute(&*self);
-
- if self.prev.is_null() {
- assert_eq!(selector.head, me);
- selector.head = self.next;
- } else {
- (*self.prev).next = self.next;
- }
- if self.next.is_null() {
- assert_eq!(selector.tail, me);
- selector.tail = self.prev;
- } else {
- (*self.next).prev = self.prev;
- }
-
- self.next = 0 as *mut Handle<'static, ()>;
- self.prev = 0 as *mut Handle<'static, ()>;
-
- self.added = false;
- }
-}
-
-#[unsafe_destructor]
-impl Drop for Select {
- fn drop(&mut self) {
- assert!(self.head.is_null());
- assert!(self.tail.is_null());
- }
-}
-
-#[unsafe_destructor]
-impl<'rx, T: Send> Drop for Handle<'rx, T> {
- fn drop(&mut self) {
- unsafe { self.remove() }
- }
-}
-
-impl Iterator<*mut Handle<'static, ()>> for Packets {
- fn next(&mut self) -> Option<*mut Handle<'static, ()>> {
- if self.cur.is_null() {
- None
- } else {
- let ret = Some(self.cur);
- unsafe { self.cur = (*self.cur).next; }
- ret
- }
- }
-}
-
-#[cfg(test)]
-#[allow(unused_imports)]
-mod test {
- use super::super::*;
- use prelude::*;
-
- test!(fn smoke() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- tx1.send(1);
- select! (
- foo = rx1.recv() => { assert_eq!(foo, 1); },
- _bar = rx2.recv() => { fail!() }
- )
- tx2.send(2);
- select! (
- _foo = rx1.recv() => { fail!() },
- bar = rx2.recv() => { assert_eq!(bar, 2) }
- )
- drop(tx1);
- select! (
- foo = rx1.recv_opt() => { assert_eq!(foo, Err(())); },
- _bar = rx2.recv() => { fail!() }
- )
- drop(tx2);
- select! (
- bar = rx2.recv_opt() => { assert_eq!(bar, Err(())); }
- )
- })
-
- test!(fn smoke2() {
- let (_tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (_tx3, rx3) = channel::<int>();
- let (_tx4, rx4) = channel::<int>();
- let (tx5, rx5) = channel::<int>();
- tx5.send(4);
- select! (
- _foo = rx1.recv() => { fail!("1") },
- _foo = rx2.recv() => { fail!("2") },
- _foo = rx3.recv() => { fail!("3") },
- _foo = rx4.recv() => { fail!("4") },
- foo = rx5.recv() => { assert_eq!(foo, 4); }
- )
- })
-
- test!(fn closed() {
- let (_tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- drop(tx2);
-
- select! (
- _a1 = rx1.recv_opt() => { fail!() },
- a2 = rx2.recv_opt() => { assert_eq!(a2, Err(())); }
- )
- })
-
- test!(fn unblocks() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<int>();
-
- spawn(proc() {
- for _ in range(0, 20) { task::deschedule(); }
- tx1.send(1);
- rx3.recv();
- for _ in range(0, 20) { task::deschedule(); }
- });
-
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- _b = rx2.recv() => { fail!() }
- )
- tx3.send(1);
- select! (
- a = rx1.recv_opt() => { assert_eq!(a, Err(())); },
- _b = rx2.recv() => { fail!() }
- )
- })
-
- test!(fn both_ready() {
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- for _ in range(0, 20) { task::deschedule(); }
- tx1.send(1);
- tx2.send(2);
- rx3.recv();
- });
-
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- a = rx2.recv() => { assert_eq!(a, 2); }
- )
- select! (
- a = rx1.recv() => { assert_eq!(a, 1); },
- a = rx2.recv() => { assert_eq!(a, 2); }
- )
- assert_eq!(rx1.try_recv(), Err(Empty));
- assert_eq!(rx2.try_recv(), Err(Empty));
- tx3.send(());
- })
-
- test!(fn stress() {
- static AMT: int = 10000;
- let (tx1, rx1) = channel::<int>();
- let (tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- for i in range(0, AMT) {
- if i % 2 == 0 {
- tx1.send(i);
- } else {
- tx2.send(i);
- }
- rx3.recv();
- }
- });
-
- for i in range(0, AMT) {
- select! (
- i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1); },
- i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2); }
- )
- tx3.send(());
- }
- })
-
- test!(fn cloning() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- rx3.recv();
- tx1.clone();
- assert_eq!(rx3.try_recv(), Err(Empty));
- tx1.send(2);
- rx3.recv();
- });
-
- tx3.send(());
- select!(
- _i1 = rx1.recv() => {},
- _i2 = rx2.recv() => fail!()
- )
- tx3.send(());
- })
-
- test!(fn cloning2() {
- let (tx1, rx1) = channel::<int>();
- let (_tx2, rx2) = channel::<int>();
- let (tx3, rx3) = channel::<()>();
-
- spawn(proc() {
- rx3.recv();
- tx1.clone();
- assert_eq!(rx3.try_recv(), Err(Empty));
- tx1.send(2);
- rx3.recv();
- });
-
- tx3.send(());
- select!(
- _i1 = rx1.recv() => {},
- _i2 = rx2.recv() => fail!()
- )
- tx3.send(());
- })
-
- test!(fn cloning3() {
- let (tx1, rx1) = channel::<()>();
- let (tx2, rx2) = channel::<()>();
- let (tx3, rx3) = channel::<()>();
- spawn(proc() {
- let s = Select::new();
- let mut h1 = s.handle(&rx1);
- let mut h2 = s.handle(&rx2);
- unsafe { h2.add(); }
- unsafe { h1.add(); }
- assert_eq!(s.wait(), h2.id);
- tx3.send(());
- });
-
- for _ in range(0, 1000) { task::deschedule(); }
- drop(tx1.clone());
- tx2.send(());
- rx3.recv();
- })
-
- test!(fn preflight1() {
- let (tx, rx) = channel();
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight2() {
- let (tx, rx) = channel();
- tx.send(());
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight3() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- select!(
- () = rx.recv() => {}
- )
- })
-
- test!(fn preflight4() {
- let (tx, rx) = channel();
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight5() {
- let (tx, rx) = channel();
- tx.send(());
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight6() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight7() {
- let (tx, rx) = channel::<()>();
- drop(tx);
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight8() {
- let (tx, rx) = channel();
- tx.send(());
- drop(tx);
- rx.recv();
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn preflight9() {
- let (tx, rx) = channel();
- drop(tx.clone());
- tx.send(());
- drop(tx);
- rx.recv();
- let s = Select::new();
- let mut h = s.handle(&rx);
- unsafe { h.add(); }
- assert_eq!(s.wait2(false), h.id);
- })
-
- test!(fn oneshot_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn stream_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- tx1.send(());
- tx1.send(());
- rx1.recv();
- rx1.recv();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn shared_data_waiting() {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- drop(tx1.clone());
- tx1.send(());
- rx1.recv();
- spawn(proc() {
- select! {
- () = rx1.recv() => {}
- }
- tx2.send(());
- });
-
- for _ in range(0, 100) { task::deschedule() }
- tx1.send(());
- rx2.recv();
- })
-
- test!(fn sync1() {
- let (tx, rx) = sync_channel(1);
- tx.send(1);
- select! {
- n = rx.recv() => { assert_eq!(n, 1); }
- }
- })
-
- test!(fn sync2() {
- let (tx, rx) = sync_channel(0);
- spawn(proc() {
- for _ in range(0, 100) { task::deschedule() }
- tx.send(1);
- });
- select! {
- n = rx.recv() => { assert_eq!(n, 1); }
- }
- })
-
- test!(fn sync3() {
- let (tx1, rx1) = sync_channel(0);
- let (tx2, rx2) = channel();
- spawn(proc() { tx1.send(1); });
- spawn(proc() { tx2.send(2); });
- select! {
- n = rx1.recv() => {
- assert_eq!(n, 1);
- assert_eq!(rx2.recv(), 2);
- },
- n = rx2.recv() => {
- assert_eq!(n, 2);
- assert_eq!(rx1.recv(), 1);
- }
- }
- })
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Shared channels
-///
-/// This is the flavor of channels which are not necessarily optimized for any
-/// particular use case, but are the most general in how they are used. Shared
-/// channels are cloneable allowing for multiple senders.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module. You'll also note that the implementation of the shared and stream
-/// channels are quite similar, and this is no coincidence!
-
-use cmp;
-use int;
-use iter::Iterator;
-use kinds::Send;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use result::{Ok, Err, Result};
-use rt::local::Local;
-use rt::mutex::NativeMutex;
-use rt::task::{Task, BlockedTask};
-use rt::thread::Thread;
-use sync::atomics;
-
-use mpsc = sync::mpsc_queue;
-
-static DISCONNECTED: int = int::MIN;
-static FUDGE: int = 1024;
-#[cfg(test)]
-static MAX_STEALS: int = 5;
-#[cfg(not(test))]
-static MAX_STEALS: int = 1 << 20;
-
-pub struct Packet<T> {
- queue: mpsc::Queue<T>,
- cnt: atomics::AtomicInt, // How many items are on this channel
- steals: int, // How many times has a port received without blocking?
- to_wake: atomics::AtomicUint, // Task to wake up
-
- // The number of channels which are currently using this packet.
- channels: atomics::AtomicInt,
-
- // See the discussion in Port::drop and the channel send methods for what
- // these are used for
- port_dropped: atomics::AtomicBool,
- sender_drain: atomics::AtomicInt,
-
- // this lock protects various portions of this implementation during
- // select()
- select_lock: NativeMutex,
-}
-
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-impl<T: Send> Packet<T> {
- // Creation of a packet *must* be followed by a call to postinit_lock
- // and later by inherit_blocker
- pub fn new() -> Packet<T> {
- let p = Packet {
- queue: mpsc::Queue::new(),
- cnt: atomics::AtomicInt::new(0),
- steals: 0,
- to_wake: atomics::AtomicUint::new(0),
- channels: atomics::AtomicInt::new(2),
- port_dropped: atomics::AtomicBool::new(false),
- sender_drain: atomics::AtomicInt::new(0),
- select_lock: unsafe { NativeMutex::new() },
- };
- return p;
- }
-
- // This function should be used after newly created Packet
- // was wrapped with an Arc
- // In other case mutex data will be duplicated while clonning
- // and that could cause problems on platforms where it is
- // represented by opaque data structure
- pub fn postinit_lock(&mut self) {
- unsafe { self.select_lock.lock_noguard() }
- }
-
- // This function is used at the creation of a shared packet to inherit a
- // previously blocked task. This is done to prevent spurious wakeups of
- // tasks in select().
- //
- // This can only be called at channel-creation time
- pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
- match task {
- Some(task) => {
- assert_eq!(self.cnt.load(atomics::SeqCst), 0);
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- self.to_wake.store(unsafe { task.cast_to_uint() },
- atomics::SeqCst);
- self.cnt.store(-1, atomics::SeqCst);
-
- // This store is a little sketchy. What's happening here is
- // that we're transferring a blocker from a oneshot or stream
- // channel to this shared channel. In doing so, we never
- // spuriously wake them up and rather only wake them up at the
- // appropriate time. This implementation of shared channels
- // assumes that any blocking recv() will undo the increment of
- // steals performed in try_recv() once the recv is complete.
- // This thread that we're inheriting, however, is not in the
- // middle of recv. Hence, the first time we wake them up,
- // they're going to wake up from their old port, move on to the
- // upgraded port, and then call the block recv() function.
- //
- // When calling this function, they'll find there's data
- // immediately available, counting it as a steal. This in fact
- // wasn't a steal because we appropriately blocked them waiting
- // for data.
- //
- // To offset this bad increment, we initially set the steal
- // count to -1. You'll find some special code in
- // abort_selection() as well to ensure that this -1 steal count
- // doesn't escape too far.
- self.steals = -1;
- }
- None => {}
- }
-
- // When the shared packet is constructed, we grabbed this lock. The
- // purpose of this lock is to ensure that abort_selection() doesn't
- // interfere with this method. After we unlock this lock, we're
- // signifying that we're done modifying self.cnt and self.to_wake and
- // the port is ready for the world to continue using it.
- unsafe { self.select_lock.unlock_noguard() }
- }
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // See Port::drop for what's going on
- if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
-
- // Note that the multiple sender case is a little tricker
- // semantically than the single sender case. The logic for
- // incrementing is "add and if disconnected store disconnected".
- // This could end up leading some senders to believe that there
- // wasn't a disconnect if in fact there was a disconnect. This means
- // that while one thread is attempting to re-store the disconnected
- // states, other threads could walk through merrily incrementing
- // this very-negative disconnected count. To prevent senders from
- // spuriously attempting to send when the channels is actually
- // disconnected, the count has a ranged check here.
- //
- // This is also done for another reason. Remember that the return
- // value of this function is:
- //
- // `true` == the data *may* be received, this essentially has no
- // meaning
- // `false` == the data will *never* be received, this has a lot of
- // meaning
- //
- // In the SPSC case, we have a check of 'queue.is_empty()' to see
- // whether the data was actually received, but this same condition
- // means nothing in a multi-producer context. As a result, this
- // preflight check serves as the definitive "this will never be
- // received". Once we get beyond this check, we have permanently
- // entered the realm of "this may be received"
- if self.cnt.load(atomics::SeqCst) < DISCONNECTED + FUDGE {
- return Err(t)
- }
-
- self.queue.push(t);
- match self.cnt.fetch_add(1, atomics::SeqCst) {
- -1 => {
- self.take_to_wake().wake().map(|t| t.reawaken());
- }
-
- // In this case, we have possibly failed to send our data, and
- // we need to consider re-popping the data in order to fully
- // destroy it. We must arbitrate among the multiple senders,
- // however, because the queues that we're using are
- // single-consumer queues. In order to do this, all exiting
- // pushers will use an atomic count in order to count those
- // flowing through. Pushers who see 0 are required to drain as
- // much as possible, and then can only exit when they are the
- // only pusher (otherwise they must try again).
- n if n < DISCONNECTED + FUDGE => {
- // see the comment in 'try' for a shared channel for why this
- // window of "not disconnected" is ok.
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
-
- if self.sender_drain.fetch_add(1, atomics::SeqCst) == 0 {
- loop {
- // drain the queue, for info on the thread yield see the
- // discussion in try_recv
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => {}
- mpsc::Empty => break,
- mpsc::Inconsistent => Thread::yield_now(),
- }
- }
- // maybe we're done, if we're not the last ones
- // here, then we need to go try again.
- if self.sender_drain.fetch_sub(1, atomics::SeqCst) == 1 {
- break
- }
- }
-
- // At this point, there may still be data on the queue,
- // but only if the count hasn't been incremented and
- // some other sender hasn't finished pushing data just
- // yet. That sender in question will drain its own data.
- }
- }
-
- // Can't make any assumptions about this case like in the SPSC case.
- _ => {}
- }
-
- Ok(())
- }
-
- pub fn recv(&mut self) -> Result<T, Failure> {
- // This code is essentially the exact same as that found in the stream
- // case (see stream.rs)
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- let task: Box<Task> = Local::take();
- task.deschedule(1, |task| {
- self.decrement(task)
- });
-
- match self.try_recv() {
- data @ Ok(..) => { self.steals -= 1; data }
- data => data,
- }
- }
-
- // Essentially the exact same thing as the stream decrement function.
- fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- let n = unsafe { task.cast_to_uint() };
- self.to_wake.store(n, atomics::SeqCst);
-
- let steals = self.steals;
- self.steals = 0;
-
- match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
- DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 { return Ok(()) }
- }
- }
-
- self.to_wake.store(0, atomics::SeqCst);
- Err(unsafe { BlockedTask::cast_from_uint(n) })
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure> {
- let ret = match self.queue.pop() {
- mpsc::Data(t) => Some(t),
- mpsc::Empty => None,
-
- // This is a bit of an interesting case. The channel is
- // reported as having data available, but our pop() has
- // failed due to the queue being in an inconsistent state.
- // This means that there is some pusher somewhere which has
- // yet to complete, but we are guaranteed that a pop will
- // eventually succeed. In this case, we spin in a yield loop
- // because the remote sender should finish their enqueue
- // operation "very quickly".
- //
- // Note that this yield loop does *not* attempt to do a green
- // yield (regardless of the context), but *always* performs an
- // OS-thread yield. The reasoning for this is that the pusher in
- // question which is causing the inconsistent state is
- // guaranteed to *not* be a blocked task (green tasks can't get
- // pre-empted), so it must be on a different OS thread. Also,
- // `try_recv` is normally a "guaranteed no rescheduling" context
- // in a green-thread situation. By yielding control of the
- // thread, we will hopefully allow time for the remote task on
- // the other OS thread to make progress.
- //
- // Avoiding this yield loop would require a different queue
- // abstraction which provides the guarantee that after M
- // pushes have succeeded, at least M pops will succeed. The
- // current queues guarantee that if there are N active
- // pushes, you can pop N times once all N have finished.
- mpsc::Inconsistent => {
- let data;
- loop {
- Thread::yield_now();
- match self.queue.pop() {
- mpsc::Data(t) => { data = t; break }
- mpsc::Empty => fail!("inconsistent => empty"),
- mpsc::Inconsistent => {}
- }
- }
- Some(data)
- }
- };
- match ret {
- // See the discussion in the stream implementation for why we
- // might decrement steals.
- Some(data) => {
- if self.steals > MAX_STEALS {
- match self.cnt.swap(0, atomics::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
- }
- n => {
- let m = cmp::min(n, self.steals);
- self.steals -= m;
- self.bump(n - m);
- }
- }
- assert!(self.steals >= 0);
- }
- self.steals += 1;
- Ok(data)
- }
-
- // See the discussion in the stream implementation for why we try
- // again.
- None => {
- match self.cnt.load(atomics::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
- _ => {
- match self.queue.pop() {
- mpsc::Data(t) => Ok(t),
- mpsc::Empty => Err(Disconnected),
- // with no senders, an inconsistency is impossible.
- mpsc::Inconsistent => unreachable!(),
- }
- }
- }
- }
- }
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&mut self) {
- self.channels.fetch_add(1, atomics::SeqCst);
- }
-
- // Decrement the reference count on a channel. This is called whenever a
- // Chan is dropped and may end up waking up a receiver. It's the receiver's
- // responsibility on the other end to figure out that we've disconnected.
- pub fn drop_chan(&mut self) {
- match self.channels.fetch_sub(1, atomics::SeqCst) {
- 1 => {}
- n if n > 1 => return,
- n => fail!("bad number of channels left {}", n),
- }
-
- match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
- -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
- DISCONNECTED => {}
- n => { assert!(n >= 0); }
- }
- }
-
- // See the long discussion inside of stream.rs for why the queue is drained,
- // and why it is done in this fashion.
- pub fn drop_port(&mut self) {
- self.port_dropped.store(true, atomics::SeqCst);
- let mut steals = self.steals;
- while {
- let cnt = self.cnt.compare_and_swap(
- steals, DISCONNECTED, atomics::SeqCst);
- cnt != DISCONNECTED && cnt != steals
- } {
- // See the discussion in 'try_recv' for why we yield
- // control of this thread.
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => { steals += 1; }
- mpsc::Empty | mpsc::Inconsistent => break,
- }
- }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&mut self) -> BlockedTask {
- let task = self.to_wake.load(atomics::SeqCst);
- self.to_wake.store(0, atomics::SeqCst);
- assert!(task != 0);
- unsafe { BlockedTask::cast_from_uint(task) }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Helper function for select, tests whether this port can receive without
- // blocking (obviously not an atomic decision).
- //
- // This is different than the stream version because there's no need to peek
- // at the queue, we can just look at the local count.
- pub fn can_recv(&mut self) -> bool {
- let cnt = self.cnt.load(atomics::SeqCst);
- cnt == DISCONNECTED || cnt - self.steals > 0
- }
-
- // increment the count on the channel (used for selection)
- fn bump(&mut self, amt: int) -> int {
- match self.cnt.fetch_add(amt, atomics::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
- DISCONNECTED
- }
- n => n
- }
- }
-
- // Inserts the blocked task for selection on this port, returning it back if
- // the port already has data on it.
- //
- // The code here is the same as in stream.rs, except that it doesn't need to
- // peek at the channel to see if an upgrade is pending.
- pub fn start_selection(&mut self,
- task: BlockedTask) -> Result<(), BlockedTask> {
- match self.decrement(task) {
- Ok(()) => Ok(()),
- Err(task) => {
- let prev = self.bump(1);
- assert!(prev == DISCONNECTED || prev >= 0);
- return Err(task);
- }
- }
- }
-
- // Cancels a previous task waiting on this port, returning whether there's
- // data on the port.
- //
- // This is similar to the stream implementation (hence fewer comments), but
- // uses a different value for the "steals" variable.
- pub fn abort_selection(&mut self, _was_upgrade: bool) -> bool {
- // Before we do anything else, we bounce on this lock. The reason for
- // doing this is to ensure that any upgrade-in-progress is gone and
- // done with. Without this bounce, we can race with inherit_blocker
- // about looking at and dealing with to_wake. Once we have acquired the
- // lock, we are guaranteed that inherit_blocker is done.
- unsafe {
- let _guard = self.select_lock.lock();
- }
-
- // Like the stream implementation, we want to make sure that the count
- // on the channel goes non-negative. We don't know how negative the
- // stream currently is, so instead of using a steal value of 1, we load
- // the channel count and figure out what we should do to make it
- // positive.
- let steals = {
- let cnt = self.cnt.load(atomics::SeqCst);
- if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
- };
- let prev = self.bump(steals + 1);
-
- if prev == DISCONNECTED {
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- true
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
- if prev < 0 {
- self.take_to_wake().trash();
- } else {
- while self.to_wake.load(atomics::SeqCst) != 0 {
- Thread::yield_now();
- }
- }
- // if the number of steals is -1, it was the pre-emptive -1 steal
- // count from when we inherited a blocker. This is fine because
- // we're just going to overwrite it with a real value.
- assert!(self.steals == 0 || self.steals == -1);
- self.steals = steals;
- prev >= 0
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed with also removing
- // the `to_wake` assert.
- assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- assert_eq!(self.channels.load(atomics::SeqCst), 0);
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Stream channels
-///
-/// This is the flavor of channels which are optimized for one sender and one
-/// receiver. The sender will be upgraded to a shared channel if the channel is
-/// cloned.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module.
-
-use cmp;
-use comm::Receiver;
-use int;
-use iter::Iterator;
-use kinds::Send;
-use ops::Drop;
-use option::{Some, None};
-use owned::Box;
-use result::{Ok, Err, Result};
-use rt::local::Local;
-use rt::task::{Task, BlockedTask};
-use rt::thread::Thread;
-use spsc = sync::spsc_queue;
-use sync::atomics;
-
-static DISCONNECTED: int = int::MIN;
-#[cfg(test)]
-static MAX_STEALS: int = 5;
-#[cfg(not(test))]
-static MAX_STEALS: int = 1 << 20;
-
-pub struct Packet<T> {
- queue: spsc::Queue<Message<T>>, // internal queue for all message
-
- cnt: atomics::AtomicInt, // How many items are on this channel
- steals: int, // How many times has a port received without blocking?
- to_wake: atomics::AtomicUint, // Task to wake up
-
- port_dropped: atomics::AtomicBool, // flag if the channel has been destroyed.
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(BlockedTask),
-}
-
-pub enum SelectionResult<T> {
- SelSuccess,
- SelCanceled(BlockedTask),
- SelUpgraded(BlockedTask, Receiver<T>),
-}
-
-// Any message could contain an "upgrade request" to a new shared port, so the
-// internal queue it's a queue of T, but rather Message<T>
-enum Message<T> {
- Data(T),
- GoUp(Receiver<T>),
-}
-
-impl<T: Send> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- queue: spsc::Queue::new(128),
-
- cnt: atomics::AtomicInt::new(0),
- steals: 0,
- to_wake: atomics::AtomicUint::new(0),
-
- port_dropped: atomics::AtomicBool::new(false),
- }
- }
-
-
- pub fn send(&mut self, t: T) -> Result<(), T> {
- // If the other port has deterministically gone away, then definitely
- // must return the data back up the stack. Otherwise, the data is
- // considered as being sent.
- if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
-
- match self.do_send(Data(t)) {
- UpSuccess | UpDisconnected => {},
- UpWoke(task) => { task.wake().map(|t| t.reawaken()); }
- }
- Ok(())
- }
- pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
- // If the port has gone away, then there's no need to proceed any
- // further.
- if self.port_dropped.load(atomics::SeqCst) { return UpDisconnected }
-
- self.do_send(GoUp(up))
- }
-
- fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
- self.queue.push(t);
- match self.cnt.fetch_add(1, atomics::SeqCst) {
- // As described in the mod's doc comment, -1 == wakeup
- -1 => UpWoke(self.take_to_wake()),
- // As as described before, SPSC queues must be >= -2
- -2 => UpSuccess,
-
- // Be sure to preserve the disconnected state, and the return value
- // in this case is going to be whether our data was received or not.
- // This manifests itself on whether we have an empty queue or not.
- //
- // Primarily, are required to drain the queue here because the port
- // will never remove this data. We can only have at most one item to
- // drain (the port drains the rest).
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
- let first = self.queue.pop();
- let second = self.queue.pop();
- assert!(second.is_none());
-
- match first {
- Some(..) => UpSuccess, // we failed to send the data
- None => UpDisconnected, // we successfully sent data
- }
- }
-
- // Otherwise we just sent some data on a non-waiting queue, so just
- // make sure the world is sane and carry on!
- n => { assert!(n >= 0); UpSuccess }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&mut self) -> BlockedTask {
- let task = self.to_wake.load(atomics::SeqCst);
- self.to_wake.store(0, atomics::SeqCst);
- assert!(task != 0);
- unsafe { BlockedTask::cast_from_uint(task) }
- }
-
- // Decrements the count on the channel for a sleeper, returning the sleeper
- // back if it shouldn't sleep. Note that this is the location where we take
- // steals into account.
- fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- let n = unsafe { task.cast_to_uint() };
- self.to_wake.store(n, atomics::SeqCst);
-
- let steals = self.steals;
- self.steals = 0;
-
- match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
- DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 { return Ok(()) }
- }
- }
-
- self.to_wake.store(0, atomics::SeqCst);
- Err(unsafe { BlockedTask::cast_from_uint(n) })
- }
-
- pub fn recv(&mut self) -> Result<T, Failure<T>> {
- // Optimistic preflight check (scheduling is expensive).
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- // Welp, our channel has no data. Deschedule the current task and
- // initiate the blocking protocol.
- let task: Box<Task> = Local::take();
- task.deschedule(1, |task| {
- self.decrement(task)
- });
-
- match self.try_recv() {
- // Messages which actually popped from the queue shouldn't count as
- // a steal, so offset the decrement here (we already have our
- // "steal" factored into the channel count above).
- data @ Ok(..) |
- data @ Err(Upgraded(..)) => {
- self.steals -= 1;
- data
- }
-
- data => data,
- }
- }
-
- pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
- match self.queue.pop() {
- // If we stole some data, record to that effect (this will be
- // factored into cnt later on).
- //
- // Note that we don't allow steals to grow without bound in order to
- // prevent eventual overflow of either steals or cnt as an overflow
- // would have catastrophic results. Sometimes, steals > cnt, but
- // other times cnt > steals, so we don't know the relation between
- // steals and cnt. This code path is executed only rarely, so we do
- // a pretty slow operation, of swapping 0 into cnt, taking steals
- // down as much as possible (without going negative), and then
- // adding back in whatever we couldn't factor into steals.
- Some(data) => {
- if self.steals > MAX_STEALS {
- match self.cnt.swap(0, atomics::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
- }
- n => {
- let m = cmp::min(n, self.steals);
- self.steals -= m;
- self.bump(n - m);
- }
- }
- assert!(self.steals >= 0);
- }
- self.steals += 1;
- match data {
- Data(t) => Ok(t),
- GoUp(up) => Err(Upgraded(up)),
- }
- }
-
- None => {
- match self.cnt.load(atomics::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
-
- // This is a little bit of a tricky case. We failed to pop
- // data above, and then we have viewed that the channel is
- // disconnected. In this window more data could have been
- // sent on the channel. It doesn't really make sense to
- // return that the channel is disconnected when there's
- // actually data on it, so be extra sure there's no data by
- // popping one more time.
- //
- // We can ignore steals because the other end is
- // disconnected and we'll never need to really factor in our
- // steals again.
- _ => {
- match self.queue.pop() {
- Some(Data(t)) => Ok(t),
- Some(GoUp(up)) => Err(Upgraded(up)),
- None => Err(Disconnected),
- }
- }
- }
- }
- }
- }
-
- pub fn drop_chan(&mut self) {
- // Dropping a channel is pretty simple, we just flag it as disconnected
- // and then wakeup a blocker if there is one.
- match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
- -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
- DISCONNECTED => {}
- n => { assert!(n >= 0); }
- }
- }
-
- pub fn drop_port(&mut self) {
- // Dropping a port seems like a fairly trivial thing. In theory all we
- // need to do is flag that we're disconnected and then everything else
- // can take over (we don't have anyone to wake up).
- //
- // The catch for Ports is that we want to drop the entire contents of
- // the queue. There are multiple reasons for having this property, the
- // largest of which is that if another chan is waiting in this channel
- // (but not received yet), then waiting on that port will cause a
- // deadlock.
- //
- // So if we accept that we must now destroy the entire contents of the
- // queue, this code may make a bit more sense. The tricky part is that
- // we can't let any in-flight sends go un-dropped, we have to make sure
- // *everything* is dropped and nothing new will come onto the channel.
-
- // The first thing we do is set a flag saying that we're done for. All
- // sends are gated on this flag, so we're immediately guaranteed that
- // there are a bounded number of active sends that we'll have to deal
- // with.
- self.port_dropped.store(true, atomics::SeqCst);
-
- // Now that we're guaranteed to deal with a bounded number of senders,
- // we need to drain the queue. This draining process happens atomically
- // with respect to the "count" of the channel. If the count is nonzero
- // (with steals taken into account), then there must be data on the
- // channel. In this case we drain everything and then try again. We will
- // continue to fail while active senders send data while we're dropping
- // data, but eventually we're guaranteed to break out of this loop
- // (because there is a bounded number of senders).
- let mut steals = self.steals;
- while {
- let cnt = self.cnt.compare_and_swap(
- steals, DISCONNECTED, atomics::SeqCst);
- cnt != DISCONNECTED && cnt != steals
- } {
- loop {
- match self.queue.pop() {
- Some(..) => { steals += 1; }
- None => break
- }
- }
- }
-
- // At this point in time, we have gated all future senders from sending,
- // and we have flagged the channel as being disconnected. The senders
- // still have some responsibility, however, because some sends may not
- // complete until after we flag the disconnection. There are more
- // details in the sending methods that see DISCONNECTED
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Tests to see whether this port can receive without blocking. If Ok is
- // returned, then that's the answer. If Err is returned, then the returned
- // port needs to be queried instead (an upgrade happened)
- pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
- // We peek at the queue to see if there's anything on it, and we use
- // this return value to determine if we should pop from the queue and
- // upgrade this channel immediately. If it looks like we've got an
- // upgrade pending, then go through the whole recv rigamarole to update
- // the internal state.
- match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.recv() {
- Err(Upgraded(port)) => Err(port),
- _ => unreachable!(),
- }
- }
- Some(..) => Ok(true),
- None => Ok(false)
- }
- }
-
- // increment the count on the channel (used for selection)
- fn bump(&mut self, amt: int) -> int {
- match self.cnt.fetch_add(amt, atomics::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, atomics::SeqCst);
- DISCONNECTED
- }
- n => n
- }
- }
-
- // Attempts to start selecting on this port. Like a oneshot, this can fail
- // immediately because of an upgrade.
- pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
- match self.decrement(task) {
- Ok(()) => SelSuccess,
- Err(task) => {
- let ret = match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.queue.pop() {
- Some(GoUp(port)) => SelUpgraded(task, port),
- _ => unreachable!(),
- }
- }
- Some(..) => SelCanceled(task),
- None => SelCanceled(task),
- };
- // Undo our decrement above, and we should be guaranteed that the
- // previous value is positive because we're not going to sleep
- let prev = self.bump(1);
- assert!(prev == DISCONNECTED || prev >= 0);
- return ret;
- }
- }
- }
-
- // Removes a previous task from being blocked in this port
- pub fn abort_selection(&mut self,
- was_upgrade: bool) -> Result<bool, Receiver<T>> {
- // If we're aborting selection after upgrading from a oneshot, then
- // we're guarantee that no one is waiting. The only way that we could
- // have seen the upgrade is if data was actually sent on the channel
- // half again. For us, this means that there is guaranteed to be data on
- // this channel. Furthermore, we're guaranteed that there was no
- // start_selection previously, so there's no need to modify `self.cnt`
- // at all.
- //
- // Hence, because of these invariants, we immediately return `Ok(true)`.
- // Note that the data may not actually be sent on the channel just yet.
- // The other end could have flagged the upgrade but not sent data to
- // this end. This is fine because we know it's a small bounded windows
- // of time until the data is actually sent.
- if was_upgrade {
- assert_eq!(self.steals, 0);
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- return Ok(true)
- }
-
- // We want to make sure that the count on the channel goes non-negative,
- // and in the stream case we can have at most one steal, so just assume
- // that we had one steal.
- let steals = 1;
- let prev = self.bump(steals + 1);
-
- // If we were previously disconnected, then we know for sure that there
- // is no task in to_wake, so just keep going
- let has_data = if prev == DISCONNECTED {
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- true // there is data, that data is that we're disconnected
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
-
- // If the previous count was negative, then we just made things go
- // positive, hence we passed the -1 boundary and we're responsible
- // for removing the to_wake() field and trashing it.
- //
- // If the previous count was positive then we're in a tougher
- // situation. A possible race is that a sender just incremented
- // through -1 (meaning it's going to try to wake a task up), but it
- // hasn't yet read the to_wake. In order to prevent a future recv()
- // from waking up too early (this sender picking up the plastered
- // over to_wake), we spin loop here waiting for to_wake to be 0.
- // Note that this entire select() implementation needs an overhaul,
- // and this is *not* the worst part of it, so this is not done as a
- // final solution but rather out of necessity for now to get
- // something working.
- if prev < 0 {
- self.take_to_wake().trash();
- } else {
- while self.to_wake.load(atomics::SeqCst) != 0 {
- Thread::yield_now();
- }
- }
- assert_eq!(self.steals, 0);
- self.steals = steals;
-
- // if we were previously positive, then there's surely data to
- // receive
- prev >= 0
- };
-
- // Now that we've determined that this queue "has data", we peek at the
- // queue to see if the data is an upgrade or not. If it's an upgrade,
- // then we need to destroy this port and abort selection on the
- // upgraded port.
- if has_data {
- match self.queue.peek() {
- Some(&GoUp(..)) => {
- match self.queue.pop() {
- Some(GoUp(port)) => Err(port),
- _ => unreachable!(),
- }
- }
- _ => Ok(true),
- }
- } else {
- Ok(false)
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed with also removing
- // the `to_wake` assert.
- assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
- assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
- }
-}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/// Synchronous channels/ports
-///
-/// This channel implementation differs significantly from the asynchronous
-/// implementations found next to it (oneshot/stream/share). This is an
-/// implementation of a synchronous, bounded buffer channel.
-///
-/// Each channel is created with some amount of backing buffer, and sends will
-/// *block* until buffer space becomes available. A buffer size of 0 is valid,
-/// which means that every successful send is paired with a successful recv.
-///
-/// This flavor of channels defines a new `send_opt` method for channels which
-/// is the method by which a message is sent but the task does not fail if it
-/// cannot be delivered.
-///
-/// Another major difference is that send() will *always* return back the data
-/// if it couldn't be sent. This is because it is deterministically known when
-/// the data is received and when it is not received.
-///
-/// Implementation-wise, it can all be summed up with "use a mutex plus some
-/// logic". The mutex used here is an OS native mutex, meaning that no user code
-/// is run inside of the mutex (to prevent context switching). This
-/// implementation shares almost all code for the buffered and unbuffered cases
-/// of a synchronous channel. There are a few branches for the unbuffered case,
-/// but they're mostly just relevant to blocking senders.
-
-use collections::Collection;
-use iter::Iterator;
-use kinds::Send;
-use mem;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use ptr::RawPtr;
-use result::{Result, Ok, Err};
-use rt::local::Local;
-use rt::mutex::{NativeMutex, LockGuard};
-use rt::task::{Task, BlockedTask};
-use sync::atomics;
-use ty::Unsafe;
-use vec::Vec;
-
-pub struct Packet<T> {
- /// Only field outside of the mutex. Just done for kicks, but mainly because
- /// the other shared channel already had the code implemented
- channels: atomics::AtomicUint,
-
- /// The state field is protected by this mutex
- lock: NativeMutex,
- state: Unsafe<State<T>>,
-}
-
-struct State<T> {
- disconnected: bool, // Is the channel disconnected yet?
- queue: Queue, // queue of senders waiting to send data
- blocker: Blocker, // currently blocked task on this channel
- buf: Buffer<T>, // storage for buffered messages
- cap: uint, // capacity of this channel
-
- /// A curious flag used to indicate whether a sender failed or succeeded in
- /// blocking. This is used to transmit information back to the task that it
- /// must dequeue its message from the buffer because it was not received.
- /// This is only relevant in the 0-buffer case. This obviously cannot be
- /// safely constructed, but it's guaranteed to always have a valid pointer
- /// value.
- canceled: Option<&'static mut bool>,
-}
-
-/// Possible flavors of tasks who can be blocked on this channel.
-enum Blocker {
- BlockedSender(BlockedTask),
- BlockedReceiver(BlockedTask),
- NoneBlocked
-}
-
-/// Simple queue for threading tasks together. Nodes are stack-allocated, so
-/// this structure is not safe at all
-struct Queue {
- head: *mut Node,
- tail: *mut Node,
-}
-
-struct Node {
- task: Option<BlockedTask>,
- next: *mut Node,
-}
-
-/// A simple ring-buffer
-struct Buffer<T> {
- buf: Vec<Option<T>>,
- start: uint,
- size: uint,
-}
-
-#[deriving(Show)]
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-/// Atomically blocks the current task, placing it into `slot`, unlocking `lock`
-/// in the meantime. This re-locks the mutex upon returning.
-fn wait(slot: &mut Blocker, f: fn(BlockedTask) -> Blocker,
- lock: &NativeMutex) {
- let me: Box<Task> = Local::take();
- me.deschedule(1, |task| {
- match mem::replace(slot, f(task)) {
- NoneBlocked => {}
- _ => unreachable!(),
- }
- unsafe { lock.unlock_noguard(); }
- Ok(())
- });
- unsafe { lock.lock_noguard(); }
-}
-
-/// Wakes up a task, dropping the lock at the correct time
-fn wakeup(task: BlockedTask, guard: LockGuard) {
- // We need to be careful to wake up the waiting task *outside* of the mutex
- // in case it incurs a context switch.
- mem::drop(guard);
- task.wake().map(|t| t.reawaken());
-}
-
-impl<T: Send> Packet<T> {
- pub fn new(cap: uint) -> Packet<T> {
- Packet {
- channels: atomics::AtomicUint::new(1),
- lock: unsafe { NativeMutex::new() },
- state: Unsafe::new(State {
- disconnected: false,
- blocker: NoneBlocked,
- cap: cap,
- canceled: None,
- queue: Queue {
- head: 0 as *mut Node,
- tail: 0 as *mut Node,
- },
- buf: Buffer {
- buf: Vec::from_fn(cap + if cap == 0 {1} else {0}, |_| None),
- start: 0,
- size: 0,
- },
- }),
- }
- }
-
- // Locks this channel, returning a guard for the state and the mutable state
- // itself. Care should be taken to ensure that the state does not escape the
- // guard!
- //
- // Note that we're ok promoting an & reference to an &mut reference because
- // the lock ensures that we're the only ones in the world with a pointer to
- // the state.
- fn lock<'a>(&'a self) -> (LockGuard<'a>, &'a mut State<T>) {
- unsafe {
- let guard = self.lock.lock();
- (guard, &mut *self.state.get())
- }
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
- let (guard, state) = self.lock();
-
- // wait for a slot to become available, and enqueue the data
- while !state.disconnected && state.buf.size() == state.buf.cap() {
- state.queue.enqueue(&self.lock);
- }
- if state.disconnected { return Err(t) }
- state.buf.enqueue(t);
-
- match mem::replace(&mut state.blocker, NoneBlocked) {
- // if our capacity is 0, then we need to wait for a receiver to be
- // available to take our data. After waiting, we check again to make
- // sure the port didn't go away in the meantime. If it did, we need
- // to hand back our data.
- NoneBlocked if state.cap == 0 => {
- let mut canceled = false;
- assert!(state.canceled.is_none());
- state.canceled = Some(unsafe { mem::transmute(&mut canceled) });
- wait(&mut state.blocker, BlockedSender, &self.lock);
- if canceled {Err(state.buf.dequeue())} else {Ok(())}
- }
-
- // success, we buffered some data
- NoneBlocked => Ok(()),
-
- // success, someone's about to receive our buffered data.
- BlockedReceiver(task) => { wakeup(task, guard); Ok(()) }
-
- BlockedSender(..) => fail!("lolwut"),
- }
- }
-
- pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
- let (guard, state) = self.lock();
- if state.disconnected {
- Err(super::RecvDisconnected(t))
- } else if state.buf.size() == state.buf.cap() {
- Err(super::Full(t))
- } else if state.cap == 0 {
- // With capacity 0, even though we have buffer space we can't
- // transfer the data unless there's a receiver waiting.
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => Err(super::Full(t)),
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(task) => {
- state.buf.enqueue(t);
- wakeup(task, guard);
- Ok(())
- }
- }
- } else {
- // If the buffer has some space and the capacity isn't 0, then we
- // just enqueue the data for later retrieval.
- assert!(state.buf.size() < state.buf.cap());
- state.buf.enqueue(t);
- Ok(())
- }
- }
-
- // Receives a message from this channel
- //
- // When reading this, remember that there can only ever be one receiver at
- // time.
- pub fn recv(&self) -> Result<T, ()> {
- let (guard, state) = self.lock();
-
- // Wait for the buffer to have something in it. No need for a while loop
- // because we're the only receiver.
- let mut waited = false;
- if !state.disconnected && state.buf.size() == 0 {
- wait(&mut state.blocker, BlockedReceiver, &self.lock);
- waited = true;
- }
- if state.disconnected && state.buf.size() == 0 { return Err(()) }
-
- // Pick up the data, wake up our neighbors, and carry on
- assert!(state.buf.size() > 0);
- let ret = state.buf.dequeue();
- self.wakeup_senders(waited, guard, state);
- return Ok(ret);
- }
-
- pub fn try_recv(&self) -> Result<T, Failure> {
- let (guard, state) = self.lock();
-
- // Easy cases first
- if state.disconnected { return Err(Disconnected) }
- if state.buf.size() == 0 { return Err(Empty) }
-
- // Be sure to wake up neighbors
- let ret = Ok(state.buf.dequeue());
- self.wakeup_senders(false, guard, state);
-
- return ret;
- }
-
- // Wake up pending senders after some data has been received
- //
- // * `waited` - flag if the receiver blocked to receive some data, or if it
- // just picked up some data on the way out
- // * `guard` - the lock guard that is held over this channel's lock
- fn wakeup_senders(&self, waited: bool,
- guard: LockGuard,
- state: &mut State<T>) {
- let pending_sender1: Option<BlockedTask> = state.queue.dequeue();
-
- // If this is a no-buffer channel (cap == 0), then if we didn't wait we
- // need to ACK the sender. If we waited, then the sender waking us up
- // was already the ACK.
- let pending_sender2 = if state.cap == 0 && !waited {
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedReceiver(..) => unreachable!(),
- BlockedSender(task) => {
- state.canceled.take();
- Some(task)
- }
- }
- } else {
- None
- };
- mem::drop((state, guard));
-
- // only outside of the lock do we wake up the pending tasks
- pending_sender1.map(|t| t.wake().map(|t| t.reawaken()));
- pending_sender2.map(|t| t.wake().map(|t| t.reawaken()));
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&self) {
- self.channels.fetch_add(1, atomics::SeqCst);
- }
-
- pub fn drop_chan(&self) {
- // Only flag the channel as disconnected if we're the last channel
- match self.channels.fetch_sub(1, atomics::SeqCst) {
- 1 => {}
- _ => return
- }
-
- // Not much to do other than wake up a receiver if one's there
- let (guard, state) = self.lock();
- if state.disconnected { return }
- state.disconnected = true;
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(task) => wakeup(task, guard),
- }
- }
-
- pub fn drop_port(&self) {
- let (guard, state) = self.lock();
-
- if state.disconnected { return }
- state.disconnected = true;
-
- // If the capacity is 0, then the sender may want its data back after
- // we're disconnected. Otherwise it's now our responsibility to destroy
- // the buffered data. As with many other portions of this code, this
- // needs to be careful to destroy the data *outside* of the lock to
- // prevent deadlock.
- let _data = if state.cap != 0 {
- mem::replace(&mut state.buf.buf, Vec::new())
- } else {
- Vec::new()
- };
- let mut queue = mem::replace(&mut state.queue, Queue {
- head: 0 as *mut Node,
- tail: 0 as *mut Node,
- });
-
- let waiter = match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedSender(task) => {
- *state.canceled.take_unwrap() = true;
- Some(task)
- }
- BlockedReceiver(..) => unreachable!(),
- };
- mem::drop((state, guard));
-
- loop {
- match queue.dequeue() {
- Some(task) => { task.wake().map(|t| t.reawaken()); }
- None => break,
- }
- }
- waiter.map(|t| t.wake().map(|t| t.reawaken()));
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // If Ok, the value is whether this port has data, if Err, then the upgraded
- // port needs to be checked instead of this one.
- pub fn can_recv(&self) -> bool {
- let (_g, state) = self.lock();
- state.disconnected || state.buf.size() > 0
- }
-
- // Attempts to start selection on this port. This can either succeed or fail
- // because there is data waiting.
- pub fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>{
- let (_g, state) = self.lock();
- if state.disconnected || state.buf.size() > 0 {
- Err(task)
- } else {
- match mem::replace(&mut state.blocker, BlockedReceiver(task)) {
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(..) => unreachable!(),
- }
- Ok(())
- }
- }
-
- // Remove a previous selecting task from this port. This ensures that the
- // blocked task will no longer be visible to any other threads.
- //
- // The return value indicates whether there's data on this port.
- pub fn abort_selection(&self) -> bool {
- let (_g, state) = self.lock();
- match mem::replace(&mut state.blocker, NoneBlocked) {
- NoneBlocked => true,
- BlockedSender(task) => {
- state.blocker = BlockedSender(task);
- true
- }
- BlockedReceiver(task) => { task.trash(); false }
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.channels.load(atomics::SeqCst), 0);
- let (_g, state) = self.lock();
- assert!(state.queue.dequeue().is_none());
- assert!(state.canceled.is_none());
- }
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-// Buffer, a simple ring buffer backed by Vec<T>
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T> Buffer<T> {
- fn enqueue(&mut self, t: T) {
- let pos = (self.start + self.size) % self.buf.len();
- self.size += 1;
- let prev = mem::replace(self.buf.get_mut(pos), Some(t));
- assert!(prev.is_none());
- }
-
- fn dequeue(&mut self) -> T {
- let start = self.start;
- self.size -= 1;
- self.start = (self.start + 1) % self.buf.len();
- self.buf.get_mut(start).take_unwrap()
- }
-
- fn size(&self) -> uint { self.size }
- fn cap(&self) -> uint { self.buf.len() }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
-////////////////////////////////////////////////////////////////////////////////
-
-impl Queue {
- fn enqueue(&mut self, lock: &NativeMutex) {
- let task: Box<Task> = Local::take();
- let mut node = Node {
- task: None,
- next: 0 as *mut Node,
- };
- task.deschedule(1, |task| {
- node.task = Some(task);
- if self.tail.is_null() {
- self.head = &mut node as *mut Node;
- self.tail = &mut node as *mut Node;
- } else {
- unsafe {
- (*self.tail).next = &mut node as *mut Node;
- self.tail = &mut node as *mut Node;
- }
- }
- unsafe { lock.unlock_noguard(); }
- Ok(())
- });
- unsafe { lock.lock_noguard(); }
- assert!(node.next.is_null());
- }
-
- fn dequeue(&mut self) -> Option<BlockedTask> {
- if self.head.is_null() {
- return None
- }
- let node = self.head;
- self.head = unsafe { (*node).next };
- if self.head.is_null() {
- self.tail = 0 as *mut Node;
- }
- unsafe {
- (*node).next = 0 as *mut Node;
- Some((*node).task.take_unwrap())
- }
- }
-}
#![allow(experimental)]
-use kinds::marker;
use clone::Clone;
+use cmp::{Ord, PartialOrd, Ordering, Eq, PartialEq};
+use default::Default;
+use fmt;
+use hash;
+use kinds::marker;
+use ops::Deref;
+use raw;
/// Immutable garbage-collected pointer type
#[lang="gc"]
-#[cfg(not(test))]
#[experimental = "Gc is currently based on reference-counting and will not collect cycles until \
task annihilation. For now, cycles need to be broken manually by using `Rc<T>` \
with a non-owning `Weak<T>` pointer. A tracing garbage collector is planned."]
pub struct Gc<T> {
+ #[cfg(stage0)]
ptr: @T,
+ #[cfg(not(stage0))]
+ _ptr: *T,
marker: marker::NoSend,
}
-#[cfg(test)]
-pub struct Gc<T> {
- ptr: @T,
- marker: marker::NoSend,
-}
-
-impl<T: 'static> Gc<T> {
- /// Construct a new garbage-collected box
- #[inline]
- pub fn new(value: T) -> Gc<T> {
- Gc { ptr: @value, marker: marker::NoSend }
- }
-
- /// Borrow the value contained in the garbage-collected box
- #[inline]
- pub fn borrow<'r>(&'r self) -> &'r T {
- &*self.ptr
- }
-
- /// Determine if two garbage-collected boxes point to the same object
- #[inline]
- pub fn ptr_eq(&self, other: &Gc<T>) -> bool {
- self.borrow() as *T == other.borrow() as *T
- }
-}
-
impl<T> Clone for Gc<T> {
/// Clone the pointer only
#[inline]
- fn clone(&self) -> Gc<T> {
- Gc{ ptr: self.ptr, marker: marker::NoSend }
- }
+ fn clone(&self) -> Gc<T> { *self }
}
/// An value that represents the task-local managed heap.
#[cfg(not(test))]
pub static GC: () = ();
-#[cfg(test)]
-pub static GC: () = ();
+impl<T: PartialEq + 'static> PartialEq for Gc<T> {
+ #[inline]
+ fn eq(&self, other: &Gc<T>) -> bool { *(*self) == *(*other) }
+ #[inline]
+ fn ne(&self, other: &Gc<T>) -> bool { *(*self) != *(*other) }
+}
+impl<T: PartialOrd + 'static> PartialOrd for Gc<T> {
+ #[inline]
+ fn lt(&self, other: &Gc<T>) -> bool { *(*self) < *(*other) }
+ #[inline]
+ fn le(&self, other: &Gc<T>) -> bool { *(*self) <= *(*other) }
+ #[inline]
+ fn ge(&self, other: &Gc<T>) -> bool { *(*self) >= *(*other) }
+ #[inline]
+ fn gt(&self, other: &Gc<T>) -> bool { *(*self) > *(*other) }
+}
+impl<T: Ord + 'static> Ord for Gc<T> {
+ #[inline]
+ fn cmp(&self, other: &Gc<T>) -> Ordering { (**self).cmp(&**other) }
+}
+impl<T: Eq + 'static> Eq for Gc<T> {}
+
+impl<T: 'static> Deref<T> for Gc<T> {
+ #[cfg(stage0)]
+ fn deref<'a>(&'a self) -> &'a T { &*self.ptr }
+ #[cfg(not(stage0))]
+ fn deref<'a>(&'a self) -> &'a T { &**self }
+}
+
+impl<T: Default + 'static> Default for Gc<T> {
+ fn default() -> Gc<T> {
+ box(GC) Default::default()
+ }
+}
+
+impl<T: 'static> raw::Repr<*raw::Box<T>> for Gc<T> {}
+
+impl<S: hash::Writer, T: hash::Hash<S> + 'static> hash::Hash<S> for Gc<T> {
+ fn hash(&self, s: &mut S) {
+ (**self).hash(s)
+ }
+}
+
+impl<T: 'static + fmt::Show> fmt::Show for Gc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
#[cfg(test)]
mod tests {
use io;
use option::{None, Option, Some};
use result::{Ok, Err};
-use super::{Reader, Writer, IoResult};
-use str::StrSlice;
use slice::{bytes, MutableVector, ImmutableVector};
+use str::StrSlice;
+use super::{Reader, Writer, IoResult};
use vec::Vec;
/// Allows reading from a rx.
assert_eq!(Ok(2), reader.read(buf));
assert_eq!(&[7,8,6], buf.as_slice());
- match reader.read(buf) {
+ match reader.read(buf.as_mut_slice()) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.kind, io::EndOfFile),
}
assert_eq!(&[7,8,6], buf.as_slice());
// Ensure it continues to fail in the same way.
- match reader.read(buf) {
+ match reader.read(buf.as_mut_slice()) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.kind, io::EndOfFile),
}
#[test]
fn test_read_f32() {
//big-endian floating-point 8.1250
- let buf = box [0x41, 0x02, 0x00, 0x00];
+ let buf = vec![0x41, 0x02, 0x00, 0x00];
let mut writer = MemWriter::new();
- writer.write(buf).unwrap();
+ writer.write(buf.as_slice()).unwrap();
let mut reader = MemReader::new(writer.unwrap());
let f = reader.read_be_f32().unwrap();
use c_str::ToCStr;
use clone::Clone;
use collections::Collection;
+use io::{FilePermission, Write, UnstableFileStat, Open, FileAccess, FileMode};
+use io::{IoResult, IoError, FileStat, SeekStyle, Seek, Writer, Reader};
+use io::{Read, Truncate, SeekCur, SeekSet, ReadWrite, SeekEnd, Append};
use io;
use iter::Iterator;
use kinds::Send;
use owned::Box;
use path::{Path, GenericPath};
use path;
-use result::{Ok, Err};
-use rt::rtio::{RtioFileStream, IoFactory, LocalIo};
+use result::{Err, Ok};
+use rt::rtio::LocalIo;
use rt::rtio;
-use slice::{OwnedVector, ImmutableVector};
-use super::UnstableFileStat;
-use super::{FileMode, FileAccess, FileStat, IoResult, FilePermission};
-use super::{Reader, Writer, Seek, Append, SeekCur, SeekEnd, SeekSet};
-use super::{SeekStyle, Read, Write, ReadWrite, Open, IoError, Truncate};
+use slice::ImmutableVector;
use vec::Vec;
/// Unconstrained file access type that exposes read and write operations
/// configured at creation time, via the `FileAccess` parameter to
/// `File::open_mode()`.
pub struct File {
- fd: Box<RtioFileStream:Send>,
+ fd: Box<rtio::RtioFileStream:Send>,
path: Path,
last_nread: int,
}
let mut read_buf = [0, .. 1028];
let read_str = match check!(read_stream.read(read_buf)) {
-1|0 => fail!("shouldn't happen"),
- n => str::from_utf8(read_buf.slice_to(n).to_owned()).unwrap().to_owned()
+ n => str::from_utf8(read_buf.slice_to(n)).unwrap().to_owned()
};
assert_eq!(read_str, message.to_owned());
}
#[test]
fn test_buf_reader() {
- let in_buf = box [0, 1, 2, 3, 4, 5, 6, 7];
- let mut reader = BufReader::new(in_buf);
+ let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
+ let mut reader = BufReader::new(in_buf.as_slice());
let mut buf = [];
assert_eq!(reader.read(buf), Ok(0));
assert_eq!(reader.tell(), Ok(0));
assert_eq!(reader.read(buf), Ok(3));
assert_eq!(buf.slice(0, 3), &[5, 6, 7]);
assert!(reader.read(buf).is_err());
- let mut reader = BufReader::new(in_buf);
+ let mut reader = BufReader::new(in_buf.as_slice());
assert_eq!(reader.read_until(3).unwrap(), vec!(0, 1, 2, 3));
assert_eq!(reader.read_until(3).unwrap(), vec!(4, 5, 6, 7));
assert!(reader.read(buf).is_err());
})
iotest!(fn test_add_to_env() {
- let new_env = box [("RUN_TEST_NEW_ENV", "123")];
- let prog = env_cmd().env(new_env).spawn().unwrap();
+ let new_env = vec![("RUN_TEST_NEW_ENV", "123")];
+ let prog = env_cmd().env(new_env.as_slice()).spawn().unwrap();
let result = prog.wait_with_output().unwrap();
let output = str::from_utf8_lossy(result.output.as_slice()).into_string();
#[test]
fn test_null_writer() {
let mut s = NullWriter;
- let buf = box [0, 0, 0];
- s.write(buf).unwrap();
+ let buf = vec![0, 0, 0];
+ s.write(buf.as_slice()).unwrap();
s.flush().unwrap();
}
#[test]
fn test_zero_reader() {
let mut s = ZeroReader;
- let mut buf = box [1, 2, 3];
- assert_eq!(s.read(buf), Ok(3));
- assert_eq!(box [0, 0, 0], buf);
+ let mut buf = vec![1, 2, 3];
+ assert_eq!(s.read(buf.as_mut_slice()), Ok(3));
+ assert_eq!(vec![0, 0, 0], buf);
}
#[test]
fn test_null_reader() {
let mut r = NullReader;
- let mut buf = box [0];
- assert!(r.read(buf).is_err());
+ let mut buf = vec![0];
+ assert!(r.read(buf.as_mut_slice()).is_err());
}
#[test]
extern crate core;
extern crate core_collections = "collections";
extern crate core_rand = "rand";
+extern crate core_sync = "sync";
extern crate libc;
extern crate rustrt;
#[cfg(test)] pub use realstd::ops;
#[cfg(test)] pub use realstd::cmp;
#[cfg(test)] pub use realstd::ty;
+#[cfg(test)] pub use realstd::owned;
+#[cfg(test)] pub use realstd::gc;
// NB: These reexports are in the order they should be listed in rustdoc
pub use rustrt::c_str;
pub use rustrt::local_data;
+pub use core_sync::comm;
+
// Run tests with libgreen instead of libnative.
//
// FIXME: This egregiously hacks around starting the test runner in a different
pub mod ascii;
+#[cfg(not(test))]
pub mod gc;
/* Common traits */
/* Tasks and communication */
pub mod task;
-pub mod comm;
pub mod sync;
-
/* Runtime and platform support */
pub mod c_vec;
use ptr::RawPtr;
use ptr;
use result::{Err, Ok, Result};
-use slice::{Vector, ImmutableVector, MutableVector, OwnedVector};
+use slice::{Vector, ImmutableVector, MutableVector};
use str::{Str, StrSlice, StrAllocating};
use str;
use string::String;
unsafe {
use libc::funcs::bsd44::*;
use libc::consts::os::extra::*;
- let mib = box [CTL_KERN as c_int,
+ let mib = vec![CTL_KERN as c_int,
KERN_PROC as c_int,
KERN_PROC_PATHNAME as c_int, -1 as c_int];
let mut sz: libc::size_t = 0;
*/
-#![deny(deprecated_owned_vector)]
-
use collections::Collection;
use c_str::CString;
use clone::Clone;
}
}
-impl BytesContainer for ~[u8] {
- #[inline]
- fn container_as_bytes<'a>(&'a self) -> &'a [u8] {
- self.as_slice()
- }
-}
-
impl BytesContainer for Vec<u8> {
#[inline]
fn container_as_bytes<'a>(&'a self) -> &'a [u8] {
use str::Str;
use str;
use slice::{CloneableVector, Splits, Vector, VectorVector,
- ImmutableEqVector, OwnedVector, ImmutableVector};
+ ImmutableEqVector, ImmutableVector};
use vec::Vec;
use super::{BytesContainer, GenericPath, GenericPathUnsafe};
}
}
- #[allow(deprecated_owned_vector)]
fn path_relative_from(&self, base: &Path) -> Option<Path> {
if self.is_absolute() != base.is_absolute() {
if self.is_absolute() {
use iter::{AdditiveIterator, DoubleEndedIterator, Extendable, Iterator, Map};
use mem;
use option::{Option, Some, None};
-use slice::{Vector, OwnedVector, ImmutableVector};
+use slice::{Vector, ImmutableVector};
use str::{CharSplits, Str, StrAllocating, StrVector, StrSlice};
use string::String;
use vec::Vec;
#[doc(no_inline)] pub use iter::{Iterator, DoubleEndedIterator};
#[doc(no_inline)] pub use iter::{RandomAccessIterator, CloneableIterator};
#[doc(no_inline)] pub use iter::{OrdIterator, MutableDoubleEndedIterator};
-#[doc(no_inline)] pub use num::{Num, NumCast, CheckedAdd, CheckedSub, CheckedMul};
+#[doc(no_inline)] pub use num::{Num, NumCast, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv};
#[doc(no_inline)] pub use num::{Signed, Unsigned, Primitive, Int, Float};
#[doc(no_inline)] pub use num::{FloatMath, ToPrimitive, FromPrimitive};
#[doc(no_inline)] pub use owned::Box;
#[doc(no_inline)] pub use slice::{MutableCloneableVector, MutableOrdVector};
#[doc(no_inline)] pub use slice::{ImmutableVector, MutableVector};
#[doc(no_inline)] pub use slice::{ImmutableEqVector, ImmutableOrdVector};
-#[doc(no_inline)] pub use slice::{Vector, VectorVector, OwnedVector};
+#[doc(no_inline)] pub use slice::{Vector, VectorVector};
#[doc(no_inline)] pub use slice::MutableVectorAllocating;
#[doc(no_inline)] pub use string::String;
#[doc(no_inline)] pub use vec::Vec;
}
#[cfg(test)]
-#[allow(deprecated_owned_vector)]
mod test {
use prelude::*;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
- let v = box [1u64, 2u64, 3u64];
- let bytes: ~[u8] = unsafe {mem::transmute(v)};
- let mut rng = ReaderRng::new(MemReader::new(bytes.move_iter().collect()));
+ let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
+ 0 , 0, 0, 0, 0, 0, 0, 2,
+ 0, 0, 0, 0, 0, 0, 0, 3];
+ let mut rng = ReaderRng::new(MemReader::new(v));
- assert_eq!(rng.next_u64(), 1);
- assert_eq!(rng.next_u64(), 2);
- assert_eq!(rng.next_u64(), 3);
+ assert_eq!(rng.next_u64(), mem::to_be64(1));
+ assert_eq!(rng.next_u64(), mem::to_be64(2));
+ assert_eq!(rng.next_u64(), mem::to_be64(3));
}
#[test]
fn test_reader_rng_u32() {
- // transmute from the target to avoid endianness concerns.
- let v = box [1u32, 2u32, 3u32];
- let bytes: ~[u8] = unsafe {mem::transmute(v)};
- let mut rng = ReaderRng::new(MemReader::new(bytes.move_iter().collect()));
+ let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
+ let mut rng = ReaderRng::new(MemReader::new(v));
- assert_eq!(rng.next_u32(), 1);
- assert_eq!(rng.next_u32(), 2);
- assert_eq!(rng.next_u32(), 3);
+ assert_eq!(rng.next_u32(), mem::to_be32(1));
+ assert_eq!(rng.next_u32(), mem::to_be32(2));
+ assert_eq!(rng.next_u32(), mem::to_be32(3));
}
#[test]
fn test_reader_rng_fill_bytes() {
// Reexport functionality from librustrt and other crates underneath the
// standard library which work together to create the entire runtime.
pub use alloc::{heap, libc_heap};
-pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio};
+pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio, thread};
pub use rustrt::{Stdio, Stdout, Stderr, begin_unwind, begin_unwind_fmt};
pub use rustrt::{bookkeeping, at_exit, unwind, DEFAULT_ERROR_CODE, Runtime};
-// Bindings to system threading libraries.
-pub mod thread;
-
// Simple backtrace functionality (to print on failure)
pub mod backtrace;
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Native os-thread management
-//!
-//! This modules contains bindings necessary for managing OS-level threads.
-//! These functions operate outside of the rust runtime, creating threads
-//! which are not used for scheduling in any way.
-
-#![allow(non_camel_case_types)]
-#![allow(unsigned_negate)]
-
-use kinds::Send;
-use libc;
-use mem;
-use ops::Drop;
-use option::{Option, Some, None};
-use owned::Box;
-use uint;
-
-type StartFn = extern "C" fn(*libc::c_void) -> imp::rust_thread_return;
-
-/// This struct represents a native thread's state. This is used to join on an
-/// existing thread created in the join-able state.
-pub struct Thread<T> {
- native: imp::rust_thread,
- joined: bool,
- packet: Box<Option<T>>,
-}
-
-static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
-
-// This is the starting point of rust os threads. The first thing we do
-// is make sure that we don't trigger __morestack (also why this has a
-// no_split_stack annotation), and then we extract the main function
-// and invoke it.
-#[no_split_stack]
-extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return {
- use rt::stack;
- unsafe {
- stack::record_stack_bounds(0, uint::MAX);
- let f: Box<proc()> = mem::transmute(main);
- (*f)();
- mem::transmute(0 as imp::rust_thread_return)
- }
-}
-
-// There are two impl blocks b/c if T were specified at the top then it's just a
-// pain to specify a type parameter on Thread::spawn (which doesn't need the
-// type parameter).
-impl Thread<()> {
-
- /// Starts execution of a new OS thread.
- ///
- /// This function will not wait for the thread to join, but a handle to the
- /// thread will be returned.
- ///
- /// Note that the handle returned is used to acquire the return value of the
- /// procedure `main`. The `join` function will wait for the thread to finish
- /// and return the value that `main` generated.
- ///
- /// Also note that the `Thread` returned will *always* wait for the thread
- /// to finish executing. This means that even if `join` is not explicitly
- /// called, when the `Thread` falls out of scope its destructor will block
- /// waiting for the OS thread.
- pub fn start<T: Send>(main: proc():Send -> T) -> Thread<T> {
- Thread::start_stack(DEFAULT_STACK_SIZE, main)
- }
-
- /// Performs the same functionality as `start`, but specifies an explicit
- /// stack size for the new thread.
- pub fn start_stack<T: Send>(stack: uint, main: proc():Send -> T) -> Thread<T> {
-
- // We need the address of the packet to fill in to be stable so when
- // `main` fills it in it's still valid, so allocate an extra box to do
- // so.
- let packet = box None;
- let packet2: *mut Option<T> = unsafe {
- *mem::transmute::<&Box<Option<T>>, **mut Option<T>>(&packet)
- };
- let main = proc() unsafe { *packet2 = Some(main()); };
- let native = unsafe { imp::create(stack, box main) };
-
- Thread {
- native: native,
- joined: false,
- packet: packet,
- }
- }
-
- /// This will spawn a new thread, but it will not wait for the thread to
- /// finish, nor is it possible to wait for the thread to finish.
- ///
- /// This corresponds to creating threads in the 'detached' state on unix
- /// systems. Note that platforms may not keep the main program alive even if
- /// there are detached thread still running around.
- pub fn spawn(main: proc():Send) {
- Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
- }
-
- /// Performs the same functionality as `spawn`, but explicitly specifies a
- /// stack size for the new thread.
- pub fn spawn_stack(stack: uint, main: proc():Send) {
- unsafe {
- let handle = imp::create(stack, box main);
- imp::detach(handle);
- }
- }
-
- /// Relinquishes the CPU slot that this OS-thread is currently using,
- /// allowing another thread to run for awhile.
- pub fn yield_now() {
- unsafe { imp::yield_now(); }
- }
-}
-
-impl<T: Send> Thread<T> {
- /// Wait for this thread to finish, returning the result of the thread's
- /// calculation.
- pub fn join(mut self) -> T {
- assert!(!self.joined);
- unsafe { imp::join(self.native) };
- self.joined = true;
- assert!(self.packet.is_some());
- self.packet.take_unwrap()
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Thread<T> {
- fn drop(&mut self) {
- // This is required for correctness. If this is not done then the thread
- // would fill in a return box which no longer exists.
- if !self.joined {
- unsafe { imp::join(self.native) };
- }
- }
-}
-
-#[cfg(windows)]
-mod imp {
- use mem;
- use cmp;
- use kinds::Send;
- use libc;
- use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
- LPVOID, DWORD, LPDWORD, HANDLE};
- use os;
- use owned::Box;
- use ptr;
- use rt::stack::RED_ZONE;
-
- pub type rust_thread = HANDLE;
- pub type rust_thread_return = DWORD;
-
- pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
- let arg: *mut libc::c_void = mem::transmute(p);
- // FIXME On UNIX, we guard against stack sizes that are too small but
- // that's because pthreads enforces that stacks are at least
- // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
- // just that below a certain threshold you can't do anything useful.
- // That threshold is application and architecture-specific, however.
- // For now, the only requirement is that it's big enough to hold the
- // red zone. Round up to the next 64 kB because that's what the NT
- // kernel does, might as well make it explicit. With the current
- // 20 kB red zone, that makes for a 64 kB minimum stack.
- let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
- let ret = CreateThread(ptr::mut_null(), stack_size as libc::size_t,
- super::thread_start, arg, 0, ptr::mut_null());
-
- if ret as uint == 0 {
- // be sure to not leak the closure
- let _p: Box<proc():Send> = mem::transmute(arg);
- fail!("failed to spawn native thread: {}", os::last_os_error());
- }
- return ret;
- }
-
- pub unsafe fn join(native: rust_thread) {
- use libc::consts::os::extra::INFINITE;
- WaitForSingleObject(native, INFINITE);
- }
-
- pub unsafe fn detach(native: rust_thread) {
- assert!(libc::CloseHandle(native) != 0);
- }
-
- pub unsafe fn yield_now() {
- // This function will return 0 if there are no other threads to execute,
- // but this also means that the yield was useless so this isn't really a
- // case that needs to be worried about.
- SwitchToThread();
- }
-
- #[allow(non_snake_case_functions)]
- extern "system" {
- fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
- dwStackSize: SIZE_T,
- lpStartAddress: super::StartFn,
- lpParameter: LPVOID,
- dwCreationFlags: DWORD,
- lpThreadId: LPDWORD) -> HANDLE;
- fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
- fn SwitchToThread() -> BOOL;
- }
-}
-
-#[cfg(unix)]
-mod imp {
- use cmp;
- use kinds::Send;
- use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
- use libc;
- use mem;
- use os;
- use owned::Box;
- use ptr;
- use rt::stack::RED_ZONE;
-
- pub type rust_thread = libc::pthread_t;
- pub type rust_thread_return = *u8;
-
- pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
- let mut native: libc::pthread_t = mem::zeroed();
- let mut attr: libc::pthread_attr_t = mem::zeroed();
- assert_eq!(pthread_attr_init(&mut attr), 0);
- assert_eq!(pthread_attr_setdetachstate(&mut attr,
- PTHREAD_CREATE_JOINABLE), 0);
-
- // Reserve room for the red zone, the runtime's stack of last resort.
- let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as uint);
- match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
- 0 => {
- },
- libc::EINVAL => {
- // EINVAL means |stack_size| is either too small or not a
- // multiple of the system page size. Because it's definitely
- // >= PTHREAD_STACK_MIN, it must be an alignment issue.
- // Round up to the nearest page and try again.
- let page_size = os::page_size();
- let stack_size = (stack_size + page_size - 1) & (-(page_size - 1) - 1);
- assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
- },
- errno => {
- // This cannot really happen.
- fail!("pthread_attr_setstacksize() error: {} ({})", os::last_os_error(), errno);
- },
- };
-
- let arg: *libc::c_void = mem::transmute(p);
- let ret = pthread_create(&mut native, &attr, super::thread_start, arg);
- assert_eq!(pthread_attr_destroy(&mut attr), 0);
-
- if ret != 0 {
- // be sure to not leak the closure
- let _p: Box<proc():Send> = mem::transmute(arg);
- fail!("failed to spawn native thread: {}", os::last_os_error());
- }
- native
- }
-
- pub unsafe fn join(native: rust_thread) {
- assert_eq!(pthread_join(native, ptr::null()), 0);
- }
-
- pub unsafe fn detach(native: rust_thread) {
- assert_eq!(pthread_detach(native), 0);
- }
-
- pub unsafe fn yield_now() { assert_eq!(sched_yield(), 0); }
-
- // glibc >= 2.15 has a __pthread_get_minstack() function that returns
- // PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
- // storage. We need that information to avoid blowing up when a small stack
- // is created in an application with big thread-local storage requirements.
- // See #6233 for rationale and details.
- //
- // Link weakly to the symbol for compatibility with older versions of glibc.
- // Assumes that we've been dynamically linked to libpthread but that is
- // currently always the case. Note that you need to check that the symbol
- // is non-null before calling it!
- #[cfg(target_os = "linux")]
- fn min_stack_size(attr: *libc::pthread_attr_t) -> libc::size_t {
- use ptr::RawPtr;
- type F = unsafe extern "C" fn(*libc::pthread_attr_t) -> libc::size_t;
- extern {
- #[linkage = "extern_weak"]
- static __pthread_get_minstack: *();
- }
- if __pthread_get_minstack.is_null() {
- PTHREAD_STACK_MIN
- } else {
- unsafe { mem::transmute::<*(), F>(__pthread_get_minstack)(attr) }
- }
- }
-
- // __pthread_get_minstack() is marked as weak but extern_weak linkage is
- // not supported on OS X, hence this kludge...
- #[cfg(not(target_os = "linux"))]
- fn min_stack_size(_: *libc::pthread_attr_t) -> libc::size_t {
- PTHREAD_STACK_MIN
- }
-
- extern {
- fn pthread_create(native: *mut libc::pthread_t,
- attr: *libc::pthread_attr_t,
- f: super::StartFn,
- value: *libc::c_void) -> libc::c_int;
- fn pthread_join(native: libc::pthread_t,
- value: **libc::c_void) -> libc::c_int;
- fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int;
- fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int;
- fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
- stack_size: libc::size_t) -> libc::c_int;
- fn pthread_attr_setdetachstate(attr: *mut libc::pthread_attr_t,
- state: libc::c_int) -> libc::c_int;
- fn pthread_detach(thread: libc::pthread_t) -> libc::c_int;
- fn sched_yield() -> libc::c_int;
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::Thread;
-
- #[test]
- fn smoke() { Thread::start(proc (){}).join(); }
-
- #[test]
- fn data() { assert_eq!(Thread::start(proc () { 1 }).join(), 1); }
-
- #[test]
- fn detached() { Thread::spawn(proc () {}) }
-
- #[test]
- fn small_stacks() {
- assert_eq!(42, Thread::start_stack(0, proc () 42).join());
- assert_eq!(42, Thread::start_stack(1, proc () 42).join());
- }
-}
-
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Atomic types
-//!
-//! Atomic types provide primitive shared-memory communication between
-//! threads, and are the building blocks of other concurrent
-//! types.
-//!
-//! This module defines atomic versions of a select number of primitive
-//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
-//! Atomic types present operations that, when used correctly, synchronize
-//! updates between threads.
-//!
-//! Each method takes an `Ordering` which represents the strength of
-//! the memory barrier for that operation. These orderings are the
-//! same as [C++11 atomic orderings][1].
-//!
-//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
-//!
-//! Atomic variables are safe to share between threads (they implement `Share`)
-//! but they do not themselves provide the mechanism for sharing. The most
-//! common way to share an atomic variable is to put it into an `Arc` (an
-//! atomically-reference-counted shared pointer).
-//!
-//! Most atomic types may be stored in static variables, initialized using
-//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
-//! are often used for lazy global initialization.
-//!
-//!
-//! # Examples
-//!
-//! A simple spinlock:
-//!
-//! ```
-//! extern crate sync;
-//!
-//! use sync::Arc;
-//! use std::sync::atomics::{AtomicUint, SeqCst};
-//! use std::task::deschedule;
-//!
-//! fn main() {
-//! let spinlock = Arc::new(AtomicUint::new(1));
-//!
-//! let spinlock_clone = spinlock.clone();
-//! spawn(proc() {
-//! spinlock_clone.store(0, SeqCst);
-//! });
-//!
-//! // Wait for the other task to release the lock
-//! while spinlock.load(SeqCst) != 0 {
-//! // Since tasks may not be preemptive (if they are green threads)
-//! // yield to the scheduler to let the other task run. Low level
-//! // concurrent code needs to take into account Rust's two threading
-//! // models.
-//! deschedule();
-//! }
-//! }
-//! ```
-//!
-//! Transferring a heap object with `AtomicOption`:
-//!
-//! ```
-//! extern crate sync;
-//!
-//! use sync::Arc;
-//! use std::sync::atomics::{AtomicOption, SeqCst};
-//!
-//! fn main() {
-//! struct BigObject;
-//!
-//! let shared_big_object = Arc::new(AtomicOption::empty());
-//!
-//! let shared_big_object_clone = shared_big_object.clone();
-//! spawn(proc() {
-//! let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
-//! if unwrapped_big_object.is_some() {
-//! println!("got a big object from another task");
-//! } else {
-//! println!("other task hasn't sent big object yet");
-//! }
-//! });
-//!
-//! shared_big_object.swap(box BigObject, SeqCst);
-//! }
-//! ```
-//!
-//! Keep a global count of live tasks:
-//!
-//! ```
-//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
-//!
-//! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
-//!
-//! unsafe {
-//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
-//! println!("live tasks: {}", old_task_count + 1);
-//! }
-//! ```
-
-use mem;
-use ops::Drop;
-use option::{Option,Some,None};
-use owned::Box;
-
-pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
-pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
-pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
-pub use core::atomics::fence;
-
-/// An atomic, nullable unique pointer
-///
-/// This can be used as the concurrency primitive for operations that transfer
-/// owned heap objects across tasks.
-#[unsafe_no_drop_flag]
-pub struct AtomicOption<T> {
- p: AtomicUint,
-}
-
-impl<T> AtomicOption<T> {
- /// Create a new `AtomicOption`
- pub fn new(p: Box<T>) -> AtomicOption<T> {
- unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } }
- }
-
- /// Create a new `AtomicOption` that doesn't contain a value
- pub fn empty() -> AtomicOption<T> { AtomicOption { p: AtomicUint::new(0) } }
-
- /// Store a value, returning the old value
- #[inline]
- pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
- let val = unsafe { mem::transmute(val) };
-
- match self.p.swap(val, order) {
- 0 => None,
- n => Some(unsafe { mem::transmute(n) }),
- }
- }
-
- /// Remove the value, leaving the `AtomicOption` empty.
- #[inline]
- pub fn take(&self, order: Ordering) -> Option<Box<T>> {
- unsafe { self.swap(mem::transmute(0), order) }
- }
-
- /// Replace an empty value with a non-empty value.
- ///
- /// Succeeds if the option is `None` and returns `None` if so. If
- /// the option was already `Some`, returns `Some` of the rejected
- /// value.
- #[inline]
- pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
- unsafe {
- let val = mem::transmute(val);
- let expected = mem::transmute(0);
- let oldval = self.p.compare_and_swap(expected, val, order);
- if oldval == expected {
- None
- } else {
- Some(mem::transmute(val))
- }
- }
- }
-
- /// Returns `true` if the `AtomicOption` is empty.
- ///
- /// Be careful: The caller must have some external method of ensuring the
- /// result does not get invalidated by another task after this returns.
- #[inline]
- pub fn is_empty(&self, order: Ordering) -> bool {
- self.p.load(order) as uint == 0
- }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for AtomicOption<T> {
- fn drop(&mut self) {
- let _ = self.take(SeqCst);
- }
-}
-
-#[cfg(test)]
-mod test {
- use option::*;
- use super::*;
-
- #[test]
- fn option_empty() {
- let option: AtomicOption<()> = AtomicOption::empty();
- assert!(option.is_empty(SeqCst));
- }
-
- #[test]
- fn option_swap() {
- let p = AtomicOption::new(box 1);
- let a = box 2;
-
- let b = p.swap(a, SeqCst);
-
- assert!(b == Some(box 1));
- assert!(p.take(SeqCst) == Some(box 2));
- }
-
- #[test]
- fn option_take() {
- let p = AtomicOption::new(box 1);
-
- assert!(p.take(SeqCst) == Some(box 1));
- assert!(p.take(SeqCst) == None);
-
- let p2 = box 2;
- p.swap(p2, SeqCst);
-
- assert!(p.take(SeqCst) == Some(box 2));
- }
-
- #[test]
- fn option_fill() {
- let p = AtomicOption::new(box 1);
- assert!(p.fill(box 2, SeqCst).is_some()); // should fail; shouldn't leak!
- assert!(p.take(SeqCst) == Some(box 1));
-
- assert!(p.fill(box 2, SeqCst).is_none()); // shouldn't fail
- assert!(p.take(SeqCst) == Some(box 2));
- }
-}
-
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A (mostly) lock-free concurrent work-stealing deque
-//!
-//! This module contains an implementation of the Chase-Lev work stealing deque
-//! described in "Dynamic Circular Work-Stealing Deque". The implementation is
-//! heavily based on the pseudocode found in the paper.
-//!
-//! This implementation does not want to have the restriction of a garbage
-//! collector for reclamation of buffers, and instead it uses a shared pool of
-//! buffers. This shared pool is required for correctness in this
-//! implementation.
-//!
-//! The only lock-synchronized portions of this deque are the buffer allocation
-//! and deallocation portions. Otherwise all operations are lock-free.
-//!
-//! # Example
-//!
-//! use std::rt::deque::BufferPool;
-//!
-//! let mut pool = BufferPool::new();
-//! let (mut worker, mut stealer) = pool.deque();
-//!
-//! // Only the worker may push/pop
-//! worker.push(1);
-//! worker.pop();
-//!
-//! // Stealers take data from the other end of the deque
-//! worker.push(1);
-//! stealer.steal();
-//!
-//! // Stealers can be cloned to have many stealers stealing in parallel
-//! worker.push(1);
-//! let mut stealer2 = stealer.clone();
-//! stealer2.steal();
-
-// NB: the "buffer pool" strategy is not done for speed, but rather for
-// correctness. For more info, see the comment on `swap_buffer`
-
-// FIXME: all atomic operations in this module use a SeqCst ordering. That is
-// probably overkill
-
-use alloc::arc::Arc;
-
-use clone::Clone;
-use iter::{range, Iterator};
-use kinds::Send;
-use kinds::marker;
-use mem::{forget, min_align_of, size_of, transmute, overwrite};
-use ops::Drop;
-use option::{Option, Some, None};
-use owned::Box;
-use ptr::RawPtr;
-use ptr;
-use rt::heap::{allocate, deallocate};
-use slice::ImmutableVector;
-use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
-use rt::exclusive::Exclusive;
-use vec::Vec;
-
-// Once the queue is less than 1/K full, then it will be downsized. Note that
-// the deque requires that this number be less than 2.
-static K: int = 4;
-
-// Minimum number of bits that a buffer size should be. No buffer will resize to
-// under this value, and all deques will initially contain a buffer of this
-// size.
-//
-// The size in question is 1 << MIN_BITS
-static MIN_BITS: int = 7;
-
-struct Deque<T> {
- bottom: AtomicInt,
- top: AtomicInt,
- array: AtomicPtr<Buffer<T>>,
- pool: BufferPool<T>,
-}
-
-/// Worker half of the work-stealing deque. This worker has exclusive access to
-/// one side of the deque, and uses `push` and `pop` method to manipulate it.
-///
-/// There may only be one worker per deque.
-pub struct Worker<T> {
- deque: Arc<Deque<T>>,
- noshare: marker::NoShare,
-}
-
-/// The stealing half of the work-stealing deque. Stealers have access to the
-/// opposite end of the deque from the worker, and they only have access to the
-/// `steal` method.
-pub struct Stealer<T> {
- deque: Arc<Deque<T>>,
- noshare: marker::NoShare,
-}
-
-/// When stealing some data, this is an enumeration of the possible outcomes.
-#[deriving(PartialEq, Show)]
-pub enum Stolen<T> {
- /// The deque was empty at the time of stealing
- Empty,
- /// The stealer lost the race for stealing data, and a retry may return more
- /// data.
- Abort,
- /// The stealer has successfully stolen some data.
- Data(T),
-}
-
-/// The allocation pool for buffers used by work-stealing deques. Right now this
-/// structure is used for reclamation of memory after it is no longer in use by
-/// deques.
-///
-/// This data structure is protected by a mutex, but it is rarely used. Deques
-/// will only use this structure when allocating a new buffer or deallocating a
-/// previous one.
-pub struct BufferPool<T> {
- pool: Arc<Exclusive<Vec<Box<Buffer<T>>>>>,
-}
-
-/// An internal buffer used by the chase-lev deque. This structure is actually
-/// implemented as a circular buffer, and is used as the intermediate storage of
-/// the data in the deque.
-///
-/// This type is implemented with *T instead of Vec<T> for two reasons:
-///
-/// 1. There is nothing safe about using this buffer. This easily allows the
-/// same value to be read twice in to rust, and there is nothing to
-/// prevent this. The usage by the deque must ensure that one of the
-/// values is forgotten. Furthermore, we only ever want to manually run
-/// destructors for values in this buffer (on drop) because the bounds
-/// are defined by the deque it's owned by.
-///
-/// 2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
-/// LLVM is probably pretty good at doing this already.
-struct Buffer<T> {
- storage: *T,
- log_size: int,
-}
-
-impl<T: Send> BufferPool<T> {
- /// Allocates a new buffer pool which in turn can be used to allocate new
- /// deques.
- pub fn new() -> BufferPool<T> {
- BufferPool { pool: Arc::new(Exclusive::new(vec!())) }
- }
-
- /// Allocates a new work-stealing deque which will send/receiving memory to
- /// and from this buffer pool.
- pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
- let a = Arc::new(Deque::new(self.clone()));
- let b = a.clone();
- (Worker { deque: a, noshare: marker::NoShare },
- Stealer { deque: b, noshare: marker::NoShare })
- }
-
- fn alloc(&self, bits: int) -> Box<Buffer<T>> {
- unsafe {
- let mut pool = self.pool.lock();
- match pool.iter().position(|x| x.size() >= (1 << bits)) {
- Some(i) => pool.remove(i).unwrap(),
- None => box Buffer::new(bits)
- }
- }
- }
-
- fn free(&self, buf: Box<Buffer<T>>) {
- unsafe {
- let mut pool = self.pool.lock();
- match pool.iter().position(|v| v.size() > buf.size()) {
- Some(i) => pool.insert(i, buf),
- None => pool.push(buf),
- }
- }
- }
-}
-
-impl<T: Send> Clone for BufferPool<T> {
- fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
-}
-
-impl<T: Send> Worker<T> {
- /// Pushes data onto the front of this work queue.
- pub fn push(&self, t: T) {
- unsafe { self.deque.push(t) }
- }
- /// Pops data off the front of the work queue, returning `None` on an empty
- /// queue.
- pub fn pop(&self) -> Option<T> {
- unsafe { self.deque.pop() }
- }
-
- /// Gets access to the buffer pool that this worker is attached to. This can
- /// be used to create more deques which share the same buffer pool as this
- /// deque.
- pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
- &self.deque.pool
- }
-}
-
-impl<T: Send> Stealer<T> {
- /// Steals work off the end of the queue (opposite of the worker's end)
- pub fn steal(&self) -> Stolen<T> {
- unsafe { self.deque.steal() }
- }
-
- /// Gets access to the buffer pool that this stealer is attached to. This
- /// can be used to create more deques which share the same buffer pool as
- /// this deque.
- pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
- &self.deque.pool
- }
-}
-
-impl<T: Send> Clone for Stealer<T> {
- fn clone(&self) -> Stealer<T> {
- Stealer { deque: self.deque.clone(), noshare: marker::NoShare }
- }
-}
-
-// Almost all of this code can be found directly in the paper so I'm not
-// personally going to heavily comment what's going on here.
-
-impl<T: Send> Deque<T> {
- fn new(pool: BufferPool<T>) -> Deque<T> {
- let buf = pool.alloc(MIN_BITS);
- Deque {
- bottom: AtomicInt::new(0),
- top: AtomicInt::new(0),
- array: AtomicPtr::new(unsafe { transmute(buf) }),
- pool: pool,
- }
- }
-
- unsafe fn push(&self, data: T) {
- let mut b = self.bottom.load(SeqCst);
- let t = self.top.load(SeqCst);
- let mut a = self.array.load(SeqCst);
- let size = b - t;
- if size >= (*a).size() - 1 {
- // You won't find this code in the chase-lev deque paper. This is
- // alluded to in a small footnote, however. We always free a buffer
- // when growing in order to prevent leaks.
- a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
- b = self.bottom.load(SeqCst);
- }
- (*a).put(b, data);
- self.bottom.store(b + 1, SeqCst);
- }
-
- unsafe fn pop(&self) -> Option<T> {
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
- let b = b - 1;
- self.bottom.store(b, SeqCst);
- let t = self.top.load(SeqCst);
- let size = b - t;
- if size < 0 {
- self.bottom.store(t, SeqCst);
- return None;
- }
- let data = (*a).get(b);
- if size > 0 {
- self.maybe_shrink(b, t);
- return Some(data);
- }
- if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
- self.bottom.store(t + 1, SeqCst);
- return Some(data);
- } else {
- self.bottom.store(t + 1, SeqCst);
- forget(data); // someone else stole this value
- return None;
- }
- }
-
- unsafe fn steal(&self) -> Stolen<T> {
- let t = self.top.load(SeqCst);
- let old = self.array.load(SeqCst);
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
- let size = b - t;
- if size <= 0 { return Empty }
- if size % (*a).size() == 0 {
- if a == old && t == self.top.load(SeqCst) {
- return Empty
- }
- return Abort
- }
- let data = (*a).get(t);
- if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
- Data(data)
- } else {
- forget(data); // someone else stole this value
- Abort
- }
- }
-
- unsafe fn maybe_shrink(&self, b: int, t: int) {
- let a = self.array.load(SeqCst);
- if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
- self.swap_buffer(b, a, (*a).resize(b, t, -1));
- }
- }
-
- // Helper routine not mentioned in the paper which is used in growing and
- // shrinking buffers to swap in a new buffer into place. As a bit of a
- // recap, the whole point that we need a buffer pool rather than just
- // calling malloc/free directly is that stealers can continue using buffers
- // after this method has called 'free' on it. The continued usage is simply
- // a read followed by a forget, but we must make sure that the memory can
- // continue to be read after we flag this buffer for reclamation.
- unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
- buf: Buffer<T>) -> *mut Buffer<T> {
- let newbuf: *mut Buffer<T> = transmute(box buf);
- self.array.store(newbuf, SeqCst);
- let ss = (*newbuf).size();
- self.bottom.store(b + ss, SeqCst);
- let t = self.top.load(SeqCst);
- if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
- self.bottom.store(b, SeqCst);
- }
- self.pool.free(transmute(old));
- return newbuf;
- }
-}
-
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Deque<T> {
- fn drop(&mut self) {
- let t = self.top.load(SeqCst);
- let b = self.bottom.load(SeqCst);
- let a = self.array.load(SeqCst);
- // Free whatever is leftover in the dequeue, and then move the buffer
- // back into the pool.
- for i in range(t, b) {
- let _: T = unsafe { (*a).get(i) };
- }
- self.pool.free(unsafe { transmute(a) });
- }
-}
-
-#[inline]
-fn buffer_alloc_size<T>(log_size: int) -> uint {
- (1 << log_size) * size_of::<T>()
-}
-
-impl<T: Send> Buffer<T> {
- unsafe fn new(log_size: int) -> Buffer<T> {
- let size = buffer_alloc_size::<T>(log_size);
- let buffer = allocate(size, min_align_of::<T>());
- Buffer {
- storage: buffer as *T,
- log_size: log_size,
- }
- }
-
- fn size(&self) -> int { 1 << self.log_size }
-
- // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
- fn mask(&self) -> int { (1 << self.log_size) - 1 }
-
- unsafe fn elem(&self, i: int) -> *T { self.storage.offset(i & self.mask()) }
-
- // This does not protect against loading duplicate values of the same cell,
- // nor does this clear out the contents contained within. Hence, this is a
- // very unsafe method which the caller needs to treat specially in case a
- // race is lost.
- unsafe fn get(&self, i: int) -> T {
- ptr::read(self.elem(i))
- }
-
- // Unsafe because this unsafely overwrites possibly uninitialized or
- // initialized data.
- unsafe fn put(&self, i: int, t: T) {
- overwrite(self.elem(i) as *mut T, t);
- }
-
- // Again, unsafe because this has incredibly dubious ownership violations.
- // It is assumed that this buffer is immediately dropped.
- unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
- let buf = Buffer::new(self.log_size + delta);
- for i in range(t, b) {
- buf.put(i, self.get(i));
- }
- return buf;
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Buffer<T> {
- fn drop(&mut self) {
- // It is assumed that all buffers are empty on drop.
- let size = buffer_alloc_size::<T>(self.log_size);
- unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
- use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
-
- use mem;
- use owned::Box;
- use rt::thread::Thread;
- use rand;
- use rand::Rng;
- use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
- AtomicUint, INIT_ATOMIC_UINT};
- use vec;
-
- #[test]
- fn smoke() {
- let pool = BufferPool::new();
- let (w, s) = pool.deque();
- assert_eq!(w.pop(), None);
- assert_eq!(s.steal(), Empty);
- w.push(1);
- assert_eq!(w.pop(), Some(1));
- w.push(1);
- assert_eq!(s.steal(), Data(1));
- w.push(1);
- assert_eq!(s.clone().steal(), Data(1));
- }
-
- #[test]
- fn stealpush() {
- static AMT: int = 100000;
- let pool = BufferPool::<int>::new();
- let (w, s) = pool.deque();
- let t = Thread::start(proc() {
- let mut left = AMT;
- while left > 0 {
- match s.steal() {
- Data(i) => {
- assert_eq!(i, 1);
- left -= 1;
- }
- Abort | Empty => {}
- }
- }
- });
-
- for _ in range(0, AMT) {
- w.push(1);
- }
-
- t.join();
- }
-
- #[test]
- fn stealpush_large() {
- static AMT: int = 100000;
- let pool = BufferPool::<(int, int)>::new();
- let (w, s) = pool.deque();
- let t = Thread::start(proc() {
- let mut left = AMT;
- while left > 0 {
- match s.steal() {
- Data((1, 10)) => { left -= 1; }
- Data(..) => fail!(),
- Abort | Empty => {}
- }
- }
- });
-
- for _ in range(0, AMT) {
- w.push((1, 10));
- }
-
- t.join();
- }
-
- fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
- nthreads: int, amt: uint) {
- for _ in range(0, amt) {
- w.push(box 20);
- }
- let mut remaining = AtomicUint::new(amt);
- let unsafe_remaining: *mut AtomicUint = &mut remaining;
-
- let threads = range(0, nthreads).map(|_| {
- let s = s.clone();
- Thread::start(proc() {
- unsafe {
- while (*unsafe_remaining).load(SeqCst) > 0 {
- match s.steal() {
- Data(box 20) => {
- (*unsafe_remaining).fetch_sub(1, SeqCst);
- }
- Data(..) => fail!(),
- Abort | Empty => {}
- }
- }
- }
- })
- }).collect::<Vec<Thread<()>>>();
-
- while remaining.load(SeqCst) > 0 {
- match w.pop() {
- Some(box 20) => { remaining.fetch_sub(1, SeqCst); }
- Some(..) => fail!(),
- None => {}
- }
- }
-
- for thread in threads.move_iter() {
- thread.join();
- }
- }
-
- #[test]
- fn run_stampede() {
- let pool = BufferPool::<Box<int>>::new();
- let (w, s) = pool.deque();
- stampede(w, s, 8, 10000);
- }
-
- #[test]
- fn many_stampede() {
- static AMT: uint = 4;
- let pool = BufferPool::<Box<int>>::new();
- let threads = range(0, AMT).map(|_| {
- let (w, s) = pool.deque();
- Thread::start(proc() {
- stampede(w, s, 4, 10000);
- })
- }).collect::<Vec<Thread<()>>>();
-
- for thread in threads.move_iter() {
- thread.join();
- }
- }
-
- #[test]
- fn stress() {
- static AMT: int = 100000;
- static NTHREADS: int = 8;
- static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
- static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
- let pool = BufferPool::<int>::new();
- let (w, s) = pool.deque();
-
- let threads = range(0, NTHREADS).map(|_| {
- let s = s.clone();
- Thread::start(proc() {
- unsafe {
- loop {
- match s.steal() {
- Data(2) => { HITS.fetch_add(1, SeqCst); }
- Data(..) => fail!(),
- _ if DONE.load(SeqCst) => break,
- _ => {}
- }
- }
- }
- })
- }).collect::<Vec<Thread<()>>>();
-
- let mut rng = rand::task_rng();
- let mut expected = 0;
- while expected < AMT {
- if rng.gen_range(0, 3) == 2 {
- match w.pop() {
- None => {}
- Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
- Some(_) => fail!(),
- }
- } else {
- expected += 1;
- w.push(2);
- }
- }
-
- unsafe {
- while HITS.load(SeqCst) < AMT as uint {
- match w.pop() {
- None => {}
- Some(2) => { HITS.fetch_add(1, SeqCst); },
- Some(_) => fail!(),
- }
- }
- DONE.store(true, SeqCst);
- }
-
- for thread in threads.move_iter() {
- thread.join();
- }
-
- assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
- }
-
- #[test]
- #[ignore(cfg(windows))] // apparently windows scheduling is weird?
- fn no_starvation() {
- static AMT: int = 10000;
- static NTHREADS: int = 4;
- static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
- let pool = BufferPool::<(int, uint)>::new();
- let (w, s) = pool.deque();
-
- let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
- let s = s.clone();
- let unique_box = box AtomicUint::new(0);
- let thread_box = unsafe {
- *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
- };
- (Thread::start(proc() {
- unsafe {
- loop {
- match s.steal() {
- Data((1, 2)) => {
- (*thread_box).fetch_add(1, SeqCst);
- }
- Data(..) => fail!(),
- _ if DONE.load(SeqCst) => break,
- _ => {}
- }
- }
- }
- }), unique_box)
- }));
-
- let mut rng = rand::task_rng();
- let mut myhit = false;
- let mut iter = 0;
- 'outer: loop {
- for _ in range(0, rng.gen_range(0, AMT)) {
- if !myhit && rng.gen_range(0, 3) == 2 {
- match w.pop() {
- None => {}
- Some((1, 2)) => myhit = true,
- Some(_) => fail!(),
- }
- } else {
- w.push((1, 2));
- }
- }
- iter += 1;
-
- debug!("loop iteration {}", iter);
- for (i, slot) in hits.iter().enumerate() {
- let amt = slot.load(SeqCst);
- debug!("thread {}: {}", i, amt);
- if amt == 0 { continue 'outer; }
- }
- if myhit {
- break
- }
- }
-
- unsafe { DONE.store(true, SeqCst); }
-
- for thread in threads.move_iter() {
- thread.join();
- }
- }
-}
--- /dev/null
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * A type representing values that may be computed concurrently and
+ * operations for working with them.
+ *
+ * # Example
+ *
+ * ```rust
+ * use std::sync::Future;
+ * # fn fib(n: uint) -> uint {42};
+ * # fn make_a_sandwich() {};
+ * let mut delayed_fib = Future::spawn(proc() { fib(5000) });
+ * make_a_sandwich();
+ * println!("fib(5000) = {}", delayed_fib.get())
+ * ```
+ */
+
+#![allow(missing_doc)]
+
+use core::prelude::*;
+use core::mem::replace;
+
+use comm::{Receiver, channel};
+use task::spawn;
+
+/// A type encapsulating the result of a computation which may not be complete
+pub struct Future<A> {
+ state: FutureState<A>,
+}
+
+enum FutureState<A> {
+ Pending(proc():Send -> A),
+ Evaluating,
+ Forced(A)
+}
+
+/// Methods on the `future` type
+impl<A:Clone> Future<A> {
+ pub fn get(&mut self) -> A {
+ //! Get the value of the future.
+ (*(self.get_ref())).clone()
+ }
+}
+
+impl<A> Future<A> {
+ /// Gets the value from this future, forcing evaluation.
+ pub fn unwrap(mut self) -> A {
+ self.get_ref();
+ let state = replace(&mut self.state, Evaluating);
+ match state {
+ Forced(v) => v,
+ _ => fail!( "Logic error." ),
+ }
+ }
+
+ pub fn get_ref<'a>(&'a mut self) -> &'a A {
+ /*!
+ * Executes the future's closure and then returns a reference
+ * to the result. The reference lasts as long as
+ * the future.
+ */
+ match self.state {
+ Forced(ref v) => return v,
+ Evaluating => fail!("Recursive forcing of future!"),
+ Pending(_) => {
+ match replace(&mut self.state, Evaluating) {
+ Forced(_) | Evaluating => fail!("Logic error."),
+ Pending(f) => {
+ self.state = Forced(f());
+ self.get_ref()
+ }
+ }
+ }
+ }
+ }
+
+ pub fn from_value(val: A) -> Future<A> {
+ /*!
+ * Create a future from a value.
+ *
+ * The value is immediately available and calling `get` later will
+ * not block.
+ */
+
+ Future {state: Forced(val)}
+ }
+
+ pub fn from_fn(f: proc():Send -> A) -> Future<A> {
+ /*!
+ * Create a future from a function.
+ *
+ * The first time that the value is requested it will be retrieved by
+ * calling the function. Note that this function is a local
+ * function. It is not spawned into another task.
+ */
+
+ Future {state: Pending(f)}
+ }
+}
+
+impl<A:Send> Future<A> {
+ pub fn from_receiver(rx: Receiver<A>) -> Future<A> {
+ /*!
+ * Create a future from a port
+ *
+ * The first time that the value is requested the task will block
+ * waiting for the result to be received on the port.
+ */
+
+ Future::from_fn(proc() {
+ rx.recv()
+ })
+ }
+
+ pub fn spawn(blk: proc():Send -> A) -> Future<A> {
+ /*!
+ * Create a future from a unique closure.
+ *
+ * The closure will be run in a new task and its result used as the
+ * value of the future.
+ */
+
+ let (tx, rx) = channel();
+
+ spawn(proc() {
+ tx.send(blk());
+ });
+
+ Future::from_receiver(rx)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use prelude::*;
+ use sync::Future;
+ use task;
+
+ #[test]
+ fn test_from_value() {
+ let mut f = Future::from_value("snail".to_string());
+ assert_eq!(f.get(), "snail".to_string());
+ }
+
+ #[test]
+ fn test_from_receiver() {
+ let (tx, rx) = channel();
+ tx.send("whale".to_string());
+ let mut f = Future::from_receiver(rx);
+ assert_eq!(f.get(), "whale".to_string());
+ }
+
+ #[test]
+ fn test_from_fn() {
+ let mut f = Future::from_fn(proc() "brail".to_string());
+ assert_eq!(f.get(), "brail".to_string());
+ }
+
+ #[test]
+ fn test_interface_get() {
+ let mut f = Future::from_value("fail".to_string());
+ assert_eq!(f.get(), "fail".to_string());
+ }
+
+ #[test]
+ fn test_interface_unwrap() {
+ let f = Future::from_value("fail".to_string());
+ assert_eq!(f.unwrap(), "fail".to_string());
+ }
+
+ #[test]
+ fn test_get_ref_method() {
+ let mut f = Future::from_value(22);
+ assert_eq!(*f.get_ref(), 22);
+ }
+
+ #[test]
+ fn test_spawn() {
+ let mut f = Future::spawn(proc() "bale".to_string());
+ assert_eq!(f.get(), "bale".to_string());
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_futurefail() {
+ let mut f = Future::spawn(proc() fail!());
+ let _x: String = f.get();
+ }
+
+ #[test]
+ fn test_sendable_future() {
+ let expected = "schlorf";
+ let f = Future::spawn(proc() { expected });
+ task::spawn(proc() {
+ let mut f = f;
+ let actual = f.get();
+ assert_eq!(actual, expected);
+ });
+ }
+}
//! and/or blocking at all, but rather provide the necessary tools to build
//! other types of concurrent primitives.
-pub mod atomics;
-pub mod deque;
-pub mod mpmc_bounded_queue;
-pub mod mpsc_queue;
-pub mod spsc_queue;
+pub use core_sync::{atomics, deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
+pub use core_sync::{Arc, Weak, Mutex, MutexGuard, Condvar, Barrier};
+pub use core_sync::{RWLock, RWLockReadGuard, RWLockWriteGuard};
+pub use core_sync::{Semaphore, SemaphoreGuard};
+pub use core_sync::one::{Once, ONCE_INIT};
+
+pub use self::future::Future;
+pub use self::task_pool::TaskPool;
+
+mod future;
+mod task_pool;
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-#![allow(missing_doc, dead_code)]
-
-// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
-
-use alloc::arc::Arc;
-
-use clone::Clone;
-use kinds::Send;
-use num::next_power_of_two;
-use option::{Option, Some, None};
-use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
-use vec::Vec;
-use ty::Unsafe;
-
-struct Node<T> {
- sequence: AtomicUint,
- value: Option<T>,
-}
-
-struct State<T> {
- pad0: [u8, ..64],
- buffer: Vec<Unsafe<Node<T>>>,
- mask: uint,
- pad1: [u8, ..64],
- enqueue_pos: AtomicUint,
- pad2: [u8, ..64],
- dequeue_pos: AtomicUint,
- pad3: [u8, ..64],
-}
-
-pub struct Queue<T> {
- state: Arc<State<T>>,
-}
-
-impl<T: Send> State<T> {
- fn with_capacity(capacity: uint) -> State<T> {
- let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
- if capacity < 2 {
- 2u
- } else {
- // use next power of 2 as capacity
- next_power_of_two(capacity)
- }
- } else {
- capacity
- };
- let buffer = Vec::from_fn(capacity, |i| {
- Unsafe::new(Node { sequence:AtomicUint::new(i), value: None })
- });
- State{
- pad0: [0, ..64],
- buffer: buffer,
- mask: capacity-1,
- pad1: [0, ..64],
- enqueue_pos: AtomicUint::new(0),
- pad2: [0, ..64],
- dequeue_pos: AtomicUint::new(0),
- pad3: [0, ..64],
- }
- }
-
- fn push(&self, value: T) -> bool {
- let mask = self.mask;
- let mut pos = self.enqueue_pos.load(Relaxed);
- loop {
- let node = self.buffer.get(pos & mask);
- let seq = unsafe { (*node.get()).sequence.load(Acquire) };
- let diff: int = seq as int - pos as int;
-
- if diff == 0 {
- let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
- if enqueue_pos == pos {
- unsafe {
- (*node.get()).value = Some(value);
- (*node.get()).sequence.store(pos+1, Release);
- }
- break
- } else {
- pos = enqueue_pos;
- }
- } else if diff < 0 {
- return false
- } else {
- pos = self.enqueue_pos.load(Relaxed);
- }
- }
- true
- }
-
- fn pop(&self) -> Option<T> {
- let mask = self.mask;
- let mut pos = self.dequeue_pos.load(Relaxed);
- loop {
- let node = self.buffer.get(pos & mask);
- let seq = unsafe { (*node.get()).sequence.load(Acquire) };
- let diff: int = seq as int - (pos + 1) as int;
- if diff == 0 {
- let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
- if dequeue_pos == pos {
- unsafe {
- let value = (*node.get()).value.take();
- (*node.get()).sequence.store(pos + mask + 1, Release);
- return value
- }
- } else {
- pos = dequeue_pos;
- }
- } else if diff < 0 {
- return None
- } else {
- pos = self.dequeue_pos.load(Relaxed);
- }
- }
- }
-}
-
-impl<T: Send> Queue<T> {
- pub fn with_capacity(capacity: uint) -> Queue<T> {
- Queue{
- state: Arc::new(State::with_capacity(capacity))
- }
- }
-
- pub fn push(&self, value: T) -> bool {
- self.state.push(value)
- }
-
- pub fn pop(&self) -> Option<T> {
- self.state.pop()
- }
-}
-
-impl<T: Send> Clone for Queue<T> {
- fn clone(&self) -> Queue<T> {
- Queue { state: self.state.clone() }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
- use super::Queue;
- use native;
-
- #[test]
- fn test() {
- let nthreads = 8u;
- let nmsgs = 1000u;
- let q = Queue::with_capacity(nthreads*nmsgs);
- assert_eq!(None, q.pop());
- let (tx, rx) = channel();
-
- for _ in range(0, nthreads) {
- let q = q.clone();
- let tx = tx.clone();
- native::task::spawn(proc() {
- let q = q;
- for i in range(0, nmsgs) {
- assert!(q.push(i));
- }
- tx.send(());
- });
- }
-
- let mut completion_rxs = vec![];
- for _ in range(0, nthreads) {
- let (tx, rx) = channel();
- completion_rxs.push(rx);
- let q = q.clone();
- native::task::spawn(proc() {
- let q = q;
- let mut i = 0u;
- loop {
- match q.pop() {
- None => {},
- Some(_) => {
- i += 1;
- if i == nmsgs { break }
- }
- }
- }
- tx.send(i);
- });
- }
-
- for rx in completion_rxs.mut_iter() {
- assert_eq!(nmsgs, rx.recv());
- }
- for _ in range(0, nthreads) {
- rx.recv();
- }
- }
-}
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-//! A mostly lock-free multi-producer, single consumer queue.
-//!
-//! This module contains an implementation of a concurrent MPSC queue. This
-//! queue can be used to share data between tasks, and is also used as the
-//! building block of channels in rust.
-//!
-//! Note that the current implementation of this queue has a caveat of the `pop`
-//! method, and see the method for more information about it. Due to this
-//! caveat, this queue may not be appropriate for all use-cases.
-
-// http://www.1024cores.net/home/lock-free-algorithms
-// /queues/non-intrusive-mpsc-node-based-queue
-
-use kinds::Send;
-use mem;
-use ops::Drop;
-use option::{Option, None, Some};
-use owned::Box;
-use ptr::RawPtr;
-use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
-use ty::Unsafe;
-
-/// A result of the `pop` function.
-pub enum PopResult<T> {
- /// Some data has been popped
- Data(T),
- /// The queue is empty
- Empty,
- /// The queue is in an inconsistent state. Popping data should succeed, but
- /// some pushers have yet to make enough progress in order allow a pop to
- /// succeed. It is recommended that a pop() occur "in the near future" in
- /// order to see if the sender has made progress or not
- Inconsistent,
-}
-
-struct Node<T> {
- next: AtomicPtr<Node<T>>,
- value: Option<T>,
-}
-
-/// The multi-producer single-consumer structure. This is not cloneable, but it
-/// may be safely shared so long as it is guaranteed that there is only one
-/// popper at a time (many pushers are allowed).
-pub struct Queue<T> {
- head: AtomicPtr<Node<T>>,
- tail: Unsafe<*mut Node<T>>,
-}
-
-impl<T> Node<T> {
- unsafe fn new(v: Option<T>) -> *mut Node<T> {
- mem::transmute(box Node {
- next: AtomicPtr::new(0 as *mut Node<T>),
- value: v,
- })
- }
-}
-
-impl<T: Send> Queue<T> {
- /// Creates a new queue that is safe to share among multiple producers and
- /// one consumer.
- pub fn new() -> Queue<T> {
- let stub = unsafe { Node::new(None) };
- Queue {
- head: AtomicPtr::new(stub),
- tail: Unsafe::new(stub),
- }
- }
-
- /// Pushes a new value onto this queue.
- pub fn push(&self, t: T) {
- unsafe {
- let n = Node::new(Some(t));
- let prev = self.head.swap(n, AcqRel);
- (*prev).next.store(n, Release);
- }
- }
-
- /// Pops some data from this queue.
- ///
- /// Note that the current implementation means that this function cannot
- /// return `Option<T>`. It is possible for this queue to be in an
- /// inconsistent state where many pushes have succeeded and completely
- /// finished, but pops cannot return `Some(t)`. This inconsistent state
- /// happens when a pusher is pre-empted at an inopportune moment.
- ///
- /// This inconsistent state means that this queue does indeed have data, but
- /// it does not currently have access to it at this time.
- pub fn pop(&self) -> PopResult<T> {
- unsafe {
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
-
- if !next.is_null() {
- *self.tail.get() = next;
- assert!((*tail).value.is_none());
- assert!((*next).value.is_some());
- let ret = (*next).value.take_unwrap();
- let _: Box<Node<T>> = mem::transmute(tail);
- return Data(ret);
- }
-
- if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
- }
- }
-
- /// Attempts to pop data from this queue, but doesn't attempt too hard. This
- /// will canonicalize inconsistent states to a `None` value.
- pub fn casual_pop(&self) -> Option<T> {
- match self.pop() {
- Data(t) => Some(t),
- Empty | Inconsistent => None,
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Queue<T> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.tail.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Relaxed);
- let _: Box<Node<T>> = mem::transmute(cur);
- cur = next;
- }
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
-
- use alloc::arc::Arc;
-
- use native;
- use super::{Queue, Data, Empty, Inconsistent};
-
- #[test]
- fn test_full() {
- let q = Queue::new();
- q.push(box 1);
- q.push(box 2);
- }
-
- #[test]
- fn test() {
- let nthreads = 8u;
- let nmsgs = 1000u;
- let q = Queue::new();
- match q.pop() {
- Empty => {}
- Inconsistent | Data(..) => fail!()
- }
- let (tx, rx) = channel();
- let q = Arc::new(q);
-
- for _ in range(0, nthreads) {
- let tx = tx.clone();
- let q = q.clone();
- native::task::spawn(proc() {
- for i in range(0, nmsgs) {
- q.push(i);
- }
- tx.send(());
- });
- }
-
- let mut i = 0u;
- while i < nthreads * nmsgs {
- match q.pop() {
- Empty | Inconsistent => {},
- Data(_) => { i += 1 }
- }
- }
- drop(tx);
- for _ in range(0, nthreads) {
- rx.recv();
- }
- }
-}
+++ /dev/null
-/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
- * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
- */
-
-// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
-
-//! A single-producer single-consumer concurrent queue
-//!
-//! This module contains the implementation of an SPSC queue which can be used
-//! concurrently between two tasks. This data structure is safe to use and
-//! enforces the semantics that there is one pusher and one popper.
-
-use kinds::Send;
-use mem;
-use ops::Drop;
-use option::{Some, None, Option};
-use owned::Box;
-use ptr::RawPtr;
-use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
-use ty::Unsafe;
-
-// Node within the linked list queue of messages to send
-struct Node<T> {
- // FIXME: this could be an uninitialized T if we're careful enough, and
- // that would reduce memory usage (and be a bit faster).
- // is it worth it?
- value: Option<T>, // nullable for re-use of nodes
- next: AtomicPtr<Node<T>>, // next node in the queue
-}
-
-/// The single-producer single-consumer queue. This structure is not cloneable,
-/// but it can be safely shared in an Arc if it is guaranteed that there
-/// is only one popper and one pusher touching the queue at any one point in
-/// time.
-pub struct Queue<T> {
- // consumer fields
- tail: Unsafe<*mut Node<T>>, // where to pop from
- tail_prev: AtomicPtr<Node<T>>, // where to pop from
-
- // producer fields
- head: Unsafe<*mut Node<T>>, // where to push to
- first: Unsafe<*mut Node<T>>, // where to get new nodes from
- tail_copy: Unsafe<*mut Node<T>>, // between first/tail
-
- // Cache maintenance fields. Additions and subtractions are stored
- // separately in order to allow them to use nonatomic addition/subtraction.
- cache_bound: uint,
- cache_additions: AtomicUint,
- cache_subtractions: AtomicUint,
-}
-
-impl<T: Send> Node<T> {
- fn new() -> *mut Node<T> {
- unsafe {
- mem::transmute(box Node {
- value: None,
- next: AtomicPtr::new(0 as *mut Node<T>),
- })
- }
- }
-}
-
-impl<T: Send> Queue<T> {
- /// Creates a new queue. The producer returned is connected to the consumer
- /// to push all data to the consumer.
- ///
- /// # Arguments
- ///
- /// * `bound` - This queue implementation is implemented with a linked
- /// list, and this means that a push is always a malloc. In
- /// order to amortize this cost, an internal cache of nodes is
- /// maintained to prevent a malloc from always being
- /// necessary. This bound is the limit on the size of the
- /// cache (if desired). If the value is 0, then the cache has
- /// no bound. Otherwise, the cache will never grow larger than
- /// `bound` (although the queue itself could be much larger.
- pub fn new(bound: uint) -> Queue<T> {
- let n1 = Node::new();
- let n2 = Node::new();
- unsafe { (*n1).next.store(n2, Relaxed) }
- Queue {
- tail: Unsafe::new(n2),
- tail_prev: AtomicPtr::new(n1),
- head: Unsafe::new(n2),
- first: Unsafe::new(n1),
- tail_copy: Unsafe::new(n1),
- cache_bound: bound,
- cache_additions: AtomicUint::new(0),
- cache_subtractions: AtomicUint::new(0),
- }
- }
-
- /// Pushes a new value onto this queue. Note that to use this function
- /// safely, it must be externally guaranteed that there is only one pusher.
- pub fn push(&self, t: T) {
- unsafe {
- // Acquire a node (which either uses a cached one or allocates a new
- // one), and then append this to the 'head' node.
- let n = self.alloc();
- assert!((*n).value.is_none());
- (*n).value = Some(t);
- (*n).next.store(0 as *mut Node<T>, Relaxed);
- (**self.head.get()).next.store(n, Release);
- *self.head.get() = n;
- }
- }
-
- unsafe fn alloc(&self) -> *mut Node<T> {
- // First try to see if we can consume the 'first' node for our uses.
- // We try to avoid as many atomic instructions as possible here, so
- // the addition to cache_subtractions is not atomic (plus we're the
- // only one subtracting from the cache).
- if *self.first.get() != *self.tail_copy.get() {
- if self.cache_bound > 0 {
- let b = self.cache_subtractions.load(Relaxed);
- self.cache_subtractions.store(b + 1, Relaxed);
- }
- let ret = *self.first.get();
- *self.first.get() = (*ret).next.load(Relaxed);
- return ret;
- }
- // If the above fails, then update our copy of the tail and try
- // again.
- *self.tail_copy.get() = self.tail_prev.load(Acquire);
- if *self.first.get() != *self.tail_copy.get() {
- if self.cache_bound > 0 {
- let b = self.cache_subtractions.load(Relaxed);
- self.cache_subtractions.store(b + 1, Relaxed);
- }
- let ret = *self.first.get();
- *self.first.get() = (*ret).next.load(Relaxed);
- return ret;
- }
- // If all of that fails, then we have to allocate a new node
- // (there's nothing in the node cache).
- Node::new()
- }
-
- /// Attempts to pop a value from this queue. Remember that to use this type
- /// safely you must ensure that there is only one popper at a time.
- pub fn pop(&self) -> Option<T> {
- unsafe {
- // The `tail` node is not actually a used node, but rather a
- // sentinel from where we should start popping from. Hence, look at
- // tail's next field and see if we can use it. If we do a pop, then
- // the current tail node is a candidate for going into the cache.
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
- if next.is_null() { return None }
- assert!((*next).value.is_some());
- let ret = (*next).value.take();
-
- *self.tail.get() = next;
- if self.cache_bound == 0 {
- self.tail_prev.store(tail, Release);
- } else {
- // FIXME: this is dubious with overflow.
- let additions = self.cache_additions.load(Relaxed);
- let subtractions = self.cache_subtractions.load(Relaxed);
- let size = additions - subtractions;
-
- if size < self.cache_bound {
- self.tail_prev.store(tail, Release);
- self.cache_additions.store(additions + 1, Relaxed);
- } else {
- (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed);
- // We have successfully erased all references to 'tail', so
- // now we can safely drop it.
- let _: Box<Node<T>> = mem::transmute(tail);
- }
- }
- return ret;
- }
- }
-
- /// Attempts to peek at the head of the queue, returning `None` if the queue
- /// has no data currently
- pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
- // This is essentially the same as above with all the popping bits
- // stripped out.
- unsafe {
- let tail = *self.tail.get();
- let next = (*tail).next.load(Acquire);
- if next.is_null() { return None }
- return (*next).value.as_mut();
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T: Send> Drop for Queue<T> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.first.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Relaxed);
- let _n: Box<Node<T>> = mem::transmute(cur);
- cur = next;
- }
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use prelude::*;
-
- use alloc::arc::Arc;
- use native;
-
- use super::Queue;
-
- #[test]
- fn smoke() {
- let q = Queue::new(0);
- q.push(1);
- q.push(2);
- assert_eq!(q.pop(), Some(1));
- assert_eq!(q.pop(), Some(2));
- assert_eq!(q.pop(), None);
- q.push(3);
- q.push(4);
- assert_eq!(q.pop(), Some(3));
- assert_eq!(q.pop(), Some(4));
- assert_eq!(q.pop(), None);
- }
-
- #[test]
- fn drop_full() {
- let q = Queue::new(0);
- q.push(box 1);
- q.push(box 2);
- }
-
- #[test]
- fn smoke_bound() {
- let q = Queue::new(1);
- q.push(1);
- q.push(2);
- assert_eq!(q.pop(), Some(1));
- assert_eq!(q.pop(), Some(2));
- assert_eq!(q.pop(), None);
- q.push(3);
- q.push(4);
- assert_eq!(q.pop(), Some(3));
- assert_eq!(q.pop(), Some(4));
- assert_eq!(q.pop(), None);
- }
-
- #[test]
- fn stress() {
- stress_bound(0);
- stress_bound(1);
-
- fn stress_bound(bound: uint) {
- let a = Arc::new(Queue::new(bound));
- let b = a.clone();
- let (tx, rx) = channel();
- native::task::spawn(proc() {
- for _ in range(0, 100000) {
- loop {
- match b.pop() {
- Some(1) => break,
- Some(_) => fail!(),
- None => {}
- }
- }
- }
- tx.send(());
- });
- for _ in range(0, 100000) {
- a.push(1);
- }
- rx.recv();
- }
- }
-}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(missing_doc)]
+
+//! A task pool abstraction. Useful for achieving predictable CPU
+//! parallelism.
+
+use core::prelude::*;
+
+use task;
+use task::spawn;
+use vec::Vec;
+use comm::{channel, Sender};
+
+enum Msg<T> {
+ Execute(proc(&T):Send),
+ Quit
+}
+
+pub struct TaskPool<T> {
+ channels: Vec<Sender<Msg<T>>>,
+ next_index: uint,
+}
+
+#[unsafe_destructor]
+impl<T> Drop for TaskPool<T> {
+ fn drop(&mut self) {
+ for channel in self.channels.mut_iter() {
+ channel.send(Quit);
+ }
+ }
+}
+
+impl<T> TaskPool<T> {
+ /// Spawns a new task pool with `n_tasks` tasks. If the `sched_mode`
+ /// is None, the tasks run on this scheduler; otherwise, they run on a
+ /// new scheduler with the given mode. The provided `init_fn_factory`
+ /// returns a function which, given the index of the task, should return
+ /// local data to be kept around in that task.
+ pub fn new(n_tasks: uint,
+ init_fn_factory: || -> proc(uint):Send -> T)
+ -> TaskPool<T> {
+ assert!(n_tasks >= 1);
+
+ let channels = Vec::from_fn(n_tasks, |i| {
+ let (tx, rx) = channel::<Msg<T>>();
+ let init_fn = init_fn_factory();
+
+ let task_body = proc() {
+ let local_data = init_fn(i);
+ loop {
+ match rx.recv() {
+ Execute(f) => f(&local_data),
+ Quit => break
+ }
+ }
+ };
+
+ // Run on this scheduler.
+ task::spawn(task_body);
+
+ tx
+ });
+
+ return TaskPool {
+ channels: channels,
+ next_index: 0,
+ };
+ }
+
+ /// Executes the function `f` on a task in the pool. The function
+ /// receives a reference to the local data returned by the `init_fn`.
+ pub fn execute(&mut self, f: proc(&T):Send) {
+ self.channels.get(self.next_index).send(Execute(f));
+ self.next_index += 1;
+ if self.next_index == self.channels.len() { self.next_index = 0; }
+ }
+}
+
+#[test]
+fn test_task_pool() {
+ let f: || -> proc(uint):Send -> uint = || {
+ let g: proc(uint):Send -> uint = proc(i) i;
+ g
+ };
+ let mut pool = TaskPool::new(4, f);
+ for _ in range(0, 8) {
+ pool.execute(proc(i) println!("Hello from thread {}!", *i));
+ }
+}
#[cfg(test)]
mod tests {
+ use prelude::*;
use super::*;
- use str::StrAllocating;
#[test]
fn test_simple_types() {
#[test]
fn test_vectors() {
- let x: ~[int] = box [];
+ let x: Vec<int> = vec![];
assert_eq!(x.to_str(), "[]".to_string());
- assert_eq!((box [1]).to_str(), "[1]".to_string());
- assert_eq!((box [1, 2, 3]).to_str(), "[1, 2, 3]".to_string());
- assert!((box [box [], box [1], box [1, 1]]).to_str() ==
+ assert_eq!((vec![1]).to_str(), "[1]".to_string());
+ assert_eq!((vec![1, 2, 3]).to_str(), "[1, 2, 3]".to_string());
+ assert!((vec![vec![], vec![1], vec![1, 1]]).to_str() ==
"[[], [1], [1, 1]]".to_string());
}
}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Atomic types
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent
+//! types.
+//!
+//! This module defines atomic versions of a select number of primitive
+//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [C++11 atomic orderings][1].
+//!
+//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
+//!
+//! Atomic variables are safe to share between threads (they implement `Share`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
+//! are often used for lazy global initialization.
+//!
+//! # Examples
+//!
+//! A simple spinlock:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomics::{AtomicUint, SeqCst};
+//! use std::task::deschedule;
+//!
+//! fn main() {
+//! let spinlock = Arc::new(AtomicUint::new(1));
+//!
+//! let spinlock_clone = spinlock.clone();
+//! spawn(proc() {
+//! spinlock_clone.store(0, SeqCst);
+//! });
+//!
+//! // Wait for the other task to release the lock
+//! while spinlock.load(SeqCst) != 0 {
+//! // Since tasks may not be preemptive (if they are green threads)
+//! // yield to the scheduler to let the other task run. Low level
+//! // concurrent code needs to take into account Rust's two threading
+//! // models.
+//! deschedule();
+//! }
+//! }
+//! ```
+//!
+//! Transferring a heap object with `AtomicOption`:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomics::{AtomicOption, SeqCst};
+//!
+//! fn main() {
+//! struct BigObject;
+//!
+//! let shared_big_object = Arc::new(AtomicOption::empty());
+//!
+//! let shared_big_object_clone = shared_big_object.clone();
+//! spawn(proc() {
+//! let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
+//! if unwrapped_big_object.is_some() {
+//! println!("got a big object from another task");
+//! } else {
+//! println!("other task hasn't sent big object yet");
+//! }
+//! });
+//!
+//! shared_big_object.swap(box BigObject, SeqCst);
+//! }
+//! ```
+//!
+//! Keep a global count of live tasks:
+//!
+//! ```
+//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//!
+//! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
+//!
+//! unsafe {
+//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
+//! println!("live tasks: {}", old_task_count + 1);
+//! }
+//! ```
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::mem;
+
+pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
+pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
+pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomics::fence;
+
+/// An atomic, nullable unique pointer
+///
+/// This can be used as the concurrency primitive for operations that transfer
+/// owned heap objects across tasks.
+#[unsafe_no_drop_flag]
+pub struct AtomicOption<T> {
+ p: AtomicUint,
+}
+
+impl<T> AtomicOption<T> {
+ /// Create a new `AtomicOption`
+ pub fn new(p: Box<T>) -> AtomicOption<T> {
+ unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } }
+ }
+
+ /// Create a new `AtomicOption` that doesn't contain a value
+ pub fn empty() -> AtomicOption<T> { AtomicOption { p: AtomicUint::new(0) } }
+
+ /// Store a value, returning the old value
+ #[inline]
+ pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
+ let val = unsafe { mem::transmute(val) };
+
+ match self.p.swap(val, order) {
+ 0 => None,
+ n => Some(unsafe { mem::transmute(n) }),
+ }
+ }
+
+ /// Remove the value, leaving the `AtomicOption` empty.
+ #[inline]
+ pub fn take(&self, order: Ordering) -> Option<Box<T>> {
+ unsafe { self.swap(mem::transmute(0), order) }
+ }
+
+ /// Replace an empty value with a non-empty value.
+ ///
+ /// Succeeds if the option is `None` and returns `None` if so. If
+ /// the option was already `Some`, returns `Some` of the rejected
+ /// value.
+ #[inline]
+ pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
+ unsafe {
+ let val = mem::transmute(val);
+ let expected = mem::transmute(0);
+ let oldval = self.p.compare_and_swap(expected, val, order);
+ if oldval == expected {
+ None
+ } else {
+ Some(mem::transmute(val))
+ }
+ }
+ }
+
+ /// Returns `true` if the `AtomicOption` is empty.
+ ///
+ /// Be careful: The caller must have some external method of ensuring the
+ /// result does not get invalidated by another task after this returns.
+ #[inline]
+ pub fn is_empty(&self, order: Ordering) -> bool {
+ self.p.load(order) as uint == 0
+ }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for AtomicOption<T> {
+ fn drop(&mut self) {
+ let _ = self.take(SeqCst);
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::prelude::*;
+ use super::*;
+
+ #[test]
+ fn option_empty() {
+ let option: AtomicOption<()> = AtomicOption::empty();
+ assert!(option.is_empty(SeqCst));
+ }
+
+ #[test]
+ fn option_swap() {
+ let p = AtomicOption::new(box 1);
+ let a = box 2;
+
+ let b = p.swap(a, SeqCst);
+
+ assert!(b == Some(box 1));
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+
+ #[test]
+ fn option_take() {
+ let p = AtomicOption::new(box 1);
+
+ assert!(p.take(SeqCst) == Some(box 1));
+ assert!(p.take(SeqCst) == None);
+
+ let p2 = box 2;
+ p.swap(p2, SeqCst);
+
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+
+ #[test]
+ fn option_fill() {
+ let p = AtomicOption::new(box 1);
+ assert!(p.fill(box 2, SeqCst).is_some()); // should fail; shouldn't leak!
+ assert!(p.take(SeqCst) == Some(box 1));
+
+ assert!(p.fill(box 2, SeqCst).is_none()); // shouldn't fail
+ assert!(p.take(SeqCst) == Some(box 2));
+ }
+}
+++ /dev/null
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!
-
-Higher level communication abstractions.
-
-*/
-
-#![allow(missing_doc)]
-
-use std::comm;
-
-/// An extension of `pipes::stream` that allows both sending and receiving.
-pub struct DuplexStream<S, R> {
- tx: Sender<S>,
- rx: Receiver<R>,
-}
-
-/// Creates a bidirectional stream.
-pub fn duplex<S: Send, R: Send>() -> (DuplexStream<S, R>, DuplexStream<R, S>) {
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- (DuplexStream { tx: tx1, rx: rx2 },
- DuplexStream { tx: tx2, rx: rx1 })
-}
-
-// Allow these methods to be used without import:
-impl<S:Send,R:Send> DuplexStream<S, R> {
- pub fn send(&self, x: S) {
- self.tx.send(x)
- }
- pub fn send_opt(&self, x: S) -> Result<(), S> {
- self.tx.send_opt(x)
- }
- pub fn recv(&self) -> R {
- self.rx.recv()
- }
- pub fn try_recv(&self) -> Result<R, comm::TryRecvError> {
- self.rx.try_recv()
- }
- pub fn recv_opt(&self) -> Result<R, ()> {
- self.rx.recv_opt()
- }
-}
-
-#[cfg(test)]
-mod test {
- use comm::{duplex};
-
-
- #[test]
- pub fn DuplexStream1() {
- let (left, right) = duplex();
-
- left.send("abc".to_string());
- right.send(123);
-
- assert!(left.recv() == 123);
- assert!(right.recv() == "abc".to_string());
- }
-}
--- /dev/null
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+
+Higher level communication abstractions.
+
+*/
+
+#![allow(missing_doc)]
+
+use core::prelude::*;
+
+use comm;
+use comm::{Sender, Receiver, channel};
+
+/// An extension of `pipes::stream` that allows both sending and receiving.
+pub struct DuplexStream<S, R> {
+ tx: Sender<S>,
+ rx: Receiver<R>,
+}
+
+/// Creates a bidirectional stream.
+pub fn duplex<S: Send, R: Send>() -> (DuplexStream<S, R>, DuplexStream<R, S>) {
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ (DuplexStream { tx: tx1, rx: rx2 },
+ DuplexStream { tx: tx2, rx: rx1 })
+}
+
+// Allow these methods to be used without import:
+impl<S:Send,R:Send> DuplexStream<S, R> {
+ pub fn send(&self, x: S) {
+ self.tx.send(x)
+ }
+ pub fn send_opt(&self, x: S) -> Result<(), S> {
+ self.tx.send_opt(x)
+ }
+ pub fn recv(&self) -> R {
+ self.rx.recv()
+ }
+ pub fn try_recv(&self) -> Result<R, comm::TryRecvError> {
+ self.rx.try_recv()
+ }
+ pub fn recv_opt(&self) -> Result<R, ()> {
+ self.rx.recv_opt()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::prelude::*;
+ use comm::{duplex};
+
+ #[test]
+ pub fn duplex_stream_1() {
+ let (left, right) = duplex();
+
+ left.send("abc".to_string());
+ right.send(123);
+
+ assert!(left.recv() == 123);
+ assert!(right.recv() == "abc".to_string());
+ }
+}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Communication primitives for concurrent tasks
+//!
+//! Rust makes it very difficult to share data among tasks to prevent race
+//! conditions and to improve parallelism, but there is often a need for
+//! communication between concurrent tasks. The primitives defined in this
+//! module are the building blocks for synchronization in rust.
+//!
+//! This module provides message-based communication over channels, concretely
+//! defined among three types:
+//!
+//! * `Sender`
+//! * `SyncSender`
+//! * `Receiver`
+//!
+//! A `Sender` or `SyncSender` is used to send data to a `Receiver`. Both
+//! senders are clone-able such that many tasks can send simultaneously to one
+//! receiver. These channels are *task blocking*, not *thread blocking*. This
+//! means that if one task is blocked on a channel, other tasks can continue to
+//! make progress.
+//!
+//! Rust channels come in one of two flavors:
+//!
+//! 1. An asynchronous, infinitely buffered channel. The `channel()` function
+//! will return a `(Sender, Receiver)` tuple where all sends will be
+//! **asynchronous** (they never block). The channel conceptually has an
+//! infinite buffer.
+//!
+//! 2. A synchronous, bounded channel. The `sync_channel()` function will return
+//! a `(SyncSender, Receiver)` tuple where the storage for pending messages
+//! is a pre-allocated buffer of a fixed size. All sends will be
+//! **synchronous** by blocking until there is buffer space available. Note
+//! that a bound of 0 is allowed, causing the channel to become a
+//! "rendezvous" channel where each sender atomically hands off a message to
+//! a receiver.
+//!
+//! ## Failure Propagation
+//!
+//! In addition to being a core primitive for communicating in rust, channels
+//! are the points at which failure is propagated among tasks. Whenever one
+//! half of a channel is closed, the other half will have its next operation
+//! `fail!`. The purpose of this is to allow propagation of failure among tasks
+//! that are linked to one another via channels.
+//!
+//! There are methods on both senders and receivers to perform their
+//! respective operations without failing, however.
+//!
+//! ## Runtime Requirements
+//!
+//! The channel types defined in this module generally have very few runtime
+//! requirements in order to operate. The major requirement they have is for a
+//! local rust `Task` to be available if any *blocking* operation is performed.
+//!
+//! If a local `Task` is not available (for example in an FFI callback), then
+//! the `send` operation on a `Sender` is safe (as is `send_opt`), as is the
+//! `try_send` method on a `SyncSender`, but no other operations are
+//! guaranteed to be safe.
+//!
+//! Additionally, channels can interoperate between runtimes. If one task in a
+//! program is running on libnative and another is running on libgreen, they can
+//! still communicate with one another using channels.
+//!
+//! # Example
+//!
+//! Simple usage:
+//!
+//! ```
+//! // Create a simple streaming channel
+//! let (tx, rx) = channel();
+//! spawn(proc() {
+//! tx.send(10);
+//! });
+//! assert_eq!(rx.recv(), 10);
+//! ```
+//!
+//! Shared usage:
+//!
+//! ```
+//! // Create a shared channel which can be sent along from many tasks
+//! let (tx, rx) = channel();
+//! for i in range(0, 10) {
+//! let tx = tx.clone();
+//! spawn(proc() {
+//! tx.send(i);
+//! })
+//! }
+//!
+//! for _ in range(0, 10) {
+//! let j = rx.recv();
+//! assert!(0 <= j && j < 10);
+//! }
+//! ```
+//!
+//! Propagating failure:
+//!
+//! ```should_fail
+//! // The call to recv() will fail!() because the channel has already hung
+//! // up (or been deallocated)
+//! let (tx, rx) = channel::<int>();
+//! drop(tx);
+//! rx.recv();
+//! ```
+//!
+//! Synchronous channels:
+//!
+//! ```
+//! let (tx, rx) = sync_channel(0);
+//! spawn(proc() {
+//! // This will wait for the parent task to start receiving
+//! tx.send(53);
+//! });
+//! rx.recv();
+//! ```
+
+// A description of how Rust's channel implementation works
+//
+// Channels are supposed to be the basic building block for all other
+// concurrent primitives that are used in Rust. As a result, the channel type
+// needs to be highly optimized, flexible, and broad enough for use everywhere.
+//
+// The choice of implementation of all channels is to be built on lock-free data
+// structures. The channels themselves are then consequently also lock-free data
+// structures. As always with lock-free code, this is a very "here be dragons"
+// territory, especially because I'm unaware of any academic papers which have
+// gone into great length about channels of these flavors.
+//
+// ## Flavors of channels
+//
+// From the perspective of a consumer of this library, there is only one flavor
+// of channel. This channel can be used as a stream and cloned to allow multiple
+// senders. Under the hood, however, there are actually three flavors of
+// channels in play.
+//
+// * Oneshots - these channels are highly optimized for the one-send use case.
+// They contain as few atomics as possible and involve one and
+// exactly one allocation.
+// * Streams - these channels are optimized for the non-shared use case. They
+// use a different concurrent queue which is more tailored for this
+// use case. The initial allocation of this flavor of channel is not
+// optimized.
+// * Shared - this is the most general form of channel that this module offers,
+// a channel with multiple senders. This type is as optimized as it
+// can be, but the previous two types mentioned are much faster for
+// their use-cases.
+//
+// ## Concurrent queues
+//
+// The basic idea of Rust's Sender/Receiver types is that send() never blocks, but
+// recv() obviously blocks. This means that under the hood there must be some
+// shared and concurrent queue holding all of the actual data.
+//
+// With two flavors of channels, two flavors of queues are also used. We have
+// chosen to use queues from a well-known author which are abbreviated as SPSC
+// and MPSC (single producer, single consumer and multiple producer, single
+// consumer). SPSC queues are used for streams while MPSC queues are used for
+// shared channels.
+//
+// ### SPSC optimizations
+//
+// The SPSC queue found online is essentially a linked list of nodes where one
+// half of the nodes are the "queue of data" and the other half of nodes are a
+// cache of unused nodes. The unused nodes are used such that an allocation is
+// not required on every push() and a free doesn't need to happen on every
+// pop().
+//
+// As found online, however, the cache of nodes is of an infinite size. This
+// means that if a channel at one point in its life had 50k items in the queue,
+// then the queue will always have the capacity for 50k items. I believed that
+// this was an unnecessary limitation of the implementation, so I have altered
+// the queue to optionally have a bound on the cache size.
+//
+// By default, streams will have an unbounded SPSC queue with a small-ish cache
+// size. The hope is that the cache is still large enough to have very fast
+// send() operations while not too large such that millions of channels can
+// coexist at once.
+//
+// ### MPSC optimizations
+//
+// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
+// a linked list under the hood to earn its unboundedness, but I have not put
+// forth much effort into having a cache of nodes similar to the SPSC queue.
+//
+// For now, I believe that this is "ok" because shared channels are not the most
+// common type, but soon we may wish to revisit this queue choice and determine
+// another candidate for backend storage of shared channels.
+//
+// ## Overview of the Implementation
+//
+// Now that there's a little background on the concurrent queues used, it's
+// worth going into much more detail about the channels themselves. The basic
+// pseudocode for a send/recv are:
+//
+//
+// send(t) recv()
+// queue.push(t) return if queue.pop()
+// if increment() == -1 deschedule {
+// wakeup() if decrement() > 0
+// cancel_deschedule()
+// }
+// queue.pop()
+//
+// As mentioned before, there are no locks in this implementation, only atomic
+// instructions are used.
+//
+// ### The internal atomic counter
+//
+// Every channel has a shared counter with each half to keep track of the size
+// of the queue. This counter is used to abort descheduling by the receiver and
+// to know when to wake up on the sending side.
+//
+// As seen in the pseudocode, senders will increment this count and receivers
+// will decrement the count. The theory behind this is that if a sender sees a
+// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
+// then it doesn't need to block.
+//
+// The recv() method has a beginning call to pop(), and if successful, it needs
+// to decrement the count. It is a crucial implementation detail that this
+// decrement does *not* happen to the shared counter. If this were the case,
+// then it would be possible for the counter to be very negative when there were
+// no receivers waiting, in which case the senders would have to determine when
+// it was actually appropriate to wake up a receiver.
+//
+// Instead, the "steal count" is kept track of separately (not atomically
+// because it's only used by receivers), and then the decrement() call when
+// descheduling will lump in all of the recent steals into one large decrement.
+//
+// The implication of this is that if a sender sees a -1 count, then there's
+// guaranteed to be a waiter waiting!
+//
+// ## Native Implementation
+//
+// A major goal of these channels is to work seamlessly on and off the runtime.
+// All of the previous race conditions have been worded in terms of
+// scheduler-isms (which is obviously not available without the runtime).
+//
+// For now, native usage of channels (off the runtime) will fall back onto
+// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
+// is still entirely lock-free, the "deschedule" blocks above are surrounded by
+// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
+// condition variable.
+//
+// ## Select
+//
+// Being able to support selection over channels has greatly influenced this
+// design, and not only does selection need to work inside the runtime, but also
+// outside the runtime.
+//
+// The implementation is fairly straightforward. The goal of select() is not to
+// return some data, but only to return which channel can receive data without
+// blocking. The implementation is essentially the entire blocking procedure
+// followed by an increment as soon as it's woken up. The cancellation procedure
+// involves an increment and swapping out of to_wake to acquire ownership of the
+// task to unblock.
+//
+// Sadly this current implementation requires multiple allocations, so I have
+// seen the throughput of select() be much worse than it should be. I do not
+// believe that there is anything fundamental which needs to change about these
+// channels, however, in order to support a more efficient select().
+//
+// # Conclusion
+//
+// And now that you've seen all the races that I found and attempted to fix,
+// here's the code for you to find some more!
+
+use core::prelude::*;
+
+use alloc::arc::Arc;
+use alloc::owned::Box;
+use core::cell::Cell;
+use core::kinds::marker;
+use core::mem;
+use core::ty::Unsafe;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+
+pub use comm::select::{Select, Handle};
+pub use comm::duplex::{DuplexStream, duplex};
+
+// Generates a test that is run under both the green (uv) and native runtimes.
+// The test body `$b` becomes the body of `f()`, and any trailing attributes
+// (e.g. `#[should_fail]`) are applied to both generated `#[test]` functions.
+// The native variant runs `f()` inside a freshly spawned native task and
+// waits for it to finish over a channel.
+macro_rules! test (
+    { fn $name:ident() $b:block $(#[$a:meta])*} => (
+        mod $name {
+            #![allow(unused_imports)]
+
+            use std::prelude::*;
+
+            use native;
+            use comm::*;
+            use super::*;
+            use super::super::*;
+            use std::task;
+
+            fn f() $b
+
+            $(#[$a])* #[test] fn uv() { f() }
+            $(#[$a])* #[test] fn native() {
+                use native;
+                let (tx, rx) = channel();
+                native::task::spawn(proc() { tx.send(f()) });
+                rx.recv();
+            }
+        }
+    )
+)
+
+mod duplex;
+mod oneshot;
+mod select;
+mod shared;
+mod stream;
+mod sync;
+
+// Number of send/receive operations between voluntary yields (see the
+// counters consulted in `Sender::send_opt` and `Receiver::try_recv`); this
+// prevents a task that sends or spins on try_recv in a tight loop from
+// starving other tasks.
+//
+// Use a power of 2 to allow LLVM to optimize to something that's not a
+// division, this is hit pretty regularly.
+static RESCHED_FREQ: int = 256;
+
+/// The receiving-half of Rust's channel type. This half can only be owned by
+/// one task
+pub struct Receiver<T> {
+    // Which of the four channel flavors currently backs this receiver. It is
+    // swapped in place when a oneshot/stream channel is upgraded (see the
+    // `Upgraded` arms in try_recv/recv_opt).
+    inner: Unsafe<Flavor<T>>,
+    // Count of receive operations, used to yield every RESCHED_FREQ calls.
+    receives: Cell<uint>,
+    // can't share in an arc
+    marker: marker::NoShare,
+}
+
+/// An iterator over messages on a receiver, this iterator will block
+/// whenever `next` is called, waiting for a new message, and `None` will be
+/// returned when the corresponding channel has hung up.
+pub struct Messages<'a, T> {
+    // Borrowed receiver that each call to `next` reads from via `recv_opt`.
+    rx: &'a Receiver<T>
+}
+
+/// The sending-half of Rust's asynchronous channel type. This half can only be
+/// owned by one task, but it can be cloned to send to other tasks.
+pub struct Sender<T> {
+    // Backing channel flavor; upgraded in place from Oneshot to Stream on a
+    // second send, or to Shared when the sender is cloned.
+    inner: Unsafe<Flavor<T>>,
+    // Count of send operations, used to yield every RESCHED_FREQ calls.
+    sends: Cell<uint>,
+    // can't share in an arc
+    marker: marker::NoShare,
+}
+
+/// The sending-half of Rust's synchronous channel type. This half can only be
+/// owned by one task, but it can be cloned to send to other tasks.
+pub struct SyncSender<T> {
+    // Unlike `Sender`, a sync sender always points directly at a
+    // `sync::Packet`; there is no flavor upgrading for synchronous channels.
+    inner: Arc<Unsafe<sync::Packet<T>>>,
+    // can't share in an arc
+    marker: marker::NoShare,
+}
+
+/// This enumeration is the list of the possible reasons that try_recv could not
+/// return data when called.
+#[deriving(PartialEq, Clone, Show)]
+pub enum TryRecvError {
+    /// This channel is currently empty, but the sender(s) have not yet
+    /// disconnected, so data may yet become available.
+    Empty,
+    /// This channel's sending half has become disconnected, and there will
+    /// never be any more data received on this channel
+    Disconnected,
+}
+
+/// This enumeration is the list of the possible error outcomes for the
+/// `SyncSender::try_send` method.
+///
+/// Both variants return ownership of the un-sent message to the caller.
+#[deriving(PartialEq, Clone, Show)]
+pub enum TrySendError<T> {
+    /// The data could not be sent on the channel because it would require that
+    /// the callee block to send the data.
+    ///
+    /// If this is a buffered channel, then the buffer is full at this time. If
+    /// this is not a buffered channel, then there is no receiver available to
+    /// acquire the data.
+    Full(T),
+    /// This channel's receiving half has disconnected, so the data could not be
+    /// sent. The data is returned back to the callee in this case.
+    RecvDisconnected(T),
+}
+
+// The internal backing implementation of a channel. A channel starts out as a
+// `Oneshot` and is upgraded in place to `Stream` (on a second send) or to
+// `Shared` (when a sender is cloned). `Sync` channels are created as such by
+// `sync_channel` and never change flavor.
+enum Flavor<T> {
+    Oneshot(Arc<Unsafe<oneshot::Packet<T>>>),
+    Stream(Arc<Unsafe<stream::Packet<T>>>),
+    Shared(Arc<Unsafe<shared::Packet<T>>>),
+    Sync(Arc<Unsafe<sync::Packet<T>>>),
+}
+
+// Internal accessor trait shared by `Sender` and `Receiver` to reach the
+// `Flavor` stored in their `Unsafe` cell. The `mut_inner`/`inner` methods are
+// unsafe because they hand out references derived from a raw pointer with no
+// aliasing enforcement; callers rely on each half being owned by one task.
+#[doc(hidden)]
+trait UnsafeFlavor<T> {
+    fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>>;
+    unsafe fn mut_inner<'a>(&'a self) -> &'a mut Flavor<T> {
+        &mut *self.inner_unsafe().get()
+    }
+    unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
+        &*self.inner_unsafe().get()
+    }
+}
+// Expose the sender's flavor cell to the `UnsafeFlavor` helper methods.
+impl<T> UnsafeFlavor<T> for Sender<T> {
+    fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>> {
+        &self.inner
+    }
+}
+// Expose the receiver's flavor cell to the `UnsafeFlavor` helper methods.
+impl<T> UnsafeFlavor<T> for Receiver<T> {
+    fn inner_unsafe<'a>(&'a self) -> &'a Unsafe<Flavor<T>> {
+        &self.inner
+    }
+}
+
+/// Creates a new asynchronous channel, returning the sender/receiver halves.
+///
+/// All data sent on the sender will become available on the receiver, and no
+/// send will block the calling task (this channel has an "infinite buffer").
+///
+/// # Example
+///
+/// ```
+/// let (tx, rx) = channel();
+///
+/// // Spawn off an expensive computation
+/// spawn(proc() {
+/// # fn expensive_computation() {}
+///     tx.send(expensive_computation());
+/// });
+///
+/// // Do some useful work for awhile
+///
+/// // Let's see what that answer was
+/// println!("{}", rx.recv());
+/// ```
+pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
+    // Channels always start in the cheap oneshot state; they are upgraded
+    // in place to stream/shared flavors if more sends or clones happen.
+    let a = Arc::new(Unsafe::new(oneshot::Packet::new()));
+    (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
+}
+
+/// Creates a new synchronous, bounded channel.
+///
+/// Like asynchronous channels, the `Receiver` will block until a message
+/// becomes available. These channels differ greatly in the semantics of the
+/// sender from asynchronous channels, however.
+///
+/// This channel has an internal buffer on which messages will be queued. When
+/// the internal buffer becomes full, future sends will *block* waiting for the
+/// buffer to open up. Note that a buffer size of 0 is valid, in which case this
+/// becomes a "rendezvous channel" where each send will not return until a recv
+/// is paired with it.
+///
+/// As with asynchronous channels, all senders will fail in `send` if the
+/// `Receiver` has been destroyed.
+///
+/// # Example
+///
+/// ```
+/// let (tx, rx) = sync_channel(1);
+///
+/// // this returns immediately
+/// tx.send(1);
+///
+/// spawn(proc() {
+///     // this will block until the previous message has been received
+///     tx.send(2);
+/// });
+///
+/// assert_eq!(rx.recv(), 1);
+/// assert_eq!(rx.recv(), 2);
+/// ```
+pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
+    // Sync channels go straight to the `Sync` flavor; they never upgrade.
+    let a = Arc::new(Unsafe::new(sync::Packet::new(bound)));
+    (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Sender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> Sender<T> {
+    // Wrap a flavor into a fresh sender with a zeroed yield counter.
+    fn new(inner: Flavor<T>) -> Sender<T> {
+        Sender { inner: Unsafe::new(inner), sends: Cell::new(0), marker: marker::NoShare }
+    }
+
+    /// Sends a value along this channel to be received by the corresponding
+    /// receiver.
+    ///
+    /// Rust channels are infinitely buffered so this method will never block.
+    ///
+    /// # Failure
+    ///
+    /// This function will fail if the other end of the channel has hung up.
+    /// This means that if the corresponding receiver has fallen out of scope,
+    /// this function will trigger a fail message saying that a message is
+    /// being sent on a closed channel.
+    ///
+    /// Note that if this function does *not* fail, it does not mean that the
+    /// data will be successfully received. All sends are placed into a queue,
+    /// so it is possible for a send to succeed (the other end is alive), but
+    /// then the other end could immediately disconnect.
+    ///
+    /// The purpose of this functionality is to propagate failure among tasks.
+    /// If failure is not desired, then consider using the `send_opt` method
+    pub fn send(&self, t: T) {
+        if self.send_opt(t).is_err() {
+            fail!("sending on a closed channel");
+        }
+    }
+
+    /// Attempts to send a value on this channel, returning it back if it could
+    /// not be sent.
+    ///
+    /// A successful send occurs when it is determined that the other end of
+    /// the channel has not hung up already. An unsuccessful send would be one
+    /// where the corresponding receiver has already been deallocated. Note
+    /// that a return value of `Err` means that the data will never be
+    /// received, but a return value of `Ok` does *not* mean that the data
+    /// will be received. It is possible for the corresponding receiver to
+    /// hang up immediately after this function returns `Ok`.
+    ///
+    /// Like `send`, this method will never block.
+    ///
+    /// # Failure
+    ///
+    /// This method will never fail, it will return the message back to the
+    /// caller if the other end is disconnected
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let (tx, rx) = channel();
+    ///
+    /// // This send is always successful
+    /// assert_eq!(tx.send_opt(1), Ok(()));
+    ///
+    /// // This send will fail because the receiver is gone
+    /// drop(rx);
+    /// assert_eq!(tx.send_opt(1), Err(1));
+    /// ```
+    pub fn send_opt(&self, t: T) -> Result<(), T> {
+        // In order to prevent starvation of other tasks in situations where
+        // a task sends repeatedly without ever receiving, we occasionally
+        // yield instead of doing a send immediately.
+        //
+        // Don't unconditionally attempt to yield because the TLS overhead can
+        // be a bit much, and also use `try_take` instead of `take` because
+        // there's no reason that this send shouldn't be usable off the
+        // runtime.
+        let cnt = self.sends.get() + 1;
+        self.sends.set(cnt);
+        if cnt % (RESCHED_FREQ as uint) == 0 {
+            let task: Option<Box<Task>> = Local::try_take();
+            task.map(|t| t.maybe_yield());
+        }
+
+        // Stream/shared sends go straight through. A oneshot that has already
+        // sent once must first be upgraded in place to a stream channel; the
+        // match yields the new flavor to install below.
+        let (new_inner, ret) = match *unsafe { self.inner() } {
+            Oneshot(ref p) => {
+                unsafe {
+                    let p = p.get();
+                    if !(*p).sent() {
+                        return (*p).send(t);
+                    } else {
+                        let a = Arc::new(Unsafe::new(stream::Packet::new()));
+                        match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
+                            oneshot::UpSuccess => {
+                                let ret = (*a.get()).send(t);
+                                (a, ret)
+                            }
+                            oneshot::UpDisconnected => (a, Err(t)),
+                            oneshot::UpWoke(task) => {
+                                // This send cannot fail because the task is
+                                // asleep (we're looking at it), so the receiver
+                                // can't go away.
+                                (*a.get()).send(t).ok().unwrap();
+                                task.wake().map(|t| t.reawaken());
+                                (a, Ok(()))
+                            }
+                        }
+                    }
+                }
+            }
+            Stream(ref p) => return unsafe { (*p.get()).send(t) },
+            Shared(ref p) => return unsafe { (*p.get()).send(t) },
+            Sync(..) => unreachable!(),
+        };
+
+        unsafe {
+            // Swap the upgraded stream flavor into place; `tmp` then holds
+            // the old oneshot flavor and disposes of it when dropped.
+            let tmp = Sender::new(Stream(new_inner));
+            mem::swap(self.mut_inner(), tmp.mut_inner());
+        }
+        return ret;
+    }
+}
+
+impl<T: Send> Clone for Sender<T> {
+    fn clone(&self) -> Sender<T> {
+        // Multiple senders require the shared flavor. Oneshot and stream
+        // channels are upgraded in place to a shared channel; an
+        // already-shared channel just registers another sender. If a receiver
+        // was found sleeping during the upgrade, its blocked task is carried
+        // over to the new packet via `inherit_blocker` below.
+        let (packet, sleeper) = match *unsafe { self.inner() } {
+            Oneshot(ref p) => {
+                let a = Arc::new(Unsafe::new(shared::Packet::new()));
+                unsafe {
+                    (*a.get()).postinit_lock();
+                    match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
+                        oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
+                        oneshot::UpWoke(task) => (a, Some(task))
+                    }
+                }
+            }
+            Stream(ref p) => {
+                let a = Arc::new(Unsafe::new(shared::Packet::new()));
+                unsafe {
+                    (*a.get()).postinit_lock();
+                    match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
+                        stream::UpSuccess | stream::UpDisconnected => (a, None),
+                        stream::UpWoke(task) => (a, Some(task)),
+                    }
+                }
+            }
+            Shared(ref p) => {
+                unsafe { (*p.get()).clone_chan(); }
+                return Sender::new(Shared(p.clone()));
+            }
+            Sync(..) => unreachable!(),
+        };
+
+        unsafe {
+            (*packet.get()).inherit_blocker(sleeper);
+
+            // Install the shared flavor into `self` as well, so both the
+            // original and the clone point at the same shared packet.
+            let tmp = Sender::new(Shared(packet.clone()));
+            mem::swap(self.mut_inner(), tmp.mut_inner());
+        }
+        Sender::new(Shared(packet))
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Sender<T> {
+    // Notify the underlying packet that this sender is going away. Senders
+    // never hold the `Sync` flavor (sync channels use `SyncSender`), hence
+    // the unreachable arm.
+    fn drop(&mut self) {
+        match *unsafe { self.mut_inner() } {
+            Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+            Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+            Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
+            Sync(..) => unreachable!(),
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SyncSender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> SyncSender<T> {
+    // Wrap a shared sync packet into a sender handle.
+    fn new(inner: Arc<Unsafe<sync::Packet<T>>>) -> SyncSender<T> {
+        SyncSender { inner: inner, marker: marker::NoShare }
+    }
+
+    /// Sends a value on this synchronous channel.
+    ///
+    /// This function will *block* until space in the internal buffer becomes
+    /// available or a receiver is available to hand off the message to.
+    ///
+    /// Note that a successful send does *not* guarantee that the receiver will
+    /// ever see the data if there is a buffer on this channel. Messages may be
+    /// enqueued in the internal buffer for the receiver to receive at a later
+    /// time. If the buffer size is 0, however, it can be guaranteed that the
+    /// receiver has indeed received the data if this function returns success.
+    ///
+    /// # Failure
+    ///
+    /// Similarly to `Sender::send`, this function will fail if the
+    /// corresponding `Receiver` for this channel has disconnected. This
+    /// behavior is used to propagate failure among tasks.
+    ///
+    /// If failure is not desired, you can achieve the same semantics with the
+    /// `SyncSender::send_opt` method which will not fail if the receiver
+    /// disconnects.
+    pub fn send(&self, t: T) {
+        if self.send_opt(t).is_err() {
+            fail!("sending on a closed channel");
+        }
+    }
+
+    /// Send a value on a channel, returning it back if the receiver
+    /// disconnected
+    ///
+    /// This method will *block* to send the value `t` on the channel, but if
+    /// the value could not be sent due to the receiver disconnecting, the value
+    /// is returned back to the callee. This function is similar to `try_send`,
+    /// except that it will block if the channel is currently full.
+    ///
+    /// # Failure
+    ///
+    /// This function cannot fail.
+    pub fn send_opt(&self, t: T) -> Result<(), T> {
+        unsafe { (*self.inner.get()).send(t) }
+    }
+
+    /// Attempts to send a value on this channel without blocking.
+    ///
+    /// This method differs from `send_opt` by returning immediately if the
+    /// channel's buffer is full or no receiver is waiting to acquire some
+    /// data. Compared with `send_opt`, this function has two failure cases
+    /// instead of one (one for disconnection, one for a full buffer).
+    ///
+    /// See `SyncSender::send` for notes about guarantees of whether the
+    /// receiver has received the data or not if this function is successful.
+    ///
+    /// # Failure
+    ///
+    /// This function cannot fail
+    pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+        unsafe { (*self.inner.get()).try_send(t) }
+    }
+}
+
+impl<T: Send> Clone for SyncSender<T> {
+    // Sync channels are always shareable, so cloning just registers another
+    // sender on the same packet; no flavor upgrade is needed.
+    fn clone(&self) -> SyncSender<T> {
+        unsafe { (*self.inner.get()).clone_chan(); }
+        return SyncSender::new(self.inner.clone());
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for SyncSender<T> {
+    // Notify the packet that one sender is going away.
+    fn drop(&mut self) {
+        unsafe { (*self.inner.get()).drop_chan(); }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Receiver
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T: Send> Receiver<T> {
+    // Wrap a flavor into a fresh receiver with a zeroed yield counter.
+    fn new(inner: Flavor<T>) -> Receiver<T> {
+        Receiver { inner: Unsafe::new(inner), receives: Cell::new(0), marker: marker::NoShare }
+    }
+
+    /// Blocks waiting for a value on this receiver
+    ///
+    /// This function will block if necessary to wait for a corresponding send
+    /// on the channel from its paired `Sender` structure. This receiver will
+    /// be woken up when data is ready, and the data will be returned.
+    ///
+    /// # Failure
+    ///
+    /// Similar to channels, this method will trigger a task failure if the
+    /// other end of the channel has hung up (been deallocated). The purpose of
+    /// this is to propagate failure among tasks.
+    ///
+    /// If failure is not desired, then there are two options:
+    ///
+    /// * If blocking is still desired, the `recv_opt` method will return `None`
+    ///   when the other end hangs up
+    ///
+    /// * If blocking is not desired, then the `try_recv` method will attempt to
+    ///   peek at a value on this receiver.
+    pub fn recv(&self) -> T {
+        match self.recv_opt() {
+            Ok(t) => t,
+            Err(()) => fail!("receiving on a closed channel"),
+        }
+    }
+
+    /// Attempts to return a pending value on this receiver without blocking
+    ///
+    /// This method will never block the caller in order to wait for data to
+    /// become available. Instead, this will always return immediately with a
+    /// possible option of pending data on the channel.
+    ///
+    /// This is useful for a flavor of "optimistic check" before deciding to
+    /// block on a receiver.
+    ///
+    /// This function cannot fail.
+    pub fn try_recv(&self) -> Result<T, TryRecvError> {
+        // If a thread is spinning in try_recv, we should take the opportunity
+        // to reschedule things occasionally. See notes above in scheduling on
+        // sends for why this doesn't always hit TLS, and also for why this uses
+        // `try_take` instead of `take`.
+        let cnt = self.receives.get() + 1;
+        self.receives.set(cnt);
+        if cnt % (RESCHED_FREQ as uint) == 0 {
+            let task: Option<Box<Task>> = Local::try_take();
+            task.map(|t| t.maybe_yield());
+        }
+
+        // Loop because a oneshot/stream channel may report that it was
+        // upgraded, in which case the new flavor is swapped into place and
+        // the receive is retried against it.
+        loop {
+            let new_port = match *unsafe { self.inner() } {
+                Oneshot(ref p) => {
+                    match unsafe { (*p.get()).try_recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(oneshot::Empty) => return Err(Empty),
+                        Err(oneshot::Disconnected) => return Err(Disconnected),
+                        Err(oneshot::Upgraded(rx)) => rx,
+                    }
+                }
+                Stream(ref p) => {
+                    match unsafe { (*p.get()).try_recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(stream::Empty) => return Err(Empty),
+                        Err(stream::Disconnected) => return Err(Disconnected),
+                        Err(stream::Upgraded(rx)) => rx,
+                    }
+                }
+                Shared(ref p) => {
+                    match unsafe { (*p.get()).try_recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(shared::Empty) => return Err(Empty),
+                        Err(shared::Disconnected) => return Err(Disconnected),
+                    }
+                }
+                Sync(ref p) => {
+                    match unsafe { (*p.get()).try_recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(sync::Empty) => return Err(Empty),
+                        Err(sync::Disconnected) => return Err(Disconnected),
+                    }
+                }
+            };
+            unsafe {
+                mem::swap(self.mut_inner(),
+                          new_port.mut_inner());
+            }
+        }
+    }
+
+    /// Attempt to wait for a value on this receiver, but does not fail if the
+    /// corresponding channel has hung up.
+    ///
+    /// This implementation of iterators for ports will always block if there is
+    /// not data available on the receiver, but it will not fail in the case
+    /// that the channel has been deallocated.
+    ///
+    /// In other words, this function has the same semantics as the `recv`
+    /// method except for the failure aspect.
+    ///
+    /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
+    /// the value found on the receiver is returned.
+    pub fn recv_opt(&self) -> Result<T, ()> {
+        // Same upgrade-and-retry loop as `try_recv`, but the underlying
+        // `recv` calls block, so `Empty` can never be observed here.
+        loop {
+            let new_port = match *unsafe { self.inner() } {
+                Oneshot(ref p) => {
+                    match unsafe { (*p.get()).recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(oneshot::Empty) => return unreachable!(),
+                        Err(oneshot::Disconnected) => return Err(()),
+                        Err(oneshot::Upgraded(rx)) => rx,
+                    }
+                }
+                Stream(ref p) => {
+                    match unsafe { (*p.get()).recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(stream::Empty) => return unreachable!(),
+                        Err(stream::Disconnected) => return Err(()),
+                        Err(stream::Upgraded(rx)) => rx,
+                    }
+                }
+                Shared(ref p) => {
+                    match unsafe { (*p.get()).recv() } {
+                        Ok(t) => return Ok(t),
+                        Err(shared::Empty) => return unreachable!(),
+                        Err(shared::Disconnected) => return Err(()),
+                    }
+                }
+                Sync(ref p) => return unsafe { (*p.get()).recv() }
+            };
+            unsafe {
+                mem::swap(self.mut_inner(), new_port.mut_inner());
+            }
+        }
+    }
+
+    /// Returns an iterator which will block waiting for messages, but never
+    /// `fail!`. It will return `None` when the channel has hung up.
+    pub fn iter<'a>(&'a self) -> Messages<'a, T> {
+        Messages { rx: self }
+    }
+}
+
+// Hooks used by the `select!` machinery. Each method follows the same
+// upgrade-and-retry pattern as `try_recv`/`recv_opt`: when the underlying
+// oneshot/stream packet reports an upgrade, the new flavor is swapped into
+// place and the operation is retried.
+impl<T: Send> select::Packet for Receiver<T> {
+    // Returns whether data is immediately available on this receiver.
+    fn can_recv(&self) -> bool {
+        loop {
+            let new_port = match *unsafe { self.inner() } {
+                Oneshot(ref p) => {
+                    match unsafe { (*p.get()).can_recv() } {
+                        Ok(ret) => return ret,
+                        Err(upgrade) => upgrade,
+                    }
+                }
+                Stream(ref p) => {
+                    match unsafe { (*p.get()).can_recv() } {
+                        Ok(ret) => return ret,
+                        Err(upgrade) => upgrade,
+                    }
+                }
+                Shared(ref p) => {
+                    return unsafe { (*p.get()).can_recv() };
+                }
+                Sync(ref p) => {
+                    return unsafe { (*p.get()).can_recv() };
+                }
+            };
+            unsafe {
+                mem::swap(self.mut_inner(),
+                          new_port.mut_inner());
+            }
+        }
+    }
+
+    // Begins blocking this task on the receiver; `Err` returns ownership of
+    // the task when blocking was aborted (data already available).
+    fn start_selection(&self, mut task: BlockedTask) -> Result<(), BlockedTask>{
+        loop {
+            let (t, new_port) = match *unsafe { self.inner() } {
+                Oneshot(ref p) => {
+                    match unsafe { (*p.get()).start_selection(task) } {
+                        oneshot::SelSuccess => return Ok(()),
+                        oneshot::SelCanceled(task) => return Err(task),
+                        oneshot::SelUpgraded(t, rx) => (t, rx),
+                    }
+                }
+                Stream(ref p) => {
+                    match unsafe { (*p.get()).start_selection(task) } {
+                        stream::SelSuccess => return Ok(()),
+                        stream::SelCanceled(task) => return Err(task),
+                        stream::SelUpgraded(t, rx) => (t, rx),
+                    }
+                }
+                Shared(ref p) => {
+                    return unsafe { (*p.get()).start_selection(task) };
+                }
+                Sync(ref p) => {
+                    return unsafe { (*p.get()).start_selection(task) };
+                }
+            };
+            // The upgrade handed the blocked task back; retry against the
+            // upgraded flavor.
+            task = t;
+            unsafe {
+                mem::swap(self.mut_inner(),
+                          new_port.mut_inner());
+            }
+        }
+    }
+
+    // Cancels a pending selection; returns whether data became (or was)
+    // available. `was_upgrade` tracks whether we arrived at the current
+    // flavor via an upgrade on a previous loop iteration.
+    fn abort_selection(&self) -> bool {
+        let mut was_upgrade = false;
+        loop {
+            let result = match *unsafe { self.inner() } {
+                Oneshot(ref p) => unsafe { (*p.get()).abort_selection() },
+                Stream(ref p) => unsafe {
+                    (*p.get()).abort_selection(was_upgrade)
+                },
+                Shared(ref p) => return unsafe {
+                    (*p.get()).abort_selection(was_upgrade)
+                },
+                Sync(ref p) => return unsafe {
+                    (*p.get()).abort_selection()
+                },
+            };
+            let new_port = match result { Ok(b) => return b, Err(p) => p };
+            was_upgrade = true;
+            unsafe {
+                mem::swap(self.mut_inner(),
+                          new_port.mut_inner());
+            }
+        }
+    }
+}
+
+impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
+    // Blocks via `recv_opt`; yields `None` once the channel has hung up.
+    fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Receiver<T> {
+    // Notify the underlying packet that the (sole) receiving half is going
+    // away; all four flavors are possible here, unlike the sender.
+    fn drop(&mut self) {
+        match *unsafe { self.mut_inner() } {
+            Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
+            Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
+            Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
+            Sync(ref mut p) => unsafe { (*p.get()).drop_port(); },
+        }
+    }
+}
+
+// Tests for the asynchronous, unbounded channel flavor created by
+// `channel()`. The `#[should_fail]` variants exercise the failure paths
+// taken when one half of a channel has hung up.
+#[cfg(test)]
+mod test {
+    use std::prelude::*;
+
+    use native;
+    use std::os;
+    use super::*;
+
+    // Multiplier for the stress tests below, overridable through the
+    // RUST_TEST_STRESS environment variable (defaults to 1).
+    pub fn stress_factor() -> uint {
+        match os::getenv("RUST_TEST_STRESS") {
+            Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
+            None => 1,
+        }
+    }
+
+    test!(fn smoke() {
+        let (tx, rx) = channel();
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn drop_full() {
+        let (tx, _rx) = channel();
+        tx.send(box 1);
+    })
+
+    test!(fn drop_full_shared() {
+        let (tx, _rx) = channel();
+        drop(tx.clone());
+        drop(tx.clone());
+        tx.send(box 1);
+    })
+
+    test!(fn smoke_shared() {
+        let (tx, rx) = channel();
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+        let tx = tx.clone();
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn smoke_threads() {
+        let (tx, rx) = channel();
+        spawn(proc() {
+            tx.send(1);
+        });
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn smoke_port_gone() {
+        let (tx, rx) = channel();
+        drop(rx);
+        tx.send(1);
+    } #[should_fail])
+
+    test!(fn smoke_shared_port_gone() {
+        let (tx, rx) = channel();
+        drop(rx);
+        tx.send(1);
+    } #[should_fail])
+
+    test!(fn smoke_shared_port_gone2() {
+        let (tx, rx) = channel();
+        drop(rx);
+        let tx2 = tx.clone();
+        drop(tx);
+        tx2.send(1);
+    } #[should_fail])
+
+    test!(fn port_gone_concurrent() {
+        let (tx, rx) = channel();
+        spawn(proc() {
+            rx.recv();
+        });
+        loop { tx.send(1) }
+    } #[should_fail])
+
+    test!(fn port_gone_concurrent_shared() {
+        let (tx, rx) = channel();
+        let tx2 = tx.clone();
+        spawn(proc() {
+            rx.recv();
+        });
+        loop {
+            tx.send(1);
+            tx2.send(1);
+        }
+    } #[should_fail])
+
+    test!(fn smoke_chan_gone() {
+        let (tx, rx) = channel::<int>();
+        drop(tx);
+        rx.recv();
+    } #[should_fail])
+
+    test!(fn smoke_chan_gone_shared() {
+        let (tx, rx) = channel::<()>();
+        let tx2 = tx.clone();
+        drop(tx);
+        drop(tx2);
+        rx.recv();
+    } #[should_fail])
+
+    test!(fn chan_gone_concurrent() {
+        let (tx, rx) = channel();
+        spawn(proc() {
+            tx.send(1);
+            tx.send(1);
+        });
+        loop { rx.recv(); }
+    } #[should_fail])
+
+    test!(fn stress() {
+        let (tx, rx) = channel();
+        spawn(proc() {
+            for _ in range(0, 10000) { tx.send(1); }
+        });
+        for _ in range(0, 10000) {
+            assert_eq!(rx.recv(), 1);
+        }
+    })
+
+    test!(fn stress_shared() {
+        static AMT: uint = 10000;
+        static NTHREADS: uint = 8;
+        let (tx, rx) = channel::<int>();
+        let (dtx, drx) = channel::<()>();
+
+        spawn(proc() {
+            for _ in range(0, AMT * NTHREADS) {
+                assert_eq!(rx.recv(), 1);
+            }
+            match rx.try_recv() {
+                Ok(..) => fail!(),
+                _ => {}
+            }
+            dtx.send(());
+        });
+
+        for _ in range(0, NTHREADS) {
+            let tx = tx.clone();
+            spawn(proc() {
+                for _ in range(0, AMT) { tx.send(1); }
+            });
+        }
+        drop(tx);
+        drx.recv();
+    })
+
+    #[test]
+    fn send_from_outside_runtime() {
+        let (tx1, rx1) = channel::<()>();
+        let (tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+        let tx4 = tx3.clone();
+        spawn(proc() {
+            tx1.send(());
+            for _ in range(0, 40) {
+                assert_eq!(rx2.recv(), 1);
+            }
+            tx3.send(());
+        });
+        rx1.recv();
+        native::task::spawn(proc() {
+            for _ in range(0, 40) {
+                tx2.send(1);
+            }
+            tx4.send(());
+        });
+        rx3.recv();
+        rx3.recv();
+    }
+
+    #[test]
+    fn recv_from_outside_runtime() {
+        let (tx, rx) = channel::<int>();
+        let (dtx, drx) = channel();
+        native::task::spawn(proc() {
+            for _ in range(0, 40) {
+                assert_eq!(rx.recv(), 1);
+            }
+            dtx.send(());
+        });
+        for _ in range(0, 40) {
+            tx.send(1);
+        }
+        drx.recv();
+    }
+
+    #[test]
+    fn no_runtime() {
+        let (tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+        let tx4 = tx3.clone();
+        native::task::spawn(proc() {
+            assert_eq!(rx1.recv(), 1);
+            tx2.send(2);
+            tx4.send(());
+        });
+        native::task::spawn(proc() {
+            tx1.send(1);
+            assert_eq!(rx2.recv(), 2);
+            tx3.send(());
+        });
+        rx3.recv();
+        rx3.recv();
+    }
+
+    test!(fn oneshot_single_thread_close_port_first() {
+        // Simple test of closing without sending
+        let (_tx, rx) = channel::<int>();
+        drop(rx);
+    })
+
+    test!(fn oneshot_single_thread_close_chan_first() {
+        // Simple test of closing without sending
+        let (tx, _rx) = channel::<int>();
+        drop(tx);
+    })
+
+    test!(fn oneshot_single_thread_send_port_close() {
+        // Testing that the sender cleans up the payload if receiver is closed
+        let (tx, rx) = channel::<Box<int>>();
+        drop(rx);
+        tx.send(box 0);
+    } #[should_fail])
+
+    test!(fn oneshot_single_thread_recv_chan_close() {
+        // Receiving on a closed chan will fail
+        let res = task::try(proc() {
+            let (tx, rx) = channel::<int>();
+            drop(tx);
+            rx.recv();
+        });
+        // What is our res?
+        assert!(res.is_err());
+    })
+
+    test!(fn oneshot_single_thread_send_then_recv() {
+        let (tx, rx) = channel::<Box<int>>();
+        tx.send(box 10);
+        assert!(rx.recv() == box 10);
+    })
+
+    test!(fn oneshot_single_thread_try_send_open() {
+        let (tx, rx) = channel::<int>();
+        assert!(tx.send_opt(10).is_ok());
+        assert!(rx.recv() == 10);
+    })
+
+    test!(fn oneshot_single_thread_try_send_closed() {
+        let (tx, rx) = channel::<int>();
+        drop(rx);
+        assert!(tx.send_opt(10).is_err());
+    })
+
+    test!(fn oneshot_single_thread_try_recv_open() {
+        let (tx, rx) = channel::<int>();
+        tx.send(10);
+        assert!(rx.recv_opt() == Ok(10));
+    })
+
+    test!(fn oneshot_single_thread_try_recv_closed() {
+        let (tx, rx) = channel::<int>();
+        drop(tx);
+        assert!(rx.recv_opt() == Err(()));
+    })
+
+    test!(fn oneshot_single_thread_peek_data() {
+        let (tx, rx) = channel::<int>();
+        assert_eq!(rx.try_recv(), Err(Empty));
+        tx.send(10);
+        assert_eq!(rx.try_recv(), Ok(10));
+    })
+
+    test!(fn oneshot_single_thread_peek_close() {
+        let (tx, rx) = channel::<int>();
+        drop(tx);
+        assert_eq!(rx.try_recv(), Err(Disconnected));
+        assert_eq!(rx.try_recv(), Err(Disconnected));
+    })
+
+    test!(fn oneshot_single_thread_peek_open() {
+        let (_tx, rx) = channel::<int>();
+        assert_eq!(rx.try_recv(), Err(Empty));
+    })
+
+    test!(fn oneshot_multi_task_recv_then_send() {
+        let (tx, rx) = channel::<Box<int>>();
+        spawn(proc() {
+            assert!(rx.recv() == box 10);
+        });
+
+        tx.send(box 10);
+    })
+
+    test!(fn oneshot_multi_task_recv_then_close() {
+        let (tx, rx) = channel::<Box<int>>();
+        spawn(proc() {
+            drop(tx);
+        });
+        let res = task::try(proc() {
+            assert!(rx.recv() == box 10);
+        });
+        assert!(res.is_err());
+    })
+
+    test!(fn oneshot_multi_thread_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = channel::<int>();
+            spawn(proc() {
+                drop(rx);
+            });
+            drop(tx);
+        }
+    })
+
+    test!(fn oneshot_multi_thread_send_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = channel::<int>();
+            spawn(proc() {
+                drop(rx);
+            });
+            let _ = task::try(proc() {
+                tx.send(1);
+            });
+        }
+    })
+
+    test!(fn oneshot_multi_thread_recv_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = channel::<int>();
+            spawn(proc() {
+                let res = task::try(proc() {
+                    rx.recv();
+                });
+                assert!(res.is_err());
+            });
+            spawn(proc() {
+                spawn(proc() {
+                    drop(tx);
+                });
+            });
+        }
+    })
+
+    test!(fn oneshot_multi_thread_send_recv_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = channel();
+            spawn(proc() {
+                tx.send(box 10);
+            });
+            spawn(proc() {
+                assert!(rx.recv() == box 10);
+            });
+        }
+    })
+
+    test!(fn stream_send_recv_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = channel();
+
+            send(tx, 0);
+            recv(rx, 0);
+
+            fn send(tx: Sender<Box<int>>, i: int) {
+                if i == 10 { return }
+
+                spawn(proc() {
+                    tx.send(box i);
+                    send(tx, i + 1);
+                });
+            }
+
+            fn recv(rx: Receiver<Box<int>>, i: int) {
+                if i == 10 { return }
+
+                spawn(proc() {
+                    assert!(rx.recv() == box i);
+                    recv(rx, i + 1);
+                });
+            }
+        }
+    })
+
+    test!(fn recv_a_lot() {
+        // Regression test that we don't run out of stack in scheduler context
+        let (tx, rx) = channel();
+        for _ in range(0, 10000) { tx.send(()); }
+        for _ in range(0, 10000) { rx.recv(); }
+    })
+
+    test!(fn shared_chan_stress() {
+        let (tx, rx) = channel();
+        let total = stress_factor() + 100;
+        for _ in range(0, total) {
+            let tx = tx.clone();
+            spawn(proc() {
+                tx.send(());
+            });
+        }
+
+        for _ in range(0, total) {
+            rx.recv();
+        }
+    })
+
+    test!(fn test_nested_recv_iter() {
+        let (tx, rx) = channel::<int>();
+        let (total_tx, total_rx) = channel::<int>();
+
+        spawn(proc() {
+            let mut acc = 0;
+            for x in rx.iter() {
+                acc += x;
+            }
+            total_tx.send(acc);
+        });
+
+        tx.send(3);
+        tx.send(1);
+        tx.send(2);
+        drop(tx);
+        assert_eq!(total_rx.recv(), 6);
+    })
+
+    test!(fn test_recv_iter_break() {
+        let (tx, rx) = channel::<int>();
+        let (count_tx, count_rx) = channel();
+
+        spawn(proc() {
+            let mut count = 0;
+            for x in rx.iter() {
+                if count >= 3 {
+                    break;
+                } else {
+                    count += x;
+                }
+            }
+            count_tx.send(count);
+        });
+
+        tx.send(2);
+        tx.send(2);
+        tx.send(2);
+        let _ = tx.send_opt(2);
+        drop(tx);
+        assert_eq!(count_rx.recv(), 4);
+    })
+
+    test!(fn try_recv_states() {
+        let (tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<()>();
+        let (tx3, rx3) = channel::<()>();
+        spawn(proc() {
+            rx2.recv();
+            tx1.send(1);
+            tx3.send(());
+            rx2.recv();
+            drop(tx1);
+            tx3.send(());
+        });
+
+        assert_eq!(rx1.try_recv(), Err(Empty));
+        tx2.send(());
+        rx3.recv();
+        assert_eq!(rx1.try_recv(), Ok(1));
+        assert_eq!(rx1.try_recv(), Err(Empty));
+        tx2.send(());
+        rx3.recv();
+        assert_eq!(rx1.try_recv(), Err(Disconnected));
+    })
+
+    // This bug used to end up in a livelock inside of the Receiver destructor
+    // because the internal state of the Shared packet was corrupted
+    test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
+        let (tx, rx) = channel();
+        let (tx2, rx2) = channel();
+        spawn(proc() {
+            rx.recv(); // wait on a oneshot
+            drop(rx);  // destroy a shared
+            tx2.send(());
+        });
+        // make sure the other task has gone to sleep
+        for _ in range(0, 5000) { task::deschedule(); }
+
+        // upgrade to a shared chan and send a message
+        let t = tx.clone();
+        drop(tx);
+        t.send(());
+
+        // wait for the child task to exit before we exit
+        rx2.recv();
+    })
+
+    test!(fn sends_off_the_runtime() {
+        use std::rt::thread::Thread;
+
+        let (tx, rx) = channel();
+        let t = Thread::start(proc() {
+            for _ in range(0, 1000) {
+                tx.send(());
+            }
+        });
+        for _ in range(0, 1000) {
+            rx.recv();
+        }
+        t.join();
+    })
+
+    test!(fn try_recvs_off_the_runtime() {
+        use std::rt::thread::Thread;
+
+        let (tx, rx) = channel();
+        let (cdone, pdone) = channel();
+        let t = Thread::start(proc() {
+            let mut hits = 0;
+            while hits < 10 {
+                match rx.try_recv() {
+                    Ok(()) => { hits += 1; }
+                    Err(Empty) => { Thread::yield_now(); }
+                    Err(Disconnected) => return,
+                }
+            }
+            cdone.send(());
+        });
+        for _ in range(0, 10) {
+            tx.send(());
+        }
+        t.join();
+        pdone.recv();
+    })
+}
+
+// Tests for the synchronous, bounded channel flavor created by
+// `sync_channel(cap)`. A capacity of 0 makes every send rendezvous with a
+// receive; the try_send tests additionally cover the Full/RecvDisconnected
+// error variants that only exist for the bounded flavor.
+#[cfg(test)]
+mod sync_tests {
+    use std::prelude::*;
+    use std::os;
+
+    // Multiplier for the stress tests below, overridable through the
+    // RUST_TEST_STRESS environment variable (defaults to 1).
+    pub fn stress_factor() -> uint {
+        match os::getenv("RUST_TEST_STRESS") {
+            Some(val) => from_str::<uint>(val.as_slice()).unwrap(),
+            None => 1,
+        }
+    }
+
+    test!(fn smoke() {
+        let (tx, rx) = sync_channel(1);
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn drop_full() {
+        let (tx, _rx) = sync_channel(1);
+        tx.send(box 1);
+    })
+
+    test!(fn smoke_shared() {
+        let (tx, rx) = sync_channel(1);
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+        let tx = tx.clone();
+        tx.send(1);
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn smoke_threads() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            tx.send(1);
+        });
+        assert_eq!(rx.recv(), 1);
+    })
+
+    test!(fn smoke_port_gone() {
+        let (tx, rx) = sync_channel(0);
+        drop(rx);
+        tx.send(1);
+    } #[should_fail])
+
+    test!(fn smoke_shared_port_gone2() {
+        let (tx, rx) = sync_channel(0);
+        drop(rx);
+        let tx2 = tx.clone();
+        drop(tx);
+        tx2.send(1);
+    } #[should_fail])
+
+    test!(fn port_gone_concurrent() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            rx.recv();
+        });
+        loop { tx.send(1) }
+    } #[should_fail])
+
+    test!(fn port_gone_concurrent_shared() {
+        let (tx, rx) = sync_channel(0);
+        let tx2 = tx.clone();
+        spawn(proc() {
+            rx.recv();
+        });
+        loop {
+            tx.send(1);
+            tx2.send(1);
+        }
+    } #[should_fail])
+
+    test!(fn smoke_chan_gone() {
+        let (tx, rx) = sync_channel::<int>(0);
+        drop(tx);
+        rx.recv();
+    } #[should_fail])
+
+    test!(fn smoke_chan_gone_shared() {
+        let (tx, rx) = sync_channel::<()>(0);
+        let tx2 = tx.clone();
+        drop(tx);
+        drop(tx2);
+        rx.recv();
+    } #[should_fail])
+
+    test!(fn chan_gone_concurrent() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            tx.send(1);
+            tx.send(1);
+        });
+        loop { rx.recv(); }
+    } #[should_fail])
+
+    test!(fn stress() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            for _ in range(0, 10000) { tx.send(1); }
+        });
+        for _ in range(0, 10000) {
+            assert_eq!(rx.recv(), 1);
+        }
+    })
+
+    test!(fn stress_shared() {
+        static AMT: uint = 1000;
+        static NTHREADS: uint = 8;
+        let (tx, rx) = sync_channel::<int>(0);
+        let (dtx, drx) = sync_channel::<()>(0);
+
+        spawn(proc() {
+            for _ in range(0, AMT * NTHREADS) {
+                assert_eq!(rx.recv(), 1);
+            }
+            match rx.try_recv() {
+                Ok(..) => fail!(),
+                _ => {}
+            }
+            dtx.send(());
+        });
+
+        for _ in range(0, NTHREADS) {
+            let tx = tx.clone();
+            spawn(proc() {
+                for _ in range(0, AMT) { tx.send(1); }
+            });
+        }
+        drop(tx);
+        drx.recv();
+    })
+
+    test!(fn oneshot_single_thread_close_port_first() {
+        // Simple test of closing without sending
+        let (_tx, rx) = sync_channel::<int>(0);
+        drop(rx);
+    })
+
+    test!(fn oneshot_single_thread_close_chan_first() {
+        // Simple test of closing without sending
+        let (tx, _rx) = sync_channel::<int>(0);
+        drop(tx);
+    })
+
+    test!(fn oneshot_single_thread_send_port_close() {
+        // Testing that the sender cleans up the payload if receiver is closed
+        let (tx, rx) = sync_channel::<Box<int>>(0);
+        drop(rx);
+        tx.send(box 0);
+    } #[should_fail])
+
+    test!(fn oneshot_single_thread_recv_chan_close() {
+        // Receiving on a closed chan will fail
+        let res = task::try(proc() {
+            let (tx, rx) = sync_channel::<int>(0);
+            drop(tx);
+            rx.recv();
+        });
+        // What is our res?
+        assert!(res.is_err());
+    })
+
+    test!(fn oneshot_single_thread_send_then_recv() {
+        let (tx, rx) = sync_channel::<Box<int>>(1);
+        tx.send(box 10);
+        assert!(rx.recv() == box 10);
+    })
+
+    test!(fn oneshot_single_thread_try_send_open() {
+        let (tx, rx) = sync_channel::<int>(1);
+        assert_eq!(tx.try_send(10), Ok(()));
+        assert!(rx.recv() == 10);
+    })
+
+    test!(fn oneshot_single_thread_try_send_closed() {
+        let (tx, rx) = sync_channel::<int>(0);
+        drop(rx);
+        assert_eq!(tx.try_send(10), Err(RecvDisconnected(10)));
+    })
+
+    test!(fn oneshot_single_thread_try_send_closed2() {
+        let (tx, _rx) = sync_channel::<int>(0);
+        assert_eq!(tx.try_send(10), Err(Full(10)));
+    })
+
+    test!(fn oneshot_single_thread_try_recv_open() {
+        let (tx, rx) = sync_channel::<int>(1);
+        tx.send(10);
+        assert!(rx.recv_opt() == Ok(10));
+    })
+
+    test!(fn oneshot_single_thread_try_recv_closed() {
+        let (tx, rx) = sync_channel::<int>(0);
+        drop(tx);
+        assert!(rx.recv_opt() == Err(()));
+    })
+
+    test!(fn oneshot_single_thread_peek_data() {
+        let (tx, rx) = sync_channel::<int>(1);
+        assert_eq!(rx.try_recv(), Err(Empty));
+        tx.send(10);
+        assert_eq!(rx.try_recv(), Ok(10));
+    })
+
+    test!(fn oneshot_single_thread_peek_close() {
+        let (tx, rx) = sync_channel::<int>(0);
+        drop(tx);
+        assert_eq!(rx.try_recv(), Err(Disconnected));
+        assert_eq!(rx.try_recv(), Err(Disconnected));
+    })
+
+    test!(fn oneshot_single_thread_peek_open() {
+        let (_tx, rx) = sync_channel::<int>(0);
+        assert_eq!(rx.try_recv(), Err(Empty));
+    })
+
+    test!(fn oneshot_multi_task_recv_then_send() {
+        let (tx, rx) = sync_channel::<Box<int>>(0);
+        spawn(proc() {
+            assert!(rx.recv() == box 10);
+        });
+
+        tx.send(box 10);
+    })
+
+    test!(fn oneshot_multi_task_recv_then_close() {
+        let (tx, rx) = sync_channel::<Box<int>>(0);
+        spawn(proc() {
+            drop(tx);
+        });
+        let res = task::try(proc() {
+            assert!(rx.recv() == box 10);
+        });
+        assert!(res.is_err());
+    })
+
+    test!(fn oneshot_multi_thread_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = sync_channel::<int>(0);
+            spawn(proc() {
+                drop(rx);
+            });
+            drop(tx);
+        }
+    })
+
+    test!(fn oneshot_multi_thread_send_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = sync_channel::<int>(0);
+            spawn(proc() {
+                drop(rx);
+            });
+            let _ = task::try(proc() {
+                tx.send(1);
+            });
+        }
+    })
+
+    test!(fn oneshot_multi_thread_recv_close_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = sync_channel::<int>(0);
+            spawn(proc() {
+                let res = task::try(proc() {
+                    rx.recv();
+                });
+                assert!(res.is_err());
+            });
+            spawn(proc() {
+                spawn(proc() {
+                    drop(tx);
+                });
+            });
+        }
+    })
+
+    test!(fn oneshot_multi_thread_send_recv_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = sync_channel(0);
+            spawn(proc() {
+                tx.send(box 10);
+            });
+            spawn(proc() {
+                assert!(rx.recv() == box 10);
+            });
+        }
+    })
+
+    test!(fn stream_send_recv_stress() {
+        for _ in range(0, stress_factor()) {
+            let (tx, rx) = sync_channel(0);
+
+            send(tx, 0);
+            recv(rx, 0);
+
+            fn send(tx: SyncSender<Box<int>>, i: int) {
+                if i == 10 { return }
+
+                spawn(proc() {
+                    tx.send(box i);
+                    send(tx, i + 1);
+                });
+            }
+
+            fn recv(rx: Receiver<Box<int>>, i: int) {
+                if i == 10 { return }
+
+                spawn(proc() {
+                    assert!(rx.recv() == box i);
+                    recv(rx, i + 1);
+                });
+            }
+        }
+    })
+
+    test!(fn recv_a_lot() {
+        // Regression test that we don't run out of stack in scheduler context
+        let (tx, rx) = sync_channel(10000);
+        for _ in range(0, 10000) { tx.send(()); }
+        for _ in range(0, 10000) { rx.recv(); }
+    })
+
+    test!(fn shared_chan_stress() {
+        let (tx, rx) = sync_channel(0);
+        let total = stress_factor() + 100;
+        for _ in range(0, total) {
+            let tx = tx.clone();
+            spawn(proc() {
+                tx.send(());
+            });
+        }
+
+        for _ in range(0, total) {
+            rx.recv();
+        }
+    })
+
+    test!(fn test_nested_recv_iter() {
+        let (tx, rx) = sync_channel::<int>(0);
+        let (total_tx, total_rx) = sync_channel::<int>(0);
+
+        spawn(proc() {
+            let mut acc = 0;
+            for x in rx.iter() {
+                acc += x;
+            }
+            total_tx.send(acc);
+        });
+
+        tx.send(3);
+        tx.send(1);
+        tx.send(2);
+        drop(tx);
+        assert_eq!(total_rx.recv(), 6);
+    })
+
+    test!(fn test_recv_iter_break() {
+        let (tx, rx) = sync_channel::<int>(0);
+        let (count_tx, count_rx) = sync_channel(0);
+
+        spawn(proc() {
+            let mut count = 0;
+            for x in rx.iter() {
+                if count >= 3 {
+                    break;
+                } else {
+                    count += x;
+                }
+            }
+            count_tx.send(count);
+        });
+
+        tx.send(2);
+        tx.send(2);
+        tx.send(2);
+        let _ = tx.try_send(2);
+        drop(tx);
+        assert_eq!(count_rx.recv(), 4);
+    })
+
+    test!(fn try_recv_states() {
+        let (tx1, rx1) = sync_channel::<int>(1);
+        let (tx2, rx2) = sync_channel::<()>(1);
+        let (tx3, rx3) = sync_channel::<()>(1);
+        spawn(proc() {
+            rx2.recv();
+            tx1.send(1);
+            tx3.send(());
+            rx2.recv();
+            drop(tx1);
+            tx3.send(());
+        });
+
+        assert_eq!(rx1.try_recv(), Err(Empty));
+        tx2.send(());
+        rx3.recv();
+        assert_eq!(rx1.try_recv(), Ok(1));
+        assert_eq!(rx1.try_recv(), Err(Empty));
+        tx2.send(());
+        rx3.recv();
+        assert_eq!(rx1.try_recv(), Err(Disconnected));
+    })
+
+    // This bug used to end up in a livelock inside of the Receiver destructor
+    // because the internal state of the Shared packet was corrupted
+    test!(fn destroy_upgraded_shared_port_when_sender_still_active() {
+        let (tx, rx) = sync_channel(0);
+        let (tx2, rx2) = sync_channel(0);
+        spawn(proc() {
+            rx.recv(); // wait on a oneshot
+            drop(rx);  // destroy a shared
+            tx2.send(());
+        });
+        // make sure the other task has gone to sleep
+        for _ in range(0, 5000) { task::deschedule(); }
+
+        // upgrade to a shared chan and send a message
+        let t = tx.clone();
+        drop(tx);
+        t.send(());
+
+        // wait for the child task to exit before we exit
+        rx2.recv();
+    })
+
+    test!(fn try_recvs_off_the_runtime() {
+        use std::rt::thread::Thread;
+
+        let (tx, rx) = sync_channel(0);
+        let (cdone, pdone) = channel();
+        let t = Thread::start(proc() {
+            let mut hits = 0;
+            while hits < 10 {
+                match rx.try_recv() {
+                    Ok(()) => { hits += 1; }
+                    Err(Empty) => { Thread::yield_now(); }
+                    Err(Disconnected) => return,
+                }
+            }
+            cdone.send(());
+        });
+        for _ in range(0, 10) {
+            tx.send(());
+        }
+        t.join();
+        pdone.recv();
+    })
+
+    test!(fn send_opt1() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() { rx.recv(); });
+        assert_eq!(tx.send_opt(1), Ok(()));
+    })
+
+    test!(fn send_opt2() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() { drop(rx); });
+        assert_eq!(tx.send_opt(1), Err(1));
+    })
+
+    test!(fn send_opt3() {
+        let (tx, rx) = sync_channel(1);
+        assert_eq!(tx.send_opt(1), Ok(()));
+        spawn(proc() { drop(rx); });
+        assert_eq!(tx.send_opt(1), Err(1));
+    })
+
+    test!(fn send_opt4() {
+        let (tx, rx) = sync_channel(0);
+        let tx2 = tx.clone();
+        let (done, donerx) = channel();
+        let done2 = done.clone();
+        spawn(proc() {
+            assert_eq!(tx.send_opt(1), Err(1));
+            done.send(());
+        });
+        spawn(proc() {
+            assert_eq!(tx2.send_opt(2), Err(2));
+            done2.send(());
+        });
+        drop(rx);
+        donerx.recv();
+        donerx.recv();
+    })
+
+    test!(fn try_send1() {
+        let (tx, _rx) = sync_channel(0);
+        assert_eq!(tx.try_send(1), Err(Full(1)));
+    })
+
+    test!(fn try_send2() {
+        let (tx, _rx) = sync_channel(1);
+        assert_eq!(tx.try_send(1), Ok(()));
+        assert_eq!(tx.try_send(1), Err(Full(1)));
+    })
+
+    test!(fn try_send3() {
+        let (tx, rx) = sync_channel(1);
+        assert_eq!(tx.try_send(1), Ok(()));
+        drop(rx);
+        assert_eq!(tx.try_send(1), Err(RecvDisconnected(1)));
+    })
+
+    test!(fn try_send4() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            for _ in range(0, 1000) { task::deschedule(); }
+            assert_eq!(tx.try_send(1), Ok(()));
+        });
+        assert_eq!(rx.recv(), 1);
+    } #[ignore(reason = "flaky on libnative")])
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Oneshot channels/ports
+///
+/// This is the initial flavor of channels/ports used for comm module. This is
+/// an optimization for the one-use case of a channel. The major optimization of
+/// this type is to have one and exactly one allocation when the chan/port pair
+/// is created.
+///
+/// Another possible optimization would be to not use an Arc box because
+/// in theory we know when the shared packet can be deallocated (no real need
+/// for the atomic reference counting), but I was having trouble figuring out
+/// how to destroy the data early in a drop of a Port.
+///
+/// # Implementation
+///
+/// Oneshots are implemented around one atomic uint variable. This variable
+/// indicates both the state of the port/chan but also contains any tasks
+/// blocked on the port. All atomic operations happen on this one word.
+///
+/// In order to upgrade a oneshot channel, an upgrade is considered a disconnect
+/// on behalf of the channel side of things (it can be mentally thought of as
+/// consuming the port). This upgrade is then also stored in the shared packet.
+/// The one caveat to consider is that when a port sees a disconnected channel
+/// it must check for data because there is no "data plus upgrade" state.
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::mem;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+
+use atomics;
+use comm::Receiver;
+
+// Various states you can find a port in.
+static EMPTY: uint = 0;        // initial state: no data and nobody blocked
+static DATA: uint = 1;         // data has been sent and awaits the port
+static DISCONNECTED: uint = 2; // channel hung up, or was upgraded
+// Any other value stored in the state word is a blocked task, cast to a
+// uint via BlockedTask::cast_to_uint.
+
+/// The shared packet backing a oneshot channel/port pair. Exactly one
+/// allocation of this type is made per pair.
+pub struct Packet<T> {
+    // Internal state of the chan/port pair (stores the blocked task as well).
+    // Holds one of EMPTY/DATA/DISCONNECTED or a blocked task cast to a uint.
+    state: atomics::AtomicUint,
+    // One-shot data slot location
+    data: Option<T>,
+    // when used for the second time, a oneshot channel must be upgraded, and
+    // this contains the slot for the upgrade
+    upgrade: MyUpgrade<T>,
+}
+
+/// Reasons a receive on a oneshot can fail.
+pub enum Failure<T> {
+    Empty,                 // no data has been sent yet
+    Disconnected,          // the channel hung up without upgrading
+    Upgraded(Receiver<T>), // the channel was upgraded; retry on this receiver
+}
+
+/// Outcome of upgrading a oneshot to another channel flavor.
+pub enum UpgradeResult {
+    UpSuccess,            // the upgrade was installed in the packet
+    UpDisconnected,       // the port already hung up; upgrade failed
+    UpWoke(BlockedTask),  // a task was blocked on the port and must be woken
+}
+
+/// Outcome of starting a selection operation on a oneshot port.
+pub enum SelectionResult<T> {
+    SelCanceled(BlockedTask),          // data/disconnect already present
+    SelUpgraded(BlockedTask, Receiver<T>), // pending upgrade; select there
+    SelSuccess,                        // task successfully installed as blocked
+}
+
+/// Tracks whether this oneshot has been used and/or upgraded.
+enum MyUpgrade<T> {
+    NothingSent,      // channel has never been sent on
+    SendUsed,         // the single send has been consumed
+    GoUp(Receiver<T>), // channel was upgraded to this receiver
+}
+
+impl<T: Send> Packet<T> {
+    // Creates an empty packet: no data, never sent on, nobody blocked.
+    pub fn new() -> Packet<T> {
+        Packet {
+            data: None,
+            upgrade: NothingSent,
+            state: atomics::AtomicUint::new(EMPTY),
+        }
+    }
+
+    // Performs the single send this packet allows. Ok(()) on success;
+    // Err(t) hands the data back if the port has already hung up.
+    pub fn send(&mut self, t: T) -> Result<(), T> {
+        // Sanity check
+        match self.upgrade {
+            NothingSent => {}
+            _ => fail!("sending on a oneshot that's already sent on "),
+        }
+        assert!(self.data.is_none());
+        self.data = Some(t);
+        self.upgrade = SendUsed;
+
+        match self.state.swap(DATA, atomics::SeqCst) {
+            // Sent the data, no one was waiting
+            EMPTY => Ok(()),
+
+            // Couldn't send the data, the port hung up first. Return the data
+            // back up the stack.
+            DISCONNECTED => {
+                Err(self.data.take_unwrap())
+            }
+
+            // Not possible, these are one-use channels
+            DATA => unreachable!(),
+
+            // Anything else means that there was a task waiting on the other
+            // end. We leave the 'DATA' state inside so it'll pick it up on the
+            // other end.
+            n => unsafe {
+                let t = BlockedTask::cast_from_uint(n);
+                t.wake().map(|t| t.reawaken());
+                Ok(())
+            }
+        }
+    }
+
+    // Just tests whether this channel has been sent on or not, this is only
+    // safe to use from the sender.
+    pub fn sent(&self) -> bool {
+        match self.upgrade {
+            NothingSent => false,
+            _ => true,
+        }
+    }
+
+    // Blocking receive: deschedules the current task until data, a
+    // disconnect, or an upgrade arrives, then delegates to `try_recv`.
+    pub fn recv(&mut self) -> Result<T, Failure<T>> {
+        // Attempt to not block the task (it's a little expensive). If it looks
+        // like we're not empty, then immediately go through to `try_recv`.
+        if self.state.load(atomics::SeqCst) == EMPTY {
+            let t: Box<Task> = Local::take();
+            t.deschedule(1, |task| {
+                // Publish ourselves as the blocked task by storing our
+                // pointer in the state word (only if it's still EMPTY).
+                let n = unsafe { task.cast_to_uint() };
+                match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+                    // Nothing on the channel, we legitimately block
+                    EMPTY => Ok(()),
+
+                    // If there's data or it's a disconnected channel, then we
+                    // failed the cmpxchg, so we just wake ourselves back up
+                    DATA | DISCONNECTED => {
+                        unsafe { Err(BlockedTask::cast_from_uint(n)) }
+                    }
+
+                    // Only one thread is allowed to sleep on this port
+                    _ => unreachable!()
+                }
+            });
+        }
+
+        self.try_recv()
+    }
+
+    // Non-blocking receive; reports Empty/Disconnected/Upgraded on failure.
+    pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
+        match self.state.load(atomics::SeqCst) {
+            EMPTY => Err(Empty),
+
+            // We saw some data on the channel, but the channel can be used
+            // again to send us an upgrade. As a result, we need to re-insert
+            // into the channel that there's no data available (otherwise we'll
+            // just see DATA next time). This is done as a cmpxchg because if
+            // the state changes under our feet we'd rather just see that state
+            // change.
+            DATA => {
+                self.state.compare_and_swap(DATA, EMPTY, atomics::SeqCst);
+                match self.data.take() {
+                    Some(data) => Ok(data),
+                    None => unreachable!(),
+                }
+            }
+
+            // There's no guarantee that we receive before an upgrade happens,
+            // and an upgrade flags the channel as disconnected, so when we see
+            // this we first need to check if there's data available and *then*
+            // we go through and process the upgrade.
+            DISCONNECTED => {
+                match self.data.take() {
+                    Some(data) => Ok(data),
+                    None => {
+                        match mem::replace(&mut self.upgrade, SendUsed) {
+                            SendUsed | NothingSent => Err(Disconnected),
+                            GoUp(upgrade) => Err(Upgraded(upgrade))
+                        }
+                    }
+                }
+            }
+            _ => unreachable!()
+        }
+    }
+
+    // Returns whether the upgrade was completed. If the upgrade wasn't
+    // completed, then the port couldn't get sent to the other half (it will
+    // never receive it).
+    pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
+        let prev = match self.upgrade {
+            NothingSent => NothingSent,
+            SendUsed => SendUsed,
+            _ => fail!("upgrading again"),
+        };
+        self.upgrade = GoUp(up);
+
+        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+            // If the channel is empty or has data on it, then we're good to go.
+            // Senders will check the data before the upgrade (in case we
+            // plastered over the DATA state).
+            DATA | EMPTY => UpSuccess,
+
+            // If the other end is already disconnected, then we failed the
+            // upgrade. Be sure to trash the port we were given.
+            DISCONNECTED => { self.upgrade = prev; UpDisconnected }
+
+            // If someone's waiting, we gotta wake them up
+            n => UpWoke(unsafe { BlockedTask::cast_from_uint(n) })
+        }
+    }
+
+    // Called when the channel half is dropped; flags the packet as
+    // disconnected and wakes any task blocked on the port.
+    pub fn drop_chan(&mut self) {
+        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+            DATA | DISCONNECTED | EMPTY => {}
+
+            // If someone's waiting, we gotta wake them up
+            n => unsafe {
+                let t = BlockedTask::cast_from_uint(n);
+                t.wake().map(|t| t.reawaken());
+            }
+        }
+    }
+
+    // Called when the port half is dropped; flags the packet as disconnected
+    // and destroys any not-yet-received payload.
+    pub fn drop_port(&mut self) {
+        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+            // An empty channel has nothing to do, and a remotely disconnected
+            // channel also has nothing to do b/c we're about to run the drop
+            // glue
+            DISCONNECTED | EMPTY => {}
+
+            // There's data on the channel, so make sure we destroy it promptly.
+            // This is why not using an arc is a little difficult (need the box
+            // to stay valid while we take the data).
+            DATA => { self.data.take_unwrap(); }
+
+            // We're the only ones that can block on this port
+            _ => unreachable!()
+        }
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // select implementation
+    ////////////////////////////////////////////////////////////////////////////
+
+    // If Ok, the value is whether this port has data, if Err, then the upgraded
+    // port needs to be checked instead of this one.
+    pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
+        match self.state.load(atomics::SeqCst) {
+            EMPTY => Ok(false), // Welp, we tried
+            DATA => Ok(true), // we have some un-acquired data
+            DISCONNECTED if self.data.is_some() => Ok(true), // we have data
+            DISCONNECTED => {
+                match mem::replace(&mut self.upgrade, SendUsed) {
+                    // The other end sent us an upgrade, so we need to
+                    // propagate upwards whether the upgrade can receive
+                    // data
+                    GoUp(upgrade) => Err(upgrade),
+
+                    // If the other end disconnected without sending an
+                    // upgrade, then we have data to receive (the channel is
+                    // disconnected).
+                    up => { self.upgrade = up; Ok(true) }
+                }
+            }
+            _ => unreachable!(), // we're the "one blocker"
+        }
+    }
+
+    // Attempts to start selection on this port. This can either succeed, fail
+    // because there is data, or fail because there is an upgrade pending.
+    pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
+        let n = unsafe { task.cast_to_uint() };
+        match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+            EMPTY => SelSuccess,
+            DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
+            DISCONNECTED if self.data.is_some() => {
+                SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
+            }
+            DISCONNECTED => {
+                match mem::replace(&mut self.upgrade, SendUsed) {
+                    // The other end sent us an upgrade, so we need to
+                    // propagate upwards whether the upgrade can receive
+                    // data
+                    GoUp(upgrade) => {
+                        SelUpgraded(unsafe { BlockedTask::cast_from_uint(n) },
+                                    upgrade)
+                    }
+
+                    // If the other end disconnected without sending an
+                    // upgrade, then we have data to receive (the channel is
+                    // disconnected).
+                    up => {
+                        self.upgrade = up;
+                        SelCanceled(unsafe { BlockedTask::cast_from_uint(n) })
+                    }
+                }
+            }
+            _ => unreachable!(), // we're the "one blocker"
+        }
+    }
+
+    // Remove a previous selecting task from this port. This ensures that the
+    // blocked task will no longer be visible to any other threads.
+    //
+    // The return value indicates whether there's data on this port.
+    pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
+        let state = match self.state.load(atomics::SeqCst) {
+            // Each of these states means that no further activity will happen
+            // with regard to abortion selection
+            s @ EMPTY |
+            s @ DATA |
+            s @ DISCONNECTED => s,
+
+            // If we've got a blocked task, then use an atomic to gain ownership
+            // of it (may fail)
+            n => self.state.compare_and_swap(n, EMPTY, atomics::SeqCst)
+        };
+
+        // Now that we've got ownership of our state, figure out what to do
+        // about it.
+        match state {
+            EMPTY => unreachable!(),
+            // our task used for select was stolen
+            DATA => Ok(true),
+
+            // If the other end has hung up, then we have complete ownership
+            // of the port. First, check if there was data waiting for us. This
+            // is possible if the other end sent something and then hung up.
+            //
+            // We then need to check to see if there was an upgrade requested,
+            // and if so, the upgraded port needs to have its selection aborted.
+            DISCONNECTED => {
+                if self.data.is_some() {
+                    Ok(true)
+                } else {
+                    match mem::replace(&mut self.upgrade, SendUsed) {
+                        GoUp(port) => Err(port),
+                        _ => Ok(true),
+                    }
+                }
+            }
+
+            // We woke ourselves up from select. Assert that the task should be
+            // trashed and return that we don't have any data.
+            n => {
+                let t = unsafe { BlockedTask::cast_from_uint(n) };
+                t.trash();
+                Ok(false)
+            }
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+    fn drop(&mut self) {
+        // Both `drop_chan` and `drop_port` swap the state to DISCONNECTED,
+        // so by the time the shared packet itself is destroyed both halves
+        // must have hung up and the state can only be DISCONNECTED.
+        assert_eq!(self.state.load(atomics::SeqCst), DISCONNECTED);
+    }
+}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Selection over an array of receivers
+//!
+//! This module contains the implementation machinery necessary for selecting
+//! over a number of receivers. One large goal of this module is to provide an
+//! efficient interface to selecting over any receiver of any type.
+//!
+//! This is achieved through an architecture of a "receiver set" in which
+//! receivers are added to a set and then the entire set is waited on at once.
+//! The set can be waited on multiple times to prevent re-adding each receiver
+//! to the set.
+//!
+//! Usage of this module is currently encouraged to go through the use of the
+//! `select!` macro. This macro allows natural binding of variables to the
+//! received values of receivers in a much more natural syntax than usage of the
+//! `Select` structure directly.
+//!
+//! # Example
+//!
+//! ```rust
+//! let (tx1, rx1) = channel();
+//! let (tx2, rx2) = channel();
+//!
+//! tx1.send(1);
+//! tx2.send(2);
+//!
+//! select! {
+//! val = rx1.recv() => {
+//! assert_eq!(val, 1);
+//! },
+//! val = rx2.recv() => {
+//! assert_eq!(val, 2);
+//! }
+//! }
+//! ```
+
+#![allow(dead_code)]
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::cell::Cell;
+use core::kinds::marker;
+use core::mem;
+use core::uint;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+
+use comm::Receiver;
+
+/// The "receiver set" of the select interface. This structure is used to manage
+/// a set of receivers which are being selected over.
+pub struct Select {
+    // Intrusive doubly-linked list of the handles currently added to this
+    // set; the raw links are maintained by `Handle::add` / `Handle::remove`.
+    head: *mut Handle<'static, ()>,
+    tail: *mut Handle<'static, ()>,
+    // Next id handed out by `handle()`. Ids start at 1, leaving uint::MAX
+    // free as a "no ready handle yet" sentinel inside `wait2`.
+    next_id: Cell<uint>,
+    // A selection set is tied to the task that built it, so it may not be
+    // sent to another task.
+    marker1: marker::NoSend,
+}
+
+/// A handle to a receiver which is currently a member of a `Select` set of
+/// receivers. This handle is used to keep the receiver in the set as well as
+/// interact with the underlying receiver.
+pub struct Handle<'rx, T> {
+    /// The ID of this handle, used to compare against the return value of
+    /// `Select::wait()`
+    id: uint,
+    selector: &'rx Select,
+    // Intrusive linked-list pointers, maintained by `add`/`remove`.
+    next: *mut Handle<'static, ()>,
+    prev: *mut Handle<'static, ()>,
+    // Whether this handle is currently linked into the selector's list.
+    added: bool,
+    // Type-erased view of the receiver used by the selection machinery.
+    packet: &'rx Packet,
+
+    // due to our fun transmutes, we must be sure to place this at the end.
+    // (nothing previous relies on T)
+    rx: &'rx Receiver<T>,
+}
+
+// Iterator over the raw handle pointers linked into a `Select` set
+// (constructed by `Select::iter`).
+struct Packets { cur: *mut Handle<'static, ()> }
+
+#[doc(hidden)]
+pub trait Packet {
+    // Whether data can be received without blocking (used as the preflight
+    // check in `Select::wait2`).
+    fn can_recv(&self) -> bool;
+    // Blocks `task` on this packet, handing the task back if data is already
+    // available so the caller can abort the blocking sequence.
+    fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>;
+    // Cancels a pending selection; returns whether the packet has an event
+    // (data available or disconnection) ready for the receiver.
+    fn abort_selection(&self) -> bool;
+}
+
+impl Select {
+    /// Creates a new selection structure. This set is initially empty and
+    /// `wait` will fail!() if called.
+    ///
+    /// Usage of this struct directly can sometimes be burdensome, and usage is
+    /// rather much easier through the `select!` macro.
+    pub fn new() -> Select {
+        Select {
+            marker1: marker::NoSend,
+            head: 0 as *mut Handle<'static, ()>,
+            tail: 0 as *mut Handle<'static, ()>,
+            // ids start at 1; uint::MAX is reserved as the "no ready handle"
+            // sentinel in `wait2`.
+            next_id: Cell::new(1),
+        }
+    }
+
+    /// Creates a new handle into this receiver set for a new receiver. Note
+    /// that this does *not* add the receiver to the receiver set, for that you
+    /// must call the `add` method on the handle itself.
+    pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> {
+        let id = self.next_id.get();
+        self.next_id.set(id + 1);
+        Handle {
+            id: id,
+            selector: self,
+            next: 0 as *mut Handle<'static, ()>,
+            prev: 0 as *mut Handle<'static, ()>,
+            added: false,
+            rx: rx,
+            packet: rx,
+        }
+    }
+
+    /// Waits for an event on this receiver set. The returned value is *not* an
+    /// index, but rather an id. This id can be queried against any active
+    /// `Handle` structures (each one has an `id` method). The handle with
+    /// the matching `id` will have some sort of event available on it. The
+    /// event could either be that data is available or the corresponding
+    /// channel has been closed.
+    pub fn wait(&self) -> uint {
+        // The public entry point performs the preflight checks; only the
+        // tests (via `wait2(false)`) skip them.
+        self.wait2(true)
+    }
+
+    /// Helper method for skipping the preflight checks during testing
+    fn wait2(&self, do_preflight_checks: bool) -> uint {
+        // Note that this is currently an inefficient implementation. We in
+        // theory have knowledge about all receivers in the set ahead of time,
+        // so this method shouldn't really have to iterate over all of them yet
+        // again. The idea with this "receiver set" interface is to get the
+        // interface right this time around, and later this implementation can
+        // be optimized.
+        //
+        // This implementation can be summarized by:
+        //
+        //      fn select(receivers) {
+        //          if any receiver ready { return ready index }
+        //          deschedule {
+        //              block on all receivers
+        //          }
+        //          unblock on all receivers
+        //          return ready index
+        //      }
+        //
+        // Most notably, the iterations over all of the receivers shouldn't be
+        // necessary.
+        unsafe {
+            // Count the handles, optionally returning early if one already
+            // has data. Selecting over an empty set is a programming error.
+            let mut amt = 0;
+            for p in self.iter() {
+                amt += 1;
+                if do_preflight_checks && (*p).packet.can_recv() {
+                    return (*p).id;
+                }
+            }
+            assert!(amt > 0);
+
+            // Index/id of the handle that woke us up; uint::MAX means "not
+            // yet known".
+            let mut ready_index = amt;
+            let mut ready_id = uint::MAX;
+            let mut iter = self.iter().enumerate();
+
+            // Acquire a number of blocking contexts, and block on each one
+            // sequentially until one fails. If one fails, then abort
+            // immediately so we can go unblock on all the other receivers.
+            let task: Box<Task> = Local::take();
+            task.deschedule(amt, |task| {
+                // Prepare for the block
+                let (i, handle) = iter.next().unwrap();
+                match (*handle).packet.start_selection(task) {
+                    Ok(()) => Ok(()),
+                    Err(task) => {
+                        ready_index = i;
+                        ready_id = (*handle).id;
+                        Err(task)
+                    }
+                }
+            });
+
+            // Abort the selection process on each receiver. If the abort
+            // process returns `true`, then that means that the receiver is
+            // ready to receive some data. Note that this also means that the
+            // receiver may have yet to have fully read the `to_wake` field and
+            // woken us up (although the wakeup is guaranteed to fail).
+            //
+            // This situation happens in the window of where a sender invokes
+            // increment(), sees -1, and then decides to wake up the task. After
+            // all this is done, the sending thread will set `selecting` to
+            // `false`. Until this is done, we cannot return. If we were to
+            // return, then a sender could wake up a receiver which has gone
+            // back to sleep after this call to `select`.
+            //
+            // Note that it is a "fairly small window" in which an increment()
+            // views that it should wake a thread up until the `selecting` bit
+            // is set to false. For now, the implementation currently just spins
+            // in a yield loop. This is very distasteful, but this
+            // implementation is already nowhere near what it should ideally be.
+            // A rewrite should focus on avoiding a yield loop, and for now this
+            // implementation is tying us over to a more efficient "don't
+            // iterate over everything every time" implementation.
+            for handle in self.iter().take(ready_index) {
+                if (*handle).packet.abort_selection() {
+                    ready_id = (*handle).id;
+                }
+            }
+
+            assert!(ready_id != uint::MAX);
+            return ready_id;
+        }
+    }
+
+    fn iter(&self) -> Packets { Packets { cur: self.head } }
+}
+
+impl<'rx, T: Send> Handle<'rx, T> {
+    /// Retrieve the id of this handle.
+    #[inline]
+    pub fn id(&self) -> uint { self.id }
+
+    /// Receive a value on the underlying receiver. Has the same semantics as
+    /// `Receiver.recv`
+    pub fn recv(&mut self) -> T { self.rx.recv() }
+    /// Block to receive a value on the underlying receiver, returning `Ok` on
+    /// success or `Err(())` if the channel disconnects. This function has the
+    /// same semantics as `Receiver.recv_opt`
+    pub fn recv_opt(&mut self) -> Result<T, ()> { self.rx.recv_opt() }
+
+    /// Adds this handle to the receiver set that the handle was created from. This
+    /// method can be called multiple times, but it has no effect if `add` was
+    /// called previously.
+    ///
+    /// This method is unsafe because it requires that the `Handle` is not moved
+    /// while it is added to the `Select` set.
+    pub unsafe fn add(&mut self) {
+        if self.added { return }
+        // Type-erase ourselves and append to the selector's intrusive list.
+        let selector: &mut Select = mem::transmute(&*self.selector);
+        let me: *mut Handle<'static, ()> = mem::transmute(&*self);
+
+        if selector.head.is_null() {
+            selector.head = me;
+            selector.tail = me;
+        } else {
+            (*me).prev = selector.tail;
+            assert!((*me).next.is_null());
+            (*selector.tail).next = me;
+            selector.tail = me;
+        }
+        self.added = true;
+    }
+
+    /// Removes this handle from the `Select` set. This method is unsafe because
+    /// it has no guarantee that the `Handle` was not moved since `add` was
+    /// called.
+    pub unsafe fn remove(&mut self) {
+        if !self.added { return }
+
+        let selector: &mut Select = mem::transmute(&*self.selector);
+        let me: *mut Handle<'static, ()> = mem::transmute(&*self);
+
+        // Standard doubly-linked-list unlink, patching head/tail when we are
+        // at either end.
+        if self.prev.is_null() {
+            assert_eq!(selector.head, me);
+            selector.head = self.next;
+        } else {
+            (*self.prev).next = self.next;
+        }
+        if self.next.is_null() {
+            assert_eq!(selector.tail, me);
+            selector.tail = self.prev;
+        } else {
+            (*self.next).prev = self.prev;
+        }
+
+        self.next = 0 as *mut Handle<'static, ()>;
+        self.prev = 0 as *mut Handle<'static, ()>;
+
+        self.added = false;
+    }
+}
+
+#[unsafe_destructor]
+impl Drop for Select {
+    fn drop(&mut self) {
+        // Every `Handle` removes itself on drop, so an outliving `Select`
+        // must have an empty list by now.
+        assert!(self.head.is_null());
+        assert!(self.tail.is_null());
+    }
+}
+
+#[unsafe_destructor]
+impl<'rx, T: Send> Drop for Handle<'rx, T> {
+    fn drop(&mut self) {
+        // Unlink from the selector so it never sees a dangling pointer.
+        unsafe { self.remove() }
+    }
+}
+
+impl Iterator<*mut Handle<'static, ()>> for Packets {
+    fn next(&mut self) -> Option<*mut Handle<'static, ()>> {
+        if self.cur.is_null() {
+            None
+        } else {
+            let ret = Some(self.cur);
+            // Advance along the intrusive list; unsafe because `cur` is a raw
+            // pointer into handles owned elsewhere.
+            unsafe { self.cur = (*self.cur).next; }
+            ret
+        }
+    }
+}
+
+// White-box tests for `Select`, exercised through a local `select!` macro.
+#[cfg(test)]
+#[allow(unused_imports)]
+mod test {
+    use std::prelude::*;
+
+    use super::super::*;
+
+    // Don't use the libstd version so we can pull in the right Select structure
+    // (std::comm points at the wrong one)
+    macro_rules! select {
+        (
+            $($name:pat = $rx:ident.$meth:ident() => $code:expr),+
+        ) => ({
+            use comm::Select;
+            let sel = Select::new();
+            $( let mut $rx = sel.handle(&$rx); )+
+            unsafe {
+                $( $rx.add(); )+
+            }
+            let ret = sel.wait();
+            $( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
+            { unreachable!() }
+        })
+    }
+
+    test!(fn smoke() {
+        let (tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<int>();
+        tx1.send(1);
+        select! (
+            foo = rx1.recv() => { assert_eq!(foo, 1); },
+            _bar = rx2.recv() => { fail!() }
+        )
+        tx2.send(2);
+        select! (
+            _foo = rx1.recv() => { fail!() },
+            bar = rx2.recv() => { assert_eq!(bar, 2) }
+        )
+        drop(tx1);
+        select! (
+            foo = rx1.recv_opt() => { assert_eq!(foo, Err(())); },
+            _bar = rx2.recv() => { fail!() }
+        )
+        drop(tx2);
+        select! (
+            bar = rx2.recv_opt() => { assert_eq!(bar, Err(())); }
+        )
+    })
+
+    test!(fn smoke2() {
+        let (_tx1, rx1) = channel::<int>();
+        let (_tx2, rx2) = channel::<int>();
+        let (_tx3, rx3) = channel::<int>();
+        let (_tx4, rx4) = channel::<int>();
+        let (tx5, rx5) = channel::<int>();
+        tx5.send(4);
+        select! (
+            _foo = rx1.recv() => { fail!("1") },
+            _foo = rx2.recv() => { fail!("2") },
+            _foo = rx3.recv() => { fail!("3") },
+            _foo = rx4.recv() => { fail!("4") },
+            foo = rx5.recv() => { assert_eq!(foo, 4); }
+        )
+    })
+
+    test!(fn closed() {
+        let (_tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<int>();
+        drop(tx2);
+
+        select! (
+            _a1 = rx1.recv_opt() => { fail!() },
+            a2 = rx2.recv_opt() => { assert_eq!(a2, Err(())); }
+        )
+    })
+
+    test!(fn unblocks() {
+        let (tx1, rx1) = channel::<int>();
+        let (_tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<int>();
+
+        spawn(proc() {
+            for _ in range(0, 20) { task::deschedule(); }
+            tx1.send(1);
+            rx3.recv();
+            for _ in range(0, 20) { task::deschedule(); }
+        });
+
+        select! (
+            a = rx1.recv() => { assert_eq!(a, 1); },
+            _b = rx2.recv() => { fail!() }
+        )
+        tx3.send(1);
+        select! (
+            a = rx1.recv_opt() => { assert_eq!(a, Err(())); },
+            _b = rx2.recv() => { fail!() }
+        )
+    })
+
+    test!(fn both_ready() {
+        let (tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+
+        spawn(proc() {
+            for _ in range(0, 20) { task::deschedule(); }
+            tx1.send(1);
+            tx2.send(2);
+            rx3.recv();
+        });
+
+        select! (
+            a = rx1.recv() => { assert_eq!(a, 1); },
+            a = rx2.recv() => { assert_eq!(a, 2); }
+        )
+        select! (
+            a = rx1.recv() => { assert_eq!(a, 1); },
+            a = rx2.recv() => { assert_eq!(a, 2); }
+        )
+        assert_eq!(rx1.try_recv(), Err(Empty));
+        assert_eq!(rx2.try_recv(), Err(Empty));
+        tx3.send(());
+    })
+
+    test!(fn stress() {
+        static AMT: int = 10000;
+        let (tx1, rx1) = channel::<int>();
+        let (tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+
+        spawn(proc() {
+            for i in range(0, AMT) {
+                if i % 2 == 0 {
+                    tx1.send(i);
+                } else {
+                    tx2.send(i);
+                }
+                rx3.recv();
+            }
+        });
+
+        for i in range(0, AMT) {
+            select! (
+                i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1); },
+                i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2); }
+            )
+            tx3.send(());
+        }
+    })
+
+    test!(fn cloning() {
+        let (tx1, rx1) = channel::<int>();
+        let (_tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+
+        spawn(proc() {
+            rx3.recv();
+            tx1.clone();
+            assert_eq!(rx3.try_recv(), Err(Empty));
+            tx1.send(2);
+            rx3.recv();
+        });
+
+        tx3.send(());
+        select!(
+            _i1 = rx1.recv() => {},
+            _i2 = rx2.recv() => fail!()
+        )
+        tx3.send(());
+    })
+
+    test!(fn cloning2() {
+        let (tx1, rx1) = channel::<int>();
+        let (_tx2, rx2) = channel::<int>();
+        let (tx3, rx3) = channel::<()>();
+
+        spawn(proc() {
+            rx3.recv();
+            tx1.clone();
+            assert_eq!(rx3.try_recv(), Err(Empty));
+            tx1.send(2);
+            rx3.recv();
+        });
+
+        tx3.send(());
+        select!(
+            _i1 = rx1.recv() => {},
+            _i2 = rx2.recv() => fail!()
+        )
+        tx3.send(());
+    })
+
+    test!(fn cloning3() {
+        let (tx1, rx1) = channel::<()>();
+        let (tx2, rx2) = channel::<()>();
+        let (tx3, rx3) = channel::<()>();
+        spawn(proc() {
+            let s = Select::new();
+            let mut h1 = s.handle(&rx1);
+            let mut h2 = s.handle(&rx2);
+            unsafe { h2.add(); }
+            unsafe { h1.add(); }
+            assert_eq!(s.wait(), h2.id);
+            tx3.send(());
+        });
+
+        for _ in range(0, 1000) { task::deschedule(); }
+        drop(tx1.clone());
+        tx2.send(());
+        rx3.recv();
+    })
+
+    // preflight*: data (or a disconnect) is already available before the
+    // select starts, so selection must notice it without blocking forever.
+    test!(fn preflight1() {
+        let (tx, rx) = channel();
+        tx.send(());
+        select!(
+            () = rx.recv() => {}
+        )
+    })
+
+    test!(fn preflight2() {
+        let (tx, rx) = channel();
+        tx.send(());
+        tx.send(());
+        select!(
+            () = rx.recv() => {}
+        )
+    })
+
+    test!(fn preflight3() {
+        let (tx, rx) = channel();
+        drop(tx.clone());
+        tx.send(());
+        select!(
+            () = rx.recv() => {}
+        )
+    })
+
+    test!(fn preflight4() {
+        let (tx, rx) = channel();
+        tx.send(());
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    test!(fn preflight5() {
+        let (tx, rx) = channel();
+        tx.send(());
+        tx.send(());
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    test!(fn preflight6() {
+        let (tx, rx) = channel();
+        drop(tx.clone());
+        tx.send(());
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    test!(fn preflight7() {
+        let (tx, rx) = channel::<()>();
+        drop(tx);
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    test!(fn preflight8() {
+        let (tx, rx) = channel();
+        tx.send(());
+        drop(tx);
+        rx.recv();
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    test!(fn preflight9() {
+        let (tx, rx) = channel();
+        drop(tx.clone());
+        tx.send(());
+        drop(tx);
+        rx.recv();
+        let s = Select::new();
+        let mut h = s.handle(&rx);
+        unsafe { h.add(); }
+        assert_eq!(s.wait2(false), h.id);
+    })
+
+    // *_data_waiting: a task blocked in select must be woken once data
+    // arrives, for each flavor of channel.
+    test!(fn oneshot_data_waiting() {
+        let (tx1, rx1) = channel();
+        let (tx2, rx2) = channel();
+        spawn(proc() {
+            select! {
+                () = rx1.recv() => {}
+            }
+            tx2.send(());
+        });
+
+        for _ in range(0, 100) { task::deschedule() }
+        tx1.send(());
+        rx2.recv();
+    })
+
+    test!(fn stream_data_waiting() {
+        let (tx1, rx1) = channel();
+        let (tx2, rx2) = channel();
+        tx1.send(());
+        tx1.send(());
+        rx1.recv();
+        rx1.recv();
+        spawn(proc() {
+            select! {
+                () = rx1.recv() => {}
+            }
+            tx2.send(());
+        });
+
+        for _ in range(0, 100) { task::deschedule() }
+        tx1.send(());
+        rx2.recv();
+    })
+
+    test!(fn shared_data_waiting() {
+        let (tx1, rx1) = channel();
+        let (tx2, rx2) = channel();
+        drop(tx1.clone());
+        tx1.send(());
+        rx1.recv();
+        spawn(proc() {
+            select! {
+                () = rx1.recv() => {}
+            }
+            tx2.send(());
+        });
+
+        for _ in range(0, 100) { task::deschedule() }
+        tx1.send(());
+        rx2.recv();
+    })
+
+    test!(fn sync1() {
+        let (tx, rx) = sync_channel(1);
+        tx.send(1);
+        select! {
+            n = rx.recv() => { assert_eq!(n, 1); }
+        }
+    })
+
+    test!(fn sync2() {
+        let (tx, rx) = sync_channel(0);
+        spawn(proc() {
+            for _ in range(0, 100) { task::deschedule() }
+            tx.send(1);
+        });
+        select! {
+            n = rx.recv() => { assert_eq!(n, 1); }
+        }
+    })
+
+    test!(fn sync3() {
+        let (tx1, rx1) = sync_channel(0);
+        let (tx2, rx2) = channel();
+        spawn(proc() { tx1.send(1); });
+        spawn(proc() { tx2.send(2); });
+        select! {
+            n = rx1.recv() => {
+                assert_eq!(n, 1);
+                assert_eq!(rx2.recv(), 2);
+            },
+            n = rx2.recv() => {
+                assert_eq!(n, 2);
+                assert_eq!(rx1.recv(), 1);
+            }
+        }
+    })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Shared channels
+//!
+//! This is the flavor of channels which are not necessarily optimized for any
+//! particular use case, but are the most general in how they are used. Shared
+//! channels are cloneable allowing for multiple senders.
+//!
+//! High level implementation details can be found in the comment of the parent
+//! module. You'll also note that the implementation of the shared and stream
+//! channels are quite similar, and this is no coincidence!
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::cmp;
+use core::int;
+use rustrt::local::Local;
+use rustrt::mutex::NativeMutex;
+use rustrt::task::{Task, BlockedTask};
+use rustrt::thread::Thread;
+
+use atomics;
+use mpsc = mpsc_queue;
+
+// Channel counts at (or near) this sentinel mean the channel is disconnected.
+static DISCONNECTED: int = int::MIN;
+// Senders may transiently bump `cnt` above DISCONNECTED before it is
+// re-stored; this factor bounds how far (see the ranged check in `send`).
+static FUDGE: int = 1024;
+// Cap on consecutive non-blocking receives before the count is rebalanced in
+// `try_recv`; kept small under test to exercise that path.
+#[cfg(test)]
+static MAX_STEALS: int = 5;
+#[cfg(not(test))]
+static MAX_STEALS: int = 1 << 20;
+
+pub struct Packet<T> {
+    // Multi-producer, single-consumer queue holding the in-flight messages.
+    queue: mpsc::Queue<T>,
+    cnt: atomics::AtomicInt, // How many items are on this channel
+    steals: int, // How many times has a port received without blocking?
+    to_wake: atomics::AtomicUint, // Task to wake up
+
+    // The number of channels which are currently using this packet.
+    channels: atomics::AtomicInt,
+
+    // See the discussion in Port::drop and the channel send methods for what
+    // these are used for
+    port_dropped: atomics::AtomicBool,
+    sender_drain: atomics::AtomicInt,
+
+    // this lock protects various portions of this implementation during
+    // select()
+    select_lock: NativeMutex,
+}
+
+/// Reasons a receive operation can fail (see `try_recv` and `recv`).
+pub enum Failure {
+    Empty,
+    Disconnected,
+}
+
+impl<T: Send> Packet<T> {
+ // Creation of a packet *must* be followed by a call to postinit_lock
+ // and later by inherit_blocker
+ pub fn new() -> Packet<T> {
+ let p = Packet {
+ queue: mpsc::Queue::new(),
+ cnt: atomics::AtomicInt::new(0),
+ steals: 0,
+ to_wake: atomics::AtomicUint::new(0),
+ channels: atomics::AtomicInt::new(2),
+ port_dropped: atomics::AtomicBool::new(false),
+ sender_drain: atomics::AtomicInt::new(0),
+ select_lock: unsafe { NativeMutex::new() },
+ };
+ return p;
+ }
+
+ // This function should be used after newly created Packet
+ // was wrapped with an Arc
+ // In other case mutex data will be duplicated while clonning
+ // and that could cause problems on platforms where it is
+ // represented by opaque data structure
+ pub fn postinit_lock(&mut self) {
+ unsafe { self.select_lock.lock_noguard() }
+ }
+
+ // This function is used at the creation of a shared packet to inherit a
+ // previously blocked task. This is done to prevent spurious wakeups of
+ // tasks in select().
+ //
+ // This can only be called at channel-creation time
+ pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
+ match task {
+ Some(task) => {
+ assert_eq!(self.cnt.load(atomics::SeqCst), 0);
+ assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+ self.to_wake.store(unsafe { task.cast_to_uint() },
+ atomics::SeqCst);
+ self.cnt.store(-1, atomics::SeqCst);
+
+ // This store is a little sketchy. What's happening here is
+ // that we're transferring a blocker from a oneshot or stream
+ // channel to this shared channel. In doing so, we never
+ // spuriously wake them up and rather only wake them up at the
+ // appropriate time. This implementation of shared channels
+ // assumes that any blocking recv() will undo the increment of
+ // steals performed in try_recv() once the recv is complete.
+ // This thread that we're inheriting, however, is not in the
+ // middle of recv. Hence, the first time we wake them up,
+ // they're going to wake up from their old port, move on to the
+ // upgraded port, and then call the block recv() function.
+ //
+ // When calling this function, they'll find there's data
+ // immediately available, counting it as a steal. This in fact
+ // wasn't a steal because we appropriately blocked them waiting
+ // for data.
+ //
+ // To offset this bad increment, we initially set the steal
+ // count to -1. You'll find some special code in
+ // abort_selection() as well to ensure that this -1 steal count
+ // doesn't escape too far.
+ self.steals = -1;
+ }
+ None => {}
+ }
+
+ // When the shared packet is constructed, we grabbed this lock. The
+ // purpose of this lock is to ensure that abort_selection() doesn't
+ // interfere with this method. After we unlock this lock, we're
+ // signifying that we're done modifying self.cnt and self.to_wake and
+ // the port is ready for the world to continue using it.
+ unsafe { self.select_lock.unlock_noguard() }
+ }
+
+ pub fn send(&mut self, t: T) -> Result<(), T> {
+ // See Port::drop for what's going on
+ if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+
+ // Note that the multiple sender case is a little tricker
+ // semantically than the single sender case. The logic for
+ // incrementing is "add and if disconnected store disconnected".
+ // This could end up leading some senders to believe that there
+ // wasn't a disconnect if in fact there was a disconnect. This means
+ // that while one thread is attempting to re-store the disconnected
+ // states, other threads could walk through merrily incrementing
+ // this very-negative disconnected count. To prevent senders from
+ // spuriously attempting to send when the channels is actually
+ // disconnected, the count has a ranged check here.
+ //
+ // This is also done for another reason. Remember that the return
+ // value of this function is:
+ //
+ // `true` == the data *may* be received, this essentially has no
+ // meaning
+ // `false` == the data will *never* be received, this has a lot of
+ // meaning
+ //
+ // In the SPSC case, we have a check of 'queue.is_empty()' to see
+ // whether the data was actually received, but this same condition
+ // means nothing in a multi-producer context. As a result, this
+ // preflight check serves as the definitive "this will never be
+ // received". Once we get beyond this check, we have permanently
+ // entered the realm of "this may be received"
+ if self.cnt.load(atomics::SeqCst) < DISCONNECTED + FUDGE {
+ return Err(t)
+ }
+
+ self.queue.push(t);
+ match self.cnt.fetch_add(1, atomics::SeqCst) {
+ -1 => {
+ self.take_to_wake().wake().map(|t| t.reawaken());
+ }
+
+ // In this case, we have possibly failed to send our data, and
+ // we need to consider re-popping the data in order to fully
+ // destroy it. We must arbitrate among the multiple senders,
+ // however, because the queues that we're using are
+ // single-consumer queues. In order to do this, all exiting
+ // pushers will use an atomic count in order to count those
+ // flowing through. Pushers who see 0 are required to drain as
+ // much as possible, and then can only exit when they are the
+ // only pusher (otherwise they must try again).
+ n if n < DISCONNECTED + FUDGE => {
+ // see the comment in 'try' for a shared channel for why this
+ // window of "not disconnected" is ok.
+ self.cnt.store(DISCONNECTED, atomics::SeqCst);
+
+ if self.sender_drain.fetch_add(1, atomics::SeqCst) == 0 {
+ loop {
+ // drain the queue, for info on the thread yield see the
+ // discussion in try_recv
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => {}
+ mpsc::Empty => break,
+ mpsc::Inconsistent => Thread::yield_now(),
+ }
+ }
+ // maybe we're done, if we're not the last ones
+ // here, then we need to go try again.
+ if self.sender_drain.fetch_sub(1, atomics::SeqCst) == 1 {
+ break
+ }
+ }
+
+ // At this point, there may still be data on the queue,
+ // but only if the count hasn't been incremented and
+ // some other sender hasn't finished pushing data just
+ // yet. That sender in question will drain its own data.
+ }
+ }
+
+ // Can't make any assumptions about this case like in the SPSC case.
+ _ => {}
+ }
+
+ Ok(())
+ }
+
+ pub fn recv(&mut self) -> Result<T, Failure> {
+ // This code is essentially the exact same as that found in the stream
+ // case (see stream.rs)
+ match self.try_recv() {
+ Err(Empty) => {}
+ data => return data,
+ }
+
+ let task: Box<Task> = Local::take();
+ task.deschedule(1, |task| {
+ self.decrement(task)
+ });
+
+ match self.try_recv() {
+ data @ Ok(..) => { self.steals -= 1; data }
+ data => data,
+ }
+ }
+
+ // Essentially the exact same thing as the stream decrement function.
+ fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
+ assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+ let n = unsafe { task.cast_to_uint() };
+ self.to_wake.store(n, atomics::SeqCst);
+
+ let steals = self.steals;
+ self.steals = 0;
+
+ match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
+ DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+ // If we factor in our steals and notice that the channel has no
+ // data, we successfully sleep
+ n => {
+ assert!(n >= 0);
+ if n - steals <= 0 { return Ok(()) }
+ }
+ }
+
+ self.to_wake.store(0, atomics::SeqCst);
+ Err(unsafe { BlockedTask::cast_from_uint(n) })
+ }
+
+ pub fn try_recv(&mut self) -> Result<T, Failure> {
+ let ret = match self.queue.pop() {
+ mpsc::Data(t) => Some(t),
+ mpsc::Empty => None,
+
+ // This is a bit of an interesting case. The channel is
+ // reported as having data available, but our pop() has
+ // failed due to the queue being in an inconsistent state.
+ // This means that there is some pusher somewhere which has
+ // yet to complete, but we are guaranteed that a pop will
+ // eventually succeed. In this case, we spin in a yield loop
+ // because the remote sender should finish their enqueue
+ // operation "very quickly".
+ //
+ // Note that this yield loop does *not* attempt to do a green
+ // yield (regardless of the context), but *always* performs an
+ // OS-thread yield. The reasoning for this is that the pusher in
+ // question which is causing the inconsistent state is
+ // guaranteed to *not* be a blocked task (green tasks can't get
+ // pre-empted), so it must be on a different OS thread. Also,
+ // `try_recv` is normally a "guaranteed no rescheduling" context
+ // in a green-thread situation. By yielding control of the
+ // thread, we will hopefully allow time for the remote task on
+ // the other OS thread to make progress.
+ //
+ // Avoiding this yield loop would require a different queue
+ // abstraction which provides the guarantee that after M
+ // pushes have succeeded, at least M pops will succeed. The
+ // current queues guarantee that if there are N active
+ // pushes, you can pop N times once all N have finished.
+ mpsc::Inconsistent => {
+ let data;
+ loop {
+ Thread::yield_now();
+ match self.queue.pop() {
+ mpsc::Data(t) => { data = t; break }
+ mpsc::Empty => fail!("inconsistent => empty"),
+ mpsc::Inconsistent => {}
+ }
+ }
+ Some(data)
+ }
+ };
+ match ret {
+ // See the discussion in the stream implementation for why we
+ // might decrement steals.
+ Some(data) => {
+ if self.steals > MAX_STEALS {
+ match self.cnt.swap(0, atomics::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomics::SeqCst);
+ }
+ n => {
+ let m = cmp::min(n, self.steals);
+ self.steals -= m;
+ self.bump(n - m);
+ }
+ }
+ assert!(self.steals >= 0);
+ }
+ self.steals += 1;
+ Ok(data)
+ }
+
+ // See the discussion in the stream implementation for why we try
+ // again.
+ None => {
+ match self.cnt.load(atomics::SeqCst) {
+ n if n != DISCONNECTED => Err(Empty),
+ _ => {
+ match self.queue.pop() {
+ mpsc::Data(t) => Ok(t),
+ mpsc::Empty => Err(Disconnected),
+ // with no senders, an inconsistency is impossible.
+ mpsc::Inconsistent => unreachable!(),
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Prepares this shared packet for a channel clone, essentially just bumping
+ // a refcount.
+ pub fn clone_chan(&mut self) {
+ self.channels.fetch_add(1, atomics::SeqCst);
+ }
+
+ // Decrement the reference count on a channel. This is called whenever a
+ // Chan is dropped and may end up waking up a receiver. It's the receiver's
+ // responsibility on the other end to figure out that we've disconnected.
+ pub fn drop_chan(&mut self) {
+ match self.channels.fetch_sub(1, atomics::SeqCst) {
+ 1 => {}
+ n if n > 1 => return,
+ n => fail!("bad number of channels left {}", n),
+ }
+
+ match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+ -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
+ DISCONNECTED => {}
+ n => { assert!(n >= 0); }
+ }
+ }
+
+ // See the long discussion inside of stream.rs for why the queue is drained,
+ // and why it is done in this fashion.
+ pub fn drop_port(&mut self) {
+ self.port_dropped.store(true, atomics::SeqCst);
+ let mut steals = self.steals;
+ while {
+ let cnt = self.cnt.compare_and_swap(
+ steals, DISCONNECTED, atomics::SeqCst);
+ cnt != DISCONNECTED && cnt != steals
+ } {
+ // See the discussion in 'try_recv' for why we yield
+ // control of this thread.
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => { steals += 1; }
+ mpsc::Empty | mpsc::Inconsistent => break,
+ }
+ }
+ }
+ }
+
+ // Consumes ownership of the 'to_wake' field.
+ fn take_to_wake(&mut self) -> BlockedTask {
+ let task = self.to_wake.load(atomics::SeqCst);
+ self.to_wake.store(0, atomics::SeqCst);
+ assert!(task != 0);
+ unsafe { BlockedTask::cast_from_uint(task) }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // Helper function for select, tests whether this port can receive without
+ // blocking (obviously not an atomic decision).
+ //
+ // This is different than the stream version because there's no need to peek
+ // at the queue, we can just look at the local count.
+ pub fn can_recv(&mut self) -> bool {
+ let cnt = self.cnt.load(atomics::SeqCst);
+ cnt == DISCONNECTED || cnt - self.steals > 0
+ }
+
+ // increment the count on the channel (used for selection)
+ fn bump(&mut self, amt: int) -> int {
+ match self.cnt.fetch_add(amt, atomics::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, atomics::SeqCst);
+ DISCONNECTED
+ }
+ n => n
+ }
+ }
+
+    // Inserts the blocked task for selection on this port, returning it back if
+    // the port already has data on it.
+    //
+    // The code here is the same as in stream.rs, except that it doesn't need to
+    // peek at the channel to see if an upgrade is pending.
+    pub fn start_selection(&mut self,
+                           task: BlockedTask) -> Result<(), BlockedTask> {
+        match self.decrement(task) {
+            Ok(()) => Ok(()),
+            Err(task) => {
+                // Undo the decrement above; we're not going to sleep after all.
+                let prev = self.bump(1);
+                assert!(prev == DISCONNECTED || prev >= 0);
+                return Err(task);
+            }
+        }
+    }
+
+    // Cancels a previous task waiting on this port, returning whether there's
+    // data on the port.
+    //
+    // This is similar to the stream implementation (hence fewer comments), but
+    // uses a different value for the "steals" variable.
+    pub fn abort_selection(&mut self, _was_upgrade: bool) -> bool {
+        // Before we do anything else, we bounce on this lock. The reason for
+        // doing this is to ensure that any upgrade-in-progress is gone and
+        // done with. Without this bounce, we can race with inherit_blocker
+        // about looking at and dealing with to_wake. Once we have acquired the
+        // lock, we are guaranteed that inherit_blocker is done.
+        unsafe {
+            let _guard = self.select_lock.lock();
+        }
+
+        // Like the stream implementation, we want to make sure that the count
+        // on the channel goes non-negative. We don't know how negative the
+        // stream currently is, so instead of using a steal value of 1, we load
+        // the channel count and figure out what we should do to make it
+        // positive.
+        let steals = {
+            let cnt = self.cnt.load(atomics::SeqCst);
+            if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
+        };
+        let prev = self.bump(steals + 1);
+
+        if prev == DISCONNECTED {
+            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            true
+        } else {
+            let cur = prev + steals + 1;
+            assert!(cur >= 0);
+            if prev < 0 {
+                // We passed the -1 boundary, so the blocked task is ours to
+                // remove and trash (see stream.rs for the full reasoning).
+                self.take_to_wake().trash();
+            } else {
+                // A sender may be mid-wakeup (it incremented through -1 but
+                // hasn't read to_wake yet); spin until it has.
+                while self.to_wake.load(atomics::SeqCst) != 0 {
+                    Thread::yield_now();
+                }
+            }
+            // if the number of steals is -1, it was the pre-emptive -1 steal
+            // count from when we inherited a blocker. This is fine because
+            // we're just going to overwrite it with a real value.
+            assert!(self.steals == 0 || self.steals == -1);
+            self.steals = steals;
+            prev >= 0
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+    fn drop(&mut self) {
+        // Note that this load is not only an assert for correctness about
+        // disconnection, but also a proper fence before the read of
+        // `to_wake`, so this assert cannot be removed without also removing
+        // the `to_wake` assert.
+        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+    }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Stream channels
+///
+/// This is the flavor of channels which are optimized for one sender and one
+/// receiver. The sender will be upgraded to a shared channel if the channel is
+/// cloned.
+///
+/// High level implementation details can be found in the comment of the parent
+/// module.
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::cmp;
+use core::int;
+use rustrt::local::Local;
+use rustrt::task::{Task, BlockedTask};
+use rustrt::thread::Thread;
+
+use atomics;
+use comm::Receiver;
+use spsc = spsc_queue;
+
+static DISCONNECTED: int = int::MIN;
+// Cap on `steals` before it is folded back into `cnt` (see try_recv); kept
+// tiny under test so the rebalancing path is actually exercised.
+#[cfg(test)]
+static MAX_STEALS: int = 5;
+#[cfg(not(test))]
+static MAX_STEALS: int = 1 << 20;
+
+pub struct Packet<T> {
+    queue: spsc::Queue<Message<T>>, // internal queue for all messages
+
+    cnt: atomics::AtomicInt, // How many items are on this channel
+    steals: int, // How many times has a port received without blocking?
+    to_wake: atomics::AtomicUint, // Task to wake up
+
+    port_dropped: atomics::AtomicBool, // flag if the channel has been destroyed.
+}
+
+/// Ways in which a receive on this port can fail.
+pub enum Failure<T> {
+    Empty,
+    Disconnected,
+    Upgraded(Receiver<T>),
+}
+
+/// Result of a send (or upgrade) attempt on this channel.
+pub enum UpgradeResult {
+    UpSuccess,
+    UpDisconnected,
+    UpWoke(BlockedTask),
+}
+
+/// Result of starting selection on this port.
+pub enum SelectionResult<T> {
+    SelSuccess,
+    SelCanceled(BlockedTask),
+    SelUpgraded(BlockedTask, Receiver<T>),
+}
+
+// Any message could contain an "upgrade request" to a new shared port, so the
+// internal queue isn't a queue of T, but rather Message<T>
+enum Message<T> {
+    Data(T),
+    GoUp(Receiver<T>),
+}
+
+impl<T: Send> Packet<T> {
+    pub fn new() -> Packet<T> {
+        Packet {
+            queue: spsc::Queue::new(128),
+
+            cnt: atomics::AtomicInt::new(0),
+            steals: 0,
+            to_wake: atomics::AtomicUint::new(0),
+
+            port_dropped: atomics::AtomicBool::new(false),
+        }
+    }
+
+    pub fn send(&mut self, t: T) -> Result<(), T> {
+        // If the other port has deterministically gone away, then definitely
+        // must return the data back up the stack. Otherwise, the data is
+        // considered as being sent.
+        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+
+        match self.do_send(Data(t)) {
+            UpSuccess | UpDisconnected => {},
+            UpWoke(task) => { task.wake().map(|t| t.reawaken()); }
+        }
+        Ok(())
+    }
+
+    pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
+        // If the port has gone away, then there's no need to proceed any
+        // further.
+        if self.port_dropped.load(atomics::SeqCst) { return UpDisconnected }
+
+        self.do_send(GoUp(up))
+    }
+
+    // Common send path for data and upgrade messages: push onto the queue,
+    // then reconcile with the channel count.
+    fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
+        self.queue.push(t);
+        match self.cnt.fetch_add(1, atomics::SeqCst) {
+            // As described in the mod's doc comment, -1 == wakeup
+            -1 => UpWoke(self.take_to_wake()),
+            // As described before, SPSC queues must be >= -2
+            -2 => UpSuccess,
+
+            // Be sure to preserve the disconnected state, and the return value
+            // in this case is going to be whether our data was received or not.
+            // This manifests itself on whether we have an empty queue or not.
+            //
+            // Primarily, we are required to drain the queue here because the
+            // port will never remove this data. We can only have at most one
+            // item to drain (the port drains the rest).
+            DISCONNECTED => {
+                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                let first = self.queue.pop();
+                let second = self.queue.pop();
+                assert!(second.is_none());
+
+                match first {
+                    Some(..) => UpSuccess,  // we failed to send the data
+                    None => UpDisconnected, // we successfully sent data
+                }
+            }
+
+            // Otherwise we just sent some data on a non-waiting queue, so just
+            // make sure the world is sane and carry on!
+            n => { assert!(n >= 0); UpSuccess }
+        }
+    }
+
+    // Consumes ownership of the 'to_wake' field.
+    fn take_to_wake(&mut self) -> BlockedTask {
+        let task = self.to_wake.load(atomics::SeqCst);
+        self.to_wake.store(0, atomics::SeqCst);
+        assert!(task != 0);
+        unsafe { BlockedTask::cast_from_uint(task) }
+    }
+
+    // Decrements the count on the channel for a sleeper, returning the sleeper
+    // back if it shouldn't sleep. Note that this is the location where we take
+    // steals into account.
+    fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
+        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        let n = unsafe { task.cast_to_uint() };
+        self.to_wake.store(n, atomics::SeqCst);
+
+        let steals = self.steals;
+        self.steals = 0;
+
+        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+            // If we factor in our steals and notice that the channel has no
+            // data, we successfully sleep
+            n => {
+                assert!(n >= 0);
+                if n - steals <= 0 { return Ok(()) }
+            }
+        }
+
+        // Data is available after all; un-register ourselves and hand the
+        // task back to the caller instead of sleeping.
+        self.to_wake.store(0, atomics::SeqCst);
+        Err(unsafe { BlockedTask::cast_from_uint(n) })
+    }
+
+    pub fn recv(&mut self) -> Result<T, Failure<T>> {
+        // Optimistic preflight check (scheduling is expensive).
+        match self.try_recv() {
+            Err(Empty) => {}
+            data => return data,
+        }
+
+        // Welp, our channel has no data. Deschedule the current task and
+        // initiate the blocking protocol.
+        let task: Box<Task> = Local::take();
+        task.deschedule(1, |task| {
+            self.decrement(task)
+        });
+
+        match self.try_recv() {
+            // Messages which actually popped from the queue shouldn't count as
+            // a steal, so offset the decrement here (we already have our
+            // "steal" factored into the channel count above).
+            data @ Ok(..) |
+            data @ Err(Upgraded(..)) => {
+                self.steals -= 1;
+                data
+            }
+
+            data => data,
+        }
+    }
+
+    pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
+        match self.queue.pop() {
+            // If we stole some data, record to that effect (this will be
+            // factored into cnt later on).
+            //
+            // Note that we don't allow steals to grow without bound in order to
+            // prevent eventual overflow of either steals or cnt as an overflow
+            // would have catastrophic results. Sometimes, steals > cnt, but
+            // other times cnt > steals, so we don't know the relation between
+            // steals and cnt. This code path is executed only rarely, so we do
+            // a pretty slow operation, of swapping 0 into cnt, taking steals
+            // down as much as possible (without going negative), and then
+            // adding back in whatever we couldn't factor into steals.
+            Some(data) => {
+                if self.steals > MAX_STEALS {
+                    match self.cnt.swap(0, atomics::SeqCst) {
+                        DISCONNECTED => {
+                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                        }
+                        n => {
+                            let m = cmp::min(n, self.steals);
+                            self.steals -= m;
+                            self.bump(n - m);
+                        }
+                    }
+                    assert!(self.steals >= 0);
+                }
+                self.steals += 1;
+                match data {
+                    Data(t) => Ok(t),
+                    GoUp(up) => Err(Upgraded(up)),
+                }
+            }
+
+            None => {
+                match self.cnt.load(atomics::SeqCst) {
+                    n if n != DISCONNECTED => Err(Empty),
+
+                    // This is a little bit of a tricky case. We failed to pop
+                    // data above, and then we have viewed that the channel is
+                    // disconnected. In this window more data could have been
+                    // sent on the channel. It doesn't really make sense to
+                    // return that the channel is disconnected when there's
+                    // actually data on it, so be extra sure there's no data by
+                    // popping one more time.
+                    //
+                    // We can ignore steals because the other end is
+                    // disconnected and we'll never need to really factor in our
+                    // steals again.
+                    _ => {
+                        match self.queue.pop() {
+                            Some(Data(t)) => Ok(t),
+                            Some(GoUp(up)) => Err(Upgraded(up)),
+                            None => Err(Disconnected),
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn drop_chan(&mut self) {
+        // Dropping a channel is pretty simple, we just flag it as disconnected
+        // and then wakeup a blocker if there is one.
+        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+            -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
+            DISCONNECTED => {}
+            n => { assert!(n >= 0); }
+        }
+    }
+
+    pub fn drop_port(&mut self) {
+        // Dropping a port seems like a fairly trivial thing. In theory all we
+        // need to do is flag that we're disconnected and then everything else
+        // can take over (we don't have anyone to wake up).
+        //
+        // The catch for Ports is that we want to drop the entire contents of
+        // the queue. There are multiple reasons for having this property, the
+        // largest of which is that if another chan is waiting in this channel
+        // (but not received yet), then waiting on that port will cause a
+        // deadlock.
+        //
+        // So if we accept that we must now destroy the entire contents of the
+        // queue, this code may make a bit more sense. The tricky part is that
+        // we can't let any in-flight sends go un-dropped, we have to make sure
+        // *everything* is dropped and nothing new will come onto the channel.
+
+        // The first thing we do is set a flag saying that we're done for. All
+        // sends are gated on this flag, so we're immediately guaranteed that
+        // there are a bounded number of active sends that we'll have to deal
+        // with.
+        self.port_dropped.store(true, atomics::SeqCst);
+
+        // Now that we're guaranteed to deal with a bounded number of senders,
+        // we need to drain the queue. This draining process happens atomically
+        // with respect to the "count" of the channel. If the count is nonzero
+        // (with steals taken into account), then there must be data on the
+        // channel. In this case we drain everything and then try again. We will
+        // continue to fail while active senders send data while we're dropping
+        // data, but eventually we're guaranteed to break out of this loop
+        // (because there is a bounded number of senders).
+        let mut steals = self.steals;
+        while {
+            let cnt = self.cnt.compare_and_swap(
+                steals, DISCONNECTED, atomics::SeqCst);
+            cnt != DISCONNECTED && cnt != steals
+        } {
+            loop {
+                match self.queue.pop() {
+                    Some(..) => { steals += 1; }
+                    None => break
+                }
+            }
+        }
+
+        // At this point in time, we have gated all future senders from sending,
+        // and we have flagged the channel as being disconnected. The senders
+        // still have some responsibility, however, because some sends may not
+        // complete until after we flag the disconnection. There are more
+        // details in the sending methods that see DISCONNECTED
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // select implementation
+    ////////////////////////////////////////////////////////////////////////////
+
+    // Tests to see whether this port can receive without blocking. If Ok is
+    // returned, then that's the answer. If Err is returned, then the returned
+    // port needs to be queried instead (an upgrade happened)
+    pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
+        // We peek at the queue to see if there's anything on it, and we use
+        // this return value to determine if we should pop from the queue and
+        // upgrade this channel immediately. If it looks like we've got an
+        // upgrade pending, then go through the whole recv rigamarole to update
+        // the internal state.
+        match self.queue.peek() {
+            Some(&GoUp(..)) => {
+                match self.recv() {
+                    Err(Upgraded(port)) => Err(port),
+                    _ => unreachable!(),
+                }
+            }
+            Some(..) => Ok(true),
+            None => Ok(false)
+        }
+    }
+
+    // Increments the count on the channel (used for selection), preserving
+    // the DISCONNECTED sentinel if the channel was already torn down.
+    fn bump(&mut self, amt: int) -> int {
+        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+            DISCONNECTED => {
+                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                DISCONNECTED
+            }
+            n => n
+        }
+    }
+
+    // Attempts to start selecting on this port. Like a oneshot, this can fail
+    // immediately because of an upgrade.
+    pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
+        match self.decrement(task) {
+            Ok(()) => SelSuccess,
+            Err(task) => {
+                let ret = match self.queue.peek() {
+                    Some(&GoUp(..)) => {
+                        match self.queue.pop() {
+                            Some(GoUp(port)) => SelUpgraded(task, port),
+                            _ => unreachable!(),
+                        }
+                    }
+                    Some(..) => SelCanceled(task),
+                    None => SelCanceled(task),
+                };
+                // Undo our decrement above, and we should be guaranteed that the
+                // previous value is positive because we're not going to sleep
+                let prev = self.bump(1);
+                assert!(prev == DISCONNECTED || prev >= 0);
+                return ret;
+            }
+        }
+    }
+
+    // Removes a previous task from being blocked in this port
+    pub fn abort_selection(&mut self,
+                           was_upgrade: bool) -> Result<bool, Receiver<T>> {
+        // If we're aborting selection after upgrading from a oneshot, then
+        // we're guaranteed that no one is waiting. The only way that we could
+        // have seen the upgrade is if data was actually sent on the channel
+        // half again. For us, this means that there is guaranteed to be data on
+        // this channel. Furthermore, we're guaranteed that there was no
+        // start_selection previously, so there's no need to modify `self.cnt`
+        // at all.
+        //
+        // Hence, because of these invariants, we immediately return `Ok(true)`.
+        // Note that the data may not actually be sent on the channel just yet.
+        // The other end could have flagged the upgrade but not sent data to
+        // this end. This is fine because we know it's a small bounded window
+        // of time until the data is actually sent.
+        if was_upgrade {
+            assert_eq!(self.steals, 0);
+            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            return Ok(true)
+        }
+
+        // We want to make sure that the count on the channel goes non-negative,
+        // and in the stream case we can have at most one steal, so just assume
+        // that we had one steal.
+        let steals = 1;
+        let prev = self.bump(steals + 1);
+
+        // If we were previously disconnected, then we know for sure that there
+        // is no task in to_wake, so just keep going
+        let has_data = if prev == DISCONNECTED {
+            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            true // there is data, that data is that we're disconnected
+        } else {
+            let cur = prev + steals + 1;
+            assert!(cur >= 0);
+
+            // If the previous count was negative, then we just made things go
+            // positive, hence we passed the -1 boundary and we're responsible
+            // for removing the to_wake() field and trashing it.
+            //
+            // If the previous count was positive then we're in a tougher
+            // situation. A possible race is that a sender just incremented
+            // through -1 (meaning it's going to try to wake a task up), but it
+            // hasn't yet read the to_wake. In order to prevent a future recv()
+            // from waking up too early (this sender picking up the plastered
+            // over to_wake), we spin loop here waiting for to_wake to be 0.
+            // Note that this entire select() implementation needs an overhaul,
+            // and this is *not* the worst part of it, so this is not done as a
+            // final solution but rather out of necessity for now to get
+            // something working.
+            if prev < 0 {
+                self.take_to_wake().trash();
+            } else {
+                while self.to_wake.load(atomics::SeqCst) != 0 {
+                    Thread::yield_now();
+                }
+            }
+            assert_eq!(self.steals, 0);
+            self.steals = steals;
+
+            // if we were previously positive, then there's surely data to
+            // receive
+            prev >= 0
+        };
+
+        // Now that we've determined that this queue "has data", we peek at the
+        // queue to see if the data is an upgrade or not. If it's an upgrade,
+        // then we need to destroy this port and abort selection on the
+        // upgraded port.
+        if has_data {
+            match self.queue.peek() {
+                Some(&GoUp(..)) => {
+                    match self.queue.pop() {
+                        Some(GoUp(port)) => Err(port),
+                        _ => unreachable!(),
+                    }
+                }
+                _ => Ok(true),
+            }
+        } else {
+            Ok(false)
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+    fn drop(&mut self) {
+        // Note that this load is not only an assert for correctness about
+        // disconnection, but also a proper fence before the read of
+        // `to_wake`, so this assert cannot be removed without also removing
+        // the `to_wake` assert.
+        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+    }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Synchronous channels/ports
+///
+/// This channel implementation differs significantly from the asynchronous
+/// implementations found next to it (oneshot/stream/share). This is an
+/// implementation of a synchronous, bounded buffer channel.
+///
+/// Each channel is created with some amount of backing buffer, and sends will
+/// *block* until buffer space becomes available. A buffer size of 0 is valid,
+/// which means that every successful send is paired with a successful recv.
+///
+/// This flavor of channels defines a new `send_opt` method for channels which
+/// is the method by which a message is sent but the task does not fail if it
+/// cannot be delivered.
+///
+/// Another major difference is that send() will *always* return back the data
+/// if it couldn't be sent. This is because it is deterministically known when
+/// the data is received and when it is not received.
+///
+/// Implementation-wise, it can all be summed up with "use a mutex plus some
+/// logic". The mutex used here is an OS native mutex, meaning that no user code
+/// is run inside of the mutex (to prevent context switching). This
+/// implementation shares almost all code for the buffered and unbuffered cases
+/// of a synchronous channel. There are a few branches for the unbuffered case,
+/// but they're mostly just relevant to blocking senders.
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use collections::Vec;
+use collections::Collection;
+use core::mem;
+use core::ty::Unsafe;
+use rustrt::local::Local;
+use rustrt::mutex::{NativeMutex, LockGuard};
+use rustrt::task::{Task, BlockedTask};
+
+use atomics;
+
+pub struct Packet<T> {
+    /// Only field outside of the mutex. Just done for kicks, but mainly because
+    /// the other shared channel already had the code implemented
+    channels: atomics::AtomicUint,
+
+    /// The state field is protected by this mutex
+    lock: NativeMutex,
+    state: Unsafe<State<T>>,
+}
+
+struct State<T> {
+    disconnected: bool, // Is the channel disconnected yet?
+    queue: Queue,       // queue of senders waiting to send data
+    blocker: Blocker,   // currently blocked task on this channel
+    buf: Buffer<T>,     // storage for buffered messages
+    cap: uint,          // capacity of this channel
+
+    /// A curious flag used to indicate whether a sender failed or succeeded in
+    /// blocking. This is used to transmit information back to the task that it
+    /// must dequeue its message from the buffer because it was not received.
+    /// This is only relevant in the 0-buffer case. A `&'static mut bool`
+    /// obviously cannot be safely constructed (it actually points at a flag on
+    /// the blocked sender's stack — see `send`), but it's guaranteed to always
+    /// have a valid pointer value.
+    canceled: Option<&'static mut bool>,
+}
+
+/// Possible flavors of tasks who can be blocked on this channel.
+enum Blocker {
+    BlockedSender(BlockedTask),
+    BlockedReceiver(BlockedTask),
+    NoneBlocked
+}
+
+/// Simple queue for threading tasks together. Nodes are stack-allocated, so
+/// this structure is not safe at all
+struct Queue {
+    head: *mut Node,
+    tail: *mut Node,
+}
+
+/// A link in `Queue`; lives on the blocked sender's stack (see
+/// `Queue::enqueue`).
+struct Node {
+    task: Option<BlockedTask>,
+    next: *mut Node,
+}
+
+/// A simple ring-buffer
+struct Buffer<T> {
+    buf: Vec<Option<T>>,
+    start: uint,
+    size: uint,
+}
+
+/// Ways in which `try_recv` on this channel can fail.
+#[deriving(Show)]
+pub enum Failure {
+    Empty,
+    Disconnected,
+}
+
+/// Atomically blocks the current task, placing it into `slot`, unlocking `lock`
+/// in the meantime. This re-locks the mutex upon returning.
+fn wait(slot: &mut Blocker, f: fn(BlockedTask) -> Blocker,
+        lock: &NativeMutex) {
+    let me: Box<Task> = Local::take();
+    me.deschedule(1, |task| {
+        match mem::replace(slot, f(task)) {
+            NoneBlocked => {} // slot must have been empty
+            _ => unreachable!(),
+        }
+        unsafe { lock.unlock_noguard(); }
+        Ok(())
+    });
+    unsafe { lock.lock_noguard(); }
+}
+
+/// Wakes up a task, dropping the lock at the correct time
+fn wakeup(task: BlockedTask, guard: LockGuard) {
+    // We need to be careful to wake up the waiting task *outside* of the mutex
+    // in case it incurs a context switch.
+    mem::drop(guard);
+    task.wake().map(|t| t.reawaken());
+}
+
+impl<T: Send> Packet<T> {
+    pub fn new(cap: uint) -> Packet<T> {
+        Packet {
+            channels: atomics::AtomicUint::new(1),
+            lock: unsafe { NativeMutex::new() },
+            state: Unsafe::new(State {
+                disconnected: false,
+                blocker: NoneBlocked,
+                cap: cap,
+                canceled: None,
+                queue: Queue {
+                    head: 0 as *mut Node,
+                    tail: 0 as *mut Node,
+                },
+                buf: Buffer {
+                    // a cap of 0 still needs one slot for the rendezvous value
+                    buf: Vec::from_fn(cap + if cap == 0 {1} else {0}, |_| None),
+                    start: 0,
+                    size: 0,
+                },
+            }),
+        }
+    }
+
+    // Locks this channel, returning a guard for the state and the mutable state
+    // itself. Care should be taken to ensure that the state does not escape the
+    // guard!
+    //
+    // Note that we're ok promoting an & reference to an &mut reference because
+    // the lock ensures that we're the only ones in the world with a pointer to
+    // the state.
+    fn lock<'a>(&'a self) -> (LockGuard<'a>, &'a mut State<T>) {
+        unsafe {
+            let guard = self.lock.lock();
+            (guard, &mut *self.state.get())
+        }
+    }
+
+    pub fn send(&self, t: T) -> Result<(), T> {
+        let (guard, state) = self.lock();
+
+        // wait for a slot to become available, and enqueue the data
+        while !state.disconnected && state.buf.size() == state.buf.cap() {
+            state.queue.enqueue(&self.lock);
+        }
+        if state.disconnected { return Err(t) }
+        state.buf.enqueue(t);
+
+        match mem::replace(&mut state.blocker, NoneBlocked) {
+            // if our capacity is 0, then we need to wait for a receiver to be
+            // available to take our data. After waiting, we check again to make
+            // sure the port didn't go away in the meantime. If it did, we need
+            // to hand back our data.
+            NoneBlocked if state.cap == 0 => {
+                let mut canceled = false;
+                assert!(state.canceled.is_none());
+                state.canceled = Some(unsafe { mem::transmute(&mut canceled) });
+                wait(&mut state.blocker, BlockedSender, &self.lock);
+                if canceled {Err(state.buf.dequeue())} else {Ok(())}
+            }
+
+            // success, we buffered some data
+            NoneBlocked => Ok(()),
+
+            // success, someone's about to receive our buffered data.
+            BlockedReceiver(task) => { wakeup(task, guard); Ok(()) }
+
+            BlockedSender(..) => fail!("lolwut"),
+        }
+    }
+
+    pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
+        let (guard, state) = self.lock();
+        if state.disconnected {
+            Err(super::RecvDisconnected(t))
+        } else if state.buf.size() == state.buf.cap() {
+            Err(super::Full(t))
+        } else if state.cap == 0 {
+            // With capacity 0, even though we have buffer space we can't
+            // transfer the data unless there's a receiver waiting.
+            match mem::replace(&mut state.blocker, NoneBlocked) {
+                NoneBlocked => Err(super::Full(t)),
+                BlockedSender(..) => unreachable!(),
+                BlockedReceiver(task) => {
+                    state.buf.enqueue(t);
+                    wakeup(task, guard);
+                    Ok(())
+                }
+            }
+        } else {
+            // If the buffer has some space and the capacity isn't 0, then we
+            // just enqueue the data for later retrieval.
+            assert!(state.buf.size() < state.buf.cap());
+            state.buf.enqueue(t);
+            Ok(())
+        }
+    }
+
+    // Receives a message from this channel
+    //
+    // When reading this, remember that there can only ever be one receiver at
+    // a time.
+    pub fn recv(&self) -> Result<T, ()> {
+        let (guard, state) = self.lock();
+
+        // Wait for the buffer to have something in it. No need for a while loop
+        // because we're the only receiver.
+        let mut waited = false;
+        if !state.disconnected && state.buf.size() == 0 {
+            wait(&mut state.blocker, BlockedReceiver, &self.lock);
+            waited = true;
+        }
+        if state.disconnected && state.buf.size() == 0 { return Err(()) }
+
+        // Pick up the data, wake up our neighbors, and carry on
+        assert!(state.buf.size() > 0);
+        let ret = state.buf.dequeue();
+        self.wakeup_senders(waited, guard, state);
+        return Ok(ret);
+    }
+
+    pub fn try_recv(&self) -> Result<T, Failure> {
+        let (guard, state) = self.lock();
+
+        // Easy cases first
+        if state.disconnected { return Err(Disconnected) }
+        if state.buf.size() == 0 { return Err(Empty) }
+
+        // Be sure to wake up neighbors
+        let ret = Ok(state.buf.dequeue());
+        self.wakeup_senders(false, guard, state);
+
+        return ret;
+    }
+
+    // Wake up pending senders after some data has been received
+    //
+    // * `waited` - flag if the receiver blocked to receive some data, or if it
+    //              just picked up some data on the way out
+    // * `guard` - the lock guard that is held over this channel's lock
+    fn wakeup_senders(&self, waited: bool,
+                      guard: LockGuard,
+                      state: &mut State<T>) {
+        let pending_sender1: Option<BlockedTask> = state.queue.dequeue();
+
+        // If this is a no-buffer channel (cap == 0), then if we didn't wait we
+        // need to ACK the sender. If we waited, then the sender waking us up
+        // was already the ACK.
+        let pending_sender2 = if state.cap == 0 && !waited {
+            match mem::replace(&mut state.blocker, NoneBlocked) {
+                NoneBlocked => None,
+                BlockedReceiver(..) => unreachable!(),
+                BlockedSender(task) => {
+                    state.canceled.take();
+                    Some(task)
+                }
+            }
+        } else {
+            None
+        };
+        mem::drop((state, guard));
+
+        // only outside of the lock do we wake up the pending tasks
+        pending_sender1.map(|t| t.wake().map(|t| t.reawaken()));
+        pending_sender2.map(|t| t.wake().map(|t| t.reawaken()));
+    }
+
+    // Prepares this shared packet for a channel clone, essentially just bumping
+    // a refcount.
+    pub fn clone_chan(&self) {
+        self.channels.fetch_add(1, atomics::SeqCst);
+    }
+
+    pub fn drop_chan(&self) {
+        // Only flag the channel as disconnected if we're the last channel
+        match self.channels.fetch_sub(1, atomics::SeqCst) {
+            1 => {}
+            _ => return
+        }
+
+        // Not much to do other than wake up a receiver if one's there
+        let (guard, state) = self.lock();
+        if state.disconnected { return }
+        state.disconnected = true;
+        match mem::replace(&mut state.blocker, NoneBlocked) {
+            NoneBlocked => {}
+            BlockedSender(..) => unreachable!(),
+            BlockedReceiver(task) => wakeup(task, guard),
+        }
+    }
+
+    pub fn drop_port(&self) {
+        let (guard, state) = self.lock();
+
+        if state.disconnected { return }
+        state.disconnected = true;
+
+        // If the capacity is 0, then the sender may want its data back after
+        // we're disconnected. Otherwise it's now our responsibility to destroy
+        // the buffered data. As with many other portions of this code, this
+        // needs to be careful to destroy the data *outside* of the lock to
+        // prevent deadlock.
+        let _data = if state.cap != 0 {
+            mem::replace(&mut state.buf.buf, Vec::new())
+        } else {
+            Vec::new()
+        };
+        let mut queue = mem::replace(&mut state.queue, Queue {
+            head: 0 as *mut Node,
+            tail: 0 as *mut Node,
+        });
+
+        let waiter = match mem::replace(&mut state.blocker, NoneBlocked) {
+            NoneBlocked => None,
+            BlockedSender(task) => {
+                // tell the blocked sender its data was never received
+                *state.canceled.take_unwrap() = true;
+                Some(task)
+            }
+            BlockedReceiver(..) => unreachable!(),
+        };
+        mem::drop((state, guard));
+
+        // wake everyone up outside the lock
+        loop {
+            match queue.dequeue() {
+                Some(task) => { task.wake().map(|t| t.reawaken()); }
+                None => break,
+            }
+        }
+        waiter.map(|t| t.wake().map(|t| t.reawaken()));
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // select implementation
+    ////////////////////////////////////////////////////////////////////////////
+
+    // Returns whether this port can receive without blocking; a disconnected
+    // channel counts as receivable so the disconnect can be observed.
+    pub fn can_recv(&self) -> bool {
+        let (_g, state) = self.lock();
+        state.disconnected || state.buf.size() > 0
+    }
+
+    // Attempts to start selection on this port. This can either succeed or fail
+    // because there is data waiting.
+    pub fn start_selection(&self, task: BlockedTask) -> Result<(), BlockedTask>{
+        let (_g, state) = self.lock();
+        if state.disconnected || state.buf.size() > 0 {
+            Err(task)
+        } else {
+            match mem::replace(&mut state.blocker, BlockedReceiver(task)) {
+                NoneBlocked => {}
+                BlockedSender(..) => unreachable!(),
+                BlockedReceiver(..) => unreachable!(),
+            }
+            Ok(())
+        }
+    }
+
+    // Remove a previous selecting task from this port. This ensures that the
+    // blocked task will no longer be visible to any other threads.
+    //
+    // The return value indicates whether there's data on this port.
+    pub fn abort_selection(&self) -> bool {
+        let (_g, state) = self.lock();
+        match mem::replace(&mut state.blocker, NoneBlocked) {
+            NoneBlocked => true,
+            BlockedSender(task) => {
+                // a sender is blocked, not us; put it back untouched
+                state.blocker = BlockedSender(task);
+                true
+            }
+            BlockedReceiver(task) => { task.trash(); false }
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Packet<T> {
+    fn drop(&mut self) {
+        // By the time the packet is dropped, all clones must be gone and no
+        // sender may still be queued or mid-cancel.
+        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        let (_g, state) = self.lock();
+        assert!(state.queue.dequeue().is_none());
+        assert!(state.canceled.is_none());
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Buffer, a simple ring buffer backed by Vec<T>
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Buffer<T> {
+    // Appends `t` at the logical end of the ring; the slot must be empty.
+    fn enqueue(&mut self, t: T) {
+        let pos = (self.start + self.size) % self.buf.len();
+        self.size += 1;
+        let prev = mem::replace(self.buf.get_mut(pos), Some(t));
+        assert!(prev.is_none());
+    }
+
+    // Removes and returns the oldest element; fails if the buffer is empty
+    // (callers check `size()` first).
+    fn dequeue(&mut self) -> T {
+        let start = self.start;
+        self.size -= 1;
+        self.start = (self.start + 1) % self.buf.len();
+        self.buf.get_mut(start).take_unwrap()
+    }
+
+    fn size(&self) -> uint { self.size }
+    fn cap(&self) -> uint { self.buf.len() }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
+////////////////////////////////////////////////////////////////////////////////
+
+impl Queue {
+    // Parks the current task at the tail of this queue. `lock` must be held
+    // on entry; it is released while the task is descheduled and re-acquired
+    // before returning. The queue node lives on this function's stack, which
+    // presumably relies on the node being unlinked (by `dequeue`) before the
+    // task runs again — the assert below partially checks this.
+    fn enqueue(&mut self, lock: &NativeMutex) {
+        let task: Box<Task> = Local::take();
+        let mut node = Node {
+            task: None,
+            next: 0 as *mut Node,
+        };
+        task.deschedule(1, |task| {
+            node.task = Some(task);
+            if self.tail.is_null() {
+                self.head = &mut node as *mut Node;
+                self.tail = &mut node as *mut Node;
+            } else {
+                unsafe {
+                    (*self.tail).next = &mut node as *mut Node;
+                    self.tail = &mut node as *mut Node;
+                }
+            }
+            // Drop the lock only once the node is fully linked in.
+            unsafe { lock.unlock_noguard(); }
+            Ok(())
+        });
+        unsafe { lock.lock_noguard(); }
+        assert!(node.next.is_null());
+    }
+
+    // Unlinks and returns the task at the head of the queue, if any, fixing
+    // up the tail pointer when the queue becomes empty. Clears the unlinked
+    // node's `next` so its owner can observe removal.
+    fn dequeue(&mut self) -> Option<BlockedTask> {
+        if self.head.is_null() {
+            return None
+        }
+        let node = self.head;
+        self.head = unsafe { (*node).next };
+        if self.head.is_null() {
+            self.tail = 0 as *mut Node;
+        }
+        unsafe {
+            (*node).next = 0 as *mut Node;
+            Some((*node).task.take_unwrap())
+        }
+    }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A (mostly) lock-free concurrent work-stealing deque
+//!
+//! This module contains an implementation of the Chase-Lev work stealing deque
+//! described in "Dynamic Circular Work-Stealing Deque". The implementation is
+//! heavily based on the pseudocode found in the paper.
+//!
+//! This implementation does not want to have the restriction of a garbage
+//! collector for reclamation of buffers, and instead it uses a shared pool of
+//! buffers. This shared pool is required for correctness in this
+//! implementation.
+//!
+//! The only lock-synchronized portions of this deque are the buffer allocation
+//! and deallocation portions. Otherwise all operations are lock-free.
+//!
+//! # Example
+//!
+//!     use std::rt::deque::BufferPool;
+//!
+//!     let mut pool = BufferPool::new();
+//!     let (mut worker, mut stealer) = pool.deque();
+//!
+//!     // Only the worker may push/pop
+//!     worker.push(1);
+//!     worker.pop();
+//!
+//!     // Stealers take data from the other end of the deque
+//!     worker.push(1);
+//!     stealer.steal();
+//!
+//!     // Stealers can be cloned to have many stealers stealing in parallel
+//!     worker.push(1);
+//!     let mut stealer2 = stealer.clone();
+//!     stealer2.steal();
+
+#![experimental]
+
+// NB: the "buffer pool" strategy is not done for speed, but rather for
+// correctness. For more info, see the comment on `swap_buffer`
+
+// FIXME: all atomic operations in this module use a SeqCst ordering. That is
+// probably overkill
+
+use core::prelude::*;
+
+use alloc::arc::Arc;
+use alloc::heap::{allocate, deallocate};
+use alloc::owned::Box;
+use collections::Vec;
+use core::kinds::marker;
+use core::mem::{forget, min_align_of, size_of, transmute};
+use core::ptr;
+use rustrt::exclusive::Exclusive;
+
+use atomics::{AtomicInt, AtomicPtr, SeqCst};
+
+// Once the queue is less than 1/K full, then it will be downsized. Note that
+// shrinking halves the buffer, so the deque requires this number to be at
+// least 2 for the remaining elements to fit after a downsize.
+static K: int = 4;
+
+// Minimum number of bits that a buffer size should be. No buffer will resize to
+// under this value, and all deques will initially contain a buffer of this
+// size.
+//
+// The size in question is 1 << MIN_BITS
+static MIN_BITS: int = 7;
+
+struct Deque<T> {
+ bottom: AtomicInt,
+ top: AtomicInt,
+ array: AtomicPtr<Buffer<T>>,
+ pool: BufferPool<T>,
+}
+
+/// Worker half of the work-stealing deque. This worker has exclusive access to
+/// one side of the deque, and uses `push` and `pop` methods to manipulate it.
+///
+/// There may only be one worker per deque.
+pub struct Worker<T> {
+ deque: Arc<Deque<T>>,
+ noshare: marker::NoShare,
+}
+
+/// The stealing half of the work-stealing deque. Stealers have access to the
+/// opposite end of the deque from the worker, and they only have access to the
+/// `steal` method.
+pub struct Stealer<T> {
+ deque: Arc<Deque<T>>,
+ noshare: marker::NoShare,
+}
+
+/// When stealing some data, this is an enumeration of the possible outcomes.
+#[deriving(PartialEq, Show)]
+pub enum Stolen<T> {
+ /// The deque was empty at the time of stealing
+ Empty,
+ /// The stealer lost the race for stealing data, and a retry may return more
+ /// data.
+ Abort,
+ /// The stealer has successfully stolen some data.
+ Data(T),
+}
+
+/// The allocation pool for buffers used by work-stealing deques. Right now this
+/// structure is used for reclamation of memory after it is no longer in use by
+/// deques.
+///
+/// This data structure is protected by a mutex, but it is rarely used. Deques
+/// will only use this structure when allocating a new buffer or deallocating a
+/// previous one.
+pub struct BufferPool<T> {
+ pool: Arc<Exclusive<Vec<Box<Buffer<T>>>>>,
+}
+
+/// An internal buffer used by the chase-lev deque. This structure is actually
+/// implemented as a circular buffer, and is used as the intermediate storage of
+/// the data in the deque.
+///
+/// This type is implemented with *T instead of Vec<T> for two reasons:
+///
+/// 1. There is nothing safe about using this buffer. This easily allows the
+/// same value to be read twice in to rust, and there is nothing to
+/// prevent this. The usage by the deque must ensure that one of the
+/// values is forgotten. Furthermore, we only ever want to manually run
+/// destructors for values in this buffer (on drop) because the bounds
+/// are defined by the deque it's owned by.
+///
+/// 2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
+/// LLVM is probably pretty good at doing this already.
+struct Buffer<T> {
+ storage: *T,
+ log_size: int,
+}
+
+impl<T: Send> BufferPool<T> {
+    /// Allocates a new buffer pool which in turn can be used to allocate new
+    /// deques.
+    pub fn new() -> BufferPool<T> {
+        BufferPool { pool: Arc::new(Exclusive::new(Vec::new())) }
+    }
+
+    /// Allocates a new work-stealing deque which will send/receive memory to
+    /// and from this buffer pool.
+    pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
+        let a = Arc::new(Deque::new(self.clone()));
+        let b = a.clone();
+        (Worker { deque: a, noshare: marker::NoShare },
+         Stealer { deque: b, noshare: marker::NoShare })
+    }
+
+    // Fetches a buffer with at least `1 << bits` capacity from the pool,
+    // falling back to a fresh allocation when the pool has none large enough.
+    fn alloc(&self, bits: int) -> Box<Buffer<T>> {
+        unsafe {
+            let mut pool = self.pool.lock();
+            match pool.iter().position(|x| x.size() >= (1 << bits)) {
+                Some(i) => pool.remove(i).unwrap(),
+                None => box Buffer::new(bits)
+            }
+        }
+    }
+
+    // Returns a buffer to the pool. Buffers are kept sorted by ascending size
+    // so that `alloc` always hands out the smallest sufficient buffer.
+    fn free(&self, buf: Box<Buffer<T>>) {
+        unsafe {
+            let mut pool = self.pool.lock();
+            match pool.iter().position(|v| v.size() > buf.size()) {
+                Some(i) => pool.insert(i, buf),
+                None => pool.push(buf),
+            }
+        }
+    }
+}
+
+impl<T: Send> Clone for BufferPool<T> {
+ fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
+}
+
+impl<T: Send> Worker<T> {
+ /// Pushes data onto the front of this work queue.
+ pub fn push(&self, t: T) {
+ unsafe { self.deque.push(t) }
+ }
+ /// Pops data off the front of the work queue, returning `None` on an empty
+ /// queue.
+ pub fn pop(&self) -> Option<T> {
+ unsafe { self.deque.pop() }
+ }
+
+ /// Gets access to the buffer pool that this worker is attached to. This can
+ /// be used to create more deques which share the same buffer pool as this
+ /// deque.
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
+ }
+}
+
+impl<T: Send> Stealer<T> {
+ /// Steals work off the end of the queue (opposite of the worker's end)
+ pub fn steal(&self) -> Stolen<T> {
+ unsafe { self.deque.steal() }
+ }
+
+ /// Gets access to the buffer pool that this stealer is attached to. This
+ /// can be used to create more deques which share the same buffer pool as
+ /// this deque.
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
+ }
+}
+
+impl<T: Send> Clone for Stealer<T> {
+ fn clone(&self) -> Stealer<T> {
+ Stealer { deque: self.deque.clone(), noshare: marker::NoShare }
+ }
+}
+
+// Almost all of this code can be found directly in the paper so I'm not
+// personally going to heavily comment what's going on here.
+
+impl<T: Send> Deque<T> {
+    fn new(pool: BufferPool<T>) -> Deque<T> {
+        let buf = pool.alloc(MIN_BITS);
+        Deque {
+            bottom: AtomicInt::new(0),
+            top: AtomicInt::new(0),
+            array: AtomicPtr::new(unsafe { transmute(buf) }),
+            pool: pool,
+        }
+    }
+
+    unsafe fn push(&self, data: T) {
+        let mut b = self.bottom.load(SeqCst);
+        let t = self.top.load(SeqCst);
+        let mut a = self.array.load(SeqCst);
+        let size = b - t;
+        if size >= (*a).size() - 1 {
+            // You won't find this code in the chase-lev deque paper. This is
+            // alluded to in a small footnote, however. We always free a buffer
+            // when growing in order to prevent leaks.
+            a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
+            b = self.bottom.load(SeqCst);
+        }
+        (*a).put(b, data);
+        self.bottom.store(b + 1, SeqCst);
+    }
+
+    unsafe fn pop(&self) -> Option<T> {
+        let b = self.bottom.load(SeqCst);
+        let a = self.array.load(SeqCst);
+        let b = b - 1;
+        self.bottom.store(b, SeqCst);
+        let t = self.top.load(SeqCst);
+        let size = b - t;
+        if size < 0 {
+            // Deque was empty; restore `bottom` before bailing out.
+            self.bottom.store(t, SeqCst);
+            return None;
+        }
+        let data = (*a).get(b);
+        if size > 0 {
+            // Elements remain between top and bottom, so no stealer can race
+            // us for the one at `b`; consider shrinking while we're here.
+            self.maybe_shrink(b, t);
+            return Some(data);
+        }
+        // Last element: race any stealers for it via a CAS on `top`.
+        if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
+            self.bottom.store(t + 1, SeqCst);
+            return Some(data);
+        } else {
+            self.bottom.store(t + 1, SeqCst);
+            forget(data); // someone else stole this value
+            return None;
+        }
+    }
+
+    unsafe fn steal(&self) -> Stolen<T> {
+        let t = self.top.load(SeqCst);
+        let old = self.array.load(SeqCst);
+        let b = self.bottom.load(SeqCst);
+        let a = self.array.load(SeqCst);
+        let size = b - t;
+        if size <= 0 { return Empty }
+        if size % (*a).size() == 0 {
+            // A consistent deque never holds a full buffer's worth of
+            // elements (push grows at capacity - 1), so this snapshot is
+            // suspect. Only report Empty when nothing moved under us;
+            // otherwise ask the caller to retry.
+            if a == old && t == self.top.load(SeqCst) {
+                return Empty
+            }
+            return Abort
+        }
+        let data = (*a).get(t);
+        if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
+            Data(data)
+        } else {
+            forget(data); // someone else stole this value
+            Abort
+        }
+    }
+
+    // Halves the buffer when occupancy has dropped below 1/K of capacity; the
+    // `1 << MIN_BITS` guard keeps small deques from shrinking.
+    unsafe fn maybe_shrink(&self, b: int, t: int) {
+        let a = self.array.load(SeqCst);
+        if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
+            self.swap_buffer(b, a, (*a).resize(b, t, -1));
+        }
+    }
+
+    // Helper routine not mentioned in the paper which is used in growing and
+    // shrinking buffers to swap in a new buffer into place. As a bit of a
+    // recap, the whole point that we need a buffer pool rather than just
+    // calling malloc/free directly is that stealers can continue using buffers
+    // after this method has called 'free' on it. The continued usage is simply
+    // a read followed by a forget, but we must make sure that the memory can
+    // continue to be read after we flag this buffer for reclamation.
+    unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
+                          buf: Buffer<T>) -> *mut Buffer<T> {
+        let newbuf: *mut Buffer<T> = transmute(box buf);
+        self.array.store(newbuf, SeqCst);
+        let ss = (*newbuf).size();
+        self.bottom.store(b + ss, SeqCst);
+        let t = self.top.load(SeqCst);
+        if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
+            self.bottom.store(b, SeqCst);
+        }
+        self.pool.free(transmute(old));
+        return newbuf;
+    }
+}
+
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Deque<T> {
+ fn drop(&mut self) {
+ let t = self.top.load(SeqCst);
+ let b = self.bottom.load(SeqCst);
+ let a = self.array.load(SeqCst);
+ // Free whatever is leftover in the dequeue, and then move the buffer
+ // back into the pool.
+ for i in range(t, b) {
+ let _: T = unsafe { (*a).get(i) };
+ }
+ self.pool.free(unsafe { transmute(a) });
+ }
+}
+
+#[inline]
+fn buffer_alloc_size<T>(log_size: int) -> uint {
+ (1 << log_size) * size_of::<T>()
+}
+
+impl<T: Send> Buffer<T> {
+    // Allocates a buffer of capacity `1 << log_size`. Unsafe because the
+    // returned storage is uninitialized: `put` must precede any `get`.
+    unsafe fn new(log_size: int) -> Buffer<T> {
+        let size = buffer_alloc_size::<T>(log_size);
+        let buffer = allocate(size, min_align_of::<T>());
+        Buffer {
+            storage: buffer as *T,
+            log_size: log_size,
+        }
+    }
+
+    // Capacity of this buffer (always a power of two).
+    fn size(&self) -> int { 1 << self.log_size }
+
+    // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
+    fn mask(&self) -> int { (1 << self.log_size) - 1 }
+
+    // Pointer to the slot for logical index `i`, wrapped modulo the capacity.
+    unsafe fn elem(&self, i: int) -> *T { self.storage.offset(i & self.mask()) }
+
+    // This does not protect against loading duplicate values of the same cell,
+    // nor does this clear out the contents contained within. Hence, this is a
+    // very unsafe method which the caller needs to treat specially in case a
+    // race is lost.
+    unsafe fn get(&self, i: int) -> T {
+        ptr::read(self.elem(i))
+    }
+
+    // Unsafe because this unsafely overwrites possibly uninitialized or
+    // initialized data.
+    unsafe fn put(&self, i: int, t: T) {
+        ptr::write(self.elem(i) as *mut T, t);
+    }
+
+    // Again, unsafe because this has incredibly dubious ownership violations.
+    // It is assumed that this buffer is immediately dropped.
+    unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
+        // Copy the live range [t, b) into a buffer of double (delta == 1) or
+        // half (delta == -1) the current capacity.
+        let buf = Buffer::new(self.log_size + delta);
+        for i in range(t, b) {
+            buf.put(i, self.get(i));
+        }
+        return buf;
+    }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Buffer<T> {
+ fn drop(&mut self) {
+ // It is assumed that all buffers are empty on drop.
+ let size = buffer_alloc_size::<T>(self.log_size);
+ unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::*;
+ use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
+
+ use std::mem;
+ use std::rt::thread::Thread;
+ use std::rand;
+ use std::rand::Rng;
+ use atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+ AtomicUint, INIT_ATOMIC_UINT};
+ use std::vec;
+
+ #[test]
+ fn smoke() {
+ let pool = BufferPool::new();
+ let (w, s) = pool.deque();
+ assert_eq!(w.pop(), None);
+ assert_eq!(s.steal(), Empty);
+ w.push(1);
+ assert_eq!(w.pop(), Some(1));
+ w.push(1);
+ assert_eq!(s.steal(), Data(1));
+ w.push(1);
+ assert_eq!(s.clone().steal(), Data(1));
+ }
+
+ #[test]
+ fn stealpush() {
+ static AMT: int = 100000;
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
+ let t = Thread::start(proc() {
+ let mut left = AMT;
+ while left > 0 {
+ match s.steal() {
+ Data(i) => {
+ assert_eq!(i, 1);
+ left -= 1;
+ }
+ Abort | Empty => {}
+ }
+ }
+ });
+
+ for _ in range(0, AMT) {
+ w.push(1);
+ }
+
+ t.join();
+ }
+
+ #[test]
+ fn stealpush_large() {
+ static AMT: int = 100000;
+ let pool = BufferPool::<(int, int)>::new();
+ let (w, s) = pool.deque();
+ let t = Thread::start(proc() {
+ let mut left = AMT;
+ while left > 0 {
+ match s.steal() {
+ Data((1, 10)) => { left -= 1; }
+ Data(..) => fail!(),
+ Abort | Empty => {}
+ }
+ }
+ });
+
+ for _ in range(0, AMT) {
+ w.push((1, 10));
+ }
+
+ t.join();
+ }
+
+ fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
+ nthreads: int, amt: uint) {
+ for _ in range(0, amt) {
+ w.push(box 20);
+ }
+ let mut remaining = AtomicUint::new(amt);
+ let unsafe_remaining: *mut AtomicUint = &mut remaining;
+
+ let threads = range(0, nthreads).map(|_| {
+ let s = s.clone();
+ Thread::start(proc() {
+ unsafe {
+ while (*unsafe_remaining).load(SeqCst) > 0 {
+ match s.steal() {
+ Data(box 20) => {
+ (*unsafe_remaining).fetch_sub(1, SeqCst);
+ }
+ Data(..) => fail!(),
+ Abort | Empty => {}
+ }
+ }
+ }
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ while remaining.load(SeqCst) > 0 {
+ match w.pop() {
+ Some(box 20) => { remaining.fetch_sub(1, SeqCst); }
+ Some(..) => fail!(),
+ None => {}
+ }
+ }
+
+ for thread in threads.move_iter() {
+ thread.join();
+ }
+ }
+
+ #[test]
+ fn run_stampede() {
+ let pool = BufferPool::<Box<int>>::new();
+ let (w, s) = pool.deque();
+ stampede(w, s, 8, 10000);
+ }
+
+ #[test]
+ fn many_stampede() {
+ static AMT: uint = 4;
+ let pool = BufferPool::<Box<int>>::new();
+ let threads = range(0, AMT).map(|_| {
+ let (w, s) = pool.deque();
+ Thread::start(proc() {
+ stampede(w, s, 4, 10000);
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ for thread in threads.move_iter() {
+ thread.join();
+ }
+ }
+
+ #[test]
+ fn stress() {
+ static AMT: int = 100000;
+ static NTHREADS: int = 8;
+ static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
+ static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
+
+ let threads = range(0, NTHREADS).map(|_| {
+ let s = s.clone();
+ Thread::start(proc() {
+ unsafe {
+ loop {
+ match s.steal() {
+ Data(2) => { HITS.fetch_add(1, SeqCst); }
+ Data(..) => fail!(),
+ _ if DONE.load(SeqCst) => break,
+ _ => {}
+ }
+ }
+ }
+ })
+ }).collect::<Vec<Thread<()>>>();
+
+ let mut rng = rand::task_rng();
+ let mut expected = 0;
+ while expected < AMT {
+ if rng.gen_range(0, 3) == 2 {
+ match w.pop() {
+ None => {}
+ Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
+ Some(_) => fail!(),
+ }
+ } else {
+ expected += 1;
+ w.push(2);
+ }
+ }
+
+ unsafe {
+ while HITS.load(SeqCst) < AMT as uint {
+ match w.pop() {
+ None => {}
+ Some(2) => { HITS.fetch_add(1, SeqCst); },
+ Some(_) => fail!(),
+ }
+ }
+ DONE.store(true, SeqCst);
+ }
+
+ for thread in threads.move_iter() {
+ thread.join();
+ }
+
+ assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
+ }
+
+ #[test]
+ #[ignore(cfg(windows))] // apparently windows scheduling is weird?
+ fn no_starvation() {
+ static AMT: int = 10000;
+ static NTHREADS: int = 4;
+ static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
+ let pool = BufferPool::<(int, uint)>::new();
+ let (w, s) = pool.deque();
+
+ let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
+ let s = s.clone();
+ let unique_box = box AtomicUint::new(0);
+ let thread_box = unsafe {
+ *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
+ };
+ (Thread::start(proc() {
+ unsafe {
+ loop {
+ match s.steal() {
+ Data((1, 2)) => {
+ (*thread_box).fetch_add(1, SeqCst);
+ }
+ Data(..) => fail!(),
+ _ if DONE.load(SeqCst) => break,
+ _ => {}
+ }
+ }
+ }
+ }), unique_box)
+ }));
+
+ let mut rng = rand::task_rng();
+ let mut myhit = false;
+ 'outer: loop {
+ for _ in range(0, rng.gen_range(0, AMT)) {
+ if !myhit && rng.gen_range(0, 3) == 2 {
+ match w.pop() {
+ None => {}
+ Some((1, 2)) => myhit = true,
+ Some(_) => fail!(),
+ }
+ } else {
+ w.push((1, 2));
+ }
+ }
+
+ for slot in hits.iter() {
+ let amt = slot.load(SeqCst);
+ if amt == 0 { continue 'outer; }
+ }
+ if myhit {
+ break
+ }
+ }
+
+ unsafe { DONE.store(true, SeqCst); }
+
+ for thread in threads.move_iter() {
+ thread.join();
+ }
+ }
+}
+++ /dev/null
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!
- * A type representing values that may be computed concurrently and
- * operations for working with them.
- *
- * # Example
- *
- * ```rust
- * use sync::Future;
- * # fn fib(n: uint) -> uint {42};
- * # fn make_a_sandwich() {};
- * let mut delayed_fib = Future::spawn(proc() { fib(5000) });
- * make_a_sandwich();
- * println!("fib(5000) = {}", delayed_fib.get())
- * ```
- */
-
-#![allow(missing_doc)]
-
-use std::mem::replace;
-
-/// A type encapsulating the result of a computation which may not be complete
-pub struct Future<A> {
- state: FutureState<A>,
-}
-
-enum FutureState<A> {
- Pending(proc():Send -> A),
- Evaluating,
- Forced(A)
-}
-
-/// Methods on the `future` type
-impl<A:Clone> Future<A> {
- pub fn get(&mut self) -> A {
- //! Get the value of the future.
- (*(self.get_ref())).clone()
- }
-}
-
-impl<A> Future<A> {
- /// Gets the value from this future, forcing evaluation.
- pub fn unwrap(mut self) -> A {
- self.get_ref();
- let state = replace(&mut self.state, Evaluating);
- match state {
- Forced(v) => v,
- _ => fail!( "Logic error." ),
- }
- }
-
- pub fn get_ref<'a>(&'a mut self) -> &'a A {
- /*!
- * Executes the future's closure and then returns a reference
- * to the result. The reference lasts as long as
- * the future.
- */
- match self.state {
- Forced(ref v) => return v,
- Evaluating => fail!("Recursive forcing of future!"),
- Pending(_) => {
- match replace(&mut self.state, Evaluating) {
- Forced(_) | Evaluating => fail!("Logic error."),
- Pending(f) => {
- self.state = Forced(f());
- self.get_ref()
- }
- }
- }
- }
- }
-
- pub fn from_value(val: A) -> Future<A> {
- /*!
- * Create a future from a value.
- *
- * The value is immediately available and calling `get` later will
- * not block.
- */
-
- Future {state: Forced(val)}
- }
-
- pub fn from_fn(f: proc():Send -> A) -> Future<A> {
- /*!
- * Create a future from a function.
- *
- * The first time that the value is requested it will be retrieved by
- * calling the function. Note that this function is a local
- * function. It is not spawned into another task.
- */
-
- Future {state: Pending(f)}
- }
-}
-
-impl<A:Send> Future<A> {
- pub fn from_receiver(rx: Receiver<A>) -> Future<A> {
- /*!
- * Create a future from a port
- *
- * The first time that the value is requested the task will block
- * waiting for the result to be received on the port.
- */
-
- Future::from_fn(proc() {
- rx.recv()
- })
- }
-
- pub fn spawn(blk: proc():Send -> A) -> Future<A> {
- /*!
- * Create a future from a unique closure.
- *
- * The closure will be run in a new task and its result used as the
- * value of the future.
- */
-
- let (tx, rx) = channel();
-
- spawn(proc() {
- tx.send(blk());
- });
-
- Future::from_receiver(rx)
- }
-}
-
-#[cfg(test)]
-mod test {
- use future::Future;
-
- use std::task;
-
- #[test]
- fn test_from_value() {
- let mut f = Future::from_value("snail".to_string());
- assert_eq!(f.get(), "snail".to_string());
- }
-
- #[test]
- fn test_from_receiver() {
- let (tx, rx) = channel();
- tx.send("whale".to_string());
- let mut f = Future::from_receiver(rx);
- assert_eq!(f.get(), "whale".to_string());
- }
-
- #[test]
- fn test_from_fn() {
- let mut f = Future::from_fn(proc() "brail".to_string());
- assert_eq!(f.get(), "brail".to_string());
- }
-
- #[test]
- fn test_interface_get() {
- let mut f = Future::from_value("fail".to_string());
- assert_eq!(f.get(), "fail".to_string());
- }
-
- #[test]
- fn test_interface_unwrap() {
- let f = Future::from_value("fail".to_string());
- assert_eq!(f.unwrap(), "fail".to_string());
- }
-
- #[test]
- fn test_get_ref_method() {
- let mut f = Future::from_value(22);
- assert_eq!(*f.get_ref(), 22);
- }
-
- #[test]
- fn test_spawn() {
- let mut f = Future::spawn(proc() "bale".to_string());
- assert_eq!(f.get(), "bale".to_string());
- }
-
- #[test]
- #[should_fail]
- fn test_futurefail() {
- let mut f = Future::spawn(proc() fail!());
- let _x: String = f.get();
- }
-
- #[test]
- fn test_sendable_future() {
- let expected = "schlorf";
- let f = Future::spawn(proc() { expected });
- task::spawn(proc() {
- let mut f = f;
- let actual = f.get();
- assert_eq!(actual, expected);
- });
- }
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*!
- * Concurrency-enabled mechanisms and primitives.
- */
+//! Core concurrency-enabled mechanisms and primitives.
+//!
+//! This crate contains the implementations of Rust's core synchronization
+//! primitives. This includes channels, mutexes, condition variables, etc.
+//!
+//! The interface of this crate is experimental, and it is not recommended to
+//! use this crate specifically. Instead, its functionality is reexported
+//! through `std::sync`.
#![crate_id = "sync#0.11.0-pre"]
#![crate_type = "rlib"]
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
-#![feature(phase)]
-#![deny(deprecated_owned_vector)]
+#![feature(phase, globs, macro_rules)]
#![deny(missing_doc)]
+#![no_std]
-#[cfg(test, stage0)]
-#[phase(syntax, link)] extern crate log;
-
-#[cfg(test, not(stage0))]
-#[phase(plugin, link)] extern crate log;
-
+#[cfg(stage0)]
+#[phase(syntax, link)] extern crate core;
+#[cfg(not(stage0))]
+#[phase(plugin, link)] extern crate core;
extern crate alloc;
+extern crate collections;
+extern crate rustrt;
+
+#[cfg(test)] extern crate test;
+#[cfg(test)] extern crate native;
+#[cfg(test, stage0)] #[phase(syntax, link)] extern crate std;
+#[cfg(test, not(stage0))] #[phase(plugin, link)] extern crate std;
-pub use comm::{DuplexStream, duplex};
-pub use task_pool::TaskPool;
-pub use future::Future;
pub use alloc::arc::{Arc, Weak};
pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
RWLock, RWLockReadGuard, RWLockWriteGuard};
// The mutex/rwlock in this module are not meant for reexport
pub use raw::{Semaphore, SemaphoreGuard};
-mod comm;
-mod future;
-mod lock;
+// Core building blocks for all primitives in this crate
+
+pub mod atomics;
+
+// Concurrent data structures
+
mod mpsc_intrusive;
-mod task_pool;
+pub mod spsc_queue;
+pub mod mpsc_queue;
+pub mod mpmc_bounded_queue;
+pub mod deque;
+
+// Low-level concurrency primitives
pub mod raw;
pub mod mutex;
pub mod one;
+
+// Message-passing based communication
+
+pub mod comm;
+
+// Higher level primitives based on those above
+
+mod lock;
+
+#[cfg(not(test))]
+mod std {
+ pub use core::{fmt, option, cmp, clone};
+}
//! after grabbing the lock, the second task will immediately fail because the
//! lock is now poisoned.
-use std::task;
-use std::ty::Unsafe;
+use core::prelude::*;
+
+use core::ty::Unsafe;
+use rustrt::local::Local;
+use rustrt::task::Task;
use raw;
failed: bool,
}
+fn failing() -> bool {
+ Local::borrow(None::<Task>).unwinder.unwinding()
+}
+
impl<'a> PoisonOnFail<'a> {
fn check(flag: bool, name: &str) {
if flag {
PoisonOnFail::check(*flag, name);
PoisonOnFail {
flag: flag,
- failed: task::failing()
+ failed: failing()
}
}
}
#[unsafe_destructor]
impl<'a> Drop for PoisonOnFail<'a> {
fn drop(&mut self) {
- if !self.failed && task::failing() {
+ if !self.failed && failing() {
*self.flag = true;
}
}
#[cfg(test)]
mod tests {
+ use std::prelude::*;
use std::comm::Empty;
use std::task;
use std::task::TaskBuilder;
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+#![experimental]
+#![allow(missing_doc, dead_code)]
+
+// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+
+use core::prelude::*;
+
+use alloc::arc::Arc;
+use collections::Vec;
+use core::num::next_power_of_two;
+use core::ty::Unsafe;
+
+use atomics::{AtomicUint,Relaxed,Release,Acquire};
+
+struct Node<T> {
+ sequence: AtomicUint,
+ value: Option<T>,
+}
+
+struct State<T> {
+ pad0: [u8, ..64],
+ buffer: Vec<Unsafe<Node<T>>>,
+ mask: uint,
+ pad1: [u8, ..64],
+ enqueue_pos: AtomicUint,
+ pad2: [u8, ..64],
+ dequeue_pos: AtomicUint,
+ pad3: [u8, ..64],
+}
+
+pub struct Queue<T> {
+ state: Arc<State<T>>,
+}
+
+impl<T: Send> State<T> {
+ fn with_capacity(capacity: uint) -> State<T> {
+ let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
+ if capacity < 2 {
+ 2u
+ } else {
+ // use next power of 2 as capacity
+ next_power_of_two(capacity)
+ }
+ } else {
+ capacity
+ };
+ let buffer = Vec::from_fn(capacity, |i| {
+ Unsafe::new(Node { sequence:AtomicUint::new(i), value: None })
+ });
+ State{
+ pad0: [0, ..64],
+ buffer: buffer,
+ mask: capacity-1,
+ pad1: [0, ..64],
+ enqueue_pos: AtomicUint::new(0),
+ pad2: [0, ..64],
+ dequeue_pos: AtomicUint::new(0),
+ pad3: [0, ..64],
+ }
+ }
+
+ fn push(&self, value: T) -> bool {
+ let mask = self.mask;
+ let mut pos = self.enqueue_pos.load(Relaxed);
+ loop {
+ let node = self.buffer.get(pos & mask);
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
+ let diff: int = seq as int - pos as int;
+
+ if diff == 0 {
+ let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
+ if enqueue_pos == pos {
+ unsafe {
+ (*node.get()).value = Some(value);
+ (*node.get()).sequence.store(pos+1, Release);
+ }
+ break
+ } else {
+ pos = enqueue_pos;
+ }
+ } else if diff < 0 {
+ return false
+ } else {
+ pos = self.enqueue_pos.load(Relaxed);
+ }
+ }
+ true
+ }
+
+ fn pop(&self) -> Option<T> {
+ let mask = self.mask;
+ let mut pos = self.dequeue_pos.load(Relaxed);
+ loop {
+ let node = self.buffer.get(pos & mask);
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
+ let diff: int = seq as int - (pos + 1) as int;
+ if diff == 0 {
+ let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
+ if dequeue_pos == pos {
+ unsafe {
+ let value = (*node.get()).value.take();
+ (*node.get()).sequence.store(pos + mask + 1, Release);
+ return value
+ }
+ } else {
+ pos = dequeue_pos;
+ }
+ } else if diff < 0 {
+ return None
+ } else {
+ pos = self.dequeue_pos.load(Relaxed);
+ }
+ }
+ }
+}
+
+impl<T: Send> Queue<T> {
+ pub fn with_capacity(capacity: uint) -> Queue<T> {
+ Queue{
+ state: Arc::new(State::with_capacity(capacity))
+ }
+ }
+
+ pub fn push(&self, value: T) -> bool {
+ self.state.push(value)
+ }
+
+ pub fn pop(&self) -> Option<T> {
+ self.state.pop()
+ }
+}
+
+impl<T: Send> Clone for Queue<T> {
+ fn clone(&self) -> Queue<T> {
+ Queue { state: self.state.clone() }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::*;
+ use super::Queue;
+ use native;
+
+ #[test]
+ fn test() {
+ let nthreads = 8u;
+ let nmsgs = 1000u;
+ let q = Queue::with_capacity(nthreads*nmsgs);
+ assert_eq!(None, q.pop());
+ let (tx, rx) = channel();
+
+ for _ in range(0, nthreads) {
+ let q = q.clone();
+ let tx = tx.clone();
+ native::task::spawn(proc() {
+ let q = q;
+ for i in range(0, nmsgs) {
+ assert!(q.push(i));
+ }
+ tx.send(());
+ });
+ }
+
+ let mut completion_rxs = vec![];
+ for _ in range(0, nthreads) {
+ let (tx, rx) = channel();
+ completion_rxs.push(rx);
+ let q = q.clone();
+ native::task::spawn(proc() {
+ let q = q;
+ let mut i = 0u;
+ loop {
+ match q.pop() {
+ None => {},
+ Some(_) => {
+ i += 1;
+ if i == nmsgs { break }
+ }
+ }
+ }
+ tx.send(i);
+ });
+ }
+
+ for rx in completion_rxs.mut_iter() {
+ assert_eq!(nmsgs, rx.recv());
+ }
+ for _ in range(0, nthreads) {
+ rx.recv();
+ }
+ }
+}
//! This module implements an intrusive MPSC queue. This queue is incredibly
//! unsafe (due to use of unsafe pointers for nodes), and hence is not public.
+#![experimental]
+
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/intrusive-mpsc-node-based-queue
-use std::mem;
-use std::sync::atomics;
-use std::ty::Unsafe;
+use core::prelude::*;
+
+use core::atomics;
+use core::mem;
+use core::ty::Unsafe;
// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static
// initialization.
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+//! A mostly lock-free multi-producer, single consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between tasks, and is also used as the
+//! building block of channels in rust.
+//!
+//! Note that the current implementation of this queue has a caveat of the `pop`
+//! method, and see the method for more information about it. Due to this
+//! caveat, this queue may not be appropriate for all use-cases.
+
+#![experimental]
+
+// http://www.1024cores.net/home/lock-free-algorithms
+// /queues/non-intrusive-mpsc-node-based-queue
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::mem;
+use core::ty::Unsafe;
+
+use atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+
+/// A result of the `pop` function.
+pub enum PopResult<T> {
+ /// Some data has been popped
+ Data(T),
+ /// The queue is empty
+ Empty,
+ /// The queue is in an inconsistent state. Popping data should succeed, but
+ /// some pushers have yet to make enough progress in order allow a pop to
+ /// succeed. It is recommended that a pop() occur "in the near future" in
+ /// order to see if the sender has made progress or not
+ Inconsistent,
+}
+
+struct Node<T> {
+ next: AtomicPtr<Node<T>>,
+ value: Option<T>,
+}
+
+/// The multi-producer single-consumer structure. This is not cloneable, but it
+/// may be safely shared so long as it is guaranteed that there is only one
+/// popper at a time (many pushers are allowed).
+pub struct Queue<T> {
+ head: AtomicPtr<Node<T>>,
+ tail: Unsafe<*mut Node<T>>,
+}
+
+impl<T> Node<T> {
+ unsafe fn new(v: Option<T>) -> *mut Node<T> {
+ mem::transmute(box Node {
+ next: AtomicPtr::new(0 as *mut Node<T>),
+ value: v,
+ })
+ }
+}
+
+impl<T: Send> Queue<T> {
+ /// Creates a new queue that is safe to share among multiple producers and
+ /// one consumer.
+ pub fn new() -> Queue<T> {
+ let stub = unsafe { Node::new(None) };
+ Queue {
+ head: AtomicPtr::new(stub),
+ tail: Unsafe::new(stub),
+ }
+ }
+
+ /// Pushes a new value onto this queue.
+ pub fn push(&self, t: T) {
+ unsafe {
+ let n = Node::new(Some(t));
+ let prev = self.head.swap(n, AcqRel);
+ (*prev).next.store(n, Release);
+ }
+ }
+
+ /// Pops some data from this queue.
+ ///
+ /// Note that the current implementation means that this function cannot
+ /// return `Option<T>`. It is possible for this queue to be in an
+ /// inconsistent state where many pushes have succeeded and completely
+ /// finished, but pops cannot return `Some(t)`. This inconsistent state
+ /// happens when a pusher is pre-empted at an inopportune moment.
+ ///
+ /// This inconsistent state means that this queue does indeed have data, but
+ /// it does not currently have access to it at this time.
+ pub fn pop(&self) -> PopResult<T> {
+ unsafe {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ assert!((*tail).value.is_none());
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take_unwrap();
+ let _: Box<Node<T>> = mem::transmute(tail);
+ return Data(ret);
+ }
+
+ if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
+ }
+ }
+
+ /// Attempts to pop data from this queue, but doesn't attempt too hard. This
+ /// will canonicalize inconsistent states to a `None` value.
+ pub fn casual_pop(&self) -> Option<T> {
+ match self.pop() {
+ Data(t) => Some(t),
+ Empty | Inconsistent => None,
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.tail.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Relaxed);
+ let _: Box<Node<T>> = mem::transmute(cur);
+ cur = next;
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::*;
+
+ use alloc::arc::Arc;
+
+ use native;
+ use super::{Queue, Data, Empty, Inconsistent};
+
+ #[test]
+ fn test_full() {
+ let q = Queue::new();
+ q.push(box 1);
+ q.push(box 2);
+ }
+
+ #[test]
+ fn test() {
+ let nthreads = 8u;
+ let nmsgs = 1000u;
+ let q = Queue::new();
+ match q.pop() {
+ Empty => {}
+ Inconsistent | Data(..) => fail!()
+ }
+ let (tx, rx) = channel();
+ let q = Arc::new(q);
+
+ for _ in range(0, nthreads) {
+ let tx = tx.clone();
+ let q = q.clone();
+ native::task::spawn(proc() {
+ for i in range(0, nmsgs) {
+ q.push(i);
+ }
+ tx.send(());
+ });
+ }
+
+ let mut i = 0u;
+ while i < nthreads * nmsgs {
+ match q.pop() {
+ Empty | Inconsistent => {},
+ Data(_) => { i += 1 }
+ }
+ }
+ drop(tx);
+ for _ in range(0, nthreads) {
+ rx.recv();
+ }
+ }
+}
// times in order to manage a few flags about who's blocking where and whether
// it's locked or not.
-use std::kinds::marker;
-use std::mem;
-use std::rt::local::Local;
-use std::rt::task::{BlockedTask, Task};
-use std::rt::thread::Thread;
-use std::sync::atomics;
-use std::ty::Unsafe;
-use std::rt::mutex;
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::atomics;
+use core::kinds::marker;
+use core::mem;
+use core::ty::Unsafe;
+use rustrt::local::Local;
+use rustrt::mutex;
+use rustrt::task::{BlockedTask, Task};
+use rustrt::thread::Thread;
use q = mpsc_intrusive;
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
- Unlocked => unreachable!()
+ Unlocked => unreachable!(),
}
unlocked = true;
}
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
- Unlocked => unreachable!()
+ Unlocked => unreachable!(),
}
}
#[cfg(test)]
mod test {
- extern crate native;
+ use std::prelude::*;
use super::{Mutex, StaticMutex, MUTEX_INIT};
+ use native;
#[test]
fn smoke() {
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
-use std::int;
-use std::sync::atomics;
+use core::prelude::*;
+
+use core::int;
+use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
#[cfg(test)]
mod test {
- use super::{ONCE_INIT, Once};
+ use std::prelude::*;
use std::task;
+ use super::{ONCE_INIT, Once};
#[test]
fn smoke_once() {
//! `sync` crate which wrap values directly and provide safer abstractions for
//! containing data.
-use std::kinds::marker;
-use std::mem;
-use std::sync::atomics;
-use std::ty::Unsafe;
-use std::finally::Finally;
+use core::prelude::*;
+
+use core::atomics;
+use core::finally::Finally;
+use core::kinds::marker;
+use core::mem;
+use core::ty::Unsafe;
+use collections::Vec;
use mutex;
+use comm::{Receiver, Sender, channel};
/****************************************************************************
* Internals
#[cfg(test)]
mod tests {
+ use std::prelude::*;
+
use Arc;
use super::{Semaphore, Mutex, RWLock, Condvar};
--- /dev/null
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
+
+//! A single-producer single-consumer concurrent queue
+//!
+//! This module contains the implementation of an SPSC queue which can be used
+//! concurrently between two tasks. This data structure is safe to use and
+//! enforces the semantics that there is one pusher and one popper.
+
+#![experimental]
+
+use core::prelude::*;
+
+use alloc::owned::Box;
+use core::mem;
+use core::ty::Unsafe;
+
+use atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+
+// Node within the linked list queue of messages to send
+struct Node<T> {
+ // FIXME: this could be an uninitialized T if we're careful enough, and
+ // that would reduce memory usage (and be a bit faster).
+ // is it worth it?
+ value: Option<T>, // nullable for re-use of nodes
+ next: AtomicPtr<Node<T>>, // next node in the queue
+}
+
+/// The single-producer single-consumer queue. This structure is not cloneable,
+/// but it can be safely shared in an Arc if it is guaranteed that there
+/// is only one popper and one pusher touching the queue at any one point in
+/// time.
+pub struct Queue<T> {
+ // consumer fields
+ tail: Unsafe<*mut Node<T>>, // where to pop from
+ tail_prev: AtomicPtr<Node<T>>, // where to pop from
+
+ // producer fields
+ head: Unsafe<*mut Node<T>>, // where to push to
+ first: Unsafe<*mut Node<T>>, // where to get new nodes from
+ tail_copy: Unsafe<*mut Node<T>>, // between first/tail
+
+ // Cache maintenance fields. Additions and subtractions are stored
+ // separately in order to allow them to use nonatomic addition/subtraction.
+ cache_bound: uint,
+ cache_additions: AtomicUint,
+ cache_subtractions: AtomicUint,
+}
+
+impl<T: Send> Node<T> {
+ fn new() -> *mut Node<T> {
+ unsafe {
+ mem::transmute(box Node {
+ value: None,
+ next: AtomicPtr::new(0 as *mut Node<T>),
+ })
+ }
+ }
+}
+
+impl<T: Send> Queue<T> {
+ /// Creates a new queue. The producer returned is connected to the consumer
+ /// to push all data to the consumer.
+ ///
+ /// # Arguments
+ ///
+ /// * `bound` - This queue implementation is implemented with a linked
+ /// list, and this means that a push is always a malloc. In
+ /// order to amortize this cost, an internal cache of nodes is
+ /// maintained to prevent a malloc from always being
+ /// necessary. This bound is the limit on the size of the
+ /// cache (if desired). If the value is 0, then the cache has
+ /// no bound. Otherwise, the cache will never grow larger than
+ /// `bound` (although the queue itself could be much larger.
+ pub fn new(bound: uint) -> Queue<T> {
+ let n1 = Node::new();
+ let n2 = Node::new();
+ unsafe { (*n1).next.store(n2, Relaxed) }
+ Queue {
+ tail: Unsafe::new(n2),
+ tail_prev: AtomicPtr::new(n1),
+ head: Unsafe::new(n2),
+ first: Unsafe::new(n1),
+ tail_copy: Unsafe::new(n1),
+ cache_bound: bound,
+ cache_additions: AtomicUint::new(0),
+ cache_subtractions: AtomicUint::new(0),
+ }
+ }
+
+ /// Pushes a new value onto this queue. Note that to use this function
+ /// safely, it must be externally guaranteed that there is only one pusher.
+ pub fn push(&self, t: T) {
+ unsafe {
+ // Acquire a node (which either uses a cached one or allocates a new
+ // one), and then append this to the 'head' node.
+ let n = self.alloc();
+ assert!((*n).value.is_none());
+ (*n).value = Some(t);
+ (*n).next.store(0 as *mut Node<T>, Relaxed);
+ (**self.head.get()).next.store(n, Release);
+ *self.head.get() = n;
+ }
+ }
+
+ unsafe fn alloc(&self) -> *mut Node<T> {
+ // First try to see if we can consume the 'first' node for our uses.
+ // We try to avoid as many atomic instructions as possible here, so
+ // the addition to cache_subtractions is not atomic (plus we're the
+ // only one subtracting from the cache).
+ if *self.first.get() != *self.tail_copy.get() {
+ if self.cache_bound > 0 {
+ let b = self.cache_subtractions.load(Relaxed);
+ self.cache_subtractions.store(b + 1, Relaxed);
+ }
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
+ return ret;
+ }
+ // If the above fails, then update our copy of the tail and try
+ // again.
+ *self.tail_copy.get() = self.tail_prev.load(Acquire);
+ if *self.first.get() != *self.tail_copy.get() {
+ if self.cache_bound > 0 {
+ let b = self.cache_subtractions.load(Relaxed);
+ self.cache_subtractions.store(b + 1, Relaxed);
+ }
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
+ return ret;
+ }
+ // If all of that fails, then we have to allocate a new node
+ // (there's nothing in the node cache).
+ Node::new()
+ }
+
+ /// Attempts to pop a value from this queue. Remember that to use this type
+ /// safely you must ensure that there is only one popper at a time.
+ pub fn pop(&self) -> Option<T> {
+ unsafe {
+ // The `tail` node is not actually a used node, but rather a
+ // sentinel from where we should start popping from. Hence, look at
+ // tail's next field and see if we can use it. If we do a pop, then
+ // the current tail node is a candidate for going into the cache.
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+ if next.is_null() { return None }
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take();
+
+ *self.tail.get() = next;
+ if self.cache_bound == 0 {
+ self.tail_prev.store(tail, Release);
+ } else {
+ // FIXME: this is dubious with overflow.
+ let additions = self.cache_additions.load(Relaxed);
+ let subtractions = self.cache_subtractions.load(Relaxed);
+ let size = additions - subtractions;
+
+ if size < self.cache_bound {
+ self.tail_prev.store(tail, Release);
+ self.cache_additions.store(additions + 1, Relaxed);
+ } else {
+ (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed);
+ // We have successfully erased all references to 'tail', so
+ // now we can safely drop it.
+ let _: Box<Node<T>> = mem::transmute(tail);
+ }
+ }
+ return ret;
+ }
+ }
+
+ /// Attempts to peek at the head of the queue, returning `None` if the queue
+ /// has no data currently
+ pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
+ // This is essentially the same as above with all the popping bits
+ // stripped out.
+ unsafe {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Acquire);
+ if next.is_null() { return None }
+ return (*next).value.as_mut();
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T: Send> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.first.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Relaxed);
+ let _n: Box<Node<T>> = mem::transmute(cur);
+ cur = next;
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::prelude::*;
+
+ use alloc::arc::Arc;
+ use native;
+
+ use super::Queue;
+
+ #[test]
+ fn smoke() {
+ let q = Queue::new(0);
+ q.push(1);
+ q.push(2);
+ assert_eq!(q.pop(), Some(1));
+ assert_eq!(q.pop(), Some(2));
+ assert_eq!(q.pop(), None);
+ q.push(3);
+ q.push(4);
+ assert_eq!(q.pop(), Some(3));
+ assert_eq!(q.pop(), Some(4));
+ assert_eq!(q.pop(), None);
+ }
+
+ #[test]
+ fn drop_full() {
+ let q = Queue::new(0);
+ q.push(box 1);
+ q.push(box 2);
+ }
+
+ #[test]
+ fn smoke_bound() {
+ let q = Queue::new(1);
+ q.push(1);
+ q.push(2);
+ assert_eq!(q.pop(), Some(1));
+ assert_eq!(q.pop(), Some(2));
+ assert_eq!(q.pop(), None);
+ q.push(3);
+ q.push(4);
+ assert_eq!(q.pop(), Some(3));
+ assert_eq!(q.pop(), Some(4));
+ assert_eq!(q.pop(), None);
+ }
+
+ #[test]
+ fn stress() {
+ stress_bound(0);
+ stress_bound(1);
+
+ fn stress_bound(bound: uint) {
+ let a = Arc::new(Queue::new(bound));
+ let b = a.clone();
+ let (tx, rx) = channel();
+ native::task::spawn(proc() {
+ for _ in range(0, 100000) {
+ loop {
+ match b.pop() {
+ Some(1) => break,
+ Some(_) => fail!(),
+ None => {}
+ }
+ }
+ }
+ tx.send(());
+ });
+ for _ in range(0, 100000) {
+ a.push(1);
+ }
+ rx.recv();
+ }
+ }
+}
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(missing_doc)]
-
-/// A task pool abstraction. Useful for achieving predictable CPU
-/// parallelism.
-
-use std::task;
-
-enum Msg<T> {
- Execute(proc(&T):Send),
- Quit
-}
-
-pub struct TaskPool<T> {
- channels: Vec<Sender<Msg<T>>>,
- next_index: uint,
-}
-
-#[unsafe_destructor]
-impl<T> Drop for TaskPool<T> {
- fn drop(&mut self) {
- for channel in self.channels.mut_iter() {
- channel.send(Quit);
- }
- }
-}
-
-impl<T> TaskPool<T> {
- /// Spawns a new task pool with `n_tasks` tasks. If the `sched_mode`
- /// is None, the tasks run on this scheduler; otherwise, they run on a
- /// new scheduler with the given mode. The provided `init_fn_factory`
- /// returns a function which, given the index of the task, should return
- /// local data to be kept around in that task.
- pub fn new(n_tasks: uint,
- init_fn_factory: || -> proc(uint):Send -> T)
- -> TaskPool<T> {
- assert!(n_tasks >= 1);
-
- let channels = Vec::from_fn(n_tasks, |i| {
- let (tx, rx) = channel::<Msg<T>>();
- let init_fn = init_fn_factory();
-
- let task_body = proc() {
- let local_data = init_fn(i);
- loop {
- match rx.recv() {
- Execute(f) => f(&local_data),
- Quit => break
- }
- }
- };
-
- // Run on this scheduler.
- task::spawn(task_body);
-
- tx
- });
-
- return TaskPool {
- channels: channels,
- next_index: 0,
- };
- }
-
- /// Executes the function `f` on a task in the pool. The function
- /// receives a reference to the local data returned by the `init_fn`.
- pub fn execute(&mut self, f: proc(&T):Send) {
- self.channels.get(self.next_index).send(Execute(f));
- self.next_index += 1;
- if self.next_index == self.channels.len() { self.next_index = 0; }
- }
-}
-
-#[test]
-fn test_task_pool() {
- let f: || -> proc(uint):Send -> uint = || {
- let g: proc(uint):Send -> uint = proc(i) i;
- g
- };
- let mut pool = TaskPool::new(4, f);
- for _ in range(0, 8) {
- pool.execute(proc(i) println!("Hello from thread {}!", *i));
- }
-}
use std::fmt::Show;
use std::option::Option;
use std::rc::Rc;
+use std::gc::Gc;
use serialize::{Encodable, Decodable, Encoder, Decoder};
/// A pointer abstraction. FIXME(eddyb) #10676 use Rc<T> in the future.
-pub type P<T> = @T;
+pub type P<T> = Gc<T>;
#[allow(non_snake_case_functions)]
/// Construct a P<T> from a T value.
pub fn P<T: 'static>(value: T) -> P<T> {
- @value
+ box(GC) value
}
// FIXME #6993: in librustc, uses of "ident" should be replaced
// The set of MetaItems that define the compilation environment of the crate,
// used to drive conditional compilation
-pub type CrateConfig = Vec<@MetaItem> ;
+pub type CrateConfig = Vec<Gc<MetaItem>>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Crate {
#[deriving(Clone, Encodable, Decodable, Eq, Hash)]
pub enum MetaItem_ {
MetaWord(InternedString),
- MetaList(InternedString, Vec<@MetaItem> ),
+ MetaList(InternedString, Vec<Gc<MetaItem>>),
MetaNameValue(InternedString, Lit),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Block {
pub view_items: Vec<ViewItem>,
- pub stmts: Vec<@Stmt>,
- pub expr: Option<@Expr>,
+ pub stmts: Vec<Gc<Stmt>>,
+ pub expr: Option<Gc<Expr>>,
pub id: NodeId,
pub rules: BlockCheckMode,
pub span: Span,
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct FieldPat {
pub ident: Ident,
- pub pat: @Pat,
+ pub pat: Gc<Pat>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
// which it is. The resolver determines this, and
// records this pattern's NodeId in an auxiliary
// set (of "pat_idents that refer to nullary enums")
- PatIdent(BindingMode, Path, Option<@Pat>),
- PatEnum(Path, Option<Vec<@Pat> >), /* "none" means a * pattern where
+ PatIdent(BindingMode, Path, Option<Gc<Pat>>),
+ PatEnum(Path, Option<Vec<Gc<Pat>>>), /* "none" means a * pattern where
* we don't bind the fields to names */
- PatStruct(Path, Vec<FieldPat> , bool),
- PatTup(Vec<@Pat> ),
- PatBox(@Pat),
- PatRegion(@Pat), // reference pattern
- PatLit(@Expr),
- PatRange(@Expr, @Expr),
+ PatStruct(Path, Vec<FieldPat>, bool),
+ PatTup(Vec<Gc<Pat>>),
+ PatBox(Gc<Pat>),
+ PatRegion(Gc<Pat>), // reference pattern
+ PatLit(Gc<Expr>),
+ PatRange(Gc<Expr>, Gc<Expr>),
// [a, b, ..i, y, z] is represented as
// PatVec(~[a, b], Some(i), ~[y, z])
- PatVec(Vec<@Pat> , Option<@Pat>, Vec<@Pat> ),
+ PatVec(Vec<Gc<Pat>>, Option<Gc<Pat>>, Vec<Gc<Pat>>),
PatMac(Mac),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Stmt_ {
// could be an item or a local (let) binding:
- StmtDecl(@Decl, NodeId),
+ StmtDecl(Gc<Decl>, NodeId),
// expr without trailing semi-colon (must have unit type):
- StmtExpr(@Expr, NodeId),
+ StmtExpr(Gc<Expr>, NodeId),
// expr with trailing semi-colon (may have any type):
- StmtSemi(@Expr, NodeId),
+ StmtSemi(Gc<Expr>, NodeId),
// bool: is there a trailing sem-colon?
StmtMac(Mac, bool),
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Local {
pub ty: P<Ty>,
- pub pat: @Pat,
- pub init: Option<@Expr>,
+ pub pat: Gc<Pat>,
+ pub init: Option<Gc<Expr>>,
pub id: NodeId,
pub span: Span,
pub source: LocalSource,
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Decl_ {
// a local (let) binding:
- DeclLocal(@Local),
+ DeclLocal(Gc<Local>),
// an item binding:
- DeclItem(@Item),
+ DeclItem(Gc<Item>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Arm {
pub attrs: Vec<Attribute>,
- pub pats: Vec<@Pat>,
- pub guard: Option<@Expr>,
- pub body: @Expr,
+ pub pats: Vec<Gc<Pat>>,
+ pub guard: Option<Gc<Expr>>,
+ pub body: Gc<Expr>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Field {
pub ident: SpannedIdent,
- pub expr: @Expr,
+ pub expr: Gc<Expr>,
pub span: Span,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Expr_ {
- ExprVstore(@Expr, ExprVstore),
+ ExprVstore(Gc<Expr>, ExprVstore),
// First expr is the place; second expr is the value.
- ExprBox(@Expr, @Expr),
- ExprVec(Vec<@Expr>),
- ExprCall(@Expr, Vec<@Expr>),
- ExprMethodCall(SpannedIdent, Vec<P<Ty>>, Vec<@Expr>),
- ExprTup(Vec<@Expr>),
- ExprBinary(BinOp, @Expr, @Expr),
- ExprUnary(UnOp, @Expr),
- ExprLit(@Lit),
- ExprCast(@Expr, P<Ty>),
- ExprIf(@Expr, P<Block>, Option<@Expr>),
- ExprWhile(@Expr, P<Block>),
+ ExprBox(Gc<Expr>, Gc<Expr>),
+ ExprVec(Vec<Gc<Expr>>),
+ ExprCall(Gc<Expr>, Vec<Gc<Expr>>),
+ ExprMethodCall(SpannedIdent, Vec<P<Ty>>, Vec<Gc<Expr>>),
+ ExprTup(Vec<Gc<Expr>>),
+ ExprBinary(BinOp, Gc<Expr>, Gc<Expr>),
+ ExprUnary(UnOp, Gc<Expr>),
+ ExprLit(Gc<Lit>),
+ ExprCast(Gc<Expr>, P<Ty>),
+ ExprIf(Gc<Expr>, P<Block>, Option<Gc<Expr>>),
+ ExprWhile(Gc<Expr>, P<Block>),
// FIXME #6993: change to Option<Name>
- ExprForLoop(@Pat, @Expr, P<Block>, Option<Ident>),
+ ExprForLoop(Gc<Pat>, Gc<Expr>, P<Block>, Option<Ident>),
// Conditionless loop (can be exited with break, cont, or ret)
// FIXME #6993: change to Option<Name>
ExprLoop(P<Block>, Option<Ident>),
- ExprMatch(@Expr, Vec<Arm>),
+ ExprMatch(Gc<Expr>, Vec<Arm>),
ExprFnBlock(P<FnDecl>, P<Block>),
ExprProc(P<FnDecl>, P<Block>),
ExprBlock(P<Block>),
- ExprAssign(@Expr, @Expr),
- ExprAssignOp(BinOp, @Expr, @Expr),
- ExprField(@Expr, Ident, Vec<P<Ty>>),
- ExprIndex(@Expr, @Expr),
+ ExprAssign(Gc<Expr>, Gc<Expr>),
+ ExprAssignOp(BinOp, Gc<Expr>, Gc<Expr>),
+ ExprField(Gc<Expr>, Ident, Vec<P<Ty>>),
+ ExprIndex(Gc<Expr>, Gc<Expr>),
/// Expression that looks like a "name". For example,
/// `std::slice::from_elem::<uint>` is an ExprPath that's the "name" part
/// of a function call.
ExprPath(Path),
- ExprAddrOf(Mutability, @Expr),
+ ExprAddrOf(Mutability, Gc<Expr>),
ExprBreak(Option<Ident>),
ExprAgain(Option<Ident>),
- ExprRet(Option<@Expr>),
+ ExprRet(Option<Gc<Expr>>),
ExprInlineAsm(InlineAsm),
ExprMac(Mac),
// A struct literal expression.
- ExprStruct(Path, Vec<Field> , Option<@Expr> /* base */),
+ ExprStruct(Path, Vec<Field> , Option<Gc<Expr>> /* base */),
// A vector literal constructed from one repeated element.
- ExprRepeat(@Expr /* element */, @Expr /* count */),
+ ExprRepeat(Gc<Expr> /* element */, Gc<Expr> /* count */),
// No-op: used solely so we can pretty-print faithfully
- ExprParen(@Expr)
+ ExprParen(Gc<Expr>)
}
// When the main rust parser encounters a syntax-extension invocation, it
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum TraitMethod {
Required(TypeMethod),
- Provided(@Method),
+ Provided(Gc<Method>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
TyBox(P<Ty>),
TyUniq(P<Ty>),
TyVec(P<Ty>),
- TyFixedLengthVec(P<Ty>, @Expr),
+ TyFixedLengthVec(P<Ty>, Gc<Expr>),
TyPtr(MutTy),
TyRptr(Option<Lifetime>, MutTy),
- TyClosure(@ClosureTy, Option<Lifetime>),
- TyProc(@ClosureTy),
- TyBareFn(@BareFnTy),
- TyUnboxedFn(@UnboxedFnTy),
+ TyClosure(Gc<ClosureTy>, Option<Lifetime>),
+ TyProc(Gc<ClosureTy>),
+ TyBareFn(Gc<BareFnTy>),
+ TyUnboxedFn(Gc<UnboxedFnTy>),
TyTup(Vec<P<Ty>> ),
TyPath(Path, Option<OwnedSlice<TyParamBound>>, NodeId), // for #7264; see above
- TyTypeof(@Expr),
+ TyTypeof(Gc<Expr>),
// TyInfer means the type should be inferred instead of it having been
// specified. This can appear anywhere in a type.
TyInfer,
pub asm: InternedString,
pub asm_str_style: StrStyle,
pub clobbers: InternedString,
- pub inputs: Vec<(InternedString, @Expr)>,
- pub outputs: Vec<(InternedString, @Expr)>,
+ pub inputs: Vec<(InternedString, Gc<Expr>)>,
+ pub outputs: Vec<(InternedString, Gc<Expr>)>,
pub volatile: bool,
pub alignstack: bool,
pub dialect: AsmDialect
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Arg {
pub ty: P<Ty>,
- pub pat: @Pat,
+ pub pat: Gc<Pat>,
pub id: NodeId,
}
node: TyInfer,
span: DUMMY_SP,
}),
- pat: @Pat {
+ pat: box(GC) Pat {
id: DUMMY_NODE_ID,
node: PatIdent(BindByValue(mutability), path, None),
span: span
/// to the last token in the external file.
pub inner: Span,
pub view_items: Vec<ViewItem>,
- pub items: Vec<@Item>,
+ pub items: Vec<Gc<Item>>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct ForeignMod {
pub abi: Abi,
pub view_items: Vec<ViewItem>,
- pub items: Vec<@ForeignItem>,
+ pub items: Vec<Gc<ForeignItem>>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum VariantKind {
TupleVariantKind(Vec<VariantArg>),
- StructVariantKind(@StructDef),
+ StructVariantKind(Gc<StructDef>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub attrs: Vec<Attribute>,
pub kind: VariantKind,
pub id: NodeId,
- pub disr_expr: Option<@Expr>,
+ pub disr_expr: Option<Gc<Expr>>,
pub vis: Visibility,
}
// (containing arbitrary characters) from which to fetch the crate sources
// For example, extern crate whatever = "github.com/mozilla/rust"
ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId),
- ViewItemUse(@ViewPath),
+ ViewItemUse(Gc<ViewPath>),
}
// Meta-data associated with an item
pub struct Attribute_ {
pub id: AttrId,
pub style: AttrStyle,
- pub value: @MetaItem,
+ pub value: Gc<MetaItem>,
pub is_sugared_doc: bool,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Item_ {
- ItemStatic(P<Ty>, Mutability, @Expr),
+ ItemStatic(P<Ty>, Mutability, Gc<Expr>),
ItemFn(P<FnDecl>, FnStyle, Abi, Generics, P<Block>),
ItemMod(Mod),
ItemForeignMod(ForeignMod),
ItemTy(P<Ty>, Generics),
ItemEnum(EnumDef, Generics),
- ItemStruct(@StructDef, Generics),
+ ItemStruct(Gc<StructDef>, Generics),
ItemTrait(Generics, Sized, Vec<TraitRef> , Vec<TraitMethod> ),
ItemImpl(Generics,
Option<TraitRef>, // (optional) trait this impl implements
P<Ty>, // self
- Vec<@Method> ),
+ Vec<Gc<Method>>),
// a macro invocation (which includes macro definition)
ItemMac(Mac),
}
// that we trans.
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum InlinedItem {
- IIItem(@Item),
- IIMethod(DefId /* impl id */, bool /* is provided */, @Method),
- IIForeign(@ForeignItem),
+ IIItem(Gc<Item>),
+ IIMethod(DefId /* impl id */, bool /* is provided */, Gc<Method>),
+ IIForeign(Gc<ForeignItem>),
}
#[cfg(test)]
use std::cell::RefCell;
use std::fmt;
+use std::gc::Gc;
use std::iter;
use std::slice;
-use std::string::String;
#[deriving(Clone, PartialEq)]
pub enum PathElem {
#[deriving(Clone)]
pub enum Node {
- NodeItem(@Item),
- NodeForeignItem(@ForeignItem),
- NodeTraitMethod(@TraitMethod),
- NodeMethod(@Method),
+ NodeItem(Gc<Item>),
+ NodeForeignItem(Gc<ForeignItem>),
+ NodeTraitMethod(Gc<TraitMethod>),
+ NodeMethod(Gc<Method>),
NodeVariant(P<Variant>),
- NodeExpr(@Expr),
- NodeStmt(@Stmt),
- NodeArg(@Pat),
- NodeLocal(@Pat),
- NodePat(@Pat),
+ NodeExpr(Gc<Expr>),
+ NodeStmt(Gc<Stmt>),
+ NodeArg(Gc<Pat>),
+ NodeLocal(Gc<Pat>),
+ NodePat(Gc<Pat>),
NodeBlock(P<Block>),
/// NodeStructCtor represents a tuple struct.
- NodeStructCtor(@StructDef),
+ NodeStructCtor(Gc<StructDef>),
- NodeLifetime(@Lifetime),
+ NodeLifetime(Gc<Lifetime>),
}
// The odd layout is to bring down the total size.
NotPresent,
// All the node types, with a parent ID.
- EntryItem(NodeId, @Item),
- EntryForeignItem(NodeId, @ForeignItem),
- EntryTraitMethod(NodeId, @TraitMethod),
- EntryMethod(NodeId, @Method),
+ EntryItem(NodeId, Gc<Item>),
+ EntryForeignItem(NodeId, Gc<ForeignItem>),
+ EntryTraitMethod(NodeId, Gc<TraitMethod>),
+ EntryMethod(NodeId, Gc<Method>),
EntryVariant(NodeId, P<Variant>),
- EntryExpr(NodeId, @Expr),
- EntryStmt(NodeId, @Stmt),
- EntryArg(NodeId, @Pat),
- EntryLocal(NodeId, @Pat),
- EntryPat(NodeId, @Pat),
+ EntryExpr(NodeId, Gc<Expr>),
+ EntryStmt(NodeId, Gc<Stmt>),
+ EntryArg(NodeId, Gc<Pat>),
+ EntryLocal(NodeId, Gc<Pat>),
+ EntryPat(NodeId, Gc<Pat>),
EntryBlock(NodeId, P<Block>),
- EntryStructCtor(NodeId, @StructDef),
- EntryLifetime(NodeId, @Lifetime),
+ EntryStructCtor(NodeId, Gc<StructDef>),
+ EntryLifetime(NodeId, Gc<Lifetime>),
// Roots for node trees.
RootCrate,
}
}
- pub fn expect_item(&self, id: NodeId) -> @Item {
+ pub fn expect_item(&self, id: NodeId) -> Gc<Item> {
match self.find(id) {
Some(NodeItem(item)) => item,
_ => fail!("expected item, found {}", self.node_to_str(id))
}
}
- pub fn expect_struct(&self, id: NodeId) -> @StructDef {
+ pub fn expect_struct(&self, id: NodeId) -> Gc<StructDef> {
match self.find(id) {
Some(NodeItem(i)) => {
match i.node {
}
}
- pub fn expect_foreign_item(&self, id: NodeId) -> @ForeignItem {
+ pub fn expect_foreign_item(&self, id: NodeId) -> Gc<ForeignItem> {
match self.find(id) {
Some(NodeForeignItem(item)) => item,
_ => fail!("expected foreign item, found {}", self.node_to_str(id))
self.fold_ops.new_span(span)
}
- fn fold_item(&mut self, i: @Item) -> SmallVector<@Item> {
+ fn fold_item(&mut self, i: Gc<Item>) -> SmallVector<Gc<Item>> {
let parent = self.parent;
self.parent = DUMMY_NODE_ID;
- let i = fold::noop_fold_item(i, self).expect_one("expected one item");
+ let i = fold::noop_fold_item(&*i, self).expect_one("expected one item");
assert_eq!(self.parent, i.id);
match i.node {
}
}
ItemForeignMod(ref nm) => {
- for &nitem in nm.items.iter() {
- self.insert(nitem.id, EntryForeignItem(self.parent, nitem));
+ for nitem in nm.items.iter() {
+ self.insert(nitem.id, EntryForeignItem(self.parent,
+ nitem.clone()));
}
}
- ItemStruct(struct_def, _) => {
+ ItemStruct(ref struct_def, _) => {
// If this is a tuple-like struct, register the constructor.
match struct_def.ctor_id {
Some(ctor_id) => {
self.insert(ctor_id, EntryStructCtor(self.parent,
- struct_def));
+ struct_def.clone()));
}
None => {}
}
match *tm {
Required(ref m) => {
self.insert(m.id, EntryTraitMethod(self.parent,
- @(*tm).clone()));
+ box(GC) (*tm).clone()));
}
Provided(m) => {
self.insert(m.id, EntryTraitMethod(self.parent,
- @Provided(m)));
+ box(GC) Provided(m)));
}
}
}
SmallVector::one(i)
}
- fn fold_pat(&mut self, pat: @Pat) -> @Pat {
+ fn fold_pat(&mut self, pat: Gc<Pat>) -> Gc<Pat> {
let pat = fold::noop_fold_pat(pat, self);
match pat.node {
PatIdent(..) => {
pat
}
- fn fold_expr(&mut self, expr: @Expr) -> @Expr {
+ fn fold_expr(&mut self, expr: Gc<Expr>) -> Gc<Expr> {
let expr = fold::noop_fold_expr(expr, self);
self.insert(expr.id, EntryExpr(self.parent, expr));
expr
}
- fn fold_stmt(&mut self, stmt: &Stmt) -> SmallVector<@Stmt> {
+ fn fold_stmt(&mut self, stmt: &Stmt) -> SmallVector<Gc<Stmt>> {
let stmt = fold::noop_fold_stmt(stmt, self).expect_one("expected one statement");
- self.insert(ast_util::stmt_id(stmt), EntryStmt(self.parent, stmt));
+ self.insert(ast_util::stmt_id(&*stmt), EntryStmt(self.parent, stmt));
SmallVector::one(stmt)
}
m
}
- fn fold_method(&mut self, m: @Method) -> @Method {
+ fn fold_method(&mut self, m: Gc<Method>) -> Gc<Method> {
let parent = self.parent;
self.parent = DUMMY_NODE_ID;
- let m = fold::noop_fold_method(m, self);
+ let m = fold::noop_fold_method(&*m, self);
assert_eq!(self.parent, m.id);
self.parent = parent;
m
fn fold_lifetime(&mut self, lifetime: &Lifetime) -> Lifetime {
let lifetime = fold::noop_fold_lifetime(lifetime, self);
- self.insert(lifetime.id, EntryLifetime(self.parent, @lifetime));
+ self.insert(lifetime.id, EntryLifetime(self.parent, box(GC) lifetime));
lifetime
}
}
IIItem(_) => {}
IIMethod(impl_did, is_provided, m) => {
let entry = if is_provided {
- EntryTraitMethod(cx.parent, @Provided(m))
+ EntryTraitMethod(cx.parent, box(GC) Provided(m))
} else {
EntryMethod(cx.parent, m)
};
token::get_ident(variant.node.name),
map.path_to_str(id), id)).to_string()
}
- Some(NodeExpr(expr)) => {
+ Some(NodeExpr(ref expr)) => {
(format!("expr {} (id={})",
- pprust::expr_to_str(expr), id)).to_string()
+ pprust::expr_to_str(&**expr), id)).to_string()
}
- Some(NodeStmt(stmt)) => {
+ Some(NodeStmt(ref stmt)) => {
(format!("stmt {} (id={})",
- pprust::stmt_to_str(stmt), id)).to_string()
+ pprust::stmt_to_str(&**stmt), id)).to_string()
}
- Some(NodeArg(pat)) => {
+ Some(NodeArg(ref pat)) => {
(format!("arg {} (id={})",
- pprust::pat_to_str(pat), id)).to_string()
+ pprust::pat_to_str(&**pat), id)).to_string()
}
- Some(NodeLocal(pat)) => {
+ Some(NodeLocal(ref pat)) => {
(format!("local {} (id={})",
- pprust::pat_to_str(pat), id)).to_string()
+ pprust::pat_to_str(&**pat), id)).to_string()
}
- Some(NodePat(pat)) => {
- (format!("pat {} (id={})", pprust::pat_to_str(pat), id)).to_string()
+ Some(NodePat(ref pat)) => {
+ (format!("pat {} (id={})", pprust::pat_to_str(&**pat), id)).to_string()
}
- Some(NodeBlock(block)) => {
+ Some(NodeBlock(ref block)) => {
(format!("block {} (id={})",
- pprust::block_to_str(block), id)).to_string()
+ pprust::block_to_str(&**block), id)).to_string()
}
Some(NodeStructCtor(_)) => {
(format!("struct_ctor {} (id={})",
}
Some(NodeLifetime(ref l)) => {
(format!("lifetime {} (id={})",
- pprust::lifetime_to_str(*l), id)).to_string()
+ pprust::lifetime_to_str(&**l), id)).to_string()
}
None => {
(format!("unknown node (id={})", id)).to_string()
use std::cell::Cell;
use std::cmp;
-use std::string::String;
+use std::gc::Gc;
use std::u32;
pub fn path_name_i(idents: &[Ident]) -> String {
pub fn unop_to_str(op: UnOp) -> &'static str {
match op {
- UnBox => "@",
+ UnBox => "box(GC) ",
UnUniq => "box() ",
UnDeref => "*",
UnNot => "!",
}
}
-pub fn is_path(e: @Expr) -> bool {
+pub fn is_path(e: Gc<Expr>) -> bool {
return match e.node { ExprPath(_) => true, _ => false };
}
}
}
-pub fn is_call_expr(e: @Expr) -> bool {
+pub fn is_call_expr(e: Gc<Expr>) -> bool {
match e.node { ExprCall(..) => true, _ => false }
}
-pub fn block_from_expr(e: @Expr) -> P<Block> {
+pub fn block_from_expr(e: Gc<Expr>) -> P<Block> {
P(Block {
view_items: Vec::new(),
stmts: Vec::new(),
}
}
-pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> @Pat {
- @ast::Pat { id: id,
+pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> Gc<Pat> {
+ box(GC) ast::Pat { id: id,
node: PatIdent(BindByValue(MutImmutable), ident_to_path(s, i), None),
span: s }
}
}
}
-pub fn unguarded_pat(a: &Arm) -> Option<Vec<@Pat> > {
+pub fn unguarded_pat(a: &Arm) -> Option<Vec<Gc<Pat>>> {
if is_unguarded(a) {
Some(/* FIXME (#2543) */ a.pats.clone())
} else {
token::gensym_ident(pretty.as_slice())
}
-pub fn public_methods(ms: Vec<@Method> ) -> Vec<@Method> {
+pub fn public_methods(ms: Vec<Gc<Method>> ) -> Vec<Gc<Method>> {
ms.move_iter().filter(|m| {
match m.vis {
Public => true,
}
pub fn split_trait_methods(trait_methods: &[TraitMethod])
- -> (Vec<TypeMethod> , Vec<@Method> ) {
+ -> (Vec<TypeMethod> , Vec<Gc<Method>> ) {
let mut reqd = Vec::new();
let mut provd = Vec::new();
for trt_method in trait_methods.iter() {
visitor.result.get()
}
-pub fn is_item_impl(item: @ast::Item) -> bool {
+pub fn is_item_impl(item: Gc<ast::Item>) -> bool {
match item.node {
ItemImpl(..) => true,
_ => false
}
match pat.node {
- PatIdent(_, _, Some(p)) => walk_pat(p, it),
+ PatIdent(_, _, Some(ref p)) => walk_pat(&**p, it),
PatStruct(_, ref fields, _) => {
- fields.iter().advance(|f| walk_pat(f.pat, |p| it(p)))
+ fields.iter().advance(|f| walk_pat(&*f.pat, |p| it(p)))
}
PatEnum(_, Some(ref s)) | PatTup(ref s) => {
- s.iter().advance(|&p| walk_pat(p, |p| it(p)))
+ s.iter().advance(|p| walk_pat(&**p, |p| it(p)))
}
- PatBox(s) | PatRegion(s) => {
- walk_pat(s, it)
+ PatBox(ref s) | PatRegion(ref s) => {
+ walk_pat(&**s, it)
}
PatVec(ref before, ref slice, ref after) => {
- before.iter().advance(|&p| walk_pat(p, |p| it(p))) &&
- slice.iter().advance(|&p| walk_pat(p, |p| it(p))) &&
- after.iter().advance(|&p| walk_pat(p, |p| it(p)))
+ before.iter().advance(|p| walk_pat(&**p, |p| it(p))) &&
+ slice.iter().advance(|p| walk_pat(&**p, |p| it(p))) &&
+ after.iter().advance(|p| walk_pat(&**p, |p| it(p)))
}
PatMac(_) => fail!("attempted to analyze unexpanded pattern"),
PatWild | PatWildMulti | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) |
/// Returns true if the given pattern consists solely of an identifier
/// and false otherwise.
-pub fn pat_is_ident(pat: @ast::Pat) -> bool {
+pub fn pat_is_ident(pat: Gc<ast::Pat>) -> bool {
match pat.node {
ast::PatIdent(..) => true,
_ => false,
}
// Returns true if this literal is a string and false otherwise.
-pub fn lit_is_str(lit: @Lit) -> bool {
+pub fn lit_is_str(lit: Gc<Lit>) -> bool {
match lit.node {
LitStr(..) => true,
_ => false,
use std::collections::HashSet;
use std::collections::BitvSet;
+use std::gc::Gc;
local_data_key!(used_attrs: BitvSet)
*/
fn value_str(&self) -> Option<InternedString>;
/// Gets a list of inner meta items from a list MetaItem type.
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]>;
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]>;
}
impl AttrMetaMethods for Attribute {
fn value_str(&self) -> Option<InternedString> {
self.meta().value_str()
}
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]> {
self.node.value.meta_item_list()
}
}
}
}
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]> {
match self.node {
MetaList(_, ref l) => Some(l.as_slice()),
_ => None
}
// Annoying, but required to get test_cfg to work
-impl AttrMetaMethods for @MetaItem {
+impl AttrMetaMethods for Gc<MetaItem> {
fn name(&self) -> InternedString { (**self).name() }
fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
- fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
+ fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]> {
(**self).meta_item_list()
}
}
pub trait AttributeMethods {
- fn meta(&self) -> @MetaItem;
+ fn meta(&self) -> Gc<MetaItem>;
fn desugar_doc(&self) -> Attribute;
}
impl AttributeMethods for Attribute {
/// Extract the MetaItem from inside this Attribute.
- fn meta(&self) -> @MetaItem {
+ fn meta(&self) -> Gc<MetaItem> {
self.node.value
}
/* Constructors */
pub fn mk_name_value_item_str(name: InternedString, value: InternedString)
- -> @MetaItem {
+ -> Gc<MetaItem> {
let value_lit = dummy_spanned(ast::LitStr(value, ast::CookedStr));
mk_name_value_item(name, value_lit)
}
pub fn mk_name_value_item(name: InternedString, value: ast::Lit)
- -> @MetaItem {
- @dummy_spanned(MetaNameValue(name, value))
+ -> Gc<MetaItem> {
+ box(GC) dummy_spanned(MetaNameValue(name, value))
}
-pub fn mk_list_item(name: InternedString, items: Vec<@MetaItem> ) -> @MetaItem {
- @dummy_spanned(MetaList(name, items))
+pub fn mk_list_item(name: InternedString,
+ items: Vec<Gc<MetaItem>>) -> Gc<MetaItem> {
+ box(GC) dummy_spanned(MetaList(name, items))
}
-pub fn mk_word_item(name: InternedString) -> @MetaItem {
- @dummy_spanned(MetaWord(name))
+pub fn mk_word_item(name: InternedString) -> Gc<MetaItem> {
+ box(GC) dummy_spanned(MetaWord(name))
}
local_data_key!(next_attr_id: uint)
}
/// Returns an inner attribute with the given value.
-pub fn mk_attr_inner(id: AttrId, item: @MetaItem) -> Attribute {
+pub fn mk_attr_inner(id: AttrId, item: Gc<MetaItem>) -> Attribute {
dummy_spanned(Attribute_ {
id: id,
style: ast::AttrInner,
}
/// Returns an outer attribute with the given value.
-pub fn mk_attr_outer(id: AttrId, item: @MetaItem) -> Attribute {
+pub fn mk_attr_outer(id: AttrId, item: Gc<MetaItem>) -> Attribute {
dummy_spanned(Attribute_ {
id: id,
style: ast::AttrOuter,
let attr = Attribute_ {
id: id,
style: style,
- value: @spanned(lo, hi, MetaNameValue(InternedString::new("doc"),
+ value: box(GC) spanned(lo, hi, MetaNameValue(InternedString::new("doc"),
lit)),
is_sugared_doc: true
};
/// Check if `needle` occurs in `haystack` by a structural
/// comparison. This is slightly subtle, and relies on ignoring the
/// span included in the `==` comparison a plain MetaItem.
-pub fn contains(haystack: &[@ast::MetaItem],
- needle: @ast::MetaItem) -> bool {
+pub fn contains(haystack: &[Gc<ast::MetaItem>],
+ needle: Gc<ast::MetaItem>) -> bool {
debug!("attr::contains (name={})", needle.name());
haystack.iter().any(|item| {
debug!(" testing: {}", item.name());
.and_then(|at| at.value_str())
}
-pub fn last_meta_item_value_str_by_name(items: &[@MetaItem], name: &str)
+pub fn last_meta_item_value_str_by_name(items: &[Gc<MetaItem>], name: &str)
-> Option<InternedString> {
items.iter()
.rev()
/* Higher-level applications */
-pub fn sort_meta_items(items: &[@MetaItem]) -> Vec<@MetaItem> {
+pub fn sort_meta_items(items: &[Gc<MetaItem>]) -> Vec<Gc<MetaItem>> {
// This is sort of stupid here, but we need to sort by
// human-readable strings.
let mut v = items.iter()
.map(|&mi| (mi.name(), mi))
- .collect::<Vec<(InternedString, @MetaItem)> >();
+ .collect::<Vec<(InternedString, Gc<MetaItem>)> >();
v.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
v.move_iter().map(|(_, m)| {
match m.node {
MetaList(ref n, ref mis) => {
- @Spanned {
+ box(GC) Spanned {
node: MetaList((*n).clone(),
sort_meta_items(mis.as_slice())),
.. /*bad*/ (*m).clone()
* From a list of crate attributes get only the meta_items that affect crate
* linkage
*/
-pub fn find_linkage_metas(attrs: &[Attribute]) -> Vec<@MetaItem> {
+pub fn find_linkage_metas(attrs: &[Attribute]) -> Vec<Gc<MetaItem>> {
let mut result = Vec::new();
for attr in attrs.iter().filter(|at| at.check_name("link")) {
match attr.meta().node {
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="a")]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="b")]`) == false
pub fn test_cfg<AM: AttrMetaMethods, It: Iterator<AM>>
- (cfg: &[@MetaItem], mut metas: It) -> bool {
+ (cfg: &[Gc<MetaItem>], mut metas: It) -> bool {
// having no #[cfg(...)] attributes counts as matching.
let mut no_cfgs = true;
})
}
-pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[@MetaItem]) {
+pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[Gc<MetaItem>]) {
let mut set = HashSet::new();
for meta in metas.iter() {
let name = meta.name();
use serialize::{Encodable, Decodable, Encoder, Decoder};
use std::cell::RefCell;
+use std::gc::Gc;
use std::rc::Rc;
-use std::string::String;
pub trait Pos {
fn from_uint(n: uint) -> Self;
pub hi: BytePos,
/// Information about where the macro came from, if this piece of
/// code was created by a macro expansion.
- pub expn_info: Option<@ExpnInfo>
+ pub expn_info: Option<Gc<ExpnInfo>>
}
pub static DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_info: None };
use parse::token::InternedString;
use parse::token;
-
enum State {
Asm,
Outputs,
out));
}
- MacExpr::new(@ast::Expr {
+ MacExpr::new(box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprInlineAsm(ast::InlineAsm {
asm: token::intern_and_get_ident(asm.get()),
use util::small_vector::SmallVector;
use std::collections::HashMap;
+use std::gc::Gc;
// new-style macro! tt code:
//
}
pub type ItemDecorator =
- fn(&mut ExtCtxt, Span, @ast::MetaItem, @ast::Item, |@ast::Item|);
+ fn(&mut ExtCtxt, Span, Gc<ast::MetaItem>, Gc<ast::Item>, |Gc<ast::Item>|);
pub type ItemModifier =
- fn(&mut ExtCtxt, Span, @ast::MetaItem, @ast::Item) -> @ast::Item;
+ fn(&mut ExtCtxt, Span, Gc<ast::MetaItem>, Gc<ast::Item>) -> Gc<ast::Item>;
pub struct BasicMacroExpander {
pub expander: MacroExpanderFn,
None
}
/// Create an expression.
- fn make_expr(&self) -> Option<@ast::Expr> {
+ fn make_expr(&self) -> Option<Gc<ast::Expr>> {
None
}
/// Create zero or more items.
- fn make_items(&self) -> Option<SmallVector<@ast::Item>> {
+ fn make_items(&self) -> Option<SmallVector<Gc<ast::Item>>> {
None
}
/// Create a pattern.
- fn make_pat(&self) -> Option<@ast::Pat> {
+ fn make_pat(&self) -> Option<Gc<ast::Pat>> {
None
}
///
/// By default this attempts to create an expression statement,
/// returning None if that fails.
- fn make_stmt(&self) -> Option<@ast::Stmt> {
+ fn make_stmt(&self) -> Option<Gc<ast::Stmt>> {
self.make_expr()
- .map(|e| @codemap::respan(e.span, ast::StmtExpr(e, ast::DUMMY_NODE_ID)))
+ .map(|e| box(GC) codemap::respan(e.span, ast::StmtExpr(e, ast::DUMMY_NODE_ID)))
}
}
/// A convenience type for macros that return a single expression.
pub struct MacExpr {
- e: @ast::Expr
+ e: Gc<ast::Expr>,
}
impl MacExpr {
- pub fn new(e: @ast::Expr) -> Box<MacResult> {
+ pub fn new(e: Gc<ast::Expr>) -> Box<MacResult> {
box MacExpr { e: e } as Box<MacResult>
}
}
impl MacResult for MacExpr {
- fn make_expr(&self) -> Option<@ast::Expr> {
+ fn make_expr(&self) -> Option<Gc<ast::Expr>> {
Some(self.e)
}
}
/// A convenience type for macros that return a single pattern.
pub struct MacPat {
- p: @ast::Pat
+ p: Gc<ast::Pat>,
}
impl MacPat {
- pub fn new(p: @ast::Pat) -> Box<MacResult> {
+ pub fn new(p: Gc<ast::Pat>) -> Box<MacResult> {
box MacPat { p: p } as Box<MacResult>
}
}
impl MacResult for MacPat {
- fn make_pat(&self) -> Option<@ast::Pat> {
+ fn make_pat(&self) -> Option<Gc<ast::Pat>> {
Some(self.p)
}
}
/// A convenience type for macros that return a single item.
pub struct MacItem {
- i: @ast::Item
+ i: Gc<ast::Item>
}
impl MacItem {
- pub fn new(i: @ast::Item) -> Box<MacResult> {
+ pub fn new(i: Gc<ast::Item>) -> Box<MacResult> {
box MacItem { i: i } as Box<MacResult>
}
}
impl MacResult for MacItem {
- fn make_items(&self) -> Option<SmallVector<@ast::Item>> {
+ fn make_items(&self) -> Option<SmallVector<Gc<ast::Item>>> {
Some(SmallVector::one(self.i))
}
- fn make_stmt(&self) -> Option<@ast::Stmt> {
- Some(@codemap::respan(
+ fn make_stmt(&self) -> Option<Gc<ast::Stmt>> {
+ Some(box(GC) codemap::respan(
self.i.span,
ast::StmtDecl(
- @codemap::respan(self.i.span, ast::DeclItem(self.i)),
+ box(GC) codemap::respan(self.i.span, ast::DeclItem(self.i)),
ast::DUMMY_NODE_ID)))
}
}
}
/// A plain dummy expression.
- pub fn raw_expr(sp: Span) -> @ast::Expr {
- @ast::Expr {
+ pub fn raw_expr(sp: Span) -> Gc<ast::Expr> {
+ box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
- node: ast::ExprLit(@codemap::respan(sp, ast::LitNil)),
+ node: ast::ExprLit(box(GC) codemap::respan(sp, ast::LitNil)),
span: sp,
}
}
/// A plain dummy pattern.
- pub fn raw_pat(sp: Span) -> @ast::Pat {
- @ast::Pat {
+ pub fn raw_pat(sp: Span) -> Gc<ast::Pat> {
+ box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: ast::PatWild,
span: sp,
}
impl MacResult for DummyResult {
- fn make_expr(&self) -> Option<@ast::Expr> {
+ fn make_expr(&self) -> Option<Gc<ast::Expr>> {
Some(DummyResult::raw_expr(self.span))
}
- fn make_pat(&self) -> Option<@ast::Pat> {
+ fn make_pat(&self) -> Option<Gc<ast::Pat>> {
Some(DummyResult::raw_pat(self.span))
}
- fn make_items(&self) -> Option<SmallVector<@ast::Item>> {
+ fn make_items(&self) -> Option<SmallVector<Gc<ast::Item>>> {
if self.expr_only {
None
} else {
Some(SmallVector::zero())
}
}
- fn make_stmt(&self) -> Option<@ast::Stmt> {
- Some(@codemap::respan(self.span,
+ fn make_stmt(&self) -> Option<Gc<ast::Stmt>> {
+ Some(box(GC) codemap::respan(self.span,
ast::StmtExpr(DummyResult::raw_expr(self.span),
ast::DUMMY_NODE_ID)))
}
pub struct ExtCtxt<'a> {
pub parse_sess: &'a parse::ParseSess,
pub cfg: ast::CrateConfig,
- pub backtrace: Option<@ExpnInfo>,
+ pub backtrace: Option<Gc<ExpnInfo>>,
pub ecfg: expand::ExpansionConfig,
pub mod_path: Vec<ast::Ident> ,
}
}
- pub fn expand_expr(&mut self, mut e: @ast::Expr) -> @ast::Expr {
+ pub fn expand_expr(&mut self, mut e: Gc<ast::Expr>) -> Gc<ast::Expr> {
loop {
match e.node {
ast::ExprMac(..) => {
}
}
pub fn print_backtrace(&self) { }
- pub fn backtrace(&self) -> Option<@ExpnInfo> { self.backtrace }
+ pub fn backtrace(&self) -> Option<Gc<ExpnInfo>> { self.backtrace }
pub fn mod_push(&mut self, i: ast::Ident) { self.mod_path.push(i); }
pub fn mod_pop(&mut self) { self.mod_path.pop().unwrap(); }
pub fn mod_path(&self) -> Vec<ast::Ident> {
match ei {
ExpnInfo {call_site: cs, callee: ref callee} => {
self.backtrace =
- Some(@ExpnInfo {
+ Some(box(GC) ExpnInfo {
call_site: Span {lo: cs.lo, hi: cs.hi,
- expn_info: self.backtrace},
+ expn_info: self.backtrace.clone()},
callee: (*callee).clone()
});
}
/// Extract a string literal from the macro expanded version of `expr`,
/// emitting `err_msg` if `expr` is not a string literal. This does not stop
/// compilation on error, merely emits a non-fatal error and returns None.
-pub fn expr_to_str(cx: &mut ExtCtxt, expr: @ast::Expr, err_msg: &str)
+pub fn expr_to_str(cx: &mut ExtCtxt, expr: Gc<ast::Expr>, err_msg: &str)
-> Option<(InternedString, ast::StrStyle)> {
// we want to be able to handle e.g. concat("foo", "bar")
let expr = cx.expand_expr(expr);
/// parsing error, emit a non-fatal error and return None.
pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
sp: Span,
- tts: &[ast::TokenTree]) -> Option<Vec<@ast::Expr> > {
+ tts: &[ast::TokenTree]) -> Option<Vec<Gc<ast::Expr>>> {
let mut p = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(),
tts.iter()
use parse::token::InternedString;
use parse::token;
+use std::gc::Gc;
+
// Transitional reexports so qquote can find the paths it is looking for
mod syntax {
pub use ext;
fn lifetime(&self, span: Span, ident: ast::Name) -> ast::Lifetime;
// statements
- fn stmt_expr(&self, expr: @ast::Expr) -> @ast::Stmt;
- fn stmt_let(&self, sp: Span, mutbl: bool, ident: ast::Ident, ex: @ast::Expr) -> @ast::Stmt;
+ fn stmt_expr(&self, expr: Gc<ast::Expr>) -> Gc<ast::Stmt>;
+ fn stmt_let(&self, sp: Span, mutbl: bool, ident: ast::Ident,
+ ex: Gc<ast::Expr>) -> Gc<ast::Stmt>;
fn stmt_let_typed(&self,
sp: Span,
mutbl: bool,
ident: ast::Ident,
typ: P<ast::Ty>,
- ex: @ast::Expr)
- -> @ast::Stmt;
+ ex: Gc<ast::Expr>)
+ -> Gc<ast::Stmt>;
// blocks
- fn block(&self, span: Span, stmts: Vec<@ast::Stmt> , expr: Option<@ast::Expr>) -> P<ast::Block>;
- fn block_expr(&self, expr: @ast::Expr) -> P<ast::Block>;
+ fn block(&self, span: Span, stmts: Vec<Gc<ast::Stmt>>,
+ expr: Option<Gc<ast::Expr>>) -> P<ast::Block>;
+ fn block_expr(&self, expr: Gc<ast::Expr>) -> P<ast::Block>;
fn block_all(&self, span: Span,
view_items: Vec<ast::ViewItem> ,
- stmts: Vec<@ast::Stmt> ,
- expr: Option<@ast::Expr>) -> P<ast::Block>;
+ stmts: Vec<Gc<ast::Stmt>> ,
+ expr: Option<Gc<ast::Expr>>) -> P<ast::Block>;
// expressions
- fn expr(&self, span: Span, node: ast::Expr_) -> @ast::Expr;
- fn expr_path(&self, path: ast::Path) -> @ast::Expr;
- fn expr_ident(&self, span: Span, id: ast::Ident) -> @ast::Expr;
+ fn expr(&self, span: Span, node: ast::Expr_) -> Gc<ast::Expr>;
+ fn expr_path(&self, path: ast::Path) -> Gc<ast::Expr>;
+ fn expr_ident(&self, span: Span, id: ast::Ident) -> Gc<ast::Expr>;
- fn expr_self(&self, span: Span) -> @ast::Expr;
+ fn expr_self(&self, span: Span) -> Gc<ast::Expr>;
fn expr_binary(&self, sp: Span, op: ast::BinOp,
- lhs: @ast::Expr, rhs: @ast::Expr) -> @ast::Expr;
- fn expr_deref(&self, sp: Span, e: @ast::Expr) -> @ast::Expr;
- fn expr_unary(&self, sp: Span, op: ast::UnOp, e: @ast::Expr) -> @ast::Expr;
-
- fn expr_managed(&self, sp: Span, e: @ast::Expr) -> @ast::Expr;
- fn expr_addr_of(&self, sp: Span, e: @ast::Expr) -> @ast::Expr;
- fn expr_mut_addr_of(&self, sp: Span, e: @ast::Expr) -> @ast::Expr;
- fn expr_field_access(&self, span: Span, expr: @ast::Expr, ident: ast::Ident) -> @ast::Expr;
- fn expr_call(&self, span: Span, expr: @ast::Expr, args: Vec<@ast::Expr> ) -> @ast::Expr;
- fn expr_call_ident(&self, span: Span, id: ast::Ident, args: Vec<@ast::Expr> ) -> @ast::Expr;
+ lhs: Gc<ast::Expr>, rhs: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_deref(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_unary(&self, sp: Span, op: ast::UnOp, e: Gc<ast::Expr>) -> Gc<ast::Expr>;
+
+ fn expr_managed(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_addr_of(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_mut_addr_of(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_field_access(&self, span: Span, expr: Gc<ast::Expr>,
+ ident: ast::Ident) -> Gc<ast::Expr>;
+ fn expr_call(&self, span: Span, expr: Gc<ast::Expr>,
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr>;
+ fn expr_call_ident(&self, span: Span, id: ast::Ident,
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr>;
fn expr_call_global(&self, sp: Span, fn_path: Vec<ast::Ident> ,
- args: Vec<@ast::Expr> ) -> @ast::Expr;
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr>;
fn expr_method_call(&self, span: Span,
- expr: @ast::Expr, ident: ast::Ident,
- args: Vec<@ast::Expr> ) -> @ast::Expr;
- fn expr_block(&self, b: P<ast::Block>) -> @ast::Expr;
- fn expr_cast(&self, sp: Span, expr: @ast::Expr, ty: P<ast::Ty>) -> @ast::Expr;
-
- fn field_imm(&self, span: Span, name: Ident, e: @ast::Expr) -> ast::Field;
- fn expr_struct(&self, span: Span, path: ast::Path, fields: Vec<ast::Field> ) -> @ast::Expr;
- fn expr_struct_ident(&self, span: Span, id: ast::Ident, fields: Vec<ast::Field> ) -> @ast::Expr;
-
- fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> @ast::Expr;
-
- fn expr_uint(&self, span: Span, i: uint) -> @ast::Expr;
- fn expr_int(&self, sp: Span, i: int) -> @ast::Expr;
- fn expr_u8(&self, sp: Span, u: u8) -> @ast::Expr;
- fn expr_bool(&self, sp: Span, value: bool) -> @ast::Expr;
-
- fn expr_vstore(&self, sp: Span, expr: @ast::Expr, vst: ast::ExprVstore) -> @ast::Expr;
- fn expr_vec(&self, sp: Span, exprs: Vec<@ast::Expr> ) -> @ast::Expr;
- fn expr_vec_ng(&self, sp: Span) -> @ast::Expr;
- fn expr_vec_slice(&self, sp: Span, exprs: Vec<@ast::Expr> ) -> @ast::Expr;
- fn expr_str(&self, sp: Span, s: InternedString) -> @ast::Expr;
- fn expr_str_uniq(&self, sp: Span, s: InternedString) -> @ast::Expr;
-
- fn expr_some(&self, sp: Span, expr: @ast::Expr) -> @ast::Expr;
- fn expr_none(&self, sp: Span) -> @ast::Expr;
-
- fn expr_fail(&self, span: Span, msg: InternedString) -> @ast::Expr;
- fn expr_unreachable(&self, span: Span) -> @ast::Expr;
-
- fn expr_ok(&self, span: Span, expr: @ast::Expr) -> @ast::Expr;
- fn expr_err(&self, span: Span, expr: @ast::Expr) -> @ast::Expr;
- fn expr_try(&self, span: Span, head: @ast::Expr) -> @ast::Expr;
-
- fn pat(&self, span: Span, pat: ast::Pat_) -> @ast::Pat;
- fn pat_wild(&self, span: Span) -> @ast::Pat;
- fn pat_lit(&self, span: Span, expr: @ast::Expr) -> @ast::Pat;
- fn pat_ident(&self, span: Span, ident: ast::Ident) -> @ast::Pat;
+ expr: Gc<ast::Expr>, ident: ast::Ident,
+ args: Vec<Gc<ast::Expr>> ) -> Gc<ast::Expr>;
+ fn expr_block(&self, b: P<ast::Block>) -> Gc<ast::Expr>;
+ fn expr_cast(&self, sp: Span, expr: Gc<ast::Expr>,
+ ty: P<ast::Ty>) -> Gc<ast::Expr>;
+
+ fn field_imm(&self, span: Span, name: Ident, e: Gc<ast::Expr>) -> ast::Field;
+ fn expr_struct(&self, span: Span, path: ast::Path,
+ fields: Vec<ast::Field> ) -> Gc<ast::Expr>;
+ fn expr_struct_ident(&self, span: Span, id: ast::Ident,
+ fields: Vec<ast::Field> ) -> Gc<ast::Expr>;
+
+ fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> Gc<ast::Expr>;
+
+ fn expr_uint(&self, span: Span, i: uint) -> Gc<ast::Expr>;
+ fn expr_int(&self, sp: Span, i: int) -> Gc<ast::Expr>;
+ fn expr_u8(&self, sp: Span, u: u8) -> Gc<ast::Expr>;
+ fn expr_bool(&self, sp: Span, value: bool) -> Gc<ast::Expr>;
+
+ fn expr_vstore(&self, sp: Span, expr: Gc<ast::Expr>, vst: ast::ExprVstore) -> Gc<ast::Expr>;
+ fn expr_vec(&self, sp: Span, exprs: Vec<Gc<ast::Expr>> ) -> Gc<ast::Expr>;
+ fn expr_vec_ng(&self, sp: Span) -> Gc<ast::Expr>;
+ fn expr_vec_slice(&self, sp: Span, exprs: Vec<Gc<ast::Expr>> ) -> Gc<ast::Expr>;
+ fn expr_str(&self, sp: Span, s: InternedString) -> Gc<ast::Expr>;
+ fn expr_str_uniq(&self, sp: Span, s: InternedString) -> Gc<ast::Expr>;
+
+ fn expr_some(&self, sp: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_none(&self, sp: Span) -> Gc<ast::Expr>;
+
+ fn expr_fail(&self, span: Span, msg: InternedString) -> Gc<ast::Expr>;
+ fn expr_unreachable(&self, span: Span) -> Gc<ast::Expr>;
+
+ fn expr_ok(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_err(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn expr_try(&self, span: Span, head: Gc<ast::Expr>) -> Gc<ast::Expr>;
+
+ fn pat(&self, span: Span, pat: ast::Pat_) -> Gc<ast::Pat>;
+ fn pat_wild(&self, span: Span) -> Gc<ast::Pat>;
+ fn pat_lit(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Pat>;
+ fn pat_ident(&self, span: Span, ident: ast::Ident) -> Gc<ast::Pat>;
fn pat_ident_binding_mode(&self,
span: Span,
ident: ast::Ident,
- bm: ast::BindingMode) -> @ast::Pat;
- fn pat_enum(&self, span: Span, path: ast::Path, subpats: Vec<@ast::Pat> ) -> @ast::Pat;
+ bm: ast::BindingMode) -> Gc<ast::Pat>;
+ fn pat_enum(&self, span: Span, path: ast::Path,
+ subpats: Vec<Gc<ast::Pat>>) -> Gc<ast::Pat>;
fn pat_struct(&self, span: Span,
- path: ast::Path, field_pats: Vec<ast::FieldPat> ) -> @ast::Pat;
+ path: ast::Path, field_pats: Vec<ast::FieldPat> ) -> Gc<ast::Pat>;
- fn arm(&self, span: Span, pats: Vec<@ast::Pat> , expr: @ast::Expr) -> ast::Arm;
+ fn arm(&self, span: Span, pats: Vec<Gc<ast::Pat>> , expr: Gc<ast::Expr>) -> ast::Arm;
fn arm_unreachable(&self, span: Span) -> ast::Arm;
- fn expr_match(&self, span: Span, arg: @ast::Expr, arms: Vec<ast::Arm> ) -> @ast::Expr;
+ fn expr_match(&self, span: Span, arg: Gc<ast::Expr>, arms: Vec<ast::Arm> ) -> Gc<ast::Expr>;
fn expr_if(&self, span: Span,
- cond: @ast::Expr, then: @ast::Expr, els: Option<@ast::Expr>) -> @ast::Expr;
+ cond: Gc<ast::Expr>, then: Gc<ast::Expr>,
+ els: Option<Gc<ast::Expr>>) -> Gc<ast::Expr>;
fn lambda_fn_decl(&self, span: Span,
- fn_decl: P<ast::FnDecl>, blk: P<ast::Block>) -> @ast::Expr;
+ fn_decl: P<ast::FnDecl>, blk: P<ast::Block>) -> Gc<ast::Expr>;
- fn lambda(&self, span: Span, ids: Vec<ast::Ident> , blk: P<ast::Block>) -> @ast::Expr;
- fn lambda0(&self, span: Span, blk: P<ast::Block>) -> @ast::Expr;
- fn lambda1(&self, span: Span, blk: P<ast::Block>, ident: ast::Ident) -> @ast::Expr;
+ fn lambda(&self, span: Span, ids: Vec<ast::Ident> , blk: P<ast::Block>) -> Gc<ast::Expr>;
+ fn lambda0(&self, span: Span, blk: P<ast::Block>) -> Gc<ast::Expr>;
+ fn lambda1(&self, span: Span, blk: P<ast::Block>, ident: ast::Ident) -> Gc<ast::Expr>;
- fn lambda_expr(&self, span: Span, ids: Vec<ast::Ident> , blk: @ast::Expr) -> @ast::Expr;
- fn lambda_expr_0(&self, span: Span, expr: @ast::Expr) -> @ast::Expr;
- fn lambda_expr_1(&self, span: Span, expr: @ast::Expr, ident: ast::Ident) -> @ast::Expr;
+ fn lambda_expr(&self, span: Span, ids: Vec<ast::Ident> , blk: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn lambda_expr_0(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr>;
+ fn lambda_expr_1(&self, span: Span, expr: Gc<ast::Expr>, ident: ast::Ident) -> Gc<ast::Expr>;
- fn lambda_stmts(&self, span: Span, ids: Vec<ast::Ident> , blk: Vec<@ast::Stmt> ) -> @ast::Expr;
- fn lambda_stmts_0(&self, span: Span, stmts: Vec<@ast::Stmt> ) -> @ast::Expr;
- fn lambda_stmts_1(&self, span: Span, stmts: Vec<@ast::Stmt> , ident: ast::Ident) -> @ast::Expr;
+ fn lambda_stmts(&self, span: Span, ids: Vec<ast::Ident>,
+ blk: Vec<Gc<ast::Stmt>>) -> Gc<ast::Expr>;
+ fn lambda_stmts_0(&self, span: Span,
+ stmts: Vec<Gc<ast::Stmt>>) -> Gc<ast::Expr>;
+ fn lambda_stmts_1(&self, span: Span,
+ stmts: Vec<Gc<ast::Stmt>>, ident: ast::Ident) -> Gc<ast::Expr>;
// items
fn item(&self, span: Span,
- name: Ident, attrs: Vec<ast::Attribute> , node: ast::Item_) -> @ast::Item;
+ name: Ident, attrs: Vec<ast::Attribute>,
+ node: ast::Item_) -> Gc<ast::Item>;
fn arg(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> ast::Arg;
// FIXME unused self
inputs: Vec<ast::Arg> ,
output: P<ast::Ty>,
generics: Generics,
- body: P<ast::Block>) -> @ast::Item;
+ body: P<ast::Block>) -> Gc<ast::Item>;
fn item_fn(&self,
span: Span,
name: Ident,
inputs: Vec<ast::Arg> ,
output: P<ast::Ty>,
- body: P<ast::Block>) -> @ast::Item;
+ body: P<ast::Block>) -> Gc<ast::Item>;
fn variant(&self, span: Span, name: Ident, tys: Vec<P<ast::Ty>> ) -> ast::Variant;
fn item_enum_poly(&self,
span: Span,
name: Ident,
enum_definition: ast::EnumDef,
- generics: Generics) -> @ast::Item;
- fn item_enum(&self, span: Span, name: Ident, enum_def: ast::EnumDef) -> @ast::Item;
+ generics: Generics) -> Gc<ast::Item>;
+ fn item_enum(&self, span: Span, name: Ident,
+ enum_def: ast::EnumDef) -> Gc<ast::Item>;
fn item_struct_poly(&self,
span: Span,
name: Ident,
struct_def: ast::StructDef,
- generics: Generics) -> @ast::Item;
- fn item_struct(&self, span: Span, name: Ident, struct_def: ast::StructDef) -> @ast::Item;
+ generics: Generics) -> Gc<ast::Item>;
+ fn item_struct(&self, span: Span, name: Ident,
+ struct_def: ast::StructDef) -> Gc<ast::Item>;
fn item_mod(&self, span: Span, inner_span: Span,
- name: Ident, attrs: Vec<ast::Attribute> ,
- vi: Vec<ast::ViewItem> , items: Vec<@ast::Item> ) -> @ast::Item;
+ name: Ident, attrs: Vec<ast::Attribute>,
+ vi: Vec<ast::ViewItem>,
+ items: Vec<Gc<ast::Item>>) -> Gc<ast::Item>;
fn item_ty_poly(&self,
span: Span,
name: Ident,
ty: P<ast::Ty>,
- generics: Generics) -> @ast::Item;
- fn item_ty(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> @ast::Item;
+ generics: Generics) -> Gc<ast::Item>;
+ fn item_ty(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> Gc<ast::Item>;
- fn attribute(&self, sp: Span, mi: @ast::MetaItem) -> ast::Attribute;
+ fn attribute(&self, sp: Span, mi: Gc<ast::MetaItem>) -> ast::Attribute;
- fn meta_word(&self, sp: Span, w: InternedString) -> @ast::MetaItem;
+ fn meta_word(&self, sp: Span, w: InternedString) -> Gc<ast::MetaItem>;
fn meta_list(&self,
sp: Span,
name: InternedString,
- mis: Vec<@ast::MetaItem> )
- -> @ast::MetaItem;
+ mis: Vec<Gc<ast::MetaItem>>)
+ -> Gc<ast::MetaItem>;
fn meta_name_value(&self,
sp: Span,
name: InternedString,
value: ast::Lit_)
- -> @ast::MetaItem;
+ -> Gc<ast::MetaItem>;
fn view_use(&self, sp: Span,
- vis: ast::Visibility, vp: @ast::ViewPath) -> ast::ViewItem;
+ vis: ast::Visibility, vp: Gc<ast::ViewPath>) -> ast::ViewItem;
fn view_use_simple(&self, sp: Span, vis: ast::Visibility, path: ast::Path) -> ast::ViewItem;
fn view_use_simple_(&self, sp: Span, vis: ast::Visibility,
ident: ast::Ident, path: ast::Path) -> ast::ViewItem;
ast::Lifetime { id: ast::DUMMY_NODE_ID, span: span, name: name }
}
- fn stmt_expr(&self, expr: @ast::Expr) -> @ast::Stmt {
- @respan(expr.span, ast::StmtSemi(expr, ast::DUMMY_NODE_ID))
+ fn stmt_expr(&self, expr: Gc<ast::Expr>) -> Gc<ast::Stmt> {
+ box(GC) respan(expr.span, ast::StmtSemi(expr, ast::DUMMY_NODE_ID))
}
- fn stmt_let(&self, sp: Span, mutbl: bool, ident: ast::Ident, ex: @ast::Expr) -> @ast::Stmt {
+ fn stmt_let(&self, sp: Span, mutbl: bool, ident: ast::Ident,
+ ex: Gc<ast::Expr>) -> Gc<ast::Stmt> {
let pat = if mutbl {
self.pat_ident_binding_mode(sp, ident, ast::BindByValue(ast::MutMutable))
} else {
self.pat_ident(sp, ident)
};
- let local = @ast::Local {
+ let local = box(GC) ast::Local {
ty: self.ty_infer(sp),
pat: pat,
init: Some(ex),
source: ast::LocalLet,
};
let decl = respan(sp, ast::DeclLocal(local));
- @respan(sp, ast::StmtDecl(@decl, ast::DUMMY_NODE_ID))
+ box(GC) respan(sp, ast::StmtDecl(box(GC) decl, ast::DUMMY_NODE_ID))
}
fn stmt_let_typed(&self,
mutbl: bool,
ident: ast::Ident,
typ: P<ast::Ty>,
- ex: @ast::Expr)
- -> @ast::Stmt {
+ ex: Gc<ast::Expr>)
+ -> Gc<ast::Stmt> {
let pat = if mutbl {
self.pat_ident_binding_mode(sp, ident, ast::BindByValue(ast::MutMutable))
} else {
self.pat_ident(sp, ident)
};
- let local = @ast::Local {
+ let local = box(GC) ast::Local {
ty: typ,
pat: pat,
init: Some(ex),
source: ast::LocalLet,
};
let decl = respan(sp, ast::DeclLocal(local));
- @respan(sp, ast::StmtDecl(@decl, ast::DUMMY_NODE_ID))
+ box(GC) respan(sp, ast::StmtDecl(box(GC) decl, ast::DUMMY_NODE_ID))
}
- fn block(&self, span: Span, stmts: Vec<@ast::Stmt> , expr: Option<@Expr>) -> P<ast::Block> {
+ fn block(&self, span: Span, stmts: Vec<Gc<ast::Stmt>>,
+ expr: Option<Gc<Expr>>) -> P<ast::Block> {
self.block_all(span, Vec::new(), stmts, expr)
}
- fn block_expr(&self, expr: @ast::Expr) -> P<ast::Block> {
+ fn block_expr(&self, expr: Gc<ast::Expr>) -> P<ast::Block> {
self.block_all(expr.span, Vec::new(), Vec::new(), Some(expr))
}
fn block_all(&self,
span: Span,
view_items: Vec<ast::ViewItem> ,
- stmts: Vec<@ast::Stmt> ,
- expr: Option<@ast::Expr>) -> P<ast::Block> {
+ stmts: Vec<Gc<ast::Stmt>>,
+ expr: Option<Gc<ast::Expr>>) -> P<ast::Block> {
P(ast::Block {
view_items: view_items,
stmts: stmts,
})
}
- fn expr(&self, span: Span, node: ast::Expr_) -> @ast::Expr {
- @ast::Expr {
+ fn expr(&self, span: Span, node: ast::Expr_) -> Gc<ast::Expr> {
+ box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: node,
span: span,
}
}
- fn expr_path(&self, path: ast::Path) -> @ast::Expr {
+ fn expr_path(&self, path: ast::Path) -> Gc<ast::Expr> {
self.expr(path.span, ast::ExprPath(path))
}
- fn expr_ident(&self, span: Span, id: ast::Ident) -> @ast::Expr {
+ fn expr_ident(&self, span: Span, id: ast::Ident) -> Gc<ast::Expr> {
self.expr_path(self.path_ident(span, id))
}
- fn expr_self(&self, span: Span) -> @ast::Expr {
+ fn expr_self(&self, span: Span) -> Gc<ast::Expr> {
self.expr_ident(span, special_idents::self_)
}
fn expr_binary(&self, sp: Span, op: ast::BinOp,
- lhs: @ast::Expr, rhs: @ast::Expr) -> @ast::Expr {
+ lhs: Gc<ast::Expr>, rhs: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprBinary(op, lhs, rhs))
}
- fn expr_deref(&self, sp: Span, e: @ast::Expr) -> @ast::Expr {
+ fn expr_deref(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr_unary(sp, ast::UnDeref, e)
}
- fn expr_unary(&self, sp: Span, op: ast::UnOp, e: @ast::Expr) -> @ast::Expr {
+ fn expr_unary(&self, sp: Span, op: ast::UnOp, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprUnary(op, e))
}
- fn expr_managed(&self, sp: Span, e: @ast::Expr) -> @ast::Expr {
+ fn expr_managed(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr_unary(sp, ast::UnBox, e)
}
- fn expr_field_access(&self, sp: Span, expr: @ast::Expr, ident: ast::Ident) -> @ast::Expr {
+ fn expr_field_access(&self, sp: Span, expr: Gc<ast::Expr>, ident: ast::Ident) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprField(expr, ident, Vec::new()))
}
- fn expr_addr_of(&self, sp: Span, e: @ast::Expr) -> @ast::Expr {
+ fn expr_addr_of(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprAddrOf(ast::MutImmutable, e))
}
- fn expr_mut_addr_of(&self, sp: Span, e: @ast::Expr) -> @ast::Expr {
+ fn expr_mut_addr_of(&self, sp: Span, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprAddrOf(ast::MutMutable, e))
}
- fn expr_call(&self, span: Span, expr: @ast::Expr, args: Vec<@ast::Expr> ) -> @ast::Expr {
+ fn expr_call(&self, span: Span, expr: Gc<ast::Expr>,
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
self.expr(span, ast::ExprCall(expr, args))
}
- fn expr_call_ident(&self, span: Span, id: ast::Ident, args: Vec<@ast::Expr> ) -> @ast::Expr {
+ fn expr_call_ident(&self, span: Span, id: ast::Ident,
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
self.expr(span, ast::ExprCall(self.expr_ident(span, id), args))
}
fn expr_call_global(&self, sp: Span, fn_path: Vec<ast::Ident> ,
- args: Vec<@ast::Expr> ) -> @ast::Expr {
+ args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
let pathexpr = self.expr_path(self.path_global(sp, fn_path));
self.expr_call(sp, pathexpr, args)
}
fn expr_method_call(&self, span: Span,
- expr: @ast::Expr,
+ expr: Gc<ast::Expr>,
ident: ast::Ident,
- mut args: Vec<@ast::Expr> ) -> @ast::Expr {
+ mut args: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
let id = Spanned { node: ident, span: span };
args.unshift(expr);
self.expr(span, ast::ExprMethodCall(id, Vec::new(), args))
}
- fn expr_block(&self, b: P<ast::Block>) -> @ast::Expr {
+ fn expr_block(&self, b: P<ast::Block>) -> Gc<ast::Expr> {
self.expr(b.span, ast::ExprBlock(b))
}
- fn field_imm(&self, span: Span, name: Ident, e: @ast::Expr) -> ast::Field {
+ fn field_imm(&self, span: Span, name: Ident, e: Gc<ast::Expr>) -> ast::Field {
ast::Field { ident: respan(span, name), expr: e, span: span }
}
- fn expr_struct(&self, span: Span, path: ast::Path, fields: Vec<ast::Field> ) -> @ast::Expr {
+ fn expr_struct(&self, span: Span, path: ast::Path, fields: Vec<ast::Field> ) -> Gc<ast::Expr> {
self.expr(span, ast::ExprStruct(path, fields, None))
}
fn expr_struct_ident(&self, span: Span,
- id: ast::Ident, fields: Vec<ast::Field> ) -> @ast::Expr {
+ id: ast::Ident, fields: Vec<ast::Field> ) -> Gc<ast::Expr> {
self.expr_struct(span, self.path_ident(span, id), fields)
}
- fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> @ast::Expr {
- self.expr(sp, ast::ExprLit(@respan(sp, lit)))
+ fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> Gc<ast::Expr> {
+ self.expr(sp, ast::ExprLit(box(GC) respan(sp, lit)))
}
- fn expr_uint(&self, span: Span, i: uint) -> @ast::Expr {
+ fn expr_uint(&self, span: Span, i: uint) -> Gc<ast::Expr> {
self.expr_lit(span, ast::LitUint(i as u64, ast::TyU))
}
- fn expr_int(&self, sp: Span, i: int) -> @ast::Expr {
+ fn expr_int(&self, sp: Span, i: int) -> Gc<ast::Expr> {
self.expr_lit(sp, ast::LitInt(i as i64, ast::TyI))
}
- fn expr_u8(&self, sp: Span, u: u8) -> @ast::Expr {
+ fn expr_u8(&self, sp: Span, u: u8) -> Gc<ast::Expr> {
self.expr_lit(sp, ast::LitUint(u as u64, ast::TyU8))
}
- fn expr_bool(&self, sp: Span, value: bool) -> @ast::Expr {
+ fn expr_bool(&self, sp: Span, value: bool) -> Gc<ast::Expr> {
self.expr_lit(sp, ast::LitBool(value))
}
- fn expr_vstore(&self, sp: Span, expr: @ast::Expr, vst: ast::ExprVstore) -> @ast::Expr {
+ fn expr_vstore(&self, sp: Span, expr: Gc<ast::Expr>, vst: ast::ExprVstore) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprVstore(expr, vst))
}
- fn expr_vec(&self, sp: Span, exprs: Vec<@ast::Expr> ) -> @ast::Expr {
+ fn expr_vec(&self, sp: Span, exprs: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprVec(exprs))
}
- fn expr_vec_ng(&self, sp: Span) -> @ast::Expr {
+ fn expr_vec_ng(&self, sp: Span) -> Gc<ast::Expr> {
self.expr_call_global(sp,
vec!(self.ident_of("std"),
self.ident_of("vec"),
self.ident_of("new")),
Vec::new())
}
- fn expr_vec_slice(&self, sp: Span, exprs: Vec<@ast::Expr> ) -> @ast::Expr {
+ fn expr_vec_slice(&self, sp: Span, exprs: Vec<Gc<ast::Expr>>) -> Gc<ast::Expr> {
self.expr_vstore(sp, self.expr_vec(sp, exprs), ast::ExprVstoreSlice)
}
- fn expr_str(&self, sp: Span, s: InternedString) -> @ast::Expr {
+ fn expr_str(&self, sp: Span, s: InternedString) -> Gc<ast::Expr> {
self.expr_lit(sp, ast::LitStr(s, ast::CookedStr))
}
- fn expr_str_uniq(&self, sp: Span, s: InternedString) -> @ast::Expr {
+ fn expr_str_uniq(&self, sp: Span, s: InternedString) -> Gc<ast::Expr> {
self.expr_vstore(sp, self.expr_str(sp, s), ast::ExprVstoreUniq)
}
- fn expr_cast(&self, sp: Span, expr: @ast::Expr, ty: P<ast::Ty>) -> @ast::Expr {
+ fn expr_cast(&self, sp: Span, expr: Gc<ast::Expr>, ty: P<ast::Ty>) -> Gc<ast::Expr> {
self.expr(sp, ast::ExprCast(expr, ty))
}
- fn expr_some(&self, sp: Span, expr: @ast::Expr) -> @ast::Expr {
+ fn expr_some(&self, sp: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
let some = vec!(
self.ident_of("std"),
self.ident_of("option"),
self.expr_call_global(sp, some, vec!(expr))
}
- fn expr_none(&self, sp: Span) -> @ast::Expr {
+ fn expr_none(&self, sp: Span) -> Gc<ast::Expr> {
let none = self.path_global(sp, vec!(
self.ident_of("std"),
self.ident_of("option"),
self.expr_path(none)
}
- fn expr_fail(&self, span: Span, msg: InternedString) -> @ast::Expr {
+ fn expr_fail(&self, span: Span, msg: InternedString) -> Gc<ast::Expr> {
let loc = self.codemap().lookup_char_pos(span.lo);
self.expr_call_global(
span,
self.expr_uint(span, loc.line)))
}
- fn expr_unreachable(&self, span: Span) -> @ast::Expr {
+ fn expr_unreachable(&self, span: Span) -> Gc<ast::Expr> {
self.expr_fail(span,
InternedString::new(
"internal error: entered unreachable code"))
}
- fn expr_ok(&self, sp: Span, expr: @ast::Expr) -> @ast::Expr {
+ fn expr_ok(&self, sp: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
let ok = vec!(
self.ident_of("std"),
self.ident_of("result"),
self.expr_call_global(sp, ok, vec!(expr))
}
- fn expr_err(&self, sp: Span, expr: @ast::Expr) -> @ast::Expr {
+ fn expr_err(&self, sp: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
let err = vec!(
self.ident_of("std"),
self.ident_of("result"),
self.expr_call_global(sp, err, vec!(expr))
}
- fn expr_try(&self, sp: Span, head: @ast::Expr) -> @ast::Expr {
+ fn expr_try(&self, sp: Span, head: Gc<ast::Expr>) -> Gc<ast::Expr> {
let ok = self.ident_of("Ok");
let ok_path = self.path_ident(sp, ok);
let err = self.ident_of("Err");
}
- fn pat(&self, span: Span, pat: ast::Pat_) -> @ast::Pat {
- @ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: span }
+ fn pat(&self, span: Span, pat: ast::Pat_) -> Gc<ast::Pat> {
+ box(GC) ast::Pat { id: ast::DUMMY_NODE_ID, node: pat, span: span }
}
- fn pat_wild(&self, span: Span) -> @ast::Pat {
+ fn pat_wild(&self, span: Span) -> Gc<ast::Pat> {
self.pat(span, ast::PatWild)
}
- fn pat_lit(&self, span: Span, expr: @ast::Expr) -> @ast::Pat {
+ fn pat_lit(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Pat> {
self.pat(span, ast::PatLit(expr))
}
- fn pat_ident(&self, span: Span, ident: ast::Ident) -> @ast::Pat {
+ fn pat_ident(&self, span: Span, ident: ast::Ident) -> Gc<ast::Pat> {
self.pat_ident_binding_mode(span, ident, ast::BindByValue(ast::MutImmutable))
}
fn pat_ident_binding_mode(&self,
span: Span,
ident: ast::Ident,
- bm: ast::BindingMode) -> @ast::Pat {
+ bm: ast::BindingMode) -> Gc<ast::Pat> {
let path = self.path_ident(span, ident);
let pat = ast::PatIdent(bm, path, None);
self.pat(span, pat)
}
- fn pat_enum(&self, span: Span, path: ast::Path, subpats: Vec<@ast::Pat> ) -> @ast::Pat {
+ fn pat_enum(&self, span: Span, path: ast::Path, subpats: Vec<Gc<ast::Pat>>) -> Gc<ast::Pat> {
let pat = ast::PatEnum(path, Some(subpats));
self.pat(span, pat)
}
fn pat_struct(&self, span: Span,
- path: ast::Path, field_pats: Vec<ast::FieldPat> ) -> @ast::Pat {
+ path: ast::Path, field_pats: Vec<ast::FieldPat> ) -> Gc<ast::Pat> {
let pat = ast::PatStruct(path, field_pats, false);
self.pat(span, pat)
}
- fn arm(&self, _span: Span, pats: Vec<@ast::Pat> , expr: @ast::Expr) -> ast::Arm {
+ fn arm(&self, _span: Span, pats: Vec<Gc<ast::Pat>>, expr: Gc<ast::Expr>) -> ast::Arm {
ast::Arm {
attrs: vec!(),
pats: pats,
self.arm(span, vec!(self.pat_wild(span)), self.expr_unreachable(span))
}
- fn expr_match(&self, span: Span, arg: @ast::Expr, arms: Vec<ast::Arm> ) -> @Expr {
+ fn expr_match(&self, span: Span, arg: Gc<ast::Expr>,
+ arms: Vec<ast::Arm>) -> Gc<Expr> {
self.expr(span, ast::ExprMatch(arg, arms))
}
fn expr_if(&self, span: Span,
- cond: @ast::Expr, then: @ast::Expr, els: Option<@ast::Expr>) -> @ast::Expr {
+ cond: Gc<ast::Expr>, then: Gc<ast::Expr>,
+ els: Option<Gc<ast::Expr>>) -> Gc<ast::Expr> {
let els = els.map(|x| self.expr_block(self.block_expr(x)));
self.expr(span, ast::ExprIf(cond, self.block_expr(then), els))
}
fn lambda_fn_decl(&self, span: Span,
- fn_decl: P<ast::FnDecl>, blk: P<ast::Block>) -> @ast::Expr {
+ fn_decl: P<ast::FnDecl>, blk: P<ast::Block>) -> Gc<ast::Expr> {
self.expr(span, ast::ExprFnBlock(fn_decl, blk))
}
- fn lambda(&self, span: Span, ids: Vec<ast::Ident> , blk: P<ast::Block>) -> @ast::Expr {
+ fn lambda(&self, span: Span, ids: Vec<ast::Ident> , blk: P<ast::Block>) -> Gc<ast::Expr> {
let fn_decl = self.fn_decl(
ids.iter().map(|id| self.arg(span, *id, self.ty_infer(span))).collect(),
self.ty_infer(span));
self.expr(span, ast::ExprFnBlock(fn_decl, blk))
}
- fn lambda0(&self, span: Span, blk: P<ast::Block>) -> @ast::Expr {
+ fn lambda0(&self, span: Span, blk: P<ast::Block>) -> Gc<ast::Expr> {
self.lambda(span, Vec::new(), blk)
}
- fn lambda1(&self, span: Span, blk: P<ast::Block>, ident: ast::Ident) -> @ast::Expr {
+ fn lambda1(&self, span: Span, blk: P<ast::Block>, ident: ast::Ident) -> Gc<ast::Expr> {
self.lambda(span, vec!(ident), blk)
}
- fn lambda_expr(&self, span: Span, ids: Vec<ast::Ident> , expr: @ast::Expr) -> @ast::Expr {
+ fn lambda_expr(&self, span: Span, ids: Vec<ast::Ident> , expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.lambda(span, ids, self.block_expr(expr))
}
- fn lambda_expr_0(&self, span: Span, expr: @ast::Expr) -> @ast::Expr {
+ fn lambda_expr_0(&self, span: Span, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
self.lambda0(span, self.block_expr(expr))
}
- fn lambda_expr_1(&self, span: Span, expr: @ast::Expr, ident: ast::Ident) -> @ast::Expr {
+ fn lambda_expr_1(&self, span: Span, expr: Gc<ast::Expr>, ident: ast::Ident) -> Gc<ast::Expr> {
self.lambda1(span, self.block_expr(expr), ident)
}
fn lambda_stmts(&self,
span: Span,
ids: Vec<ast::Ident>,
- stmts: Vec<@ast::Stmt>)
- -> @ast::Expr {
+ stmts: Vec<Gc<ast::Stmt>>)
+ -> Gc<ast::Expr> {
self.lambda(span, ids, self.block(span, stmts, None))
}
- fn lambda_stmts_0(&self, span: Span, stmts: Vec<@ast::Stmt> ) -> @ast::Expr {
+ fn lambda_stmts_0(&self, span: Span,
+ stmts: Vec<Gc<ast::Stmt>>) -> Gc<ast::Expr> {
self.lambda0(span, self.block(span, stmts, None))
}
- fn lambda_stmts_1(&self, span: Span, stmts: Vec<@ast::Stmt> , ident: ast::Ident) -> @ast::Expr {
+ fn lambda_stmts_1(&self, span: Span, stmts: Vec<Gc<ast::Stmt>>,
+ ident: ast::Ident) -> Gc<ast::Expr> {
self.lambda1(span, self.block(span, stmts, None), ident)
}
}
fn item(&self, span: Span,
- name: Ident, attrs: Vec<ast::Attribute> , node: ast::Item_) -> @ast::Item {
+ name: Ident, attrs: Vec<ast::Attribute>,
+ node: ast::Item_) -> Gc<ast::Item> {
// FIXME: Would be nice if our generated code didn't violate
// Rust coding conventions
- @ast::Item { ident: name,
+ box(GC) ast::Item { ident: name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: node,
inputs: Vec<ast::Arg> ,
output: P<ast::Ty>,
generics: Generics,
- body: P<ast::Block>) -> @ast::Item {
+ body: P<ast::Block>) -> Gc<ast::Item> {
self.item(span,
name,
Vec::new(),
inputs: Vec<ast::Arg> ,
output: P<ast::Ty>,
body: P<ast::Block>
- ) -> @ast::Item {
+ ) -> Gc<ast::Item> {
self.item_fn_poly(
span,
name,
fn item_enum_poly(&self, span: Span, name: Ident,
enum_definition: ast::EnumDef,
- generics: Generics) -> @ast::Item {
+ generics: Generics) -> Gc<ast::Item> {
self.item(span, name, Vec::new(), ast::ItemEnum(enum_definition, generics))
}
fn item_enum(&self, span: Span, name: Ident,
- enum_definition: ast::EnumDef) -> @ast::Item {
+ enum_definition: ast::EnumDef) -> Gc<ast::Item> {
self.item_enum_poly(span, name, enum_definition,
ast_util::empty_generics())
}
fn item_struct(&self, span: Span, name: Ident,
- struct_def: ast::StructDef) -> @ast::Item {
+ struct_def: ast::StructDef) -> Gc<ast::Item> {
self.item_struct_poly(
span,
name,
}
fn item_struct_poly(&self, span: Span, name: Ident,
- struct_def: ast::StructDef, generics: Generics) -> @ast::Item {
- self.item(span, name, Vec::new(), ast::ItemStruct(@struct_def, generics))
+ struct_def: ast::StructDef, generics: Generics) -> Gc<ast::Item> {
+ self.item(span, name, Vec::new(), ast::ItemStruct(box(GC) struct_def, generics))
}
fn item_mod(&self, span: Span, inner_span: Span, name: Ident,
attrs: Vec<ast::Attribute> ,
vi: Vec<ast::ViewItem> ,
- items: Vec<@ast::Item> ) -> @ast::Item {
+ items: Vec<Gc<ast::Item>>) -> Gc<ast::Item> {
self.item(
span,
name,
}
fn item_ty_poly(&self, span: Span, name: Ident, ty: P<ast::Ty>,
- generics: Generics) -> @ast::Item {
+ generics: Generics) -> Gc<ast::Item> {
self.item(span, name, Vec::new(), ast::ItemTy(ty, generics))
}
- fn item_ty(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> @ast::Item {
+ fn item_ty(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> Gc<ast::Item> {
self.item_ty_poly(span, name, ty, ast_util::empty_generics())
}
- fn attribute(&self, sp: Span, mi: @ast::MetaItem) -> ast::Attribute {
+ fn attribute(&self, sp: Span, mi: Gc<ast::MetaItem>) -> ast::Attribute {
respan(sp, ast::Attribute_ {
id: attr::mk_attr_id(),
style: ast::AttrOuter,
})
}
- fn meta_word(&self, sp: Span, w: InternedString) -> @ast::MetaItem {
- @respan(sp, ast::MetaWord(w))
+ fn meta_word(&self, sp: Span, w: InternedString) -> Gc<ast::MetaItem> {
+ box(GC) respan(sp, ast::MetaWord(w))
}
fn meta_list(&self,
sp: Span,
name: InternedString,
- mis: Vec<@ast::MetaItem> )
- -> @ast::MetaItem {
- @respan(sp, ast::MetaList(name, mis))
+ mis: Vec<Gc<ast::MetaItem>>)
+ -> Gc<ast::MetaItem> {
+ box(GC) respan(sp, ast::MetaList(name, mis))
}
fn meta_name_value(&self,
sp: Span,
name: InternedString,
value: ast::Lit_)
- -> @ast::MetaItem {
- @respan(sp, ast::MetaNameValue(name, respan(sp, value)))
+ -> Gc<ast::MetaItem> {
+ box(GC) respan(sp, ast::MetaNameValue(name, respan(sp, value)))
}
fn view_use(&self, sp: Span,
- vis: ast::Visibility, vp: @ast::ViewPath) -> ast::ViewItem {
+ vis: ast::Visibility, vp: Gc<ast::ViewPath>) -> ast::ViewItem {
ast::ViewItem {
node: ast::ViewItemUse(vp),
attrs: Vec::new(),
fn view_use_simple_(&self, sp: Span, vis: ast::Visibility,
ident: ast::Ident, path: ast::Path) -> ast::ViewItem {
self.view_use(sp, vis,
- @respan(sp,
+ box(GC) respan(sp,
ast::ViewPathSimple(ident,
path,
ast::DUMMY_NODE_ID)))
}).collect();
self.view_use(sp, vis,
- @respan(sp,
+ box(GC) respan(sp,
ast::ViewPathList(self.path(sp, path),
imports,
ast::DUMMY_NODE_ID)))
fn view_use_glob(&self, sp: Span,
vis: ast::Visibility, path: Vec<ast::Ident> ) -> ast::ViewItem {
self.view_use(sp, vis,
- @respan(sp,
+ box(GC) respan(sp,
ast::ViewPathGlob(self.path(sp, path), ast::DUMMY_NODE_ID)))
}
}
fn duplicate(&self, cx: &ExtCtxt) -> Self;
}
-impl Duplicate for @ast::Expr {
- fn duplicate(&self, _: &ExtCtxt) -> @ast::Expr {
+impl Duplicate for Gc<ast::Expr> {
+ fn duplicate(&self, _: &ExtCtxt) -> Gc<ast::Expr> {
let mut folder = Duplicator;
folder.fold_expr(*self)
}
}
let res = str_to_ident(res_str.as_slice());
- let e = @ast::Expr {
+ let e = box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(
ast::Path {
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
+use std::gc::Gc;
+
pub fn expand_deriving_bound(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let name = match mitem.node {
MetaWord(ref tname) => {
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_clone(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let trait_def = TraitDef {
fn cs_clone(
name: &str,
cx: &mut ExtCtxt, trait_span: Span,
- substr: &Substructure) -> @Expr {
+ substr: &Substructure) -> Gc<Expr> {
let clone_ident = substr.method_ident;
let ctor_ident;
let all_fields;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_eq(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
// structures are equal if all fields are equal, and non equal, if
// any fields are not equal or if the enum variants are different
- fn cs_eq(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> @Expr {
+ fn cs_eq(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> Gc<Expr> {
cs_and(|cx, span, _, _| cx.expr_bool(span, false),
cx, span, substr)
}
- fn cs_ne(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> @Expr {
+ fn cs_ne(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> Gc<Expr> {
cs_or(|cx, span, _, _| cx.expr_bool(span, true),
cx, span, substr)
}
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_ord(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
macro_rules! md (
($name:expr, $op:expr, $equal:expr) => { {
let inline = cx.meta_word(span, InternedString::new("inline"));
}
/// Strict inequality.
-fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> @Expr {
+fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt, span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let op = if less {ast::BiLt} else {ast::BiGt};
cs_fold(
false, // need foldr,
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_totaleq(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
- fn cs_total_eq_assert(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> @Expr {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
+ fn cs_total_eq_assert(cx: &mut ExtCtxt, span: Span,
+ substr: &Substructure) -> Gc<Expr> {
cs_same_method(|cx, span, exprs| {
// create `a.<method>(); b.<method>(); c.<method>(); ...`
// (where method is `assert_receiver_is_total_eq`)
use parse::token::InternedString;
use std::cmp::{Ordering, Equal, Less, Greater};
+use std::gc::Gc;
pub fn expand_deriving_totalord(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let trait_def = TraitDef {
}
pub fn cs_cmp(cx: &mut ExtCtxt, span: Span,
- substr: &Substructure) -> @Expr {
+ substr: &Substructure) -> Gc<Expr> {
let test_id = cx.ident_of("__test");
let equals_path = ordering_const(cx, span, Equal);
use parse::token::InternedString;
use parse::token;
+use std::gc::Gc;
+
pub fn expand_deriving_decodable(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
}
fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
- substr: &Substructure) -> @Expr {
+ substr: &Substructure) -> Gc<Expr> {
let decoder = substr.nonself_args[0];
let recurse = vec!(cx.ident_of("serialize"),
cx.ident_of("Decodable"),
trait_span: Span,
outer_pat_ident: Ident,
fields: &StaticFields,
- getarg: |&mut ExtCtxt, Span, InternedString, uint| -> @Expr)
- -> @Expr {
+ getarg: |&mut ExtCtxt, Span, InternedString, uint| -> Gc<Expr>)
+ -> Gc<Expr> {
match *fields {
Unnamed(ref fields) => {
if fields.is_empty() {
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_default(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let trait_def = TraitDef {
trait_def.expand(cx, mitem, item, push)
}
-fn default_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> @Expr {
+fn default_substructure(cx: &mut ExtCtxt, trait_span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let default_ident = vec!(
cx.ident_of("std"),
cx.ident_of("default"),
use ext::deriving::generic::ty::*;
use parse::token;
+use std::gc::Gc;
+
pub fn expand_deriving_encodable(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
}
fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
- substr: &Substructure) -> @Expr {
+ substr: &Substructure) -> Gc<Expr> {
let encoder = substr.nonself_args[0];
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_e");
*/
use std::cell::RefCell;
+use std::gc::Gc;
use ast;
use ast::{P, EnumDef, Expr, Ident, Generics, StructDef};
/// ident of the method
pub method_ident: Ident,
/// dereferenced access to any Self or Ptr(Self, _) arguments
- pub self_args: &'a [@Expr],
+ pub self_args: &'a [Gc<Expr>],
/// verbatim access to any other arguments
- pub nonself_args: &'a [@Expr],
+ pub nonself_args: &'a [Gc<Expr>],
pub fields: &'a SubstructureFields<'a>
}
pub name: Option<Ident>,
/// The expression corresponding to this field of `self`
/// (specifically, a reference to it).
- pub self_: @Expr,
+ pub self_: Gc<Expr>,
/// The expressions corresponding to references to this field in
/// the other Self arguments.
- pub other: Vec<@Expr>,
+ pub other: Vec<Gc<Expr>>,
}
/// Fields for a static method
pub enum StaticFields {
/// Tuple structs/enum variants like this
- Unnamed(Vec<Span> ),
+ Unnamed(Vec<Span>),
/// Normal structs/struct variants.
- Named(Vec<(Ident, Span)> )
+ Named(Vec<(Ident, Span)>),
}
/// A summary of the possible sets of fields. See above for details
/// and examples
pub enum SubstructureFields<'a> {
- Struct(Vec<FieldInfo> ),
+ Struct(Vec<FieldInfo>),
/**
Matching variants of the enum: variant index, ast::Variant,
fields: the field name is only non-`None` in the case of a struct
variant.
*/
- EnumMatching(uint, &'a ast::Variant, Vec<FieldInfo> ),
+ EnumMatching(uint, &'a ast::Variant, Vec<FieldInfo>),
/**
non-matching variants of the enum, [(variant index, ast::Variant,
[field span, field ident, fields])] \(i.e. all fields for self are in the
first tuple, for other1 are in the second tuple, etc.)
*/
- EnumNonMatching(&'a [(uint, P<ast::Variant>, Vec<(Span, Option<Ident>, @Expr)> )]),
+ EnumNonMatching(&'a [(uint, P<ast::Variant>,
+ Vec<(Span, Option<Ident>, Gc<Expr>)>)]),
/// A static method where Self is a struct.
StaticStruct(&'a ast::StructDef, StaticFields),
/// A static method where Self is an enum.
- StaticEnum(&'a ast::EnumDef, Vec<(Ident, Span, StaticFields)> )
+ StaticEnum(&'a ast::EnumDef, Vec<(Ident, Span, StaticFields)>),
}
all the fields of all the structures, see above for details.
*/
pub type CombineSubstructureFunc<'a> =
- |&mut ExtCtxt, Span, &Substructure|: 'a -> @Expr;
+ |&mut ExtCtxt, Span, &Substructure|: 'a -> Gc<Expr>;
/**
Deal with non-matching enum variants, the arguments are a list
pub type EnumNonMatchFunc<'a> =
|&mut ExtCtxt,
Span,
- &[(uint, P<ast::Variant>, Vec<(Span, Option<Ident>, @Expr)> )],
- &[@Expr]|: 'a
- -> @Expr;
+ &[(uint, P<ast::Variant>, Vec<(Span, Option<Ident>, Gc<Expr>)>)],
+ &[Gc<Expr>]|: 'a
+ -> Gc<Expr>;
pub fn combine_substructure<'a>(f: CombineSubstructureFunc<'a>)
-> RefCell<CombineSubstructureFunc<'a>> {
impl<'a> TraitDef<'a> {
pub fn expand(&self,
cx: &mut ExtCtxt,
- _mitem: @ast::MetaItem,
- item: @ast::Item,
- push: |@ast::Item|) {
+ _mitem: Gc<ast::MetaItem>,
+ item: Gc<ast::Item>,
+ push: |Gc<ast::Item>|) {
let newitem = match item.node {
- ast::ItemStruct(struct_def, ref generics) => {
+ ast::ItemStruct(ref struct_def, ref generics) => {
self.expand_struct_def(cx,
- struct_def,
+ &**struct_def,
item.ident,
generics)
}
_ => false,
}
}).map(|a| a.clone()));
- push(@ast::Item {
+ push(box(GC) ast::Item {
attrs: attrs,
..(*newitem).clone()
})
cx: &mut ExtCtxt,
type_ident: Ident,
generics: &Generics,
- methods: Vec<@ast::Method> ) -> @ast::Item {
+ methods: Vec<Gc<ast::Method>>) -> Gc<ast::Item> {
let trait_path = self.path.to_path(cx, self.span, type_ident, generics);
let Generics { mut lifetimes, ty_params } =
// Just mark it now since we know that it'll end up used downstream
attr::mark_used(&attr);
let opt_trait_ref = Some(trait_ref);
- let ident = ast_util::impl_pretty_name(&opt_trait_ref, self_type);
+ let ident = ast_util::impl_pretty_name(&opt_trait_ref, &*self_type);
cx.item(
self.span,
ident,
cx: &mut ExtCtxt,
struct_def: &StructDef,
type_ident: Ident,
- generics: &Generics) -> @ast::Item {
+ generics: &Generics) -> Gc<ast::Item> {
let methods = self.methods.iter().map(|method_def| {
let (explicit_self, self_args, nonself_args, tys) =
method_def.split_self_nonself_args(
cx: &mut ExtCtxt,
enum_def: &EnumDef,
type_ident: Ident,
- generics: &Generics) -> @ast::Item {
+ generics: &Generics) -> Gc<ast::Item> {
let methods = self.methods.iter().map(|method_def| {
let (explicit_self, self_args, nonself_args, tys) =
method_def.split_self_nonself_args(cx, self,
cx: &mut ExtCtxt,
trait_: &TraitDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr],
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>],
fields: &SubstructureFields)
- -> @Expr {
+ -> Gc<Expr> {
let substructure = Substructure {
type_ident: type_ident,
method_ident: cx.ident_of(self.name),
trait_: &TraitDef,
type_ident: Ident,
generics: &Generics)
- -> (ast::ExplicitSelf, Vec<@Expr> , Vec<@Expr> , Vec<(Ident, P<ast::Ty>)> ) {
+ -> (ast::ExplicitSelf, Vec<Gc<Expr>>, Vec<Gc<Expr>>,
+ Vec<(Ident, P<ast::Ty>)>) {
let mut self_args = Vec::new();
let mut nonself_args = Vec::new();
generics: &Generics,
explicit_self: ast::ExplicitSelf,
arg_types: Vec<(Ident, P<ast::Ty>)> ,
- body: @Expr) -> @ast::Method {
+ body: Gc<Expr>) -> Gc<ast::Method> {
// create the generics that aren't for Self
let fn_generics = self.generics.to_generics(cx, trait_.span, type_ident, generics);
let body_block = cx.block_expr(body);
// Create the method.
- @ast::Method {
+ box(GC) ast::Method {
ident: method_ident,
attrs: self.attributes.clone(),
generics: fn_generics,
trait_: &TraitDef,
struct_def: &StructDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr])
- -> @Expr {
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>])
+ -> Gc<Expr> {
let mut raw_fields = Vec::new(); // ~[[fields of self],
// [fields of next Self arg], [etc]]
trait_: &TraitDef,
struct_def: &StructDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr])
- -> @Expr {
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>])
+ -> Gc<Expr> {
let summary = trait_.summarise_struct(cx, struct_def);
self.call_substructure_method(cx,
trait_: &TraitDef,
enum_def: &EnumDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr])
- -> @Expr {
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>])
+ -> Gc<Expr> {
let mut matches = Vec::new();
self.build_enum_match(cx, trait_, enum_def, type_ident,
self_args, nonself_args,
trait_: &TraitDef,
enum_def: &EnumDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr],
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>],
matching: Option<uint>,
matches_so_far: &mut Vec<(uint, P<ast::Variant>,
- Vec<(Span, Option<Ident>, @Expr)> )> ,
- match_count: uint) -> @Expr {
+ Vec<(Span, Option<Ident>, Gc<Expr>)>)> ,
+ match_count: uint) -> Gc<Expr> {
if match_count == self_args.len() {
// we've matched against all arguments, so make the final
// expression at the bottom of the match tree
other: (*other).clone()
}
}).collect();
- EnumMatching(variant_index, variant, field_tuples)
+ EnumMatching(variant_index, &*variant, field_tuples)
}
None => {
EnumNonMatching(matches_so_far.as_slice())
let variant = *enum_def.variants.get(index);
let (pattern, idents) = trait_.create_enum_variant_pattern(
cx,
- variant,
+ &*variant,
current_match_str.as_slice(),
ast::MutImmutable);
let (pattern, idents) =
trait_.create_enum_variant_pattern(
cx,
- variant,
+ &*variant,
current_match_str.as_slice(),
ast::MutImmutable);
trait_: &TraitDef,
enum_def: &EnumDef,
type_ident: Ident,
- self_args: &[@Expr],
- nonself_args: &[@Expr])
- -> @Expr {
+ self_args: &[Gc<Expr>],
+ nonself_args: &[Gc<Expr>])
+ -> Gc<Expr> {
let summary = enum_def.variants.iter().map(|v| {
let ident = v.node.name;
let summary = match v.node.kind {
ast::TupleVariantKind(ref args) => {
Unnamed(args.iter().map(|va| trait_.set_expn_info(cx, va.ty.span)).collect())
}
- ast::StructVariantKind(struct_def) => {
- trait_.summarise_struct(cx, struct_def)
+ ast::StructVariantKind(ref struct_def) => {
+ trait_.summarise_struct(cx, &**struct_def)
}
};
(ident, v.span, summary)
None => cx.span_bug(self.span, "trait with empty path in generic `deriving`"),
Some(name) => *name
};
- to_set.expn_info = Some(@codemap::ExpnInfo {
+ to_set.expn_info = Some(box(GC) codemap::ExpnInfo {
call_site: to_set,
callee: codemap::NameAndSpan {
name: format!("deriving({})", trait_name).to_string(),
cx: &mut ExtCtxt,
field_paths: Vec<ast::Path> ,
mutbl: ast::Mutability)
- -> Vec<@ast::Pat> {
+ -> Vec<Gc<ast::Pat>> {
field_paths.iter().map(|path| {
cx.pat(path.span,
ast::PatIdent(ast::BindByRef(mutbl), (*path).clone(), None))
struct_def: &StructDef,
prefix: &str,
mutbl: ast::Mutability)
- -> (@ast::Pat, Vec<(Span, Option<Ident>, @Expr)> ) {
+ -> (Gc<ast::Pat>, Vec<(Span, Option<Ident>, Gc<Expr>)>) {
if struct_def.fields.is_empty() {
return (
cx.pat_ident_binding_mode(
variant: &ast::Variant,
prefix: &str,
mutbl: ast::Mutability)
- -> (@ast::Pat, Vec<(Span, Option<Ident>, @Expr)> ) {
+ -> (Gc<ast::Pat>, Vec<(Span, Option<Ident>, Gc<Expr>)> ) {
let variant_ident = variant.node.name;
match variant.node.kind {
ast::TupleVariantKind(ref variant_args) => {
(cx.pat_enum(variant.span, matching_path, subpats),
ident_expr)
}
- ast::StructVariantKind(struct_def) => {
- self.create_struct_pattern(cx, variant_ident, struct_def,
+ ast::StructVariantKind(ref struct_def) => {
+ self.create_struct_pattern(cx, variant_ident, &**struct_def,
prefix, mutbl)
}
}
left-to-right (`true`) or right-to-left (`false`).
*/
pub fn cs_fold(use_foldl: bool,
- f: |&mut ExtCtxt, Span, @Expr, @Expr, &[@Expr]| -> @Expr,
- base: @Expr,
+ f: |&mut ExtCtxt, Span, Gc<Expr>, Gc<Expr>, &[Gc<Expr>]| -> Gc<Expr>,
+ base: Gc<Expr>,
enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt,
trait_span: Span,
substructure: &Substructure)
- -> @Expr {
+ -> Gc<Expr> {
match *substructure.fields {
EnumMatching(_, _, ref all_fields) | Struct(ref all_fields) => {
if use_foldl {
~~~
*/
#[inline]
-pub fn cs_same_method(f: |&mut ExtCtxt, Span, Vec<@Expr> | -> @Expr,
+pub fn cs_same_method(f: |&mut ExtCtxt, Span, Vec<Gc<Expr>>| -> Gc<Expr>,
enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt,
trait_span: Span,
substructure: &Substructure)
- -> @Expr {
+ -> Gc<Expr> {
match *substructure.fields {
EnumMatching(_, _, ref all_fields) | Struct(ref all_fields) => {
// call self_n.method(other_1_n, other_2_n, ...)
*/
#[inline]
pub fn cs_same_method_fold(use_foldl: bool,
- f: |&mut ExtCtxt, Span, @Expr, @Expr| -> @Expr,
- base: @Expr,
+ f: |&mut ExtCtxt, Span, Gc<Expr>, Gc<Expr>| -> Gc<Expr>,
+ base: Gc<Expr>,
enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt,
trait_span: Span,
substructure: &Substructure)
- -> @Expr {
+ -> Gc<Expr> {
cs_same_method(
|cx, span, vals| {
if use_foldl {
on all the fields.
*/
#[inline]
-pub fn cs_binop(binop: ast::BinOp, base: @Expr,
+pub fn cs_binop(binop: ast::BinOp, base: Gc<Expr>,
enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt, trait_span: Span,
- substructure: &Substructure) -> @Expr {
+ substructure: &Substructure) -> Gc<Expr> {
cs_same_method_fold(
true, // foldl is good enough
|cx, span, old, new| {
#[inline]
pub fn cs_or(enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt, span: Span,
- substructure: &Substructure) -> @Expr {
+ substructure: &Substructure) -> Gc<Expr> {
cs_binop(ast::BiOr, cx.expr_bool(span, false),
enum_nonmatch_f,
cx, span, substructure)
#[inline]
pub fn cs_and(enum_nonmatch_f: EnumNonMatchFunc,
cx: &mut ExtCtxt, span: Span,
- substructure: &Substructure) -> @Expr {
+ substructure: &Substructure) -> Gc<Expr> {
cs_binop(ast::BiAnd, cx.expr_bool(span, true),
enum_nonmatch_f,
cx, span, substructure)
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
+use std::gc::Gc;
/// The types of pointers
pub enum PtrTy<'a> {
/// A type. Supports pointers (except for *), Self, and literals
pub enum Ty<'a> {
Self,
- // &/Box/@ Ty
+ // &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
// mod::mod::Type<[lifetime], [Params...]>, including a plain type
// parameter, and things like `int`
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
- -> (@Expr, ast::ExplicitSelf) {
+ -> (Gc<Expr>, ast::ExplicitSelf) {
let self_path = cx.expr_self(span);
match *self_ptr {
None => {
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_hash(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let (path, generics, args) = if cx.ecfg.deriving_hash_type_parameter {
(Path::new_(vec!("std", "hash", "Hash"), None,
hash_trait_def.expand(cx, mitem, item, push);
}
-fn hash_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> @Expr {
+fn hash_substructure(cx: &mut ExtCtxt, trait_span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let state_expr = match substr.nonself_args {
[state_expr] => state_expr,
_ => cx.span_bug(trait_span, "incorrect number of arguments in `deriving(Hash)`")
use ext::base::ExtCtxt;
use codemap::Span;
+use std::gc::Gc;
+
pub mod bounds;
pub mod clone;
pub mod encodable;
pub fn expand_meta_deriving(cx: &mut ExtCtxt,
_span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
match mitem.node {
MetaNameValue(_, ref l) => {
cx.span_err(l.span, "unexpected value in `deriving`");
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_from_primitive(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let trait_def = TraitDef {
trait_def.expand(cx, mitem, item, push)
}
-fn cs_from(name: &str, cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> @Expr {
+fn cs_from(name: &str, cx: &mut ExtCtxt, trait_span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let n = match substr.nonself_args {
[n] => n,
_ => cx.span_bug(trait_span, "incorrect number of arguments in `deriving(FromPrimitive)`")
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
+use std::gc::Gc;
+
pub fn expand_deriving_rand(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
trait_def.expand(cx, mitem, item, push)
}
-fn rand_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> @Expr {
+fn rand_substructure(cx: &mut ExtCtxt, trait_span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let rng = match substr.nonself_args {
[rng] => vec!( rng ),
_ => cx.bug("Incorrect number of arguments to `rand` in `deriving(Rand)`")
trait_span: Span,
ctor_ident: Ident,
summary: &StaticFields,
- rand_call: |&mut ExtCtxt, Span| -> @Expr)
- -> @Expr {
+ rand_call: |&mut ExtCtxt, Span| -> Gc<Expr>)
+ -> Gc<Expr> {
match *summary {
Unnamed(ref fields) => {
if fields.is_empty() {
use std::collections::HashMap;
use std::string::String;
+use std::gc::Gc;
pub fn expand_deriving_show(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
// &mut ::std::fmt::Formatter
let fmtr = Ptr(box Literal(Path::new(vec!("std", "fmt", "Formatter"))),
Borrowed(None, ast::MutMutable));
// we construct a format string and then defer to std::fmt, since that
// knows what's up with formatting at so on.
fn show_substructure(cx: &mut ExtCtxt, span: Span,
- substr: &Substructure) -> @Expr {
+ substr: &Substructure) -> Gc<Expr> {
// build `<name>`, `<name>({}, {}, ...)` or `<name> { <field>: {},
// <field>: {}, ... }` based on the "shape".
//
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
+use std::gc::Gc;
+
pub fn expand_deriving_zero(cx: &mut ExtCtxt,
span: Span,
- mitem: @MetaItem,
- item: @Item,
- push: |@Item|) {
+ mitem: Gc<MetaItem>,
+ item: Gc<Item>,
+ push: |Gc<Item>|) {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let trait_def = TraitDef {
trait_def.expand(cx, mitem, item, push)
}
-fn zero_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> @Expr {
+fn zero_substructure(cx: &mut ExtCtxt, trait_span: Span,
+ substr: &Substructure) -> Gc<Expr> {
let zero_ident = vec!(
cx.ident_of("std"),
cx.ident_of("num"),
use visit::Visitor;
use util::small_vector::SmallVector;
-pub fn expand_expr(e: @ast::Expr, fld: &mut MacroExpander) -> @ast::Expr {
+use std::gc::Gc;
+
+pub fn expand_expr(e: Gc<ast::Expr>, fld: &mut MacroExpander) -> Gc<ast::Expr> {
match e.node {
// expr_mac should really be expr_ext or something; it's the
// entry-point for all syntax extensions.
fld.fold_expr(marked_after).node.clone();
fld.cx.bt_pop();
- @ast::Expr {
+ box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: fully_expanded,
span: e.span,
let value_ident = token::gensym_ident("__value");
// this is careful to use src_pat.span so that error
// messages point exact at that.
- let local = @ast::Local {
+ let local = box(GC) ast::Local {
ty: fld.cx.ty_infer(src_pat.span),
pat: src_pat,
init: Some(fld.cx.expr_ident(src_pat.span, value_ident)),
source: ast::LocalFor
};
let local = codemap::respan(src_pat.span, ast::DeclLocal(local));
- let local = @codemap::respan(span, ast::StmtDecl(@local, ast::DUMMY_NODE_ID));
+ let local = box(GC) codemap::respan(span, ast::StmtDecl(box(GC) local,
+ ast::DUMMY_NODE_ID));
// { let ...; <src_loop_block> }
let block = fld.cx.block(span, vec![local],
// in a block enclosed by loop head.
fld.extsbox.push_frame();
fld.extsbox.info().pending_renames.push(rename);
- let expanded_block = expand_block_elts(loop_block, fld);
+ let expanded_block = expand_block_elts(&*loop_block, fld);
fld.extsbox.pop_frame();
(expanded_block, Some(renamed_ident))
)
// When we enter a module, record it, for the sake of `module!`
-pub fn expand_item(it: @ast::Item, fld: &mut MacroExpander)
- -> SmallVector<@ast::Item> {
+pub fn expand_item(it: Gc<ast::Item>, fld: &mut MacroExpander)
+ -> SmallVector<Gc<ast::Item>> {
let it = expand_item_modifiers(it, fld);
let mut decorator_items = SmallVector::zero();
// we'd ideally decorator_items.push_all(expand_item(item, fld)),
// but that double-mut-borrows fld
- let mut items: SmallVector<@ast::Item> = SmallVector::zero();
+ let mut items: SmallVector<Gc<ast::Item>> = SmallVector::zero();
dec_fn(fld.cx, attr.span, attr.node.value, it,
|item| items.push(item));
decorator_items.extend(items.move_iter()
let macro_escape = contains_macro_escape(new_attrs.as_slice());
let result = with_exts_frame!(fld.extsbox,
macro_escape,
- noop_fold_item(it, fld));
+ noop_fold_item(&*it, fld));
fld.cx.mod_pop();
result
},
_ => {
- let it = @ast::Item {
+ let it = box(GC) ast::Item {
attrs: new_attrs,
..(*it).clone()
};
- noop_fold_item(it, fld)
+ noop_fold_item(&*it, fld)
}
};
new_items
}
-fn expand_item_modifiers(mut it: @ast::Item, fld: &mut MacroExpander)
- -> @ast::Item {
+fn expand_item_modifiers(mut it: Gc<ast::Item>, fld: &mut MacroExpander)
+ -> Gc<ast::Item> {
let (modifiers, attrs) = it.attrs.partitioned(|attr| {
match fld.extsbox.find(&intern(attr.name().get())) {
Some(&ItemModifier(_)) => true,
}
});
- it = @ast::Item {
+ it = box(GC) ast::Item {
attrs: attrs,
..(*it).clone()
};
// Support for item-position macro invocations, exactly the same
// logic as for expression-position macro invocations.
-pub fn expand_item_mac(it: @ast::Item, fld: &mut MacroExpander)
- -> SmallVector<@ast::Item> {
+pub fn expand_item_mac(it: Gc<ast::Item>, fld: &mut MacroExpander)
+ -> SmallVector<Gc<ast::Item>> {
let (pth, tts) = match it.node {
ItemMac(codemap::Spanned {
node: MacInvocTT(ref pth, ref tts, _),
}
// expand a stmt
-pub fn expand_stmt(s: &Stmt, fld: &mut MacroExpander) -> SmallVector<@Stmt> {
+pub fn expand_stmt(s: &Stmt, fld: &mut MacroExpander) -> SmallVector<Gc<Stmt>> {
// why the copying here and not in expand_expr?
// looks like classic changed-in-only-one-place
let (pth, tts, semi) = match s.node {
}
};
- mark_stmt(expanded,fm)
+ mark_stmt(&*expanded,fm)
}
_ => {
};
// Keep going, outside-in.
- let fully_expanded = fld.fold_stmt(marked_after);
+ let fully_expanded = fld.fold_stmt(&*marked_after);
if fully_expanded.is_empty() {
fld.cx.span_err(pth.span, "macro didn't expand to a statement");
return SmallVector::zero();
}
fld.cx.bt_pop();
- let fully_expanded: SmallVector<@Stmt> = fully_expanded.move_iter()
- .map(|s| @Spanned { span: s.span, node: s.node.clone() })
+ let fully_expanded: SmallVector<Gc<Stmt>> = fully_expanded.move_iter()
+ .map(|s| box(GC) Spanned { span: s.span, node: s.node.clone() })
.collect();
fully_expanded.move_iter().map(|s| {
match s.node {
StmtExpr(e, stmt_id) if semi => {
- @Spanned {
+ box(GC) Spanned {
span: s.span,
node: StmtSemi(e, stmt_id)
}
// expand a non-macro stmt. this is essentially the fallthrough for
// expand_stmt, above.
fn expand_non_macro_stmt(s: &Stmt, fld: &mut MacroExpander)
- -> SmallVector<@Stmt> {
+ -> SmallVector<Gc<Stmt>> {
// is it a let?
match s.node {
StmtDecl(decl, node_id) => {
// names, as well... but that should be okay, as long as
// the new names are gensyms for the old ones.
let mut name_finder = new_name_finder(Vec::new());
- name_finder.visit_pat(expanded_pat,());
+ name_finder.visit_pat(&*expanded_pat,());
// generate fresh names, push them to a new pending list
let mut new_pending_renames = Vec::new();
for ident in name_finder.ident_accumulator.iter() {
// also, don't forget to expand the init:
let new_init_opt = init.map(|e| fld.fold_expr(e));
let rewritten_local =
- @Local {
+ box(GC) Local {
ty: local.ty,
pat: rewritten_pat,
init: new_init_opt,
span: span,
source: source
};
- SmallVector::one(@Spanned {
- node: StmtDecl(@Spanned {
+ SmallVector::one(box(GC) Spanned {
+ node: StmtDecl(box(GC) Spanned {
node: DeclLocal(rewritten_local),
span: stmt_span
},
}
// visit optional subpattern of pat_ident:
for subpat in inner.iter() {
- self.visit_pat(*subpat, ())
+ self.visit_pat(&**subpat, ())
}
}
// use the default traversal for non-pat_idents
let renamed_stmt = {
let pending_renames = &mut fld.extsbox.info().pending_renames;
let mut rename_fld = renames_to_fold(pending_renames);
- rename_fld.fold_stmt(*x).expect_one("rename_fold didn't return one value")
+ rename_fld.fold_stmt(&**x).expect_one("rename_fold didn't return one value")
};
- fld.fold_stmt(renamed_stmt).move_iter()
+ fld.fold_stmt(&*renamed_stmt).move_iter()
}).collect();
let new_expr = b.expr.map(|x| {
let expr = {
})
}
-pub fn expand_pat(p: @ast::Pat, fld: &mut MacroExpander) -> @ast::Pat {
+pub fn expand_pat(p: Gc<ast::Pat>, fld: &mut MacroExpander) -> Gc<ast::Pat> {
let (pth, tts) = match p.node {
PatMac(ref mac) => {
match mac.node {
fld.fold_pat(marked_after).node.clone();
fld.cx.bt_pop();
- @ast::Pat {
+ box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: fully_expanded,
span: p.span,
}
impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
- fn fold_expr(&mut self, expr: @ast::Expr) -> @ast::Expr {
+ fn fold_expr(&mut self, expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
expand_expr(expr, self)
}
- fn fold_pat(&mut self, pat: @ast::Pat) -> @ast::Pat {
+ fn fold_pat(&mut self, pat: Gc<ast::Pat>) -> Gc<ast::Pat> {
expand_pat(pat, self)
}
- fn fold_item(&mut self, item: @ast::Item) -> SmallVector<@ast::Item> {
+ fn fold_item(&mut self, item: Gc<ast::Item>) -> SmallVector<Gc<ast::Item>> {
expand_item(item, self)
}
- fn fold_stmt(&mut self, stmt: &ast::Stmt) -> SmallVector<@ast::Stmt> {
+ fn fold_stmt(&mut self, stmt: &ast::Stmt) -> SmallVector<Gc<ast::Stmt>> {
expand_stmt(stmt, self)
}
fn fold_block(&mut self, block: P<Block>) -> P<Block> {
- expand_block(block, self)
+ expand_block(&*block, self)
}
fn new_span(&mut self, span: Span) -> Span {
}
// apply a given mark to the given expr. Used following the expansion of a macro.
-fn mark_expr(expr: @ast::Expr, m: Mrk) -> @ast::Expr {
+fn mark_expr(expr: Gc<ast::Expr>, m: Mrk) -> Gc<ast::Expr> {
new_mark_folder(m).fold_expr(expr)
}
// apply a given mark to the given pattern. Used following the expansion of a macro.
-fn mark_pat(pat: @ast::Pat, m: Mrk) -> @ast::Pat {
+fn mark_pat(pat: Gc<ast::Pat>, m: Mrk) -> Gc<ast::Pat> {
new_mark_folder(m).fold_pat(pat)
}
// apply a given mark to the given stmt. Used following the expansion of a macro.
-fn mark_stmt(expr: &ast::Stmt, m: Mrk) -> @ast::Stmt {
+fn mark_stmt(expr: &ast::Stmt, m: Mrk) -> Gc<ast::Stmt> {
new_mark_folder(m).fold_stmt(expr)
.expect_one("marking a stmt didn't return a stmt")
}
// apply a given mark to the given item. Used following the expansion of a macro.
-fn mark_item(expr: @ast::Item, m: Mrk) -> SmallVector<@ast::Item> {
+fn mark_item(expr: Gc<ast::Item>, m: Mrk) -> SmallVector<Gc<ast::Item>> {
new_mark_folder(m).fold_item(expr)
}
-fn original_span(cx: &ExtCtxt) -> @codemap::ExpnInfo {
+fn original_span(cx: &ExtCtxt) -> Gc<codemap::ExpnInfo> {
let mut relevant_info = cx.backtrace();
let mut einfo = relevant_info.unwrap();
loop {
node: Attribute_ {
id: attr::mk_attr_id(),
style: AttrOuter,
- value: @Spanned {
+ value: box(GC) Spanned {
node: MetaWord(token::intern_and_get_ident(s)),
span: codemap::DUMMY_SP,
},
use parse = fmt_macros;
use std::collections::{HashMap, HashSet};
+use std::gc::Gc;
#[deriving(PartialEq)]
enum ArgumentType {
// Parsed argument expressions and the types that we've found so far for
// them.
- args: Vec<@ast::Expr>,
+ args: Vec<Gc<ast::Expr>>,
arg_types: Vec<Option<ArgumentType>>,
// Parsed named expressions and the types that we've found for them so far.
// Note that we keep a side-array of the ordering of the named arguments
// found to be sure that we can translate them in the same order that they
// were declared in.
- names: HashMap<String, @ast::Expr>,
+ names: HashMap<String, Gc<ast::Expr>>,
name_types: HashMap<String, ArgumentType>,
name_ordering: Vec<String>,
// Collection of the compiled `rt::Piece` structures
- pieces: Vec<@ast::Expr> ,
+ pieces: Vec<Gc<ast::Expr>>,
name_positions: HashMap<String, uint>,
- method_statics: Vec<@ast::Item> ,
+ method_statics: Vec<Gc<ast::Item>>,
// Updated as arguments are consumed or methods are entered
nest_level: uint,
}
pub enum Invocation {
- Call(@ast::Expr),
- MethodCall(@ast::Expr, ast::Ident),
+ Call(Gc<ast::Expr>),
+ MethodCall(Gc<ast::Expr>, ast::Ident),
}
/// Parses the arguments from the given list of tokens, returning None
/// named arguments))
fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
tts: &[ast::TokenTree])
- -> (Invocation, Option<(@ast::Expr, Vec<@ast::Expr>, Vec<String>,
- HashMap<String, @ast::Expr>)>) {
+ -> (Invocation, Option<(Gc<ast::Expr>, Vec<Gc<ast::Expr>>, Vec<String>,
+ HashMap<String, Gc<ast::Expr>>)>) {
let mut args = Vec::new();
- let mut names = HashMap::<String, @ast::Expr>::new();
+ let mut names = HashMap::<String, Gc<ast::Expr>>::new();
let mut order = Vec::new();
let mut p = rsparse::new_parser_from_tts(ecx.parse_sess(),
self.ecx.ident_of("rt"), self.ecx.ident_of(s))
}
- fn none(&self) -> @ast::Expr {
+ fn none(&self) -> Gc<ast::Expr> {
let none = self.ecx.path_global(self.fmtsp, vec!(
self.ecx.ident_of("std"),
self.ecx.ident_of("option"),
self.ecx.expr_path(none)
}
- fn some(&self, e: @ast::Expr) -> @ast::Expr {
+ fn some(&self, e: Gc<ast::Expr>) -> Gc<ast::Expr> {
let p = self.ecx.path_global(self.fmtsp, vec!(
self.ecx.ident_of("std"),
self.ecx.ident_of("option"),
self.ecx.expr_call(self.fmtsp, p, vec!(e))
}
- fn trans_count(&self, c: parse::Count) -> @ast::Expr {
+ fn trans_count(&self, c: parse::Count) -> Gc<ast::Expr> {
let sp = self.fmtsp;
match c {
parse::CountIs(i) => {
}
}
- fn trans_method(&mut self, method: &parse::Method) -> @ast::Expr {
+ fn trans_method(&mut self, method: &parse::Method) -> Gc<ast::Expr> {
let sp = self.fmtsp;
let method = match *method {
parse::Select(ref arms, ref default) => {
}
/// Translate a `parse::Piece` to a static `rt::Piece`
- fn trans_piece(&mut self, piece: &parse::Piece) -> @ast::Expr {
+ fn trans_piece(&mut self, piece: &parse::Piece) -> Gc<ast::Expr> {
let sp = self.fmtsp;
match *piece {
parse::String(s) => {
/// Actually builds the expression which the iformat! block will be expanded
/// to
- fn to_expr(&self, invocation: Invocation) -> @ast::Expr {
+ fn to_expr(&self, invocation: Invocation) -> Gc<ast::Expr> {
let mut lets = Vec::new();
let mut locals = Vec::new();
let mut names = Vec::from_fn(self.name_positions.len(), |_| None);
// First, declare all of our methods that are statics
for &method in self.method_statics.iter() {
let decl = respan(self.fmtsp, ast::DeclItem(method));
- lets.push(@respan(self.fmtsp,
- ast::StmtDecl(@decl, ast::DUMMY_NODE_ID)));
+ lets.push(box(GC) respan(self.fmtsp,
+ ast::StmtDecl(box(GC) decl, ast::DUMMY_NODE_ID)));
}
// Next, build up the static array which will become our precompiled
let item = self.ecx.item(self.fmtsp, static_name,
self.static_attrs(), st);
let decl = respan(self.fmtsp, ast::DeclItem(item));
- lets.push(@respan(self.fmtsp, ast::StmtDecl(@decl, ast::DUMMY_NODE_ID)));
+ lets.push(box(GC) respan(self.fmtsp,
+ ast::StmtDecl(box(GC) decl, ast::DUMMY_NODE_ID)));
// Right now there is a bug such that for the expression:
// foo(bar(&1))
self.ecx.expr_match(self.fmtsp, head, vec!(arm))
}
- fn format_arg(&self, sp: Span, argno: Position, arg: @ast::Expr)
- -> @ast::Expr {
+ fn format_arg(&self, sp: Span, argno: Position, arg: Gc<ast::Expr>)
+ -> Gc<ast::Expr> {
let ty = match argno {
Exact(ref i) => self.arg_types.get(*i).get_ref(),
Named(ref s) => self.name_types.get(s)
/// expression.
pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt, sp: Span,
invocation: Invocation,
- efmt: @ast::Expr, args: Vec<@ast::Expr>,
+ efmt: Gc<ast::Expr>,
+ args: Vec<Gc<ast::Expr>>,
name_ordering: Vec<String>,
- names: HashMap<String, @ast::Expr>) -> @ast::Expr {
+ names: HashMap<String, Gc<ast::Expr>>)
+ -> Gc<ast::Expr>
+{
let arg_types = Vec::from_fn(args.len(), |_| None);
let mut cx = Context {
ecx: ecx,
use parse::token;
use parse;
+use std::gc::Gc;
/**
*
pub use parse::new_parser_from_tts;
pub use codemap::{BytePos, Span, dummy_spanned};
+ use std::gc::Gc;
+
pub trait ToTokens {
fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> ;
}
}
}
- impl ToSource for @ast::Item {
+ impl ToSource for Gc<ast::Item> {
fn to_source(&self) -> String {
- pprust::item_to_str(*self)
+ pprust::item_to_str(&**self)
}
}
- impl<'a> ToSource for &'a [@ast::Item] {
+ impl<'a> ToSource for &'a [Gc<ast::Item>] {
fn to_source(&self) -> String {
self.iter()
.map(|i| i.to_source())
}
}
- impl ToSource for @ast::Expr {
+ impl ToSource for Gc<ast::Expr> {
fn to_source(&self) -> String {
- pprust::expr_to_str(*self)
+ pprust::expr_to_str(&**self)
}
}
)
impl_to_tokens!(ast::Ident)
- impl_to_tokens!(@ast::Item)
- impl_to_tokens_self!(&'a [@ast::Item])
+ impl_to_tokens!(Gc<ast::Item>)
+ impl_to_tokens_self!(&'a [Gc<ast::Item>])
impl_to_tokens!(ast::Ty)
impl_to_tokens_self!(&'a [ast::Ty])
impl_to_tokens!(Generics)
- impl_to_tokens!(@ast::Expr)
+ impl_to_tokens!(Gc<ast::Expr>)
impl_to_tokens!(ast::Block)
impl_to_tokens!(ast::Arg)
impl_to_tokens_self!(&'a str)
impl_to_tokens!(u64)
pub trait ExtParseUtils {
- fn parse_item(&self, s: String) -> @ast::Item;
- fn parse_expr(&self, s: String) -> @ast::Expr;
- fn parse_stmt(&self, s: String) -> @ast::Stmt;
+ fn parse_item(&self, s: String) -> Gc<ast::Item>;
+ fn parse_expr(&self, s: String) -> Gc<ast::Expr>;
+ fn parse_stmt(&self, s: String) -> Gc<ast::Stmt>;
fn parse_tts(&self, s: String) -> Vec<ast::TokenTree> ;
}
impl<'a> ExtParseUtils for ExtCtxt<'a> {
- fn parse_item(&self, s: String) -> @ast::Item {
+ fn parse_item(&self, s: String) -> Gc<ast::Item> {
let res = parse::parse_item_from_source_str(
"<quote expansion>".to_string(),
s,
}
}
- fn parse_stmt(&self, s: String) -> @ast::Stmt {
+ fn parse_stmt(&self, s: String) -> Gc<ast::Stmt> {
parse::parse_stmt_from_source_str("<quote expansion>".to_string(),
s,
self.cfg(),
self.parse_sess())
}
- fn parse_expr(&self, s: String) -> @ast::Expr {
+ fn parse_expr(&self, s: String) -> Gc<ast::Expr> {
parse::parse_expr_from_source_str("<quote expansion>".to_string(),
s,
self.cfg(),
}
// Lift an ident to the expr that evaluates to that ident.
-fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> @ast::Expr {
+fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> Gc<ast::Expr> {
let e_str = cx.expr_str(sp, token::get_ident(ident));
cx.expr_method_call(sp,
cx.expr_ident(sp, id_ext("ext_cx")),
vec!(e_str))
}
-fn mk_ast_path(cx: &ExtCtxt, sp: Span, name: &str) -> @ast::Expr {
+fn mk_ast_path(cx: &ExtCtxt, sp: Span, name: &str) -> Gc<ast::Expr> {
let idents = vec!(id_ext("syntax"), id_ext("ast"), id_ext(name));
cx.expr_path(cx.path_global(sp, idents))
}
-fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> @ast::Expr {
+fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> Gc<ast::Expr> {
let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("token"), id_ext(name));
cx.expr_path(cx.path_global(sp, idents))
}
-fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> @ast::Expr {
+fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> Gc<ast::Expr> {
let name = match bop {
PLUS => "PLUS",
MINUS => "MINUS",
mk_token_path(cx, sp, name)
}
-fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> @ast::Expr {
+fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> Gc<ast::Expr> {
match *tok {
BINOP(binop) => {
mk_token_path(cx, sp, name)
}
-fn mk_tt(cx: &ExtCtxt, sp: Span, tt: &ast::TokenTree) -> Vec<@ast::Stmt> {
+
+fn mk_tt(cx: &ExtCtxt, sp: Span, tt: &ast::TokenTree) -> Vec<Gc<ast::Stmt>> {
match *tt {
ast::TTTok(sp, ref tok) => {
let e_sp = cx.expr_ident(sp, id_ext("_sp"));
}
fn mk_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
- -> Vec<@ast::Stmt> {
+ -> Vec<Gc<ast::Stmt>> {
let mut ss = Vec::new();
for tt in tts.iter() {
ss.push_all_move(mk_tt(cx, sp, tt));
}
fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
- -> (@ast::Expr, @ast::Expr) {
+ -> (Gc<ast::Expr>, Gc<ast::Expr>) {
// NB: It appears that the main parser loses its mind if we consider
// $foo as a TTNonterminal during the main parse, so we have to re-parse
// under quote_depth > 0. This is silly and should go away; the _guess_ is
fn expand_wrapper(cx: &ExtCtxt,
sp: Span,
- cx_expr: @ast::Expr,
- expr: @ast::Expr) -> @ast::Expr {
+ cx_expr: Gc<ast::Expr>,
+ expr: Gc<ast::Expr>) -> Gc<ast::Expr> {
let uses = [
&["syntax", "ext", "quote", "rt"],
].iter().map(|path| {
fn expand_parse_call(cx: &ExtCtxt,
sp: Span,
parse_method: &str,
- arg_exprs: Vec<@ast::Expr> ,
- tts: &[ast::TokenTree]) -> @ast::Expr {
+ arg_exprs: Vec<Gc<ast::Expr>>,
+ tts: &[ast::TokenTree]) -> Gc<ast::Expr> {
let (cx_expr, tts_expr) = expand_tts(cx, sp, tts);
let cfg_call = || cx.expr_method_call(
use parse::token;
use print::pprust;
+use std::gc::Gc;
use std::io::File;
use std::rc::Rc;
use std::str;
}
// recur along an ExpnInfo chain to find the original expression
-fn topmost_expn_info(expn_info: @codemap::ExpnInfo) -> @codemap::ExpnInfo {
+fn topmost_expn_info(expn_info: Gc<codemap::ExpnInfo>) -> Gc<codemap::ExpnInfo> {
match *expn_info {
ExpnInfo { call_site: ref call_site, .. } => {
match call_site.expn_info {
"meta" => token::NtMeta(p.parse_meta_item()),
"tt" => {
p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
- let res = token::NtTT(@p.parse_token_tree());
+ let res = token::NtTT(box(GC) p.parse_token_tree());
p.quote_depth -= 1u;
res
}
use std::cell::RefCell;
use std::rc::Rc;
+use std::gc::Gc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
}
impl<'a> MacResult for ParserAnyMacro<'a> {
- fn make_expr(&self) -> Option<@ast::Expr> {
+ fn make_expr(&self) -> Option<Gc<ast::Expr>> {
let ret = self.parser.borrow_mut().parse_expr();
self.ensure_complete_parse(true);
Some(ret)
}
- fn make_pat(&self) -> Option<@ast::Pat> {
+ fn make_pat(&self) -> Option<Gc<ast::Pat>> {
let ret = self.parser.borrow_mut().parse_pat();
self.ensure_complete_parse(false);
Some(ret)
}
- fn make_items(&self) -> Option<SmallVector<@ast::Item>> {
+ fn make_items(&self) -> Option<SmallVector<Gc<ast::Item>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
self.ensure_complete_parse(false);
Some(ret)
}
- fn make_stmt(&self) -> Option<@ast::Stmt> {
+ fn make_stmt(&self) -> Option<Gc<ast::Stmt>> {
let attrs = self.parser.borrow_mut().parse_outer_attributes();
let ret = self.parser.borrow_mut().parse_stmt(attrs);
self.ensure_complete_parse(true);
use util::small_vector::SmallVector;
use std::rc::Rc;
+use std::gc::Gc;
// We may eventually want to be able to fold over type parameters, too.
pub trait Folder {
noop_fold_crate(c, self)
}
- fn fold_meta_items(&mut self, meta_items: &[@MetaItem]) -> Vec<@MetaItem> {
+ fn fold_meta_items(&mut self, meta_items: &[Gc<MetaItem>]) -> Vec<Gc<MetaItem>> {
meta_items.iter().map(|x| fold_meta_item_(*x, self)).collect()
}
- fn fold_view_path(&mut self, view_path: @ViewPath) -> @ViewPath {
+ fn fold_view_path(&mut self, view_path: Gc<ViewPath>) -> Gc<ViewPath> {
let inner_view_path = match view_path.node {
ViewPathSimple(ref ident, ref path, node_id) => {
let id = self.new_id(node_id);
id)
}
};
- @Spanned {
+ box(GC) Spanned {
node: inner_view_path,
span: self.new_span(view_path.span),
}
noop_fold_view_item(vi, self)
}
- fn fold_foreign_item(&mut self, ni: @ForeignItem) -> @ForeignItem {
- noop_fold_foreign_item(ni, self)
+ fn fold_foreign_item(&mut self, ni: Gc<ForeignItem>) -> Gc<ForeignItem> {
+ noop_fold_foreign_item(&*ni, self)
}
- fn fold_item(&mut self, i: @Item) -> SmallVector<@Item> {
- noop_fold_item(i, self)
+ fn fold_item(&mut self, i: Gc<Item>) -> SmallVector<Gc<Item>> {
+ noop_fold_item(&*i, self)
}
fn fold_struct_field(&mut self, sf: &StructField) -> StructField {
noop_fold_type_method(m, self)
}
- fn fold_method(&mut self, m: @Method) -> @Method {
- noop_fold_method(m, self)
+ fn fold_method(&mut self, m: Gc<Method>) -> Gc<Method> {
+ noop_fold_method(&*m, self)
}
fn fold_block(&mut self, b: P<Block>) -> P<Block> {
noop_fold_block(b, self)
}
- fn fold_stmt(&mut self, s: &Stmt) -> SmallVector<@Stmt> {
+ fn fold_stmt(&mut self, s: &Stmt) -> SmallVector<Gc<Stmt>> {
noop_fold_stmt(s, self)
}
}
}
- fn fold_pat(&mut self, p: @Pat) -> @Pat {
+ fn fold_pat(&mut self, p: Gc<Pat>) -> Gc<Pat> {
noop_fold_pat(p, self)
}
- fn fold_decl(&mut self, d: @Decl) -> SmallVector<@Decl> {
+ fn fold_decl(&mut self, d: Gc<Decl>) -> SmallVector<Gc<Decl>> {
let node = match d.node {
DeclLocal(ref l) => SmallVector::one(DeclLocal(self.fold_local(*l))),
DeclItem(it) => {
};
node.move_iter().map(|node| {
- @Spanned {
+ box(GC) Spanned {
node: node,
span: self.new_span(d.span),
}
}).collect()
}
- fn fold_expr(&mut self, e: @Expr) -> @Expr {
+ fn fold_expr(&mut self, e: Gc<Expr>) -> Gc<Expr> {
noop_fold_expr(e, self)
}
TyRptr(fold_opt_lifetime(region, self), fold_mt(mt, self))
}
TyClosure(ref f, ref region) => {
- TyClosure(@ClosureTy {
+ TyClosure(box(GC) ClosureTy {
fn_style: f.fn_style,
onceness: f.onceness,
bounds: fold_opt_bounds(&f.bounds, self),
- decl: self.fold_fn_decl(f.decl),
+ decl: self.fold_fn_decl(&*f.decl),
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
}, fold_opt_lifetime(region, self))
}
TyProc(ref f) => {
- TyProc(@ClosureTy {
+ TyProc(box(GC) ClosureTy {
fn_style: f.fn_style,
onceness: f.onceness,
bounds: fold_opt_bounds(&f.bounds, self),
- decl: self.fold_fn_decl(f.decl),
+ decl: self.fold_fn_decl(&*f.decl),
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
})
}
TyBareFn(ref f) => {
- TyBareFn(@BareFnTy {
+ TyBareFn(box(GC) BareFnTy {
lifetimes: f.lifetimes.iter().map(|l| self.fold_lifetime(l)).collect(),
fn_style: f.fn_style,
abi: f.abi,
- decl: self.fold_fn_decl(f.decl)
+ decl: self.fold_fn_decl(&*f.decl)
})
}
TyUnboxedFn(ref f) => {
- TyUnboxedFn(@UnboxedFnTy {
- decl: self.fold_fn_decl(f.decl),
+ TyUnboxedFn(box(GC) UnboxedFnTy {
+ decl: self.fold_fn_decl(&*f.decl),
})
}
TyTup(ref tys) => TyTup(tys.iter().map(|&ty| self.fold_ty(ty)).collect()),
fold_variant_arg_(x, self)).collect())
}
StructVariantKind(ref struct_def) => {
- kind = StructVariantKind(@ast::StructDef {
+ kind = StructVariantKind(box(GC) ast::StructDef {
fields: struct_def.fields.iter()
.map(|f| self.fold_struct_field(f)).collect(),
ctor_id: struct_def.ctor_id.map(|c| self.new_id(c)),
}
}
- fn fold_local(&mut self, l: @Local) -> @Local {
+ fn fold_local(&mut self, l: Gc<Local>) -> Gc<Local> {
let id = self.new_id(l.id); // Needs to be first, for ast_map.
- @Local {
+ box(GC) Local {
id: id,
ty: self.fold_ty(l.ty),
pat: self.fold_pat(l.pat),
}
}
- fn map_exprs(&self, f: |@Expr| -> @Expr, es: &[@Expr]) -> Vec<@Expr> {
+ fn map_exprs(&self, f: |Gc<Expr>| -> Gc<Expr>,
+ es: &[Gc<Expr>]) -> Vec<Gc<Expr>> {
es.iter().map(|x| f(*x)).collect()
}
/* some little folds that probably aren't useful to have in Folder itself*/
//used in noop_fold_item and noop_fold_crate and noop_fold_crate_directive
-fn fold_meta_item_<T: Folder>(mi: @MetaItem, fld: &mut T) -> @MetaItem {
- @Spanned {
+fn fold_meta_item_<T: Folder>(mi: Gc<MetaItem>, fld: &mut T) -> Gc<MetaItem> {
+ box(GC) Spanned {
node:
match mi.node {
MetaWord(ref id) => MetaWord((*id).clone()),
StaticRegionTyParamBound => StaticRegionTyParamBound,
UnboxedFnTyParamBound(ref unboxed_function_type) => {
UnboxedFnTyParamBound(UnboxedFnTy {
- decl: fld.fold_fn_decl(unboxed_function_type.decl),
+ decl: fld.fold_fn_decl(&*unboxed_function_type.decl),
})
}
OtherRegionTyParamBound(s) => OtherRegionTyParamBound(s)
lifetimes: fold_lifetimes(&generics.lifetimes, fld)}
}
-fn fold_struct_def<T: Folder>(struct_def: @StructDef, fld: &mut T) -> @StructDef {
- @ast::StructDef {
+fn fold_struct_def<T: Folder>(struct_def: Gc<StructDef>,
+ fld: &mut T) -> Gc<StructDef> {
+ box(GC) ast::StructDef {
fields: struct_def.fields.iter().map(|f| fold_struct_field(f, fld)).collect(),
ctor_id: struct_def.ctor_id.map(|cid| fld.new_id(cid)),
super_struct: match struct_def.super_struct {
pub fn noop_fold_block<T: Folder>(b: P<Block>, folder: &mut T) -> P<Block> {
let id = folder.new_id(b.id); // Needs to be first, for ast_map.
let view_items = b.view_items.iter().map(|x| folder.fold_view_item(x)).collect();
- let stmts = b.stmts.iter().flat_map(|s| folder.fold_stmt(*s).move_iter()).collect();
+ let stmts = b.stmts.iter().flat_map(|s| folder.fold_stmt(&**s).move_iter()).collect();
P(Block {
id: id,
view_items: view_items,
}
ItemFn(decl, fn_style, abi, ref generics, body) => {
ItemFn(
- folder.fold_fn_decl(decl),
+ folder.fold_fn_decl(&*decl),
fn_style,
abi,
fold_generics(generics, folder),
ItemEnum(
ast::EnumDef {
variants: enum_definition.variants.iter().map(|&x| {
- folder.fold_variant(x)
+ folder.fold_variant(&*x)
}).collect(),
},
fold_generics(generics, folder))
ident: fld.fold_ident(m.ident),
attrs: m.attrs.iter().map(|a| fold_attribute_(*a, fld)).collect(),
fn_style: m.fn_style,
- decl: fld.fold_fn_decl(m.decl),
+ decl: fld.fold_fn_decl(&*m.decl),
generics: fold_generics(&m.generics, fld),
explicit_self: fld.fold_explicit_self(&m.explicit_self),
span: fld.new_span(m.span),
}
}
-pub fn noop_fold_item<T: Folder>(i: &Item, folder: &mut T) -> SmallVector<@Item> {
+pub fn noop_fold_item<T: Folder>(i: &Item,
+ folder: &mut T) -> SmallVector<Gc<Item>> {
let id = folder.new_id(i.id); // Needs to be first, for ast_map.
let node = folder.fold_item_underscore(&i.node);
let ident = match node {
// The node may have changed, recompute the "pretty" impl name.
ItemImpl(_, ref maybe_trait, ty, _) => {
- ast_util::impl_pretty_name(maybe_trait, ty)
+ ast_util::impl_pretty_name(maybe_trait, &*ty)
}
_ => i.ident
};
- SmallVector::one(@Item {
+ SmallVector::one(box(GC) Item {
id: id,
ident: folder.fold_ident(ident),
attrs: i.attrs.iter().map(|e| fold_attribute_(*e, folder)).collect(),
})
}
-pub fn noop_fold_foreign_item<T: Folder>(ni: &ForeignItem, folder: &mut T) -> @ForeignItem {
+pub fn noop_fold_foreign_item<T: Folder>(ni: &ForeignItem,
+ folder: &mut T) -> Gc<ForeignItem> {
let id = folder.new_id(ni.id); // Needs to be first, for ast_map.
- @ForeignItem {
+ box(GC) ForeignItem {
id: id,
ident: folder.fold_ident(ni.ident),
attrs: ni.attrs.iter().map(|x| fold_attribute_(*x, folder)).collect(),
}
}
-pub fn noop_fold_method<T: Folder>(m: &Method, folder: &mut T) -> @Method {
+pub fn noop_fold_method<T: Folder>(m: &Method, folder: &mut T) -> Gc<Method> {
let id = folder.new_id(m.id); // Needs to be first, for ast_map.
- @Method {
+ box(GC) Method {
id: id,
ident: folder.fold_ident(m.ident),
attrs: m.attrs.iter().map(|a| fold_attribute_(*a, folder)).collect(),
generics: fold_generics(&m.generics, folder),
explicit_self: folder.fold_explicit_self(&m.explicit_self),
fn_style: m.fn_style,
- decl: folder.fold_fn_decl(m.decl),
+ decl: folder.fold_fn_decl(&*m.decl),
body: folder.fold_block(m.body),
span: folder.new_span(m.span),
vis: m.vis
}
}
-pub fn noop_fold_pat<T: Folder>(p: @Pat, folder: &mut T) -> @Pat {
+pub fn noop_fold_pat<T: Folder>(p: Gc<Pat>, folder: &mut T) -> Gc<Pat> {
let id = folder.new_id(p.id);
let node = match p.node {
PatWild => PatWild,
PatMac(ref mac) => PatMac(folder.fold_mac(mac)),
};
- @Pat {
+ box(GC) Pat {
id: id,
span: folder.new_span(p.span),
node: node,
}
}
-pub fn noop_fold_expr<T: Folder>(e: @Expr, folder: &mut T) -> @Expr {
+pub fn noop_fold_expr<T: Folder>(e: Gc<Expr>, folder: &mut T) -> Gc<Expr> {
let id = folder.new_id(e.id);
let node = match e.node {
ExprVstore(e, v) => {
ExprMatch(folder.fold_expr(expr),
arms.iter().map(|x| folder.fold_arm(x)).collect())
}
- ExprFnBlock(decl, body) => {
- ExprFnBlock(folder.fold_fn_decl(decl), folder.fold_block(body))
+ ExprFnBlock(ref decl, ref body) => {
+ ExprFnBlock(folder.fold_fn_decl(&**decl),
+ folder.fold_block(body.clone()))
}
- ExprProc(decl, body) => {
- ExprProc(folder.fold_fn_decl(decl), folder.fold_block(body))
+ ExprProc(ref decl, ref body) => {
+ ExprProc(folder.fold_fn_decl(&**decl),
+ folder.fold_block(body.clone()))
}
- ExprBlock(blk) => ExprBlock(folder.fold_block(blk)),
+ ExprBlock(ref blk) => ExprBlock(folder.fold_block(blk.clone())),
ExprAssign(el, er) => {
ExprAssign(folder.fold_expr(el), folder.fold_expr(er))
}
ExprParen(ex) => ExprParen(folder.fold_expr(ex))
};
- @Expr {
+ box(GC) Expr {
id: id,
node: node,
span: folder.new_span(e.span),
}
}
-pub fn noop_fold_stmt<T: Folder>(s: &Stmt, folder: &mut T) -> SmallVector<@Stmt> {
+pub fn noop_fold_stmt<T: Folder>(s: &Stmt,
+ folder: &mut T) -> SmallVector<Gc<Stmt>> {
let nodes = match s.node {
StmtDecl(d, id) => {
let id = folder.new_id(id);
StmtMac(ref mac, semi) => SmallVector::one(StmtMac(folder.fold_mac(mac), semi))
};
- nodes.move_iter().map(|node| @Spanned {
+ nodes.move_iter().map(|node| box(GC) Spanned {
node: node,
span: folder.new_span(s.span),
}).collect()
use parse::parser::Parser;
use parse::token::INTERPOLATED;
+use std::gc::Gc;
+
// a parser that can parse attributes.
pub trait ParserAttr {
- fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> ;
+ fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>;
fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute;
fn parse_inner_attrs_and_next(&mut self)
- -> (Vec<ast::Attribute> , Vec<ast::Attribute> );
- fn parse_meta_item(&mut self) -> @ast::MetaItem;
- fn parse_meta_seq(&mut self) -> Vec<@ast::MetaItem> ;
- fn parse_optional_meta(&mut self) -> Vec<@ast::MetaItem> ;
+ -> (Vec<ast::Attribute>, Vec<ast::Attribute>);
+ fn parse_meta_item(&mut self) -> Gc<ast::MetaItem>;
+ fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>;
+ fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>>;
}
impl<'a> ParserAttr for Parser<'a> {
// matches meta_item = IDENT
// | IDENT = lit
// | IDENT meta_seq
- fn parse_meta_item(&mut self) -> @ast::MetaItem {
+ fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> {
match self.token {
token::INTERPOLATED(token::NtMeta(e)) => {
self.bump();
}
}
let hi = self.span.hi;
- @spanned(lo, hi, ast::MetaNameValue(name, lit))
+ box(GC) spanned(lo, hi, ast::MetaNameValue(name, lit))
}
token::LPAREN => {
let inner_items = self.parse_meta_seq();
let hi = self.span.hi;
- @spanned(lo, hi, ast::MetaList(name, inner_items))
+ box(GC) spanned(lo, hi, ast::MetaList(name, inner_items))
}
_ => {
let hi = self.last_span.hi;
- @spanned(lo, hi, ast::MetaWord(name))
+ box(GC) spanned(lo, hi, ast::MetaWord(name))
}
}
}
// matches meta_seq = ( COMMASEP(meta_item) )
- fn parse_meta_seq(&mut self) -> Vec<@ast::MetaItem> {
+ fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> {
self.parse_seq(&token::LPAREN,
&token::RPAREN,
seq_sep_trailing_disallowed(token::COMMA),
|p| p.parse_meta_item()).node
}
- fn parse_optional_meta(&mut self) -> Vec<@ast::MetaItem> {
+ fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>> {
match self.token {
token::LPAREN => self.parse_meta_seq(),
_ => Vec::new()
// Predicates on exprs and stmts that the pretty-printer and parser use
use ast;
+use std::gc::Gc;
// does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression
// 'if true {...} else {...}
// |x| 5 '
// isn't parsed as (if true {...} else {...} | x) | 5
-pub fn expr_requires_semi_to_be_stmt(e: @ast::Expr) -> bool {
+pub fn expr_requires_semi_to_be_stmt(e: Gc<ast::Expr>) -> bool {
match e.node {
ast::ExprIf(..)
| ast::ExprMatch(..)
}
}
-pub fn expr_is_simple_block(e: @ast::Expr) -> bool {
+pub fn expr_is_simple_block(e: Gc<ast::Expr>) -> bool {
match e.node {
ast::ExprBlock(block) => block.rules == ast::DefaultBlock,
_ => false
use parse::parser::Parser;
use std::cell::RefCell;
+use std::gc::Gc;
use std::io::File;
use std::rc::Rc;
use std::str;
source: String,
cfg: ast::CrateConfig,
sess: &ParseSess)
- -> @ast::Expr {
+ -> Gc<ast::Expr> {
let mut p = new_parser_from_source_str(sess, cfg, name, source);
maybe_aborted(p.parse_expr(), p)
}
source: String,
cfg: ast::CrateConfig,
sess: &ParseSess)
- -> Option<@ast::Item> {
+ -> Option<Gc<ast::Item>> {
let mut p = new_parser_from_source_str(sess, cfg, name, source);
let attrs = p.parse_outer_attributes();
maybe_aborted(p.parse_item(attrs),p)
source: String,
cfg: ast::CrateConfig,
sess: &ParseSess)
- -> @ast::MetaItem {
+ -> Gc<ast::MetaItem> {
let mut p = new_parser_from_source_str(sess, cfg, name, source);
maybe_aborted(p.parse_meta_item(),p)
}
cfg: ast::CrateConfig,
attrs: Vec<ast::Attribute> ,
sess: &ParseSess)
- -> @ast::Stmt {
+ -> Gc<ast::Stmt> {
let mut p = new_parser_from_source_str(
sess,
cfg,
#[test] fn path_exprs_1() {
assert!(string_to_expr("a".to_string()) ==
- @ast::Expr{
+ box(GC) ast::Expr{
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(ast::Path {
span: sp(0, 1),
#[test] fn path_exprs_2 () {
assert!(string_to_expr("::a::b".to_string()) ==
- @ast::Expr {
+ box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(ast::Path {
span: sp(0, 6),
#[test] fn ret_expr() {
assert!(string_to_expr("return d".to_string()) ==
- @ast::Expr{
+ box(GC) ast::Expr{
id: ast::DUMMY_NODE_ID,
- node:ast::ExprRet(Some(@ast::Expr{
+ node:ast::ExprRet(Some(box(GC) ast::Expr{
id: ast::DUMMY_NODE_ID,
node:ast::ExprPath(ast::Path{
span: sp(7, 8),
#[test] fn parse_stmt_1 () {
assert!(string_to_stmt("b;".to_string()) ==
- @Spanned{
- node: ast::StmtExpr(@ast::Expr {
+ box(GC) Spanned{
+ node: ast::StmtExpr(box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(ast::Path {
span:sp(0,1),
let sess = new_parse_sess();
let mut parser = string_to_parser(&sess, "b".to_string());
assert!(parser.parse_pat() ==
- @ast::Pat{id: ast::DUMMY_NODE_ID,
+ box(GC) ast::Pat{id: ast::DUMMY_NODE_ID,
node: ast::PatIdent(
ast::BindByValue(ast::MutImmutable),
ast::Path {
// this test depends on the intern order of "fn" and "int"
assert!(string_to_item("fn a (b : int) { b; }".to_string()) ==
Some(
- @ast::Item{ident:str_to_ident("a"),
+ box(GC) ast::Item{ident:str_to_ident("a"),
attrs:Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemFn(ast::P(ast::FnDecl {
}, None, ast::DUMMY_NODE_ID),
span:sp(10,13)
}),
- pat: @ast::Pat {
+ pat: box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: ast::PatIdent(
ast::BindByValue(ast::MutImmutable),
},
ast::P(ast::Block {
view_items: Vec::new(),
- stmts: vec!(@Spanned{
- node: ast::StmtSemi(@ast::Expr{
+ stmts: vec!(box(GC) Spanned{
+ node: ast::StmtSemi(box(GC) ast::Expr{
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(
ast::Path{
#[test] fn parse_exprs () {
// just make sure that they parse....
string_to_expr("3 + 4".to_string());
- string_to_expr("a::z.froob(b,@(987+3))".to_string());
+ string_to_expr("a::z.froob(b,box(GC)(987+3))".to_string());
}
#[test] fn attrs_fix_bug () {
string_to_item("pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
- -> Result<@Writer, String> {
+ -> Result<Gc<Writer>, String> {
#[cfg(windows)]
fn wb() -> c_int {
(O_WRONLY | libc::consts::os::extra::O_BINARY) as c_int
use parse::parser;
use parse::token;
+use std::gc::Gc;
+
/// The specific types of unsupported syntax
#[deriving(PartialEq, Eq, Hash)]
pub enum ObsoleteSyntax {
ObsoleteOwnedType,
ObsoleteOwnedExpr,
ObsoleteOwnedPattern,
+ ObsoleteOwnedVector,
}
pub trait ParserObsoleteMethods {
fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax);
// Reports an obsolete syntax non-fatal error, and returns
// a placeholder expression
- fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> @Expr;
+ fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr>;
fn report(&mut self,
sp: Span,
kind: ObsoleteSyntax,
"`~` notation for owned pointer patterns",
"use the `box` operator instead of `~`"
),
+ ObsoleteOwnedVector => (
+ "`~[T]` is no longer a type",
+ "use the `Vec` type instead"
+ ),
};
self.report(sp, kind, kind_str, desc);
// Reports an obsolete syntax non-fatal error, and returns
// a placeholder expression
- fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> @Expr {
+ fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr> {
self.obsolete(sp, kind);
- self.mk_expr(sp.lo, sp.hi, ExprLit(@respan(sp, LitNil)))
+ self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil)))
}
fn report(&mut self,
use std::collections::HashSet;
use std::mem::replace;
use std::rc::Rc;
-use std::string::String;
+use std::gc::Gc;
#[allow(non_camel_case_types)]
#[deriving(PartialEq)]
enum ItemOrViewItem {
// Indicates a failure to parse any kind of item. The attributes are
// returned.
- IoviNone(Vec<Attribute> ),
- IoviItem(@Item),
- IoviForeignItem(@ForeignItem),
+ IoviNone(Vec<Attribute>),
+ IoviItem(Gc<Item>),
+ IoviForeignItem(Gc<ForeignItem>),
IoviViewItem(ViewItem)
}
struct ParsedItemsAndViewItems {
- attrs_remaining: Vec<Attribute> ,
- view_items: Vec<ViewItem> ,
- items: Vec<@Item> ,
- foreign_items: Vec<@ForeignItem>
+ attrs_remaining: Vec<Attribute>,
+ view_items: Vec<ViewItem>,
+ items: Vec<Gc<Item>>,
+ foreign_items: Vec<Gc<ForeignItem>>
}
/* ident is handled by common.rs */
// Commit to parsing a complete expression `e` expected to be
// followed by some token from the set edible + inedible. Recover
// from anticipated input errors, discarding erroneous characters.
- pub fn commit_expr(&mut self, e: @Expr, edible: &[token::Token], inedible: &[token::Token]) {
+ pub fn commit_expr(&mut self, e: Gc<Expr>, edible: &[token::Token],
+ inedible: &[token::Token]) {
debug!("commit_expr {:?}", e);
match e.node {
ExprPath(..) => {
self.expect_one_of(edible, inedible)
}
- pub fn commit_expr_expecting(&mut self, e: @Expr, edible: token::Token) {
+ pub fn commit_expr_expecting(&mut self, e: Gc<Expr>, edible: token::Token) {
self.commit_expr(e, &[edible], &[])
}
// Commit to parsing a complete statement `s`, which expects to be
// followed by some token from the set edible + inedible. Check
// for recoverable input errors, discarding erroneous characters.
- pub fn commit_stmt(&mut self, s: @Stmt, edible: &[token::Token], inedible: &[token::Token]) {
+ pub fn commit_stmt(&mut self, s: Gc<Stmt>, edible: &[token::Token],
+ inedible: &[token::Token]) {
debug!("commit_stmt {:?}", s);
let _s = s; // unused, but future checks might want to inspect `s`.
if self.last_token.as_ref().map_or(false, |t| is_ident_or_path(*t)) {
self.expect_one_of(edible, inedible)
}
- pub fn commit_stmt_expecting(&mut self, s: @Stmt, edible: token::Token) {
+ pub fn commit_stmt_expecting(&mut self, s: Gc<Stmt>, edible: token::Token) {
self.commit_stmt(s, &[edible], &[])
}
self.expect_keyword(keywords::Fn);
let (decl, lifetimes) = self.parse_ty_fn_decl(true);
- return TyBareFn(@BareFnTy {
+ return TyBareFn(box(GC) BareFnTy {
abi: abi,
fn_style: fn_style,
lifetimes: lifetimes,
cf: ret_style,
variadic: variadic
});
- TyProc(@ClosureTy {
+ TyProc(box(GC) ClosureTy {
fn_style: NormalFn,
onceness: Once,
bounds: bounds,
});
if is_unboxed {
- TyUnboxedFn(@UnboxedFnTy {
+ TyUnboxedFn(box(GC) UnboxedFnTy {
decl: decl,
})
} else {
- TyClosure(@ClosureTy {
+ TyClosure(box(GC) ClosureTy {
fn_style: fn_style,
onceness: onceness,
bounds: bounds,
let (inner_attrs, body) =
p.parse_inner_attrs_and_block();
let attrs = attrs.append(inner_attrs.as_slice());
- Provided(@ast::Method {
+ Provided(box(GC) ast::Method {
ident: ident,
attrs: attrs,
generics: generics,
// OWNED POINTER
self.bump();
match self.token {
- token::IDENT(ref ident, _)
- if "str" == token::get_ident(*ident).get() => {
- // This is OK (for now).
- }
- token::LBRACKET => {} // Also OK.
+ token::LBRACKET =>
+ self.obsolete(self.last_span, ObsoleteOwnedVector),
_ => self.obsolete(self.last_span, ObsoleteOwnedType),
};
TyUniq(self.parse_ty(false))
}
}
- pub fn maybe_parse_fixed_vstore(&mut self) -> Option<@ast::Expr> {
+ pub fn maybe_parse_fixed_vstore(&mut self) -> Option<Gc<ast::Expr>> {
if self.token == token::COMMA &&
self.look_ahead(1, |t| *t == token::DOTDOT) {
self.bump();
}
// matches '-' lit | lit
- pub fn parse_literal_maybe_minus(&mut self) -> @Expr {
+ pub fn parse_literal_maybe_minus(&mut self) -> Gc<Expr> {
let minus_lo = self.span.lo;
let minus_present = self.eat(&token::BINOP(token::MINUS));
let lo = self.span.lo;
- let literal = @self.parse_lit();
+ let literal = box(GC) self.parse_lit();
let hi = self.span.hi;
let expr = self.mk_expr(lo, hi, ExprLit(literal));
}
}
- pub fn mk_expr(&mut self, lo: BytePos, hi: BytePos, node: Expr_) -> @Expr {
- @Expr {
+ pub fn mk_expr(&mut self, lo: BytePos, hi: BytePos, node: Expr_) -> Gc<Expr> {
+ box(GC) Expr {
id: ast::DUMMY_NODE_ID,
node: node,
span: mk_sp(lo, hi),
}
}
- pub fn mk_unary(&mut self, unop: ast::UnOp, expr: @Expr) -> ast::Expr_ {
+ pub fn mk_unary(&mut self, unop: ast::UnOp, expr: Gc<Expr>) -> ast::Expr_ {
ExprUnary(unop, expr)
}
- pub fn mk_binary(&mut self, binop: ast::BinOp, lhs: @Expr, rhs: @Expr) -> ast::Expr_ {
+ pub fn mk_binary(&mut self, binop: ast::BinOp,
+ lhs: Gc<Expr>, rhs: Gc<Expr>) -> ast::Expr_ {
ExprBinary(binop, lhs, rhs)
}
- pub fn mk_call(&mut self, f: @Expr, args: Vec<@Expr> ) -> ast::Expr_ {
+ pub fn mk_call(&mut self, f: Gc<Expr>, args: Vec<Gc<Expr>>) -> ast::Expr_ {
ExprCall(f, args)
}
fn mk_method_call(&mut self,
ident: ast::SpannedIdent,
tps: Vec<P<Ty>>,
- args: Vec<@Expr>)
+ args: Vec<Gc<Expr>>)
-> ast::Expr_ {
ExprMethodCall(ident, tps, args)
}
- pub fn mk_index(&mut self, expr: @Expr, idx: @Expr) -> ast::Expr_ {
+ pub fn mk_index(&mut self, expr: Gc<Expr>, idx: Gc<Expr>) -> ast::Expr_ {
ExprIndex(expr, idx)
}
- pub fn mk_field(&mut self, expr: @Expr, ident: Ident, tys: Vec<P<Ty>> ) -> ast::Expr_ {
+ pub fn mk_field(&mut self, expr: Gc<Expr>, ident: Ident,
+ tys: Vec<P<Ty>>) -> ast::Expr_ {
ExprField(expr, ident, tys)
}
- pub fn mk_assign_op(&mut self, binop: ast::BinOp, lhs: @Expr, rhs: @Expr) -> ast::Expr_ {
+ pub fn mk_assign_op(&mut self, binop: ast::BinOp,
+ lhs: Gc<Expr>, rhs: Gc<Expr>) -> ast::Expr_ {
ExprAssignOp(binop, lhs, rhs)
}
- pub fn mk_mac_expr(&mut self, lo: BytePos, hi: BytePos, m: Mac_) -> @Expr {
- @Expr {
+ pub fn mk_mac_expr(&mut self, lo: BytePos, hi: BytePos, m: Mac_) -> Gc<Expr> {
+ box(GC) Expr {
id: ast::DUMMY_NODE_ID,
node: ExprMac(codemap::Spanned {node: m, span: mk_sp(lo, hi)}),
span: mk_sp(lo, hi),
}
}
- pub fn mk_lit_u32(&mut self, i: u32) -> @Expr {
+ pub fn mk_lit_u32(&mut self, i: u32) -> Gc<Expr> {
let span = &self.span;
- let lv_lit = @codemap::Spanned {
+ let lv_lit = box(GC) codemap::Spanned {
node: LitUint(i as u64, TyU32),
span: *span
};
- @Expr {
+ box(GC) Expr {
id: ast::DUMMY_NODE_ID,
node: ExprLit(lv_lit),
span: *span,
// at the bottom (top?) of the precedence hierarchy,
// parse things like parenthesized exprs,
// macros, return, etc.
- pub fn parse_bottom_expr(&mut self) -> @Expr {
+ pub fn parse_bottom_expr(&mut self) -> Gc<Expr> {
maybe_whole_expr!(self);
let lo = self.span.lo;
if self.token == token::RPAREN {
hi = self.span.hi;
self.bump();
- let lit = @spanned(lo, hi, LitNil);
+ let lit = box(GC) spanned(lo, hi, LitNil);
return self.mk_expr(lo, hi, ExprLit(lit));
}
let mut es = vec!(self.parse_expr());
// other literal expression
let lit = self.parse_lit();
hi = lit.span.hi;
- ex = ExprLit(@lit);
+ ex = ExprLit(box(GC) lit);
}
return self.mk_expr(lo, hi, ex);
// parse a block or unsafe block
pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode)
- -> @Expr {
+ -> Gc<Expr> {
self.expect(&token::LBRACE);
let blk = self.parse_block_tail(lo, blk_mode);
return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk));
}
// parse a.b or a(13) or a[4] or just a
- pub fn parse_dot_or_call_expr(&mut self) -> @Expr {
+ pub fn parse_dot_or_call_expr(&mut self) -> Gc<Expr> {
let b = self.parse_bottom_expr();
self.parse_dot_or_call_expr_with(b)
}
- pub fn parse_dot_or_call_expr_with(&mut self, e0: @Expr) -> @Expr {
+ pub fn parse_dot_or_call_expr_with(&mut self, e0: Gc<Expr>) -> Gc<Expr> {
let mut e = e0;
let lo = e.span.lo;
let mut hi;
}
// parse a prefix-operator expr
- pub fn parse_prefix_expr(&mut self) -> @Expr {
+ pub fn parse_prefix_expr(&mut self) -> Gc<Expr> {
let lo = self.span.lo;
let hi;
hi = e.span.hi;
// HACK: turn ~[...] into a ~-vec
ex = match e.node {
- ExprVec(..) | ExprRepeat(..) => ExprVstore(e, ExprVstoreUniq),
+ ExprVec(..) | ExprRepeat(..) => {
+ self.obsolete(self.last_span, ObsoleteOwnedVector);
+ ExprVstore(e, ExprVstoreUniq)
+ }
ExprLit(lit) if lit_is_str(lit) => {
self.obsolete(self.last_span, ObsoleteOwnedExpr);
ExprVstore(e, ExprVstoreUniq)
// HACK: turn `box [...]` into a boxed-vec
ex = match subexpression.node {
ExprVec(..) | ExprRepeat(..) => {
+ self.obsolete(self.last_span, ObsoleteOwnedVector);
ExprVstore(subexpression, ExprVstoreUniq)
}
ExprLit(lit) if lit_is_str(lit) => {
}
// parse an expression of binops
- pub fn parse_binops(&mut self) -> @Expr {
+ pub fn parse_binops(&mut self) -> Gc<Expr> {
let prefix_expr = self.parse_prefix_expr();
self.parse_more_binops(prefix_expr, 0)
}
// parse an expression of binops of at least min_prec precedence
- pub fn parse_more_binops(&mut self, lhs: @Expr, min_prec: uint) -> @Expr {
+ pub fn parse_more_binops(&mut self, lhs: Gc<Expr>,
+ min_prec: uint) -> Gc<Expr> {
if self.expr_is_complete(lhs) { return lhs; }
// Prevent dynamic borrow errors later on by limiting the
// parse an assignment expression....
// actually, this seems to be the main entry point for
// parsing an arbitrary expression.
- pub fn parse_assign_expr(&mut self) -> @Expr {
+ pub fn parse_assign_expr(&mut self) -> Gc<Expr> {
let lo = self.span.lo;
let lhs = self.parse_binops();
match self.token {
}
// parse an 'if' expression ('if' token already eaten)
- pub fn parse_if_expr(&mut self) -> @Expr {
+ pub fn parse_if_expr(&mut self) -> Gc<Expr> {
let lo = self.last_span.lo;
let cond = self.parse_expr();
let thn = self.parse_block();
- let mut els: Option<@Expr> = None;
+ let mut els: Option<Gc<Expr>> = None;
let mut hi = thn.span.hi;
if self.eat_keyword(keywords::Else) {
let elexpr = self.parse_else_expr();
}
// `|args| { ... }` or `{ ...}` like in `do` expressions
- pub fn parse_lambda_block_expr(&mut self) -> @Expr {
+ pub fn parse_lambda_block_expr(&mut self) -> Gc<Expr> {
self.parse_lambda_expr_(
|p| {
match p.token {
}
// `|args| expr`
- pub fn parse_lambda_expr(&mut self) -> @Expr {
+ pub fn parse_lambda_expr(&mut self) -> Gc<Expr> {
self.parse_lambda_expr_(|p| p.parse_fn_block_decl(),
|p| p.parse_expr())
}
// and in parsing a block expr as e.g. in for...
pub fn parse_lambda_expr_(&mut self,
parse_decl: |&mut Parser| -> P<FnDecl>,
- parse_body: |&mut Parser| -> @Expr)
- -> @Expr {
+ parse_body: |&mut Parser| -> Gc<Expr>)
+ -> Gc<Expr> {
let lo = self.span.lo;
let decl = parse_decl(self);
let body = parse_body(self);
return self.mk_expr(lo, body.span.hi, ExprFnBlock(decl, fakeblock));
}
- pub fn parse_else_expr(&mut self) -> @Expr {
+ pub fn parse_else_expr(&mut self) -> Gc<Expr> {
if self.eat_keyword(keywords::If) {
return self.parse_if_expr();
} else {
}
// parse a 'for' .. 'in' expression ('for' token already eaten)
- pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>) -> @Expr {
+ pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> {
// Parse: `for <src_pat> in <src_expr> <src_loop_block>`
let lo = self.last_span.lo;
self.mk_expr(lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident))
}
- pub fn parse_while_expr(&mut self) -> @Expr {
+ pub fn parse_while_expr(&mut self) -> Gc<Expr> {
let lo = self.last_span.lo;
let cond = self.parse_expr();
let body = self.parse_block();
return self.mk_expr(lo, hi, ExprWhile(cond, body));
}
- pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>) -> @Expr {
+ pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> {
let lo = self.last_span.lo;
let body = self.parse_block();
let hi = body.span.hi;
|| self.look_ahead(1, |t| *t == token::DOTDOT))
}
- fn parse_match_expr(&mut self) -> @Expr {
+ fn parse_match_expr(&mut self) -> Gc<Expr> {
let lo = self.last_span.lo;
let discriminant = self.parse_expr();
self.commit_expr_expecting(discriminant, token::LBRACE);
}
// parse an expression
- pub fn parse_expr(&mut self) -> @Expr {
+ pub fn parse_expr(&mut self) -> Gc<Expr> {
return self.parse_expr_res(UNRESTRICTED);
}
// parse an expression, subject to the given restriction
- fn parse_expr_res(&mut self, r: restriction) -> @Expr {
+ fn parse_expr_res(&mut self, r: restriction) -> Gc<Expr> {
let old = self.restriction;
self.restriction = r;
let e = self.parse_assign_expr();
}
// parse the RHS of a local variable declaration (e.g. '= 14;')
- fn parse_initializer(&mut self) -> Option<@Expr> {
+ fn parse_initializer(&mut self) -> Option<Gc<Expr>> {
if self.token == token::EQ {
self.bump();
Some(self.parse_expr())
}
// parse patterns, separated by '|' s
- fn parse_pats(&mut self) -> Vec<@Pat> {
+ fn parse_pats(&mut self) -> Vec<Gc<Pat>> {
let mut pats = Vec::new();
loop {
pats.push(self.parse_pat());
fn parse_pat_vec_elements(
&mut self,
- ) -> (Vec<@Pat> , Option<@Pat>, Vec<@Pat> ) {
+ ) -> (Vec<Gc<Pat>> , Option<Gc<Pat>>, Vec<Gc<Pat>> ) {
let mut before = Vec::new();
let mut slice = None;
let mut after = Vec::new();
if is_slice {
if self.token == token::COMMA || self.token == token::RBRACKET {
- slice = Some(@ast::Pat {
+ slice = Some(box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: PatWildMulti,
span: self.span,
} else {
let fieldpath = ast_util::ident_to_path(self.last_span,
fieldname);
- @ast::Pat {
+ box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: PatIdent(bind_type, fieldpath, None),
span: self.last_span
}
// parse a pattern.
- pub fn parse_pat(&mut self) -> @Pat {
+ pub fn parse_pat(&mut self) -> Gc<Pat> {
maybe_whole!(self, NtPat);
let lo = self.span.lo;
self.bump();
pat = PatWild;
hi = self.last_span.hi;
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
pat = PatBox(sub);
hi = self.last_span.hi;
self.obsolete(self.last_span, ObsoleteOwnedPattern);
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
let sub = self.parse_pat();
pat = PatRegion(sub);
hi = self.last_span.hi;
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
if self.token == token::RPAREN {
hi = self.span.hi;
self.bump();
- let lit = @codemap::Spanned {
+ let lit = box(GC) codemap::Spanned {
node: LitNil,
span: mk_sp(lo, hi)};
let expr = self.mk_expr(lo, hi, ExprLit(lit));
pat = PatTup(fields);
}
hi = self.last_span.hi;
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
self.expect(&token::RBRACKET);
pat = ast::PatVec(before, slice, after);
hi = self.last_span.hi;
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
let sub = self.parse_pat();
pat = PatBox(sub);
hi = self.last_span.hi;
- return @ast::Pat {
+ return box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi)
pat = PatStruct(enum_path, fields, etc);
}
_ => {
- let mut args: Vec<@Pat> = Vec::new();
+ let mut args: Vec<Gc<Pat>> = Vec::new();
match self.token {
token::LPAREN => {
let is_dotdot = self.look_ahead(1, |t| {
}
}
hi = self.last_span.hi;
- @ast::Pat {
+ box(GC) ast::Pat {
id: ast::DUMMY_NODE_ID,
node: pat,
span: mk_sp(lo, hi),
}
// parse a local variable declaration
- fn parse_local(&mut self) -> @Local {
+ fn parse_local(&mut self) -> Gc<Local> {
let lo = self.span.lo;
let pat = self.parse_pat();
});
if self.eat(&token::COLON) { ty = self.parse_ty(false); }
let init = self.parse_initializer();
- @ast::Local {
+ box(GC) ast::Local {
ty: ty,
pat: pat,
init: init,
}
// parse a "let" stmt
- fn parse_let(&mut self) -> @Decl {
+ fn parse_let(&mut self) -> Gc<Decl> {
let lo = self.span.lo;
let local = self.parse_local();
- return @spanned(lo, self.last_span.hi, DeclLocal(local));
+ box(GC) spanned(lo, self.last_span.hi, DeclLocal(local))
}
// parse a structure field
// parse a statement. may include decl.
// precondition: any attributes are parsed already
- pub fn parse_stmt(&mut self, item_attrs: Vec<Attribute> ) -> @Stmt {
+ pub fn parse_stmt(&mut self, item_attrs: Vec<Attribute>) -> Gc<Stmt> {
maybe_whole!(self, NtStmt);
fn check_expected_item(p: &mut Parser, found_attrs: bool) {
check_expected_item(self, !item_attrs.is_empty());
self.expect_keyword(keywords::Let);
let decl = self.parse_let();
- return @spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID));
+ return box(GC) spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID));
} else if is_ident(&self.token)
&& !token::is_any_keyword(&self.token)
&& self.look_ahead(1, |t| *t == token::NOT) {
let hi = self.span.hi;
if id == token::special_idents::invalid {
- return @spanned(lo, hi, StmtMac(
+ return box(GC) spanned(lo, hi, StmtMac(
spanned(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT)), false));
} else {
// if it has a special ident, it's definitely an item
- return @spanned(lo, hi, StmtDecl(
- @spanned(lo, hi, DeclItem(
+ return box(GC) spanned(lo, hi, StmtDecl(
+ box(GC) spanned(lo, hi, DeclItem(
self.mk_item(
lo, hi, id /*id is good here*/,
ItemMac(spanned(lo, hi, MacInvocTT(pth, tts, EMPTY_CTXT))),
match self.parse_item_or_view_item(item_attrs, false) {
IoviItem(i) => {
let hi = i.span.hi;
- let decl = @spanned(lo, hi, DeclItem(i));
- return @spanned(lo, hi, StmtDecl(decl, ast::DUMMY_NODE_ID));
+ let decl = box(GC) spanned(lo, hi, DeclItem(i));
+ return box(GC) spanned(lo, hi, StmtDecl(decl, ast::DUMMY_NODE_ID));
}
IoviViewItem(vi) => {
self.span_fatal(vi.span,
// Remainder are line-expr stmts.
let e = self.parse_expr_res(RESTRICT_STMT_EXPR);
- return @spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID));
+ return box(GC) spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID));
}
}
// is this expression a successfully-parsed statement?
- fn expr_is_complete(&mut self, e: @Expr) -> bool {
+ fn expr_is_complete(&mut self, e: Gc<Expr>) -> bool {
return self.restriction == RESTRICT_STMT_EXPR &&
!classify::expr_requires_semi_to_be_stmt(e);
}
false, false);
for item in items.iter() {
- let decl = @spanned(item.span.lo, item.span.hi, DeclItem(*item));
- stmts.push(@spanned(item.span.lo, item.span.hi,
+ let decl = box(GC) spanned(item.span.lo, item.span.hi, DeclItem(*item));
+ stmts.push(box(GC) spanned(item.span.lo, item.span.hi,
StmtDecl(decl, ast::DUMMY_NODE_ID)));
}
match stmt.node {
StmtExpr(e, stmt_id) => {
// expression without semicolon
- if classify::stmt_ends_with_semi(stmt) {
+ if classify::stmt_ends_with_semi(&*stmt) {
// Just check for errors and recover; do not eat semicolon yet.
self.commit_stmt(stmt, &[], &[token::SEMI, token::RBRACE]);
}
hi: self.last_span.hi,
expn_info: stmt.span.expn_info,
};
- stmts.push(@codemap::Spanned {
+ stmts.push(box(GC) codemap::Spanned {
node: StmtSemi(e, stmt_id),
span: span_with_semi,
});
match self.token {
token::SEMI => {
self.bump();
- stmts.push(@codemap::Spanned {
+ stmts.push(box(GC) codemap::Spanned {
node: StmtMac((*m).clone(), true),
span: stmt.span,
});
}
}
_ => { // all other kinds of statements:
- stmts.push(stmt);
+ stmts.push(stmt.clone());
- if classify::stmt_ends_with_semi(stmt) {
+ if classify::stmt_ends_with_semi(&*stmt) {
self.commit_stmt_expecting(stmt, token::SEMI);
}
}
fn mk_item(&mut self, lo: BytePos, hi: BytePos, ident: Ident,
node: Item_, vis: Visibility,
- attrs: Vec<Attribute> ) -> @Item {
- @Item {
+ attrs: Vec<Attribute>) -> Gc<Item> {
+ box(GC) Item {
ident: ident,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
}
// parse a method in a trait impl, starting with `attrs` attributes.
- fn parse_method(&mut self, already_parsed_attrs: Option<Vec<Attribute> >) -> @Method {
+ fn parse_method(&mut self,
+ already_parsed_attrs: Option<Vec<Attribute>>) -> Gc<Method> {
let next_attrs = self.parse_outer_attributes();
let attrs = match already_parsed_attrs {
Some(mut a) => { a.push_all_move(next_attrs); a }
let (inner_attrs, body) = self.parse_inner_attrs_and_block();
let hi = body.span.hi;
let attrs = attrs.append(inner_attrs.as_slice());
- @ast::Method {
+ box(GC) ast::Method {
ident: ident,
attrs: attrs,
generics: generics,
method_attrs = None;
}
- let ident = ast_util::impl_pretty_name(&opt_trait, ty);
+ let ident = ast_util::impl_pretty_name(&opt_trait, &*ty);
(ident, ItemImpl(generics, opt_trait, ty, meths), Some(inner_attrs))
}
let _ = ast::DUMMY_NODE_ID; // FIXME: Workaround for crazy bug.
let new_id = ast::DUMMY_NODE_ID;
(class_name,
- ItemStruct(@ast::StructDef {
+ ItemStruct(box(GC) ast::StructDef {
fields: fields,
ctor_id: if is_tuple_like { Some(new_id) } else { None },
super_struct: super_struct,
items: starting_items,
..
} = self.parse_items_and_view_items(first_item_attrs, true, true);
- let mut items: Vec<@Item> = starting_items;
+ let mut items: Vec<Gc<Item>> = starting_items;
let attrs_remaining_len = attrs_remaining.len();
// don't think this other loop is even necessary....
// parse a function declaration from a foreign module
fn parse_item_foreign_fn(&mut self, vis: ast::Visibility,
- attrs: Vec<Attribute> ) -> @ForeignItem {
+ attrs: Vec<Attribute>) -> Gc<ForeignItem> {
let lo = self.span.lo;
self.expect_keyword(keywords::Fn);
let decl = self.parse_fn_decl(true);
let hi = self.span.hi;
self.expect(&token::SEMI);
- @ast::ForeignItem { ident: ident,
- attrs: attrs,
- node: ForeignItemFn(decl, generics),
- id: ast::DUMMY_NODE_ID,
- span: mk_sp(lo, hi),
- vis: vis }
+ box(GC) ast::ForeignItem { ident: ident,
+ attrs: attrs,
+ node: ForeignItemFn(decl, generics),
+ id: ast::DUMMY_NODE_ID,
+ span: mk_sp(lo, hi),
+ vis: vis }
}
// parse a static item from a foreign module
fn parse_item_foreign_static(&mut self, vis: ast::Visibility,
- attrs: Vec<Attribute> ) -> @ForeignItem {
+ attrs: Vec<Attribute> ) -> Gc<ForeignItem> {
let lo = self.span.lo;
self.expect_keyword(keywords::Static);
let ty = self.parse_ty(false);
let hi = self.span.hi;
self.expect(&token::SEMI);
- @ast::ForeignItem {
+ box(GC) ast::ForeignItem {
ident: ident,
attrs: attrs,
node: ForeignItemStatic(ty, mutbl),
// parse a structure-like enum variant definition
// this should probably be renamed or refactored...
- fn parse_struct_def(&mut self) -> @StructDef {
+ fn parse_struct_def(&mut self) -> Gc<StructDef> {
let mut fields: Vec<StructField> = Vec::new();
while self.token != token::RBRACE {
fields.push(self.parse_struct_decl_field());
}
self.bump();
- return @ast::StructDef {
+ return box(GC) ast::StructDef {
fields: fields,
ctor_id: None,
super_struct: None,
INTERPOLATED(token::NtItem(item)) => {
self.bump();
let new_attrs = attrs.append(item.attrs.as_slice());
- return IoviItem(@Item {
+ return IoviItem(box(GC) Item {
attrs: new_attrs,
..(*item).clone()
});
return IoviNone(attrs);
}
- pub fn parse_item(&mut self, attrs: Vec<Attribute> ) -> Option<@Item> {
+ pub fn parse_item(&mut self, attrs: Vec<Attribute> ) -> Option<Gc<Item>> {
match self.parse_item_or_view_item(attrs, true) {
IoviNone(_) => None,
IoviViewItem(_) =>
// | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE
// | MOD? non_global_path MOD_SEP STAR
// | MOD? non_global_path
- fn parse_view_path(&mut self) -> @ViewPath {
+ fn parse_view_path(&mut self) -> Gc<ViewPath> {
let lo = self.span.lo;
if self.token == token::LBRACE {
global: false,
segments: Vec::new()
};
- return @spanned(lo, self.span.hi,
+ return box(GC) spanned(lo, self.span.hi,
ViewPathList(path, idents, ast::DUMMY_NODE_ID));
}
}
}).collect()
};
- return @spanned(lo, self.span.hi,
+ return box(GC) spanned(lo, self.span.hi,
ViewPathSimple(first_ident, path,
ast::DUMMY_NODE_ID));
}
}
}).collect()
};
- return @spanned(lo, self.span.hi,
+ return box(GC) spanned(lo, self.span.hi,
ViewPathList(path, idents, ast::DUMMY_NODE_ID));
}
}
}).collect()
};
- return @spanned(lo, self.span.hi,
+ return box(GC) spanned(lo, self.span.hi,
ViewPathGlob(path, ast::DUMMY_NODE_ID));
}
}
}).collect()
};
- return @spanned(lo,
+ return box(GC) spanned(lo,
self.last_span.hi,
ViewPathSimple(last, path, ast::DUMMY_NODE_ID));
}
use serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
-use std::path::BytesContainer;
+use std::gc::Gc;
use std::mem;
+use std::path::BytesContainer;
use std::rc::Rc;
-use std::string::String;
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)]
/// For interpolation during macro expansion.
pub enum Nonterminal {
- NtItem(@ast::Item),
+ NtItem(Gc<ast::Item>),
NtBlock(P<ast::Block>),
- NtStmt(@ast::Stmt),
- NtPat( @ast::Pat),
- NtExpr(@ast::Expr),
+ NtStmt(Gc<ast::Stmt>),
+ NtPat( Gc<ast::Pat>),
+ NtExpr(Gc<ast::Expr>),
NtTy( P<ast::Ty>),
NtIdent(Box<ast::Ident>, bool),
- NtMeta(@ast::MetaItem), // stuff inside brackets for attributes
+ NtMeta(Gc<ast::MetaItem>), // stuff inside brackets for attributes
NtPath(Box<ast::Path>),
- NtTT( @ast::TokenTree), // needs @ed to break a circularity
+ NtTT( Gc<ast::TokenTree>), // needs @ed to break a circularity
NtMatchers(Vec<ast::Matcher> )
}
EOF => "<eof>".to_string(),
INTERPOLATED(ref nt) => {
match nt {
- &NtExpr(e) => ::print::pprust::expr_to_str(e),
- &NtMeta(e) => ::print::pprust::meta_item_to_str(e),
+ &NtExpr(ref e) => ::print::pprust::expr_to_str(&**e),
+ &NtMeta(ref e) => ::print::pprust::meta_item_to_str(&**e),
_ => {
let mut s = "an interpolated ".to_string();
match *nt {
use print::pp::{Breaks, Consistent, Inconsistent, eof};
use print::pp;
+use std::gc::Gc;
use std::io::{IoResult, MemWriter};
use std::io;
use std::mem;
use std::str;
-use std::string::String;
pub enum AnnNode<'a> {
NodeBlock(&'a ast::Block),
}
pub fn commasep_exprs(&mut self, b: Breaks,
- exprs: &[@ast::Expr]) -> IoResult<()> {
- self.commasep_cmnt(b, exprs, |s, &e| s.print_expr(e), |e| e.span)
+ exprs: &[Gc<ast::Expr>]) -> IoResult<()> {
+ self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&**e), |e| e.span)
}
pub fn print_mod(&mut self, _mod: &ast::Mod,
try!(self.print_view_item(vitem));
}
for item in _mod.items.iter() {
- try!(self.print_item(*item));
+ try!(self.print_item(&**item));
}
Ok(())
}
try!(self.print_view_item(vitem));
}
for item in nmod.items.iter() {
- try!(self.print_foreign_item(*item));
+ try!(self.print_foreign_item(&**item));
}
Ok(())
}
match ty.node {
ast::TyNil => try!(word(&mut self.s, "()")),
ast::TyBot => try!(word(&mut self.s, "!")),
- ast::TyBox(ty) => {
+ ast::TyBox(ref ty) => {
try!(word(&mut self.s, "@"));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
}
- ast::TyUniq(ty) => {
+ ast::TyUniq(ref ty) => {
try!(word(&mut self.s, "~"));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
}
- ast::TyVec(ty) => {
+ ast::TyVec(ref ty) => {
try!(word(&mut self.s, "["));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
try!(word(&mut self.s, "]"));
}
ast::TyPtr(ref mt) => {
&None,
f.fn_style,
ast::Many,
- f.decl,
+ &*f.decl,
None,
&None,
Some(&generics),
region,
f.fn_style,
f.onceness,
- f.decl,
+ &*f.decl,
None,
&f.bounds,
Some(&generics),
None,
false));
}
- ast::TyProc(f) => {
+ ast::TyProc(ref f) => {
let generics = ast::Generics {
lifetimes: f.lifetimes.clone(),
ty_params: OwnedSlice::empty()
&None,
f.fn_style,
f.onceness,
- f.decl,
+ &*f.decl,
None,
&f.bounds,
Some(&generics),
&None,
ast::NormalFn,
ast::Many,
- f.decl,
+ &*f.decl,
None,
&None,
None,
ast::TyPath(ref path, ref bounds, _) => {
try!(self.print_bounded_path(path, bounds));
}
- ast::TyFixedLengthVec(ty, v) => {
+ ast::TyFixedLengthVec(ref ty, ref v) => {
try!(word(&mut self.s, "["));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
try!(word(&mut self.s, ", .."));
- try!(self.print_expr(v));
+ try!(self.print_expr(&**v));
try!(word(&mut self.s, "]"));
}
- ast::TyTypeof(e) => {
+ ast::TyTypeof(ref e) => {
try!(word(&mut self.s, "typeof("));
- try!(self.print_expr(e));
+ try!(self.print_expr(&**e));
try!(word(&mut self.s, ")"));
}
ast::TyInfer => {
}
pub fn print_type_ref(&mut self, ty: &P<ast::Ty>) -> IoResult<()> {
- self.print_type(*ty)
+ self.print_type(&**ty)
}
pub fn print_foreign_item(&mut self,
try!(self.maybe_print_comment(item.span.lo));
try!(self.print_outer_attributes(item.attrs.as_slice()));
match item.node {
- ast::ForeignItemFn(decl, ref generics) => {
- try!(self.print_fn(decl, None, abi::Rust, item.ident, generics,
- None, item.vis));
+ ast::ForeignItemFn(ref decl, ref generics) => {
+ try!(self.print_fn(&**decl, None, abi::Rust, item.ident, generics,
+ None, item.vis));
try!(self.end()); // end head-ibox
try!(word(&mut self.s, ";"));
self.end() // end the outer fn box
}
- ast::ForeignItemStatic(t, m) => {
+ ast::ForeignItemStatic(ref t, m) => {
try!(self.head(visibility_qualified(item.vis,
"static").as_slice()));
if m {
}
try!(self.print_ident(item.ident));
try!(self.word_space(":"));
- try!(self.print_type(t));
+ try!(self.print_type(&**t));
try!(word(&mut self.s, ";"));
try!(self.end()); // end the head-ibox
self.end() // end the outer cbox
try!(self.print_outer_attributes(item.attrs.as_slice()));
try!(self.ann.pre(self, NodeItem(item)));
match item.node {
- ast::ItemStatic(ty, m, expr) => {
+ ast::ItemStatic(ref ty, m, ref expr) => {
try!(self.head(visibility_qualified(item.vis,
"static").as_slice()));
if m == ast::MutMutable {
}
try!(self.print_ident(item.ident));
try!(self.word_space(":"));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
try!(space(&mut self.s));
try!(self.end()); // end the head-ibox
try!(self.word_space("="));
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
try!(word(&mut self.s, ";"));
try!(self.end()); // end the outer cbox
}
- ast::ItemFn(decl, fn_style, abi, ref typarams, body) => {
+ ast::ItemFn(ref decl, fn_style, abi, ref typarams, ref body) => {
try!(self.print_fn(
- decl,
+ &**decl,
Some(fn_style),
abi,
item.ident,
item.vis
));
try!(word(&mut self.s, " "));
- try!(self.print_block_with_attrs(body, item.attrs.as_slice()));
+ try!(self.print_block_with_attrs(&**body, item.attrs.as_slice()));
}
ast::ItemMod(ref _mod) => {
try!(self.head(visibility_qualified(item.vis,
try!(self.print_foreign_mod(nmod, item.attrs.as_slice()));
try!(self.bclose(item.span));
}
- ast::ItemTy(ty, ref params) => {
+ ast::ItemTy(ref ty, ref params) => {
try!(self.ibox(indent_unit));
try!(self.ibox(0u));
try!(self.word_nbsp(visibility_qualified(item.vis,
try!(space(&mut self.s));
try!(self.word_space("="));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
try!(word(&mut self.s, ";"));
try!(self.end()); // end the outer ibox
}
item.vis
));
}
- ast::ItemStruct(struct_def, ref generics) => {
+ ast::ItemStruct(ref struct_def, ref generics) => {
if struct_def.is_virtual {
try!(self.word_space("virtual"));
}
try!(self.head(visibility_qualified(item.vis,
"struct").as_slice()));
- try!(self.print_struct(struct_def, generics, item.ident, item.span));
+ try!(self.print_struct(&**struct_def, generics, item.ident,
+ item.span));
}
- ast::ItemImpl(ref generics, ref opt_trait, ty, ref methods) => {
+ ast::ItemImpl(ref generics, ref opt_trait, ref ty, ref methods) => {
try!(self.head(visibility_qualified(item.vis,
"impl").as_slice()));
if generics.is_parameterized() {
&None => {}
}
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
try!(space(&mut self.s));
try!(self.bopen());
try!(self.print_inner_attributes(item.attrs.as_slice()));
for meth in methods.iter() {
- try!(self.print_method(*meth));
+ try!(self.print_method(&**meth));
}
try!(self.bclose(item.span));
}
variants: &[P<ast::Variant>],
span: codemap::Span) -> IoResult<()> {
try!(self.bopen());
- for &v in variants.iter() {
+ for v in variants.iter() {
try!(self.space_if_not_bol());
try!(self.maybe_print_comment(v.span.lo));
try!(self.print_outer_attributes(v.node.attrs.as_slice()));
try!(self.ibox(indent_unit));
- try!(self.print_variant(v));
+ try!(self.print_variant(&**v));
try!(word(&mut self.s, ","));
try!(self.end());
try!(self.maybe_print_trailing_comment(v.span, None));
try!(self.print_ident(ident));
try!(self.print_generics(generics));
match struct_def.super_struct {
- Some(t) => {
+ Some(ref t) => {
try!(self.word_space(":"));
- try!(self.print_type(t));
+ try!(self.print_type(&**t));
},
None => {},
}
ast::UnnamedField(vis) => {
try!(s.print_visibility(vis));
try!(s.maybe_print_comment(field.span.lo));
- s.print_type(field.node.ty)
+ s.print_type(&*field.node.ty)
}
}
}
try!(self.print_visibility(visibility));
try!(self.print_ident(ident));
try!(self.word_nbsp(":"));
- try!(self.print_type(field.node.ty));
+ try!(self.print_type(&*field.node.ty));
try!(word(&mut self.s, ","));
}
}
try!(self.popen());
try!(self.commasep(Consistent,
args.as_slice(),
- |s, arg| s.print_type(arg.ty)));
+ |s, arg| s.print_type(&*arg.ty)));
try!(self.pclose());
}
}
- ast::StructVariantKind(struct_def) => {
+ ast::StructVariantKind(ref struct_def) => {
try!(self.head(""));
let generics = ast_util::empty_generics();
- try!(self.print_struct(struct_def, &generics, v.node.name, v.span));
+ try!(self.print_struct(&**struct_def, &generics, v.node.name, v.span));
}
}
match v.node.disr_expr {
- Some(d) => {
+ Some(ref d) => {
try!(space(&mut self.s));
try!(self.word_space("="));
- self.print_expr(d)
+ self.print_expr(&**d)
}
_ => Ok(())
}
&None,
m.fn_style,
ast::Many,
- m.decl,
+ &*m.decl,
Some(m.ident),
&None,
Some(&m.generics),
m: &ast::TraitMethod) -> IoResult<()> {
match *m {
Required(ref ty_m) => self.print_ty_method(ty_m),
- Provided(m) => self.print_method(m)
+ Provided(ref m) => self.print_method(&**m)
}
}
try!(self.hardbreak_if_not_bol());
try!(self.maybe_print_comment(meth.span.lo));
try!(self.print_outer_attributes(meth.attrs.as_slice()));
- try!(self.print_fn(meth.decl, Some(meth.fn_style), abi::Rust,
+ try!(self.print_fn(&*meth.decl, Some(meth.fn_style), abi::Rust,
meth.ident, &meth.generics, Some(meth.explicit_self.node),
meth.vis));
try!(word(&mut self.s, " "));
- self.print_block_with_attrs(meth.body, meth.attrs.as_slice())
+ self.print_block_with_attrs(&*meth.body, meth.attrs.as_slice())
}
pub fn print_outer_attributes(&mut self,
ast::AttrInner => try!(word(&mut self.s, "#![")),
ast::AttrOuter => try!(word(&mut self.s, "#[")),
}
- try!(self.print_meta_item(attr.meta()));
+ try!(self.print_meta_item(&*attr.meta()));
word(&mut self.s, "]")
}
}
pub fn print_stmt(&mut self, st: &ast::Stmt) -> IoResult<()> {
try!(self.maybe_print_comment(st.span.lo));
match st.node {
- ast::StmtDecl(decl, _) => {
- try!(self.print_decl(decl));
+ ast::StmtDecl(ref decl, _) => {
+ try!(self.print_decl(&**decl));
}
- ast::StmtExpr(expr, _) => {
+ ast::StmtExpr(ref expr, _) => {
try!(self.space_if_not_bol());
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
}
- ast::StmtSemi(expr, _) => {
+ ast::StmtSemi(ref expr, _) => {
try!(self.space_if_not_bol());
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
try!(word(&mut self.s, ";"));
}
ast::StmtMac(ref mac, semi) => {
try!(self.print_view_item(vi));
}
for st in blk.stmts.iter() {
- try!(self.print_stmt(*st));
+ try!(self.print_stmt(&**st));
}
match blk.expr {
- Some(expr) => {
+ Some(ref expr) => {
try!(self.space_if_not_bol());
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
try!(self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi)));
}
_ => ()
self.ann.post(self, NodeBlock(blk))
}
- fn print_else(&mut self, els: Option<@ast::Expr>) -> IoResult<()> {
+ fn print_else(&mut self, els: Option<Gc<ast::Expr>>) -> IoResult<()> {
match els {
Some(_else) => {
match _else.node {
// "another else-if"
- ast::ExprIf(i, t, e) => {
+ ast::ExprIf(ref i, ref t, e) => {
try!(self.cbox(indent_unit - 1u));
try!(self.ibox(0u));
try!(word(&mut self.s, " else if "));
- try!(self.print_expr(i));
+ try!(self.print_expr(&**i));
try!(space(&mut self.s));
- try!(self.print_block(t));
+ try!(self.print_block(&**t));
self.print_else(e)
}
// "final else"
- ast::ExprBlock(b) => {
+ ast::ExprBlock(ref b) => {
try!(self.cbox(indent_unit - 1u));
try!(self.ibox(0u));
try!(word(&mut self.s, " else "));
- self.print_block(b)
+ self.print_block(&**b)
}
// BLEAH, constraints would be great here
_ => {
}
pub fn print_if(&mut self, test: &ast::Expr, blk: &ast::Block,
- elseopt: Option<@ast::Expr>, chk: bool) -> IoResult<()> {
+ elseopt: Option<Gc<ast::Expr>>, chk: bool) -> IoResult<()> {
try!(self.head("if"));
if chk { try!(self.word_nbsp("check")); }
try!(self.print_expr(test));
}
}
- fn print_call_post(&mut self, args: &[@ast::Expr]) -> IoResult<()> {
+ fn print_call_post(&mut self, args: &[Gc<ast::Expr>]) -> IoResult<()> {
try!(self.popen());
try!(self.commasep_exprs(Inconsistent, args));
self.pclose()
try!(self.ibox(indent_unit));
try!(self.ann.pre(self, NodeExpr(expr)));
match expr.node {
- ast::ExprVstore(e, v) => {
+ ast::ExprVstore(ref e, v) => {
try!(self.print_expr_vstore(v));
- try!(self.print_expr(e));
+ try!(self.print_expr(&**e));
},
- ast::ExprBox(p, e) => {
+ ast::ExprBox(ref p, ref e) => {
try!(word(&mut self.s, "box"));
try!(word(&mut self.s, "("));
- try!(self.print_expr(p));
+ try!(self.print_expr(&**p));
try!(self.word_space(")"));
- try!(self.print_expr(e));
+ try!(self.print_expr(&**e));
}
ast::ExprVec(ref exprs) => {
try!(self.ibox(indent_unit));
try!(self.end());
}
- ast::ExprRepeat(element, count) => {
+ ast::ExprRepeat(ref element, ref count) => {
try!(self.ibox(indent_unit));
try!(word(&mut self.s, "["));
- try!(self.print_expr(element));
+ try!(self.print_expr(&**element));
try!(word(&mut self.s, ","));
try!(word(&mut self.s, ".."));
- try!(self.print_expr(count));
+ try!(self.print_expr(&**count));
try!(word(&mut self.s, "]"));
try!(self.end());
}
try!(s.ibox(indent_unit));
try!(s.print_ident(field.ident.node));
try!(s.word_space(":"));
- try!(s.print_expr(field.expr));
+ try!(s.print_expr(&*field.expr));
s.end()
},
|f| f.span));
match wth {
- Some(expr) => {
+ Some(ref expr) => {
try!(self.ibox(indent_unit));
if !fields.is_empty() {
try!(word(&mut self.s, ","));
try!(space(&mut self.s));
}
try!(word(&mut self.s, ".."));
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
try!(self.end());
}
_ => try!(word(&mut self.s, ","))
}
try!(self.pclose());
}
- ast::ExprCall(func, ref args) => {
- try!(self.print_expr_maybe_paren(func));
+ ast::ExprCall(ref func, ref args) => {
+ try!(self.print_expr_maybe_paren(&**func));
try!(self.print_call_post(args.as_slice()));
}
ast::ExprMethodCall(ident, ref tys, ref args) => {
let base_args = args.slice_from(1);
- try!(self.print_expr(*args.get(0)));
+ try!(self.print_expr(&**args.get(0)));
try!(word(&mut self.s, "."));
try!(self.print_ident(ident.node));
if tys.len() > 0u {
}
try!(self.print_call_post(base_args));
}
- ast::ExprBinary(op, lhs, rhs) => {
- try!(self.print_expr(lhs));
+ ast::ExprBinary(op, ref lhs, ref rhs) => {
+ try!(self.print_expr(&**lhs));
try!(space(&mut self.s));
try!(self.word_space(ast_util::binop_to_str(op)));
- try!(self.print_expr(rhs));
+ try!(self.print_expr(&**rhs));
}
- ast::ExprUnary(op, expr) => {
+ ast::ExprUnary(op, ref expr) => {
try!(word(&mut self.s, ast_util::unop_to_str(op)));
- try!(self.print_expr_maybe_paren(expr));
+ try!(self.print_expr_maybe_paren(&**expr));
}
- ast::ExprAddrOf(m, expr) => {
+ ast::ExprAddrOf(m, ref expr) => {
try!(word(&mut self.s, "&"));
try!(self.print_mutability(m));
- try!(self.print_expr_maybe_paren(expr));
+ try!(self.print_expr_maybe_paren(&**expr));
}
- ast::ExprLit(lit) => try!(self.print_literal(lit)),
- ast::ExprCast(expr, ty) => {
- try!(self.print_expr(expr));
+ ast::ExprLit(ref lit) => try!(self.print_literal(&**lit)),
+ ast::ExprCast(ref expr, ref ty) => {
+ try!(self.print_expr(&**expr));
try!(space(&mut self.s));
try!(self.word_space("as"));
- try!(self.print_type(ty));
+ try!(self.print_type(&**ty));
}
- ast::ExprIf(test, blk, elseopt) => {
- try!(self.print_if(test, blk, elseopt, false));
+ ast::ExprIf(ref test, ref blk, elseopt) => {
+ try!(self.print_if(&**test, &**blk, elseopt, false));
}
- ast::ExprWhile(test, blk) => {
+ ast::ExprWhile(ref test, ref blk) => {
try!(self.head("while"));
- try!(self.print_expr(test));
+ try!(self.print_expr(&**test));
try!(space(&mut self.s));
- try!(self.print_block(blk));
+ try!(self.print_block(&**blk));
}
- ast::ExprForLoop(pat, iter, blk, opt_ident) => {
+ ast::ExprForLoop(ref pat, ref iter, ref blk, opt_ident) => {
for ident in opt_ident.iter() {
try!(word(&mut self.s, "'"));
try!(self.print_ident(*ident));
try!(self.word_space(":"));
}
try!(self.head("for"));
- try!(self.print_pat(pat));
+ try!(self.print_pat(&**pat));
try!(space(&mut self.s));
try!(self.word_space("in"));
- try!(self.print_expr(iter));
+ try!(self.print_expr(&**iter));
try!(space(&mut self.s));
- try!(self.print_block(blk));
+ try!(self.print_block(&**blk));
}
- ast::ExprLoop(blk, opt_ident) => {
+ ast::ExprLoop(ref blk, opt_ident) => {
for ident in opt_ident.iter() {
try!(word(&mut self.s, "'"));
try!(self.print_ident(*ident));
}
try!(self.head("loop"));
try!(space(&mut self.s));
- try!(self.print_block(blk));
+ try!(self.print_block(&**blk));
}
- ast::ExprMatch(expr, ref arms) => {
+ ast::ExprMatch(ref expr, ref arms) => {
try!(self.cbox(indent_unit));
try!(self.ibox(4));
try!(self.word_nbsp("match"));
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
try!(space(&mut self.s));
try!(self.bopen());
let len = arms.len();
try!(space(&mut self.s));
try!(self.word_space("|"));
}
- try!(self.print_pat(*p));
+ try!(self.print_pat(&**p));
}
try!(space(&mut self.s));
match arm.guard {
- Some(e) => {
+ Some(ref e) => {
try!(self.word_space("if"));
- try!(self.print_expr(e));
+ try!(self.print_expr(&**e));
try!(space(&mut self.s));
}
None => ()
try!(self.word_space("=>"));
match arm.body.node {
- ast::ExprBlock(blk) => {
+ ast::ExprBlock(ref blk) => {
// the block will close the pattern's ibox
- try!(self.print_block_unclosed_indent(blk, indent_unit));
+ try!(self.print_block_unclosed_indent(&**blk,
+ indent_unit));
}
_ => {
try!(self.end()); // close the ibox for the pattern
- try!(self.print_expr(arm.body));
+ try!(self.print_expr(&*arm.body));
}
}
- if !expr_is_simple_block(expr)
+ if !expr_is_simple_block(expr.clone())
&& i < len - 1 {
try!(word(&mut self.s, ","));
}
}
try!(self.bclose_(expr.span, indent_unit));
}
- ast::ExprFnBlock(decl, body) => {
+ ast::ExprFnBlock(ref decl, ref body) => {
// in do/for blocks we don't want to show an empty
// argument list, but at this point we don't know which
// we are inside.
//
// if !decl.inputs.is_empty() {
- try!(self.print_fn_block_args(decl));
+ try!(self.print_fn_block_args(&**decl));
try!(space(&mut self.s));
// }
if !body.stmts.is_empty() || !body.expr.is_some() {
- try!(self.print_block_unclosed(body));
+ try!(self.print_block_unclosed(&**body));
} else {
// we extract the block, so as not to create another set of boxes
match body.expr.unwrap().node {
- ast::ExprBlock(blk) => {
- try!(self.print_block_unclosed(blk));
+ ast::ExprBlock(ref blk) => {
+ try!(self.print_block_unclosed(&**blk));
}
_ => {
// this is a bare expression
- try!(self.print_expr(body.expr.unwrap()));
+ try!(self.print_expr(&*body.expr.unwrap()));
try!(self.end()); // need to close a box
}
}
// empty box to satisfy the close.
try!(self.ibox(0));
}
- ast::ExprProc(decl, body) => {
+ ast::ExprProc(ref decl, ref body) => {
// in do/for blocks we don't want to show an empty
// argument list, but at this point we don't know which
// we are inside.
//
// if !decl.inputs.is_empty() {
- try!(self.print_proc_args(decl));
+ try!(self.print_proc_args(&**decl));
try!(space(&mut self.s));
// }
assert!(body.stmts.is_empty());
assert!(body.expr.is_some());
// we extract the block, so as not to create another set of boxes
match body.expr.unwrap().node {
- ast::ExprBlock(blk) => {
- try!(self.print_block_unclosed(blk));
+ ast::ExprBlock(ref blk) => {
+ try!(self.print_block_unclosed(&**blk));
}
_ => {
// this is a bare expression
- try!(self.print_expr(body.expr.unwrap()));
+ try!(self.print_expr(&*body.expr.unwrap()));
try!(self.end()); // need to close a box
}
}
// empty box to satisfy the close.
try!(self.ibox(0));
}
- ast::ExprBlock(blk) => {
+ ast::ExprBlock(ref blk) => {
// containing cbox, will be closed by print-block at }
try!(self.cbox(indent_unit));
// head-box, will be closed by print-block after {
try!(self.ibox(0u));
- try!(self.print_block(blk));
+ try!(self.print_block(&**blk));
}
- ast::ExprAssign(lhs, rhs) => {
- try!(self.print_expr(lhs));
+ ast::ExprAssign(ref lhs, ref rhs) => {
+ try!(self.print_expr(&**lhs));
try!(space(&mut self.s));
try!(self.word_space("="));
- try!(self.print_expr(rhs));
+ try!(self.print_expr(&**rhs));
}
- ast::ExprAssignOp(op, lhs, rhs) => {
- try!(self.print_expr(lhs));
+ ast::ExprAssignOp(op, ref lhs, ref rhs) => {
+ try!(self.print_expr(&**lhs));
try!(space(&mut self.s));
try!(word(&mut self.s, ast_util::binop_to_str(op)));
try!(self.word_space("="));
- try!(self.print_expr(rhs));
+ try!(self.print_expr(&**rhs));
}
- ast::ExprField(expr, id, ref tys) => {
- try!(self.print_expr(expr));
+ ast::ExprField(ref expr, id, ref tys) => {
+ try!(self.print_expr(&**expr));
try!(word(&mut self.s, "."));
try!(self.print_ident(id));
if tys.len() > 0u {
try!(word(&mut self.s, ">"));
}
}
- ast::ExprIndex(expr, index) => {
- try!(self.print_expr(expr));
+ ast::ExprIndex(ref expr, ref index) => {
+ try!(self.print_expr(&**expr));
try!(word(&mut self.s, "["));
- try!(self.print_expr(index));
+ try!(self.print_expr(&**index));
try!(word(&mut self.s, "]"));
}
ast::ExprPath(ref path) => try!(self.print_path(path, true)),
try!(space(&mut self.s))
}
}
- ast::ExprRet(result) => {
+ ast::ExprRet(ref result) => {
try!(word(&mut self.s, "return"));
- match result {
- Some(expr) => {
+ match *result {
+ Some(ref expr) => {
try!(word(&mut self.s, " "));
- try!(self.print_expr(expr));
+ try!(self.print_expr(&**expr));
}
_ => ()
}
try!(self.print_string(a.asm.get(), a.asm_str_style));
try!(self.word_space(":"));
- try!(self.commasep(Inconsistent, a.outputs.as_slice(), |s, &(ref co, o)| {
+ try!(self.commasep(Inconsistent, a.outputs.as_slice(),
+ |s, &(ref co, ref o)| {
try!(s.print_string(co.get(), ast::CookedStr));
try!(s.popen());
- try!(s.print_expr(o));
+ try!(s.print_expr(&**o));
try!(s.pclose());
Ok(())
}));
try!(space(&mut self.s));
try!(self.word_space(":"));
- try!(self.commasep(Inconsistent, a.inputs.as_slice(), |s, &(ref co, o)| {
+ try!(self.commasep(Inconsistent, a.inputs.as_slice(),
+ |s, &(ref co, ref o)| {
try!(s.print_string(co.get(), ast::CookedStr));
try!(s.popen());
- try!(s.print_expr(o));
+ try!(s.print_expr(&**o));
try!(s.pclose());
Ok(())
}));
try!(self.pclose());
}
ast::ExprMac(ref m) => try!(self.print_mac(m)),
- ast::ExprParen(e) => {
+ ast::ExprParen(ref e) => {
try!(self.popen());
- try!(self.print_expr(e));
+ try!(self.print_expr(&**e));
try!(self.pclose());
}
}
}
pub fn print_local_decl(&mut self, loc: &ast::Local) -> IoResult<()> {
- try!(self.print_pat(loc.pat));
+ try!(self.print_pat(&*loc.pat));
match loc.ty.node {
ast::TyInfer => Ok(()),
_ => {
try!(self.word_space(":"));
- self.print_type(loc.ty)
+ self.print_type(&*loc.ty)
}
}
}
pub fn print_decl(&mut self, decl: &ast::Decl) -> IoResult<()> {
try!(self.maybe_print_comment(decl.span.lo));
match decl.node {
- ast::DeclLocal(loc) => {
+ ast::DeclLocal(ref loc) => {
try!(self.space_if_not_bol());
try!(self.ibox(indent_unit));
try!(self.word_nbsp("let"));
try!(self.ibox(indent_unit));
- try!(self.print_local_decl(loc));
+ try!(self.print_local_decl(&**loc));
try!(self.end());
match loc.init {
- Some(init) => {
+ Some(ref init) => {
try!(self.nbsp());
try!(self.word_space("="));
- try!(self.print_expr(init));
+ try!(self.print_expr(&**init));
}
_ => {}
}
self.end()
}
- ast::DeclItem(item) => self.print_item(item)
+ ast::DeclItem(ref item) => self.print_item(&**item)
}
}
}
try!(self.print_path(path, true));
match sub {
- Some(p) => {
+ Some(ref p) => {
try!(word(&mut self.s, "@"));
- try!(self.print_pat(p));
+ try!(self.print_pat(&**p));
}
None => ()
}
if !args.is_empty() {
try!(self.popen());
try!(self.commasep(Inconsistent, args.as_slice(),
- |s, &p| s.print_pat(p)));
+ |s, p| s.print_pat(&**p)));
try!(self.pclose());
}
}
try!(s.cbox(indent_unit));
try!(s.print_ident(f.ident));
try!(s.word_space(":"));
- try!(s.print_pat(f.pat));
+ try!(s.print_pat(&*f.pat));
s.end()
},
|f| f.pat.span));
try!(self.popen());
try!(self.commasep(Inconsistent,
elts.as_slice(),
- |s, &p| s.print_pat(p)));
+ |s, p| s.print_pat(&**p)));
if elts.len() == 1 {
try!(word(&mut self.s, ","));
}
try!(self.pclose());
}
- ast::PatBox(inner) => {
+ ast::PatBox(ref inner) => {
try!(word(&mut self.s, "box "));
- try!(self.print_pat(inner));
+ try!(self.print_pat(&**inner));
}
- ast::PatRegion(inner) => {
+ ast::PatRegion(ref inner) => {
try!(word(&mut self.s, "&"));
- try!(self.print_pat(inner));
+ try!(self.print_pat(&**inner));
}
- ast::PatLit(e) => try!(self.print_expr(e)),
- ast::PatRange(begin, end) => {
- try!(self.print_expr(begin));
+ ast::PatLit(ref e) => try!(self.print_expr(&**e)),
+ ast::PatRange(ref begin, ref end) => {
+ try!(self.print_expr(&**begin));
try!(space(&mut self.s));
try!(word(&mut self.s, ".."));
- try!(self.print_expr(end));
+ try!(self.print_expr(&**end));
}
ast::PatVec(ref before, slice, ref after) => {
try!(word(&mut self.s, "["));
try!(self.commasep(Inconsistent,
before.as_slice(),
- |s, &p| s.print_pat(p)));
- for &p in slice.iter() {
+ |s, p| s.print_pat(&**p)));
+ for p in slice.iter() {
if !before.is_empty() { try!(self.word_space(",")); }
- match *p {
+ match **p {
ast::Pat { node: ast::PatWildMulti, .. } => {
// this case is handled by print_pat
}
_ => try!(word(&mut self.s, "..")),
}
- try!(self.print_pat(p));
+ try!(self.print_pat(&**p));
if !after.is_empty() { try!(self.word_space(",")); }
}
try!(self.commasep(Inconsistent,
after.as_slice(),
- |s, &p| s.print_pat(p)));
+ |s, p| s.print_pat(&**p)));
try!(word(&mut self.s, "]"));
}
ast::PatMac(ref m) => try!(self.print_mac(m)),
_ => {
try!(self.space_if_not_bol());
try!(self.word_space("->"));
- self.print_type(decl.output)
+ self.print_type(&*decl.output)
}
}
}
_ => {
try!(self.space_if_not_bol());
try!(self.word_space("->"));
- try!(self.print_type(decl.output));
+ try!(self.print_type(&*decl.output));
}
}
_ => {
try!(self.space_if_not_bol());
try!(self.word_space("->"));
- try!(self.print_type(decl.output));
+ try!(self.print_type(&*decl.output));
}
}
&None,
ast::NormalFn,
ast::Many,
- unboxed_function_type.decl,
+ &*unboxed_function_type.decl,
None,
&None,
None,
try!(s.print_ident(param.ident));
try!(s.print_bounds(&None, ¶m.bounds, false));
match param.default {
- Some(default) => {
+ Some(ref default) => {
try!(space(&mut s.s));
try!(s.word_space("="));
- s.print_type(default)
+ s.print_type(&**default)
}
_ => Ok(())
}
try!(self.popen());
try!(self.commasep(Consistent,
items.as_slice(),
- |s, &i| s.print_meta_item(i)));
+ |s, i| s.print_meta_item(&**i)));
try!(self.pclose());
}
}
ast::ViewItemUse(ref vp) => {
try!(self.head("use"));
- try!(self.print_view_path(*vp));
+ try!(self.print_view_path(&**vp));
}
}
try!(word(&mut self.s, ";"));
pub fn print_mt(&mut self, mt: &ast::MutTy) -> IoResult<()> {
try!(self.print_mutability(mt.mutbl));
- self.print_type(mt.ty)
+ self.print_type(&*mt.ty)
}
pub fn print_arg(&mut self, input: &ast::Arg) -> IoResult<()> {
try!(self.ibox(indent_unit));
match input.ty.node {
- ast::TyInfer => try!(self.print_pat(input.pat)),
+ ast::TyInfer => try!(self.print_pat(&*input.pat)),
_ => {
match input.pat.node {
ast::PatIdent(_, ref path, _) if
// Do nothing.
}
_ => {
- try!(self.print_pat(input.pat));
+ try!(self.print_pat(&*input.pat));
try!(word(&mut self.s, ":"));
try!(space(&mut self.s));
}
}
- try!(self.print_type(input.ty));
+ try!(self.print_type(&*input.ty));
}
}
self.end()
if decl.cf == ast::NoReturn {
try!(self.word_nbsp("!"));
} else {
- try!(self.print_type(decl.output));
+ try!(self.print_type(&*decl.output));
}
try!(self.end());
}
use parse;
use owned_slice::OwnedSlice;
+use std::gc::Gc;
+
// Context-passing AST walker. Each overridden visit method has full control
// over what happens with its node, it can do its own traversal of the node's
// children (potentially passing in different contexts to each), call
item: &ast::InlinedItem,
env: E) {
match *item {
- IIItem(i) => visitor.visit_item(i, env),
- IIForeign(i) => visitor.visit_foreign_item(i, env),
- IIMethod(_, _, m) => walk_method_helper(visitor, m, env),
+ IIItem(i) => visitor.visit_item(&*i, env),
+ IIForeign(i) => visitor.visit_foreign_item(&*i, env),
+ IIMethod(_, _, m) => walk_method_helper(visitor, &*m, env),
}
}
}
for item in module.items.iter() {
- visitor.visit_item(*item, env.clone())
+ visitor.visit_item(&**item, env.clone())
}
}
}
pub fn walk_local<E: Clone, V: Visitor<E>>(visitor: &mut V, local: &Local, env: E) {
- visitor.visit_pat(local.pat, env.clone());
- visitor.visit_ty(local.ty, env.clone());
+ visitor.visit_pat(&*local.pat, env.clone());
+ visitor.visit_ty(&*local.ty, env.clone());
match local.init {
None => {}
- Some(initializer) => visitor.visit_expr(initializer, env),
+ Some(initializer) => visitor.visit_expr(&*initializer, env),
}
}
pub fn walk_item<E: Clone, V: Visitor<E>>(visitor: &mut V, item: &Item, env: E) {
visitor.visit_ident(item.span, item.ident, env.clone());
match item.node {
- ItemStatic(typ, _, expr) => {
- visitor.visit_ty(typ, env.clone());
- visitor.visit_expr(expr, env.clone());
+ ItemStatic(ref typ, _, ref expr) => {
+ visitor.visit_ty(&**typ, env.clone());
+ visitor.visit_expr(&**expr, env.clone());
}
ItemFn(declaration, fn_style, abi, ref generics, body) => {
visitor.visit_fn(&FkItemFn(item.ident, generics, fn_style, abi),
- declaration,
- body,
+ &*declaration,
+ &*body,
item.span,
item.id,
env.clone())
visitor.visit_view_item(view_item, env.clone())
}
for foreign_item in foreign_module.items.iter() {
- visitor.visit_foreign_item(*foreign_item, env.clone())
+ visitor.visit_foreign_item(&**foreign_item, env.clone())
}
}
- ItemTy(typ, ref type_parameters) => {
- visitor.visit_ty(typ, env.clone());
+ ItemTy(ref typ, ref type_parameters) => {
+ visitor.visit_ty(&**typ, env.clone());
visitor.visit_generics(type_parameters, env.clone())
}
ItemEnum(ref enum_definition, ref type_parameters) => {
trait_reference, env.clone()),
None => ()
}
- visitor.visit_ty(typ, env.clone());
+ visitor.visit_ty(&*typ, env.clone());
for method in methods.iter() {
- walk_method_helper(visitor, *method, env.clone())
+ walk_method_helper(visitor, &**method, env.clone())
}
}
- ItemStruct(struct_definition, ref generics) => {
+ ItemStruct(ref struct_definition, ref generics) => {
visitor.visit_generics(generics, env.clone());
- visitor.visit_struct_def(struct_definition,
+ visitor.visit_struct_def(&**struct_definition,
item.ident,
generics,
item.id,
generics: &Generics,
env: E) {
for &variant in enum_definition.variants.iter() {
- visitor.visit_variant(variant, generics, env.clone());
+ visitor.visit_variant(&*variant, generics, env.clone());
}
}
match variant.node.kind {
TupleVariantKind(ref variant_arguments) => {
for variant_argument in variant_arguments.iter() {
- visitor.visit_ty(variant_argument.ty, env.clone())
+ visitor.visit_ty(&*variant_argument.ty, env.clone())
}
}
- StructVariantKind(struct_definition) => {
- visitor.visit_struct_def(struct_definition,
+ StructVariantKind(ref struct_definition) => {
+ visitor.visit_struct_def(&**struct_definition,
variant.node.name,
generics,
variant.node.id,
}
}
match variant.node.disr_expr {
- Some(expr) => visitor.visit_expr(expr, env.clone()),
+ Some(ref expr) => visitor.visit_expr(&**expr, env.clone()),
None => ()
}
for attr in variant.node.attrs.iter() {
pub fn walk_ty<E: Clone, V: Visitor<E>>(visitor: &mut V, typ: &Ty, env: E) {
match typ.node {
TyUniq(ty) | TyVec(ty) | TyBox(ty) => {
- visitor.visit_ty(ty, env)
+ visitor.visit_ty(&*ty, env)
}
TyPtr(ref mutable_type) => {
- visitor.visit_ty(mutable_type.ty, env)
+ visitor.visit_ty(&*mutable_type.ty, env)
}
TyRptr(ref lifetime, ref mutable_type) => {
visitor.visit_opt_lifetime_ref(typ.span, lifetime, env.clone());
- visitor.visit_ty(mutable_type.ty, env)
+ visitor.visit_ty(&*mutable_type.ty, env)
}
TyTup(ref tuple_element_types) => {
for &tuple_element_type in tuple_element_types.iter() {
- visitor.visit_ty(tuple_element_type, env.clone())
+ visitor.visit_ty(&*tuple_element_type, env.clone())
}
}
TyClosure(ref function_declaration, ref region) => {
for argument in function_declaration.decl.inputs.iter() {
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.decl.output, env.clone());
+ visitor.visit_ty(&*function_declaration.decl.output, env.clone());
for bounds in function_declaration.bounds.iter() {
walk_ty_param_bounds(visitor, bounds, env.clone())
}
}
TyProc(ref function_declaration) => {
for argument in function_declaration.decl.inputs.iter() {
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.decl.output, env.clone());
+ visitor.visit_ty(&*function_declaration.decl.output, env.clone());
for bounds in function_declaration.bounds.iter() {
walk_ty_param_bounds(visitor, bounds, env.clone())
}
}
TyBareFn(ref function_declaration) => {
for argument in function_declaration.decl.inputs.iter() {
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.decl.output, env.clone());
+ visitor.visit_ty(&*function_declaration.decl.output, env.clone());
walk_lifetime_decls(visitor, &function_declaration.lifetimes,
env.clone());
}
TyUnboxedFn(ref function_declaration) => {
for argument in function_declaration.decl.inputs.iter() {
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.decl.output, env.clone());
+ visitor.visit_ty(&*function_declaration.decl.output, env.clone());
}
TyPath(ref path, ref bounds, id) => {
visitor.visit_path(path, id, env.clone());
walk_ty_param_bounds(visitor, bounds, env.clone())
}
}
- TyFixedLengthVec(ty, expression) => {
- visitor.visit_ty(ty, env.clone());
- visitor.visit_expr(expression, env)
+ TyFixedLengthVec(ref ty, ref expression) => {
+ visitor.visit_ty(&**ty, env.clone());
+ visitor.visit_expr(&**expression, env)
}
- TyTypeof(expression) => {
- visitor.visit_expr(expression, env)
+ TyTypeof(ref expression) => {
+ visitor.visit_expr(&**expression, env)
}
TyNil | TyBot | TyInfer => {}
}
for segment in path.segments.iter() {
visitor.visit_ident(path.span, segment.identifier, env.clone());
- for &typ in segment.types.iter() {
- visitor.visit_ty(typ, env.clone());
+ for typ in segment.types.iter() {
+ visitor.visit_ty(&**typ, env.clone());
}
for lifetime in segment.lifetimes.iter() {
visitor.visit_lifetime_ref(lifetime, env.clone());
visitor.visit_path(path, pattern.id, env.clone());
for children in children.iter() {
for child in children.iter() {
- visitor.visit_pat(*child, env.clone())
+ visitor.visit_pat(&**child, env.clone())
}
}
}
PatStruct(ref path, ref fields, _) => {
visitor.visit_path(path, pattern.id, env.clone());
for field in fields.iter() {
- visitor.visit_pat(field.pat, env.clone())
+ visitor.visit_pat(&*field.pat, env.clone())
}
}
PatTup(ref tuple_elements) => {
for tuple_element in tuple_elements.iter() {
- visitor.visit_pat(*tuple_element, env.clone())
+ visitor.visit_pat(&**tuple_element, env.clone())
}
}
- PatBox(subpattern) |
- PatRegion(subpattern) => {
- visitor.visit_pat(subpattern, env)
+ PatBox(ref subpattern) |
+ PatRegion(ref subpattern) => {
+ visitor.visit_pat(&**subpattern, env)
}
PatIdent(_, ref path, ref optional_subpattern) => {
visitor.visit_path(path, pattern.id, env.clone());
match *optional_subpattern {
None => {}
- Some(subpattern) => visitor.visit_pat(subpattern, env),
+ Some(ref subpattern) => visitor.visit_pat(&**subpattern, env),
}
}
- PatLit(expression) => visitor.visit_expr(expression, env),
- PatRange(lower_bound, upper_bound) => {
- visitor.visit_expr(lower_bound, env.clone());
- visitor.visit_expr(upper_bound, env)
+ PatLit(ref expression) => visitor.visit_expr(&**expression, env),
+ PatRange(ref lower_bound, ref upper_bound) => {
+ visitor.visit_expr(&**lower_bound, env.clone());
+ visitor.visit_expr(&**upper_bound, env)
}
PatWild | PatWildMulti => (),
PatVec(ref prepattern, ref slice_pattern, ref postpatterns) => {
for prepattern in prepattern.iter() {
- visitor.visit_pat(*prepattern, env.clone())
+ visitor.visit_pat(&**prepattern, env.clone())
}
for slice_pattern in slice_pattern.iter() {
- visitor.visit_pat(*slice_pattern, env.clone())
+ visitor.visit_pat(&**slice_pattern, env.clone())
}
for postpattern in postpatterns.iter() {
- visitor.visit_pat(*postpattern, env.clone())
+ visitor.visit_pat(&**postpattern, env.clone())
}
}
PatMac(ref macro) => visitor.visit_mac(macro, env),
visitor.visit_ident(foreign_item.span, foreign_item.ident, env.clone());
match foreign_item.node {
- ForeignItemFn(function_declaration, ref generics) => {
- walk_fn_decl(visitor, function_declaration, env.clone());
+ ForeignItemFn(ref function_declaration, ref generics) => {
+ walk_fn_decl(visitor, &**function_declaration, env.clone());
visitor.visit_generics(generics, env.clone())
}
- ForeignItemStatic(typ, _) => visitor.visit_ty(typ, env.clone()),
+ ForeignItemStatic(ref typ, _) => visitor.visit_ty(&**typ, env.clone()),
}
for attr in foreign_item.attrs.iter() {
StaticRegionTyParamBound => {}
UnboxedFnTyParamBound(ref function_declaration) => {
for argument in function_declaration.decl.inputs.iter() {
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.decl.output,
+ visitor.visit_ty(&*function_declaration.decl.output,
env.clone());
}
OtherRegionTyParamBound(..) => {}
for type_parameter in generics.ty_params.iter() {
walk_ty_param_bounds(visitor, &type_parameter.bounds, env.clone());
match type_parameter.default {
- Some(ty) => visitor.visit_ty(ty, env.clone()),
+ Some(ref ty) => visitor.visit_ty(&**ty, env.clone()),
None => {}
}
}
function_declaration: &FnDecl,
env: E) {
for argument in function_declaration.inputs.iter() {
- visitor.visit_pat(argument.pat, env.clone());
- visitor.visit_ty(argument.ty, env.clone())
+ visitor.visit_pat(&*argument.pat, env.clone());
+ visitor.visit_ty(&*argument.ty, env.clone())
}
- visitor.visit_ty(function_declaration.output, env)
+ visitor.visit_ty(&*function_declaration.output, env)
}
// Note: there is no visit_method() method in the visitor, instead override
env: E) {
visitor.visit_ident(method.span, method.ident, env.clone());
visitor.visit_fn(&FkMethod(method.ident, &method.generics, method),
- method.decl,
- method.body,
+ &*method.decl,
+ &*method.body,
method.span,
method.id,
env.clone());
visitor.visit_ident(method_type.span, method_type.ident, env.clone());
visitor.visit_explicit_self(&method_type.explicit_self, env.clone());
for argument_type in method_type.decl.inputs.iter() {
- visitor.visit_ty(argument_type.ty, env.clone())
+ visitor.visit_ty(&*argument_type.ty, env.clone())
}
visitor.visit_generics(&method_type.generics, env.clone());
- visitor.visit_ty(method_type.decl.output, env.clone());
+ visitor.visit_ty(&*method_type.decl.output, env.clone());
for attr in method_type.attrs.iter() {
visitor.visit_attribute(attr, env.clone());
}
Required(ref method_type) => {
visitor.visit_ty_method(method_type, env)
}
- Provided(method) => walk_method_helper(visitor, method, env),
+ Provided(ref method) => walk_method_helper(visitor, &**method, env),
}
}
struct_definition: &StructDef,
env: E) {
match struct_definition.super_struct {
- Some(t) => visitor.visit_ty(t, env.clone()),
+ Some(ref t) => visitor.visit_ty(&**t, env.clone()),
None => {},
}
for field in struct_definition.fields.iter() {
_ => {}
}
- visitor.visit_ty(struct_field.node.ty, env.clone());
+ visitor.visit_ty(&*struct_field.node.ty, env.clone());
for attr in struct_field.node.attrs.iter() {
visitor.visit_attribute(attr, env.clone());
visitor.visit_view_item(view_item, env.clone())
}
for statement in block.stmts.iter() {
- visitor.visit_stmt(*statement, env.clone())
+ visitor.visit_stmt(&**statement, env.clone())
}
walk_expr_opt(visitor, block.expr, env)
}
pub fn walk_stmt<E: Clone, V: Visitor<E>>(visitor: &mut V, statement: &Stmt, env: E) {
match statement.node {
- StmtDecl(declaration, _) => visitor.visit_decl(declaration, env),
- StmtExpr(expression, _) | StmtSemi(expression, _) => {
- visitor.visit_expr(expression, env)
+ StmtDecl(ref declaration, _) => visitor.visit_decl(&**declaration, env),
+ StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
+ visitor.visit_expr(&**expression, env)
}
StmtMac(ref macro, _) => visitor.visit_mac(macro, env),
}
pub fn walk_decl<E: Clone, V: Visitor<E>>(visitor: &mut V, declaration: &Decl, env: E) {
match declaration.node {
- DeclLocal(ref local) => visitor.visit_local(*local, env),
- DeclItem(item) => visitor.visit_item(item, env),
+ DeclLocal(ref local) => visitor.visit_local(&**local, env),
+ DeclItem(ref item) => visitor.visit_item(&**item, env),
}
}
pub fn walk_expr_opt<E: Clone, V: Visitor<E>>(visitor: &mut V,
- optional_expression: Option<@Expr>,
+ optional_expression: Option<Gc<Expr>>,
env: E) {
match optional_expression {
None => {}
- Some(expression) => visitor.visit_expr(expression, env),
+ Some(ref expression) => visitor.visit_expr(&**expression, env),
}
}
pub fn walk_exprs<E: Clone, V: Visitor<E>>(visitor: &mut V,
- expressions: &[@Expr],
+ expressions: &[Gc<Expr>],
env: E) {
for expression in expressions.iter() {
- visitor.visit_expr(*expression, env.clone())
+ visitor.visit_expr(&**expression, env.clone())
}
}
pub fn walk_expr<E: Clone, V: Visitor<E>>(visitor: &mut V, expression: &Expr, env: E) {
match expression.node {
- ExprVstore(subexpression, _) => {
- visitor.visit_expr(subexpression, env.clone())
+ ExprVstore(ref subexpression, _) => {
+ visitor.visit_expr(&**subexpression, env.clone())
}
- ExprBox(place, subexpression) => {
- visitor.visit_expr(place, env.clone());
- visitor.visit_expr(subexpression, env.clone())
+ ExprBox(ref place, ref subexpression) => {
+ visitor.visit_expr(&**place, env.clone());
+ visitor.visit_expr(&**subexpression, env.clone())
}
ExprVec(ref subexpressions) => {
walk_exprs(visitor, subexpressions.as_slice(), env.clone())
}
- ExprRepeat(element, count) => {
- visitor.visit_expr(element, env.clone());
- visitor.visit_expr(count, env.clone())
+ ExprRepeat(ref element, ref count) => {
+ visitor.visit_expr(&**element, env.clone());
+ visitor.visit_expr(&**count, env.clone())
}
ExprStruct(ref path, ref fields, optional_base) => {
visitor.visit_path(path, expression.id, env.clone());
for field in fields.iter() {
- visitor.visit_expr(field.expr, env.clone())
+ visitor.visit_expr(&*field.expr, env.clone())
}
walk_expr_opt(visitor, optional_base, env.clone())
}
ExprTup(ref subexpressions) => {
for subexpression in subexpressions.iter() {
- visitor.visit_expr(*subexpression, env.clone())
+ visitor.visit_expr(&**subexpression, env.clone())
}
}
- ExprCall(callee_expression, ref arguments) => {
+ ExprCall(ref callee_expression, ref arguments) => {
for argument in arguments.iter() {
- visitor.visit_expr(*argument, env.clone())
+ visitor.visit_expr(&**argument, env.clone())
}
- visitor.visit_expr(callee_expression, env.clone())
+ visitor.visit_expr(&**callee_expression, env.clone())
}
ExprMethodCall(_, ref types, ref arguments) => {
walk_exprs(visitor, arguments.as_slice(), env.clone());
- for &typ in types.iter() {
- visitor.visit_ty(typ, env.clone())
+ for typ in types.iter() {
+ visitor.visit_ty(&**typ, env.clone())
}
}
- ExprBinary(_, left_expression, right_expression) => {
- visitor.visit_expr(left_expression, env.clone());
- visitor.visit_expr(right_expression, env.clone())
+ ExprBinary(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(&**left_expression, env.clone());
+ visitor.visit_expr(&**right_expression, env.clone())
}
- ExprAddrOf(_, subexpression) | ExprUnary(_, subexpression) => {
- visitor.visit_expr(subexpression, env.clone())
+ ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => {
+ visitor.visit_expr(&**subexpression, env.clone())
}
ExprLit(_) => {}
- ExprCast(subexpression, typ) => {
- visitor.visit_expr(subexpression, env.clone());
- visitor.visit_ty(typ, env.clone())
+ ExprCast(ref subexpression, ref typ) => {
+ visitor.visit_expr(&**subexpression, env.clone());
+ visitor.visit_ty(&**typ, env.clone())
}
- ExprIf(head_expression, if_block, optional_else) => {
- visitor.visit_expr(head_expression, env.clone());
- visitor.visit_block(if_block, env.clone());
+ ExprIf(ref head_expression, ref if_block, optional_else) => {
+ visitor.visit_expr(&**head_expression, env.clone());
+ visitor.visit_block(&**if_block, env.clone());
walk_expr_opt(visitor, optional_else, env.clone())
}
- ExprWhile(subexpression, block) => {
- visitor.visit_expr(subexpression, env.clone());
- visitor.visit_block(block, env.clone())
+ ExprWhile(ref subexpression, ref block) => {
+ visitor.visit_expr(&**subexpression, env.clone());
+ visitor.visit_block(&**block, env.clone())
}
- ExprForLoop(pattern, subexpression, block, _) => {
- visitor.visit_pat(pattern, env.clone());
- visitor.visit_expr(subexpression, env.clone());
- visitor.visit_block(block, env.clone())
+ ExprForLoop(ref pattern, ref subexpression, ref block, _) => {
+ visitor.visit_pat(&**pattern, env.clone());
+ visitor.visit_expr(&**subexpression, env.clone());
+ visitor.visit_block(&**block, env.clone())
}
- ExprLoop(block, _) => visitor.visit_block(block, env.clone()),
- ExprMatch(subexpression, ref arms) => {
- visitor.visit_expr(subexpression, env.clone());
+ ExprLoop(ref block, _) => visitor.visit_block(&**block, env.clone()),
+ ExprMatch(ref subexpression, ref arms) => {
+ visitor.visit_expr(&**subexpression, env.clone());
for arm in arms.iter() {
visitor.visit_arm(arm, env.clone())
}
}
- ExprFnBlock(function_declaration, body) => {
+ ExprFnBlock(ref function_declaration, ref body) => {
visitor.visit_fn(&FkFnBlock,
- function_declaration,
- body,
+ &**function_declaration,
+ &**body,
expression.span,
expression.id,
env.clone())
}
- ExprProc(function_declaration, body) => {
+ ExprProc(ref function_declaration, ref body) => {
visitor.visit_fn(&FkFnBlock,
- function_declaration,
- body,
+ &**function_declaration,
+ &**body,
expression.span,
expression.id,
env.clone())
}
- ExprBlock(block) => visitor.visit_block(block, env.clone()),
- ExprAssign(left_hand_expression, right_hand_expression) => {
- visitor.visit_expr(right_hand_expression, env.clone());
- visitor.visit_expr(left_hand_expression, env.clone())
+ ExprBlock(ref block) => visitor.visit_block(&**block, env.clone()),
+ ExprAssign(ref left_hand_expression, ref right_hand_expression) => {
+ visitor.visit_expr(&**right_hand_expression, env.clone());
+ visitor.visit_expr(&**left_hand_expression, env.clone())
}
- ExprAssignOp(_, left_expression, right_expression) => {
- visitor.visit_expr(right_expression, env.clone());
- visitor.visit_expr(left_expression, env.clone())
+ ExprAssignOp(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(&**right_expression, env.clone());
+ visitor.visit_expr(&**left_expression, env.clone())
}
- ExprField(subexpression, _, ref types) => {
- visitor.visit_expr(subexpression, env.clone());
- for &typ in types.iter() {
- visitor.visit_ty(typ, env.clone())
+ ExprField(ref subexpression, _, ref types) => {
+ visitor.visit_expr(&**subexpression, env.clone());
+ for typ in types.iter() {
+ visitor.visit_ty(&**typ, env.clone())
}
}
- ExprIndex(main_expression, index_expression) => {
- visitor.visit_expr(main_expression, env.clone());
- visitor.visit_expr(index_expression, env.clone())
+ ExprIndex(ref main_expression, ref index_expression) => {
+ visitor.visit_expr(&**main_expression, env.clone());
+ visitor.visit_expr(&**index_expression, env.clone())
}
ExprPath(ref path) => {
visitor.visit_path(path, expression.id, env.clone())
walk_expr_opt(visitor, optional_expression, env.clone())
}
ExprMac(ref macro) => visitor.visit_mac(macro, env.clone()),
- ExprParen(subexpression) => {
- visitor.visit_expr(subexpression, env.clone())
+ ExprParen(ref subexpression) => {
+ visitor.visit_expr(&**subexpression, env.clone())
}
ExprInlineAsm(ref assembler) => {
- for &(_, input) in assembler.inputs.iter() {
- visitor.visit_expr(input, env.clone())
+ for &(_, ref input) in assembler.inputs.iter() {
+ visitor.visit_expr(&**input, env.clone())
}
- for &(_, output) in assembler.outputs.iter() {
- visitor.visit_expr(output, env.clone())
+ for &(_, ref output) in assembler.outputs.iter() {
+ visitor.visit_expr(&**output, env.clone())
}
}
}
pub fn walk_arm<E: Clone, V: Visitor<E>>(visitor: &mut V, arm: &Arm, env: E) {
for pattern in arm.pats.iter() {
- visitor.visit_pat(*pattern, env.clone())
+ visitor.visit_pat(&**pattern, env.clone())
}
walk_expr_opt(visitor, arm.guard, env.clone());
- visitor.visit_expr(arm.body, env.clone());
+ visitor.visit_expr(&*arm.body, env.clone());
for attr in arm.attrs.iter() {
visitor.visit_attribute(attr, env.clone());
}
html_root_url = "http://doc.rust-lang.org/")]
#![feature(asm, macro_rules, phase)]
-#![deny(deprecated_owned_vector)]
extern crate getopts;
extern crate regex;
MetricChange, Improvement, Regression, LikelyNoise,
StaticTestFn, StaticTestName, DynTestName, DynTestFn,
run_test, test_main, test_main_static, filter_tests,
- parse_opts, StaticBenchFn, test_main_static_x};
+ parse_opts, StaticBenchFn};
}
pub mod stats;
test_main(args, owned_tests)
}
-pub fn test_main_static_x(args: &[~str], tests: &[TestDescAndFn]) {
- test_main_static(args.iter()
- .map(|x| x.to_string())
- .collect::<Vec<_>>()
- .as_slice(),
- tests)
-}
-
pub enum ColorConfig {
AutoColor,
AlwaysColor,
impl<'a,T: FloatMath + FromPrimitive> Stats<T> for &'a [T] {
// FIXME #11059 handle NaN, inf and overflow
- #[allow(deprecated_owned_vector)]
fn sum(self) -> T {
let mut partials = vec![];
#[test]
fn test_boxplot_nonpositive() {
- #[allow(deprecated_owned_vector)]
fn t(s: &Summary<f64>, expected: String) {
use std::io::MemWriter;
let mut m = MemWriter::new();
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(phase)]
-#![deny(deprecated_owned_vector)]
#[cfg(test)] extern crate debug;
extern crate serialize;
extern crate libc;
-#[cfg(target_os = "macos")]
-extern crate sync;
use std::io::BufReader;
use std::num;
fn os_precise_time_ns() -> u64 {
static mut TIMEBASE: libc::mach_timebase_info = libc::mach_timebase_info { numer: 0,
denom: 0 };
- static mut ONCE: sync::one::Once = sync::one::ONCE_INIT;
+ static mut ONCE: std::sync::Once = std::sync::ONCE_INIT;
unsafe {
ONCE.doit(|| {
imp::mach_timebase_info(&mut TIMEBASE);
// This also serves as a pipes test, because Arcs are implemented with pipes.
-extern crate sync;
extern crate time;
-use sync::{Arc, Future, Mutex};
+use std::sync::{Arc, Future, Mutex};
use std::os;
use std::uint;
// This also serves as a pipes test, because Arcs are implemented with pipes.
-extern crate sync;
extern crate time;
-use sync::{RWLock, Arc};
-use sync::Future;
+use std::sync::{RWLock, Arc, Future};
use std::os;
use std::uint;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern crate sync;
extern crate arena;
use std::iter::range_step;
-use sync::Future;
+use std::sync::Future;
use arena::TypedArena;
enum Tree<'a> {
// ignore-android see #10393 #13206
-extern crate sync;
-
use std::string::String;
use std::slice;
-use sync::Arc;
-use sync::Future;
+use std::sync::{Arc, Future};
static TABLE: [u8, ..4] = [ 'A' as u8, 'C' as u8, 'G' as u8, 'T' as u8 ];
static TABLE_SIZE: uint = 2 << 16;
// ignore-pretty very bad with line comments
-extern crate sync;
-
use std::io;
use std::os;
use std::simd::f64x2;
-use sync::Future;
-use sync::Arc;
+use std::sync::{Arc, Future};
static ITER: int = 50;
static LIMIT: f64 = 2.0;
#![feature(phase)]
#[phase(plugin)] extern crate green;
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
green_start!(main)
extern crate regex;
#[phase(plugin)]extern crate regex_macros;
-extern crate sync;
use std::io;
use regex::{NoExpand, Regex};
-use sync::Arc;
+use std::sync::{Arc, Future};
fn count_matches(seq: &str, variant: &Regex) -> int {
let mut n = 0;
let seq_arc = Arc::new(seq.clone()); // copy before it moves
let clen = seq.len();
- let mut seqlen = sync::Future::spawn(proc() {
- let substs = ~[
+ let mut seqlen = Future::spawn(proc() {
+ let substs = vec![
(regex!("B"), "(c|g|t)"),
(regex!("D"), "(a|g|t)"),
(regex!("H"), "(a|c|t)"),
seq.len()
});
- let variants = ~[
+ let variants = vec![
regex!("agggtaaa|tttaccct"),
regex!("[cgt]gggtaaa|tttaccc[acg]"),
regex!("a[act]ggtaaa|tttacc[agt]t"),
for variant in variants.move_iter() {
let seq_arc_copy = seq_arc.clone();
variant_strs.push(variant.to_str().to_owned());
- counts.push(sync::Future::spawn(proc() {
+ counts.push(Future::spawn(proc() {
count_matches(seq_arc_copy.as_slice(), &variant)
}));
}
#![feature(phase)]
#![allow(non_snake_case_functions)]
#[phase(plugin)] extern crate green;
-extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
-use sync::{Arc, RWLock};
+use std::sync::{Arc, RWLock};
green_start!(main)
use std::os;
use std::task;
use std::vec;
+use std::gc::Gc;
#[deriving(Clone)]
enum List<T> {
- Nil, Cons(T, @List<T>)
+ Nil, Cons(T, Gc<List<T>>)
}
enum UniqueList {
}
struct r {
- _l: @nillist,
+ _l: Gc<nillist>,
}
#[unsafe_destructor]
fn drop(&mut self) {}
}
-fn r(l: @nillist) -> r {
+fn r(l: Gc<nillist>) -> r {
r {
_l: l
}
let st = match st {
None => {
State {
- managed: @Nil,
+ managed: box(GC) Nil,
unique: box Nil,
- tuple: (@Nil, box Nil),
- vec: vec!(@Nil),
- res: r(@Nil)
+ tuple: (box(GC) Nil, box Nil),
+ vec: vec!(box(GC) Nil),
+ res: r(box(GC) Nil)
}
}
Some(st) => {
State {
- managed: @Cons((), st.managed),
- unique: box Cons((), @*st.unique),
- tuple: (@Cons((), st.tuple.ref0().clone()),
- box Cons((), @*st.tuple.ref1().clone())),
- vec: st.vec.clone().append(&[@Cons((), *st.vec.last().unwrap())]),
- res: r(@Cons((), st.res._l))
+ managed: box(GC) Cons((), st.managed),
+ unique: box Cons((), box(GC) *st.unique),
+ tuple: (box(GC) Cons((), st.tuple.ref0().clone()),
+ box Cons((), box(GC) *st.tuple.ref1().clone())),
+ vec: st.vec.clone().append(
+ &[box(GC) Cons((), *st.vec.last().unwrap())]),
+ res: r(box(GC) Cons((), st.res._l))
}
}
};
// except according to those terms.
fn test() {
- let w: ~[int];
+ let w: &mut [int];
w[5] = 0; //~ ERROR use of possibly uninitialized variable: `w`
- //~^ ERROR cannot assign to immutable vec content `w[..]`
- let mut w: ~[int];
+ let mut w: &mut [int];
w[5] = 0; //~ ERROR use of possibly uninitialized variable: `w`
}
// issue 7327
-extern crate sync;
-
-use sync::Arc;
+use std::sync::Arc;
struct A { y: Arc<int>, x: Arc<int> }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern crate sync;
-
-use sync::Future;
+use std::sync::Future;
fn main() {
let f = Future::from_value(());
#![deny(unreachable_code)]
#![allow(unused_variable)]
#![allow(dead_code)]
-#![allow(deprecated_owned_vector)]
-
fn fail_len(v: Vec<int> ) -> uint {
let mut i = 3;
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![deny(deprecated_owned_vector)]
-
-fn main() {
- ~[1]; //~ ERROR use of deprecated `~[]`
- //~^ ERROR use of deprecated `~[]`
-}
#![feature(managed_boxes)]
#![forbid(heap_memory)]
#![allow(dead_code)]
-#![allow(deprecated_owned_vector)]
struct Foo {
#![feature(globs)]
#![deny(unused_imports)]
#![allow(dead_code)]
-#![allow(deprecated_owned_vector)]
use cal = bar::c::cc;
#![allow(dead_assignment)]
#![allow(unused_variable)]
#![allow(dead_code)]
-#![allow(deprecated_owned_vector)]
#![deny(unused_mut)]
#![allow(dead_code)]
#![deny(unused_unsafe)]
-#![allow(deprecated_owned_vector)]
mod foo {
// error-pattern: use of moved value
-extern crate sync;
-use sync::Arc;
-
+use std::sync::Arc;
use std::task;
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern crate sync;
-use sync::Arc;
-
+use std::sync::Arc;
use std::task;
fn main() {
// This program would segfault if it were legal.
#![feature(once_fns)]
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn foo(blk: proc()) {
blk();
// This program would segfault if it were legal.
#![feature(once_fns)]
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn foo(blk: once ||) {
blk();
// Testing guarantees provided by once functions.
// This program would segfault if it were legal.
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn foo(blk: ||) {
blk();
fn want_foo(f: foo) {}
fn have_bar(b: bar) {
- want_foo(b); //~ ERROR (expected struct foo but found @-ptr)
+ want_foo(b); //~ ERROR (expected struct foo but found Gc-ptr)
}
fn main() {}
// error-pattern:explicit failure
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
enum e<T> { e(Arc<T>) }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern crate sync;
-
-use sync::Arc;
+use std::sync::Arc;
fn dispose(_x: Arc<bool>) { }
pub fn main() {
struct Foo;
assert!(Some(box Foo).is_some());
-
- let xs: ~[()] = ~[];
- assert!(Some(xs).is_some());
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-pretty FIXME(#14193)
+
#![feature(managed_boxes)]
enum list<T> { cons(@T, @list<T>), nil, }
#![feature(macro_rules, managed_boxes)]
#![deny(warnings)]
#![allow(unused_must_use)]
-#![allow(deprecated_owned_vector)]
extern crate debug;
#![feature(once_fns)]
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn foo(blk: proc()) {
blk();
#![feature(once_fns)]
-extern crate sync;
-use sync::Arc;
+use std::sync::Arc;
fn foo(blk: once ||) {
blk();
p.borrow_mut().y += 3;
assert_eq!(*p.borrow(), Point {x: 3, y: 5});
- let v = Rc::new(RefCell::new(~[1, 2, 3]));
+ let v = Rc::new(RefCell::new([1, 2, 3]));
v.borrow_mut()[0] = 3;
v.borrow_mut()[1] += 3;
assert_eq!((v.borrow()[0], v.borrow()[1], v.borrow()[2]), (3, 5, 3));
fn visit_char(&mut self) -> bool { true }
- fn visit_estr_box(&mut self) -> bool { true }
- fn visit_estr_uniq(&mut self) -> bool { true }
fn visit_estr_slice(&mut self) -> bool { true }
fn visit_estr_fixed(&mut self,
_sz: uint, _sz2: uint,
fn visit_ptr(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_rptr(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool { true }
- fn visit_evec_box(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool { true }
- fn visit_evec_uniq(&mut self, _mtbl: uint, inner: *TyDesc) -> bool {
- self.types.push("[".to_string());
- unsafe { visit_tydesc(inner, &mut *self as &mut TyVisitor); }
- self.types.push("]".to_string());
- true
- }
fn visit_evec_slice(&mut self, _mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_evec_fixed(&mut self, _n: uint, _sz: uint, _align: uint,
_mtbl: uint, _inner: *TyDesc) -> bool { true }
// and shared between tasks as long as all types fulfill Send.
-extern crate sync;
-
-use sync::Arc;
+use std::sync::Arc;
use std::task;
trait Pet {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-pretty FIXME(#14193)
+
#![feature(managed_boxes)]
// Test cyclic detector when using trait instances.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-pretty FIXME(#14193)
+
#![feature(managed_boxes)]
use std::cell::RefCell;