`librustuv`.
let state = self.x.get();
// Borrowck would complain about this if the function were
// not already unsafe. See borrow_rwlock, far below.
- do (&(*state).lock).lock {
+ (&(*state).lock).lock(|| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
- }
+ })
}
/// As unsafe_access(), but with a condvar, as sync::mutex.lock_cond().
blk: |x: &mut T, c: &Condvar| -> U)
-> U {
let state = self.x.get();
- do (&(*state).lock).lock_cond |cond| {
+ (&(*state).lock).lock_cond(|cond| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: true,
failed: &mut (*state).failed,
cond: cond })
- }
+ })
}
/**
pub fn write<U>(&self, blk: |x: &mut T| -> U) -> U {
unsafe {
let state = self.x.get();
- do (*borrow_rwlock(state)).write {
+ (*borrow_rwlock(state)).write(|| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
- }
+ })
}
}
-> U {
unsafe {
let state = self.x.get();
- do (*borrow_rwlock(state)).write_cond |cond| {
+ (*borrow_rwlock(state)).write_cond(|cond| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: false,
failed: &mut (*state).failed,
cond: cond})
- }
+ })
}
}
pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
unsafe {
let state = self.x.get();
- do (*state).lock.read {
+ (*state).lock.read(|| {
check_poison(false, (*state).failed);
blk(&(*state).data)
- }
+ })
}
}
pub fn write_downgrade<U>(&self, blk: |v: RWWriteMode<T>| -> U) -> U {
unsafe {
let state = self.x.get();
- do (*borrow_rwlock(state)).write_downgrade |write_mode| {
+ (*borrow_rwlock(state)).write_downgrade(|write_mode| {
check_poison(false, (*state).failed);
blk(RWWriteMode {
data: &mut (*state).data,
token: write_mode,
poison: PoisonOnFail(&mut (*state).failed)
})
- }
+ })
}
}
token: ref token,
poison: _
} => {
- do token.write {
- blk(data)
- }
+ token.write(|| blk(data))
}
}
}
token: ref token,
poison: ref poison
} => {
- do token.write_cond |cond| {
+ token.write_cond(|cond| {
unsafe {
let cvar = Condvar {
is_mutex: false,
};
blk(data, &cvar)
}
- }
+ })
}
}
}
data: data,
token: ref token
} => {
- do token.read { blk(data) }
+ token.read(|| blk(data))
}
}
}
do task::spawn || {
// wait until parent gets in
p.take().recv();
- do arc2.access_cond |state, cond| {
+ arc2.access_cond(|state, cond| {
*state = true;
cond.signal();
- }
+ })
}
- do arc.access_cond |state, cond| {
+ arc.access_cond(|state, cond| {
c.take().send(());
assert!(!*state);
while !*state {
cond.wait();
}
- }
+ })
}
#[test] #[should_fail]
do spawn {
let _ = p.recv();
- do arc2.access_cond |one, cond| {
+ arc2.access_cond(|one, cond| {
cond.signal();
// Parent should fail when it wakes up.
assert_eq!(*one, 0);
- }
+ })
}
- do arc.access_cond |one, cond| {
+ arc.access_cond(|one, cond| {
c.send(());
while *one == 1 {
cond.wait();
}
- }
+ })
}
#[test] #[should_fail]
let arc = ~MutexArc::new(1);
let arc2 = ~arc.clone();
do task::try || {
- do arc2.access |one| {
+ arc2.access(|one| {
assert_eq!(*one, 2);
- }
+ })
};
- do arc.access |one| {
+ arc.access(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test] #[should_fail]
let arc2 = ~(&arc).clone();
let (p, c) = comm::stream();
do task::spawn {
- do arc2.access |one| {
+ arc2.access(|one| {
c.send(());
assert!(*one == 2);
- }
+ })
}
let _ = p.recv();
let one = arc.unwrap();
let arc = ~MutexArc::new(1);
let arc2 = ~MutexArc::new(*arc);
do task::spawn || {
- do (*arc2).unsafe_access |mutex| {
- do (*mutex).access |one| {
+ (*arc2).unsafe_access(|mutex| {
+ (*mutex).access(|one| {
assert!(*one == 1);
- }
- }
+ })
+ })
};
}
}
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.write |one| {
+ arc2.write(|one| {
assert_eq!(*one, 2);
- }
+ })
};
- do arc.read |one| {
+ arc.read(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test] #[should_fail]
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.write |one| {
+ arc2.write(|one| {
assert_eq!(*one, 2);
- }
+ })
};
- do arc.write |one| {
+ arc.write(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test] #[should_fail]
fn test_rw_arc_poison_dw() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.write_downgrade |mut write_mode| {
- do write_mode.write |one| {
+ arc2.write_downgrade(|mut write_mode| {
+ write_mode.write(|one| {
assert_eq!(*one, 2);
- }
- }
+ })
+ })
};
- do arc.write |one| {
+ arc.write(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.read |one| {
+ arc2.read(|one| {
assert_eq!(*one, 2);
- }
+ })
};
- do arc.read |one| {
+ arc.read(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.read |one| {
+ arc2.read(|one| {
assert_eq!(*one, 2);
- }
+ })
};
- do arc.write |one| {
+ arc.write(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test]
fn test_rw_arc_no_poison_dr() {
let arc = RWArc::new(1);
let arc2 = arc.clone();
do task::try {
- do arc2.write_downgrade |write_mode| {
+ arc2.write_downgrade(|write_mode| {
let read_mode = arc2.downgrade(write_mode);
- do read_mode.read |one| {
+ read_mode.read(|one| {
assert_eq!(*one, 2);
- }
- }
+ })
+ })
};
- do arc.write |one| {
+ arc.write(|one| {
assert_eq!(*one, 1);
- }
+ })
}
#[test]
fn test_rw_arc() {
let (p, c) = comm::stream();
do task::spawn {
- do arc2.write |num| {
- do 10.times {
+ arc2.write(|num| {
+ 10.times(|| {
let tmp = *num;
*num = -1;
task::deschedule();
*num = tmp + 1;
- }
+ });
c.send(());
- }
+ })
}
// Readers try to catch the writer in the act
let mut children = ~[];
- do 5.times {
+ 5.times(|| {
let arc3 = arc.clone();
let mut builder = task::task();
children.push(builder.future_result());
do builder.spawn {
- do arc3.read |num| {
+ arc3.read(|num| {
assert!(*num >= 0);
- }
+ })
}
- }
+ });
// Wait for children to pass their asserts
for r in children.iter() {
// Wait for writer to finish
p.recv();
- do arc.read |num| {
+ arc.read(|num| {
assert_eq!(*num, 10);
- }
+ })
}
#[test]
fn test_rw_downgrade() {
// Reader tasks
let mut reader_convos = ~[];
- do 10.times {
+ 10.times(|| {
let ((rp1, rc1), (rp2, rc2)) = (comm::stream(), comm::stream());
reader_convos.push((rc1, rp2));
let arcn = arc.clone();
do task::spawn {
rp1.recv(); // wait for downgrader to give go-ahead
- do arcn.read |state| {
+ arcn.read(|state| {
assert_eq!(*state, 31337);
rc2.send(());
- }
+ })
}
- }
+ });
// Writer task
let arc2 = arc.clone();
let ((wp1, wc1), (wp2, wc2)) = (comm::stream(), comm::stream());
do task::spawn || {
wp1.recv();
- do arc2.write_cond |state, cond| {
+ arc2.write_cond(|state, cond| {
assert_eq!(*state, 0);
*state = 42;
cond.signal();
- }
+ });
wp1.recv();
- do arc2.write |state| {
+ arc2.write(|state| {
// This shouldn't happen until after the downgrade read
// section, and all other readers, finish.
assert_eq!(*state, 31337);
*state = 42;
- }
+ });
wc2.send(());
}
// Downgrader (us)
- do arc.write_downgrade |mut write_mode| {
- do write_mode.write_cond |state, cond| {
+ arc.write_downgrade(|mut write_mode| {
+ write_mode.write_cond(|state, cond| {
wc1.send(()); // send to another writer who will wake us up
while *state == 0 {
cond.wait();
for &(ref rc, _) in reader_convos.iter() {
rc.send(())
}
- }
+ });
let read_mode = arc.downgrade(write_mode);
- do read_mode.read |state| {
+ read_mode.read(|state| {
// complete handshake with other readers
for &(_, ref rp) in reader_convos.iter() {
rp.recv()
}
wc1.send(()); // tell writer to try again
assert_eq!(*state, 31337);
- }
- }
+ });
+ });
wp2.recv(); // complete handshake with writer
}
// writer task
let xw = x.clone();
do task::spawn {
- do xw.write_cond |state, c| {
+ xw.write_cond(|state, c| {
wc.send(()); // tell downgrader it's ok to go
c.wait();
// The core of the test is here: the condvar reacquire path
// must involve order_lock, so that it cannot race with a reader
// trying to receive the "reader cloud lock hand-off".
*state = false;
- }
+ })
}
wp.recv(); // wait for writer to get in
- do x.write_downgrade |mut write_mode| {
- do write_mode.write_cond |state, c| {
+ x.write_downgrade(|mut write_mode| {
+ write_mode.write_cond(|state, c| {
assert!(*state);
// make writer contend in the cond-reacquire path
c.signal();
- }
+ });
// make a reader task to trigger the "reader cloud lock" handoff
let xr = x.clone();
let (rp, rc) = comm::stream();
do task::spawn {
rc.send(());
- do xr.read |_state| { }
+ xr.read(|_state| { })
}
rp.recv(); // wait for reader task to exist
let read_mode = x.downgrade(write_mode);
- do read_mode.read |state| {
+ read_mode.read(|state| {
// if writer mistakenly got in, make sure it mutates state
// before we assert on it
- do 5.times { task::deschedule(); }
+ 5.times(|| task::deschedule());
// make sure writer didn't get in.
assert!(*state);
- }
- }
+ })
+ });
}
#[test]
fn test_rw_write_cond_downgrade_read_race() {
// helped to expose the race nearly 100% of the time... but adding
// deschedules in the intuitively-right locations made it even less likely,
// and I wasn't sure why :( . This is a mediocre "next best" option.
- do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
+ 8.times(|| test_rw_write_cond_downgrade_read_race_helper());
}
}
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
- do self.chunks.each |chunk| {
+ self.chunks.each(|chunk| {
if !chunk.is_pod {
destroy_chunk(chunk);
}
true
- };
+ });
}
}
}
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
- do arena.alloc { @i };
+ arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
- do arena.alloc { [0u8, 1u8, 2u8] };
+ arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
}
#[inline]
- pub fn negate(&mut self) { do self.each_storage |w| { *w = !*w; true }; }
+ pub fn negate(&mut self) {
+ self.each_storage(|w| { *w = !*w; true });
+ }
#[inline]
pub fn union(&mut self, b: &BigBitv, nbits: uint) -> bool {
#[inline]
pub fn clear(&mut self) {
match self.rep {
- Small(ref mut b) => b.clear(),
- Big(ref mut s) => { do s.each_storage() |w| { *w = 0u; true }; }
+ Small(ref mut b) => b.clear(),
+ Big(ref mut s) => {
+ s.each_storage(|w| { *w = 0u; true });
+ }
}
}
/// Set all bits to 1
#[inline]
pub fn set_all(&mut self) {
- match self.rep {
- Small(ref mut b) => b.set_all(),
- Big(ref mut s) => { do s.each_storage() |w| { *w = !0u; true }; }
- }
+ match self.rep {
+ Small(ref mut b) => b.set_all(),
+ Big(ref mut s) => {
+ s.each_storage(|w| { *w = !0u; true });
+ }
+ }
}
/// Invert all bits
#[inline]
pub fn negate(&mut self) {
- match self.rep {
- Small(ref mut b) => b.negate(),
- Big(ref mut s) => { do s.each_storage() |w| { *w = !*w; true }; }
+ match self.rep {
+ Small(ref mut b) => b.negate(),
+ Big(ref mut s) => {
+ s.each_storage(|w| { *w = !*w; true });
+ }
}
}
/// Creates a new bit vector set from the given bit vector
pub fn from_bitv(bitv: Bitv) -> BitvSet {
let mut size = 0;
- do bitv.ones |_| {
+ bitv.ones(|_| {
size += 1;
true
- };
+ });
let Bitv{rep, _} = bitv;
match rep {
Big(b) => BitvSet{ size: size, bitv: b },
impl Mutable for BitvSet {
fn clear(&mut self) {
- do self.bitv.each_storage |w| { *w = 0; true };
+ self.bitv.each_storage(|w| { *w = 0; true });
self.size = 0;
}
}
}
fn is_disjoint(&self, other: &BitvSet) -> bool {
- do self.intersection(other) |_| {
- false
- }
+ self.intersection(other, |_| false)
}
fn is_subset(&self, other: &BitvSet) -> bool {
}
fn try_recv(&self) -> Option<T> {
- do self.duplex_stream.try_recv().map |val| {
+ self.duplex_stream.try_recv().map(|val| {
self.duplex_stream.try_send(());
val
- }
+ })
}
}
/// Remove the first Node and return it, or None if the list is empty
#[inline]
fn pop_front_node(&mut self) -> Option<~Node<T>> {
- do self.list_head.take().map |mut front_node| {
+ self.list_head.take().map(|mut front_node| {
self.length -= 1;
match front_node.next.take() {
Some(node) => self.list_head = link_with_prev(node, Rawlink::none()),
None => self.list_tail = Rawlink::none()
}
front_node
- }
+ })
}
/// Add a Node last in the list
/// Remove the last Node and return it, or None if the list is empty
#[inline]
fn pop_back_node(&mut self) -> Option<~Node<T>> {
- do self.list_tail.resolve().map_default(None) |tail| {
+ self.list_tail.resolve().map_default(None, |tail| {
self.length -= 1;
self.list_tail = tail.prev;
match tail.prev.resolve() {
None => self.list_head.take(),
Some(tail_prev) => tail_prev.next.take()
}
- }
+ })
}
}
/// If the list is empty, do nothing.
#[inline]
pub fn rotate_forward(&mut self) {
- do self.pop_back_node().map |tail| {
+ self.pop_back_node().map(|tail| {
self.push_front_node(tail)
- };
+ });
}
/// Move the first element to the back of the list.
/// If the list is empty, do nothing.
#[inline]
pub fn rotate_backward(&mut self) {
- do self.pop_front_node().map |head| {
+ self.pop_front_node().map(|head| {
self.push_back_node(head)
- };
+ });
}
/// Add all elements from `other` to the end of the list
if self.nelem == 0 {
return None;
}
- do self.head.as_ref().map |head| {
+ self.head.as_ref().map(|head| {
self.nelem -= 1;
self.head = &head.next;
&head.value
- }
+ })
}
#[inline]
return None;
}
let tmp = self.tail.resolve_immut(); // FIXME: #3511: shouldn't need variable
- do tmp.as_ref().map |prev| {
+ tmp.as_ref().map(|prev| {
self.nelem -= 1;
self.tail = prev.prev;
&prev.value
- }
+ })
}
}
if self.nelem == 0 {
return None;
}
- do self.head.resolve().map |next| {
+ self.head.resolve().map(|next| {
self.nelem -= 1;
self.head = match next.next {
Some(ref mut node) => Rawlink::some(&mut **node),
None => Rawlink::none(),
};
&mut next.value
- }
+ })
}
#[inline]
if self.nelem == 0 {
return None;
}
- do self.tail.resolve().map |prev| {
+ self.tail.resolve().map(|prev| {
self.nelem -= 1;
self.tail = prev.prev;
&mut prev.value
- }
+ })
}
}
fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> T) -> T {
debug!("read_option()");
- do self.read_enum("Option") |this| {
- do this.read_enum_variant(["None", "Some"]) |this, idx| {
+ self.read_enum("Option", |this| {
+ this.read_enum_variant(["None", "Some"], |this, idx| {
match idx {
0 => f(this, false),
1 => f(this, true),
_ => fail!(),
}
- }
- }
+ })
+ })
}
fn read_seq<T>(&mut self, f: |&mut Decoder, uint| -> T) -> T {
debug!("read_seq()");
- do self.push_doc(EsVec) |d| {
+ self.push_doc(EsVec, |d| {
let len = d._next_uint(EsVecLen);
debug!(" len={}", len);
f(d, len)
- }
+ })
}
fn read_seq_elt<T>(&mut self, idx: uint, f: |&mut Decoder| -> T)
fn read_map<T>(&mut self, f: |&mut Decoder, uint| -> T) -> T {
debug!("read_map()");
- do self.push_doc(EsMap) |d| {
+ self.push_doc(EsMap, |d| {
let len = d._next_uint(EsMapLen);
debug!(" len={}", len);
f(d, len)
- }
+ })
}
fn read_map_elt_key<T>(&mut self, idx: uint, f: |&mut Decoder| -> T)
}
pub fn wr_tagged_u64(&mut self, tag_id: uint, v: u64) {
- do u64_to_be_bytes(v, 8u) |v| {
+ u64_to_be_bytes(v, 8u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_u32(&mut self, tag_id: uint, v: u32) {
- do u64_to_be_bytes(v as u64, 4u) |v| {
+ u64_to_be_bytes(v as u64, 4u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_u16(&mut self, tag_id: uint, v: u16) {
- do u64_to_be_bytes(v as u64, 2u) |v| {
+ u64_to_be_bytes(v as u64, 2u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_u8(&mut self, tag_id: uint, v: u8) {
}
pub fn wr_tagged_i64(&mut self, tag_id: uint, v: i64) {
- do u64_to_be_bytes(v as u64, 8u) |v| {
+ u64_to_be_bytes(v as u64, 8u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_i32(&mut self, tag_id: uint, v: i32) {
- do u64_to_be_bytes(v as u64, 4u) |v| {
+ u64_to_be_bytes(v as u64, 4u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_i16(&mut self, tag_id: uint, v: i16) {
- do u64_to_be_bytes(v as u64, 2u) |v| {
+ u64_to_be_bytes(v as u64, 2u, |v| {
self.wr_tagged_bytes(tag_id, v);
- }
+ })
}
pub fn wr_tagged_i8(&mut self, tag_id: uint, v: i8) {
static TDEFL_WRITE_ZLIB_HEADER : c_int = 0x01000; // write zlib header and adler32 checksum
fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
- do bytes.as_imm_buf |b, len| {
+ bytes.as_imm_buf(|b, len| {
unsafe {
let mut outsz : size_t = 0;
let res =
libc::free(res);
out
}
- }
+ })
}
pub fn deflate_bytes(bytes: &[u8]) -> ~[u8] {
}
fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> ~[u8] {
- do bytes.as_imm_buf |b, len| {
+ bytes.as_imm_buf(|b, len| {
unsafe {
let mut outsz : size_t = 0;
let res =
libc::free(res);
out
}
- }
+ })
}
pub fn inflate_bytes(bytes: &[u8]) -> ~[u8] {
// here we just need to indent the start of the description
let rowlen = row.char_len();
if rowlen < 24 {
- do (24 - rowlen).times {
+ (24 - rowlen).times(|| {
row.push_char(' ')
- }
+ })
} else {
row.push_str(desc_sep)
}
// FIXME: #5516 should be graphemes not codepoints
let mut desc_rows = ~[];
- do each_split_within(desc_normalized_whitespace, 54) |substr| {
+ each_split_within(desc_normalized_whitespace, 54, |substr| {
desc_rows.push(substr.to_owned());
true
- };
+ });
// FIXME: #5516 should be graphemes not codepoints
// wrapped description
fn test_split_within() {
fn t(s: &str, i: uint, u: &[~str]) {
let mut v = ~[];
- do each_split_within(s, i) |s| { v.push(s.to_owned()); true };
+ each_split_within(s, i, |s| { v.push(s.to_owned()); true });
assert!(v.iter().zip(u.iter()).all(|(a,b)| a == b));
}
t("", 0, []);
*/
pub fn matches_path(&self, path: &Path) -> bool {
// FIXME (#9639): This needs to handle non-utf8 paths
- do path.as_str().map_default(false) |s| {
+ path.as_str().map_default(false, |s| {
self.matches(s)
- }
+ })
}
/**
*/
pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool {
// FIXME (#9639): This needs to handle non-utf8 paths
- do path.as_str().map_default(false) |s| {
+ path.as_str().map_default(false, |s| {
self.matches_with(s, options)
- }
+ })
}
fn matches_from(&self,
fn spaces(n: uint) -> ~str {
let mut ss = ~"";
- do n.times {
- ss.push_str(" ");
- }
+ n.times(|| ss.push_str(" "));
return ss;
}
*/
pub fn foldl<T:Clone,U>(z: T, ls: @List<U>, f: |&T, &U| -> T) -> T {
let mut accum: T = z;
- do iter(ls) |elt| { accum = f(&accum, elt);}
+ iter(ls, |elt| accum = f(&accum, elt));
accum
}
/// Returns true if a list contains an element with the given value
pub fn has<T:Eq>(ls: @List<T>, elt: T) -> bool {
let mut found = false;
- do each(ls) |e| {
+ each(ls, |e| {
if *e == elt { found = true; false } else { true }
- };
+ });
return found;
}
impl BitAnd<BigUint, BigUint> for BigUint {
fn bitand(&self, other: &BigUint) -> BigUint {
let new_len = num::min(self.data.len(), other.data.len());
- let anded = do vec::from_fn(new_len) |i| {
+ let anded = vec::from_fn(new_len, |i| {
// i will never be less than the size of either data vector
let ai = self.data[i];
let bi = other.data[i];
ai & bi
- };
+ });
return BigUint::new(anded);
}
}
impl BitOr<BigUint, BigUint> for BigUint {
fn bitor(&self, other: &BigUint) -> BigUint {
let new_len = num::max(self.data.len(), other.data.len());
- let ored = do vec::from_fn(new_len) |i| {
+ let ored = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
ai | bi
- };
+ });
return BigUint::new(ored);
}
}
impl BitXor<BigUint, BigUint> for BigUint {
fn bitxor(&self, other: &BigUint) -> BigUint {
let new_len = num::max(self.data.len(), other.data.len());
- let xored = do vec::from_fn(new_len) |i| {
+ let xored = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
ai ^ bi
- };
+ });
return BigUint::new(xored);
}
}
let new_len = num::max(self.data.len(), other.data.len());
let mut carry = 0;
- let mut sum = do vec::from_fn(new_len) |i| {
+ let mut sum = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
let (hi, lo) = BigDigit::from_uint(
);
carry = hi;
lo
- };
+ });
if carry != 0 { sum.push(carry); }
return BigUint::new(sum);
}
let new_len = num::max(self.data.len(), other.data.len());
let mut borrow = 0;
- let diff = do vec::from_fn(new_len) |i| {
+ let diff = vec::from_fn(new_len, |i| {
let ai = if i < self.data.len() { self.data[i] } else { 0 };
let bi = if i < other.data.len() { other.data[i] } else { 0 };
let (hi, lo) = BigDigit::from_uint(
*/
borrow = if hi == 0 { 1 } else { 0 };
lo
- };
+ });
assert_eq!(borrow, 0); // <=> assert!((self >= other));
return BigUint::new(diff);
if n == 1 { return (*a).clone(); }
let mut carry = 0;
- let mut prod = do a.data.iter().map |ai| {
+ let mut prod = a.data.iter().map(|ai| {
let (hi, lo) = BigDigit::from_uint(
(*ai as uint) * (n as uint) + (carry as uint)
);
carry = hi;
lo
- }.collect::<~[BigDigit]>();
+ }).collect::<~[BigDigit]>();
if carry != 0 { prod.push(carry); }
return BigUint::new(prod);
}
impl ToPrimitive for BigUint {
#[inline]
fn to_i64(&self) -> Option<i64> {
- do self.to_u64().and_then |n| {
+ self.to_u64().and_then(|n| {
// If top bit of u64 is set, it's too large to convert to i64.
if n >> 63 == 0 {
Some(n as i64)
} else {
None
}
- }
+ })
}
#[cfg(target_word_size = "32")]
if n_bits == 0 || self.is_zero() { return (*self).clone(); }
let mut carry = 0;
- let mut shifted = do self.data.iter().map |elem| {
+ let mut shifted = self.data.iter().map(|elem| {
let (hi, lo) = BigDigit::from_uint(
(*elem as uint) << n_bits | (carry as uint)
);
carry = hi;
lo
- }.collect::<~[BigDigit]>();
+ }).collect::<~[BigDigit]>();
if carry != 0 { shifted.push(carry); }
return BigUint::new(shifted);
}
Plus => self.data.to_i64(),
Zero => Some(0),
Minus => {
- do self.data.to_u64().and_then |n| {
+ self.data.to_u64().and_then(|n| {
let m: u64 = 1 << 63;
if n < m {
Some(-(n as i64))
} else {
None
}
- }
+ })
}
}
}
#[inline]
fn from_i64(n: i64) -> Option<BigInt> {
if n > 0 {
- do FromPrimitive::from_u64(n as u64).and_then |n| {
+ FromPrimitive::from_u64(n as u64).and_then(|n| {
Some(BigInt::from_biguint(Plus, n))
- }
+ })
} else if n < 0 {
- do FromPrimitive::from_u64(u64::max_value - (n as u64) + 1).and_then |n| {
- Some(BigInt::from_biguint(Minus, n))
- }
+ FromPrimitive::from_u64(u64::max_value - (n as u64) + 1).and_then(
+ |n| {
+ Some(BigInt::from_biguint(Minus, n))
+ })
} else {
Some(Zero::zero())
}
if n == 0 {
Some(Zero::zero())
} else {
- do FromPrimitive::from_u64(n).and_then |n| {
+ FromPrimitive::from_u64(n).and_then(|n| {
Some(BigInt::from_biguint(Plus, n))
- }
+ })
}
}
}
fn test_rand_range() {
let mut rng = task_rng();
- do 10.times {
+ 10.times(|| {
assert_eq!(rng.gen_bigint_range(&FromPrimitive::from_uint(236).unwrap(),
&FromPrimitive::from_uint(237).unwrap()),
FromPrimitive::from_uint(236).unwrap());
- }
+ });
let l = FromPrimitive::from_uint(403469000 + 2352).unwrap();
let u = FromPrimitive::from_uint(403469000 + 3513).unwrap();
- do 1000.times {
+ 1000.times(|| {
let n: BigUint = rng.gen_biguint_below(&u);
assert!(n < u);
let n: BigUint = rng.gen_biguint_range(&l, &u);
assert!(n >= l);
assert!(n < u);
- }
+ })
}
#[test]
fn test_rand_range() {
let mut rng = task_rng();
- do 10.times {
+ 10.times(|| {
assert_eq!(rng.gen_bigint_range(&FromPrimitive::from_uint(236).unwrap(),
&FromPrimitive::from_uint(237).unwrap()),
FromPrimitive::from_uint(236).unwrap());
- }
+ });
fn check(l: BigInt, u: BigInt) {
let mut rng = task_rng();
- do 1000.times {
+ 1000.times(|| {
let n: BigInt = rng.gen_bigint_range(&l, &u);
assert!(n >= l);
assert!(n < u);
- }
+ });
}
let l: BigInt = FromPrimitive::from_uint(403469000 + 2352).unwrap();
let u: BigInt = FromPrimitive::from_uint(403469000 + 3513).unwrap();
#[bench]
fn factorial_100(bh: &mut BenchHarness) {
- do bh.iter { factorial(100); }
+ bh.iter(|| factorial(100));
}
#[bench]
fn fib_100(bh: &mut BenchHarness) {
- do bh.iter { fib(100); }
+ bh.iter(|| fib(100));
}
#[bench]
fn to_str(bh: &mut BenchHarness) {
let fac = factorial(100);
let fib = fib(100);
- do bh.iter { fac.to_str(); }
- do bh.iter { fib.to_str(); }
+ bh.iter(|| fac.to_str());
+ bh.iter(|| fib.to_str());
}
}
return None
}
let a_option: Option<T> = FromStr::from_str(split[0]);
- do a_option.and_then |a| {
+ a_option.and_then(|a| {
let b_option: Option<T> = FromStr::from_str(split[1]);
- do b_option.and_then |b| {
+ b_option.and_then(|b| {
Some(Ratio::new(a.clone(), b.clone()))
- }
- }
+ })
+ })
}
}
impl<T: FromStrRadix + Clone + Integer + Ord>
} else {
let a_option: Option<T> = FromStrRadix::from_str_radix(split[0],
radix);
- do a_option.and_then |a| {
+ a_option.and_then(|a| {
let b_option: Option<T> =
FromStrRadix::from_str_radix(split[1], radix);
- do b_option.and_then |b| {
+ b_option.and_then(|b| {
Some(Ratio::new(a.clone(), b.clone()))
- }
- }
+ })
+ })
}
}
}
}
let s = s.trim();
let mut bad = false;
- do bad_parse::cond.trap(|_| { debug!("bad"); bad = true }).inside {
+ bad_parse::cond.trap(|_| { debug!("bad"); bad = true }).inside(|| {
let v = parse_iter(&mut s.chars());
if bad || v.to_str() != s.to_owned() {
None
} else {
Some(v)
}
- }
+ })
}
#[test]
impl<'self, S:Encoder,T:Encodable<S>> Encodable<S> for &'self [T] {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
- }
+ })
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for ~[T] {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
- }
+ })
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for ~[T] {
fn decode(d: &mut D) -> ~[T] {
- do d.read_seq |d, len| {
- do vec::from_fn(len) |i| {
+ d.read_seq(|d, len| {
+ vec::from_fn(len, |i| {
d.read_seq_elt(i, |d| Decodable::decode(d))
- }
- }
+ })
+ })
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for @[T] {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s))
}
- }
+ })
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for @[T] {
fn decode(d: &mut D) -> @[T] {
- do d.read_seq |d, len| {
- do at_vec::from_fn(len) |i| {
+ d.read_seq(|d, len| {
+ at_vec::from_fn(len, |i| {
d.read_seq_elt(i, |d| Decodable::decode(d))
- }
- }
+ })
+ })
}
}
impl<S:Encoder,T:Encodable<S>> Encodable<S> for Option<T> {
fn encode(&self, s: &mut S) {
- do s.emit_option |s| {
+ s.emit_option(|s| {
match *self {
None => s.emit_option_none(),
Some(ref v) => s.emit_option_some(|s| v.encode(s)),
}
- }
+ })
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for Option<T> {
fn decode(d: &mut D) -> Option<T> {
- do d.read_option |d, b| {
+ d.read_option(|d, b| {
if b {
Some(Decodable::decode(d))
} else {
None
}
- }
+ })
}
}
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1) => {
- do s.emit_seq(2) |s| {
+ s.emit_seq(2, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
- }
+ })
}
}
}
impl<D:Decoder,T0:Decodable<D>,T1:Decodable<D>> Decodable<D> for (T0, T1) {
fn decode(d: &mut D) -> (T0, T1) {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
assert_eq!(len, 2);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(1, |d| Decodable::decode(d))
)
- }
+ })
}
}
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2) => {
- do s.emit_seq(3) |s| {
+ s.emit_seq(3, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
- }
+ })
}
}
}
T2: Decodable<D>
> Decodable<D> for (T0, T1, T2) {
fn decode(d: &mut D) -> (T0, T1, T2) {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
assert_eq!(len, 3);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(1, |d| Decodable::decode(d)),
d.read_seq_elt(2, |d| Decodable::decode(d))
)
- }
+ })
}
}
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2, ref t3) => {
- do s.emit_seq(4) |s| {
+ s.emit_seq(4, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
s.emit_seq_elt(3, |s| t3.encode(s));
- }
+ })
}
}
}
T3: Decodable<D>
> Decodable<D> for (T0, T1, T2, T3) {
fn decode(d: &mut D) -> (T0, T1, T2, T3) {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
assert_eq!(len, 4);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(2, |d| Decodable::decode(d)),
d.read_seq_elt(3, |d| Decodable::decode(d))
)
- }
+ })
}
}
fn encode(&self, s: &mut S) {
match *self {
(ref t0, ref t1, ref t2, ref t3, ref t4) => {
- do s.emit_seq(5) |s| {
+ s.emit_seq(5, |s| {
s.emit_seq_elt(0, |s| t0.encode(s));
s.emit_seq_elt(1, |s| t1.encode(s));
s.emit_seq_elt(2, |s| t2.encode(s));
s.emit_seq_elt(3, |s| t3.encode(s));
s.emit_seq_elt(4, |s| t4.encode(s));
- }
+ })
}
}
}
T4: Decodable<D>
> Decodable<D> for (T0, T1, T2, T3, T4) {
fn decode(d: &mut D) -> (T0, T1, T2, T3, T4) {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
assert_eq!(len, 5);
(
d.read_seq_elt(0, |d| Decodable::decode(d)),
d.read_seq_elt(3, |d| Decodable::decode(d)),
d.read_seq_elt(4, |d| Decodable::decode(d))
)
- }
+ })
}
}
T: Encodable<S>
> Encodable<S> for DList<T> {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s));
}
- }
+ })
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for DList<T> {
fn decode(d: &mut D) -> DList<T> {
let mut list = DList::new();
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
for i in range(0u, len) {
list.push_back(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
- }
+ });
list
}
}
T: Encodable<S>
> Encodable<S> for RingBuf<T> {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
s.emit_seq_elt(i, |s| e.encode(s));
}
- }
+ })
}
}
impl<D:Decoder,T:Decodable<D>> Decodable<D> for RingBuf<T> {
fn decode(d: &mut D) -> RingBuf<T> {
let mut deque = RingBuf::new();
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
for i in range(0u, len) {
deque.push_back(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
- }
+ });
deque
}
}
V: Encodable<E>
> Encodable<E> for HashMap<K, V> {
fn encode(&self, e: &mut E) {
- do e.emit_map(self.len()) |e| {
+ e.emit_map(self.len(), |e| {
let mut i = 0;
for (key, val) in self.iter() {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
}
- }
+ })
}
}
V: Decodable<D>
> Decodable<D> for HashMap<K, V> {
fn decode(d: &mut D) -> HashMap<K, V> {
- do d.read_map |d, len| {
+ d.read_map(|d, len| {
let mut map = HashMap::with_capacity(len);
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
map.insert(key, val);
}
map
- }
+ })
}
}
T: Encodable<S> + Hash + IterBytes + Eq
> Encodable<S> for HashSet<T> {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
let mut i = 0;
for e in self.iter() {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
}
- }
+ })
}
}
T: Decodable<D> + Hash + IterBytes + Eq
> Decodable<D> for HashSet<T> {
fn decode(d: &mut D) -> HashSet<T> {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
let mut set = HashSet::with_capacity(len);
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
- }
+ })
}
}
V: Encodable<E>
> Encodable<E> for TrieMap<V> {
fn encode(&self, e: &mut E) {
- do e.emit_map(self.len()) |e| {
+ e.emit_map(self.len(), |e| {
let mut i = 0;
- do self.each |key, val| {
+ self.each(|key, val| {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
true
- };
- }
+ });
+ })
}
}
V: Decodable<D>
> Decodable<D> for TrieMap<V> {
fn decode(d: &mut D) -> TrieMap<V> {
- do d.read_map |d, len| {
+ d.read_map(|d, len| {
let mut map = TrieMap::new();
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
let val = d.read_map_elt_val(i, |d| Decodable::decode(d));
map.insert(key, val);
}
map
- }
+ })
}
}
impl<S: Encoder> Encodable<S> for TrieSet {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
let mut i = 0;
- do self.each |e| {
+ self.each(|e| {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
true
- };
- }
+ });
+ })
}
}
impl<D: Decoder> Decodable<D> for TrieSet {
fn decode(d: &mut D) -> TrieSet {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
let mut set = TrieSet::new();
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
- }
+ })
}
}
V: Encodable<E> + Eq
> Encodable<E> for TreeMap<K, V> {
fn encode(&self, e: &mut E) {
- do e.emit_map(self.len()) |e| {
+ e.emit_map(self.len(), |e| {
let mut i = 0;
for (key, val) in self.iter() {
e.emit_map_elt_key(i, |e| key.encode(e));
e.emit_map_elt_val(i, |e| val.encode(e));
i += 1;
}
- }
+ })
}
}
V: Decodable<D> + Eq
> Decodable<D> for TreeMap<K, V> {
fn decode(d: &mut D) -> TreeMap<K, V> {
- do d.read_map |d, len| {
+ d.read_map(|d, len| {
let mut map = TreeMap::new();
for i in range(0u, len) {
let key = d.read_map_elt_key(i, |d| Decodable::decode(d));
let val = d.read_map_elt_val(i, |d| Decodable::decode(d));
map.insert(key, val);
}
map
- }
+ })
}
}
T: Encodable<S> + Eq + TotalOrd
> Encodable<S> for TreeSet<T> {
fn encode(&self, s: &mut S) {
- do s.emit_seq(self.len()) |s| {
+ s.emit_seq(self.len(), |s| {
let mut i = 0;
for e in self.iter() {
s.emit_seq_elt(i, |s| e.encode(s));
i += 1;
}
- }
+ })
}
}
T: Decodable<D> + Eq + TotalOrd
> Decodable<D> for TreeSet<T> {
fn decode(d: &mut D) -> TreeSet<T> {
- do d.read_seq |d, len| {
+ d.read_seq(|d, len| {
let mut set = TreeSet::new();
for i in range(0u, len) {
set.insert(d.read_seq_elt(i, |d| Decodable::decode(d)));
}
set
- }
+ })
}
}
impl<S:Encoder> EncoderHelpers for S {
fn emit_from_vec<T>(&mut self, v: &[T], f: |&mut S, &T|) {
- do self.emit_seq(v.len()) |this| {
+ self.emit_seq(v.len(), |this| {
for (i, e) in v.iter().enumerate() {
- do this.emit_seq_elt(i) |this| {
+ this.emit_seq_elt(i, |this| {
f(this, e)
- }
+ })
}
- }
+ })
}
}
impl<D:Decoder> DecoderHelpers for D {
fn read_to_vec<T>(&mut self, f: |&mut D| -> T) -> ~[T] {
- do self.read_seq |this, len| {
- do vec::from_fn(len) |i| {
+ self.read_seq(|this, len| {
+ vec::from_fn(len, |i| {
this.read_seq_elt(i, |this| f(this))
- }
- }
+ })
+ })
}
}
pub fn acquire(&self) {
unsafe {
let mut waiter_nobe = None;
- do (**self).with |state| {
+ (**self).with(|state| {
state.count -= 1;
if state.count < 0 {
// Create waiter nobe, enqueue ourself, and tell
// outer scope we need to block.
waiter_nobe = Some(state.waiters.wait_end());
}
- }
+ });
// Uncomment if you wish to test for sem races. Not valgrind-friendly.
- /* do 1000.times { task::deschedule(); } */
+ /* 1000.times(|| task::deschedule()); */
// Need to wait outside the exclusive.
if waiter_nobe.is_some() {
let _ = waiter_nobe.unwrap().recv();
pub fn release(&self) {
unsafe {
- do (**self).with |state| {
+ (**self).with(|state| {
state.count += 1;
if state.count <= 0 {
state.waiters.signal();
}
- }
+ })
}
}
pub fn access<U>(&self, blk: || -> U) -> U {
- do (|| {
+ (|| {
self.acquire();
blk()
- }).finally {
+ }).finally(|| {
self.release();
- }
+ })
}
}
fn new_and_signal(count: int, num_condvars: uint)
-> Sem<~[WaitQueue]> {
let mut queues = ~[];
- do num_condvars.times {
- queues.push(WaitQueue::new());
- }
+ num_condvars.times(|| queues.push(WaitQueue::new()));
Sem::new(count, queues)
}
}
let mut out_of_bounds = None;
// Release lock, 'atomically' enqueuing ourselves in so doing.
unsafe {
- do (**self.sem).with |state| {
+ (**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
// Drop the lock.
state.count += 1;
} else {
out_of_bounds = Some(state.blocked.len());
}
- }
+ })
}
// If deschedule checks start getting inserted anywhere, we can be
// killed before or after enqueueing.
- do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") {
+ check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || {
// Unconditionally "block". (Might not actually block if a
// signaller already sent -- I mean 'unconditionally' in contrast
// with acquire().)
- do (|| {
+ (|| {
let _ = WaitEnd.take_unwrap().recv();
- }).finally {
+ }).finally(|| {
// Reacquire the condvar.
match self.order {
- Just(lock) => do lock.access {
+ Just(lock) => lock.access(|| {
self.sem.acquire();
- },
+ }),
}
- }
- }
+ })
+ })
}
/// Wake up a blocked task. Returns false if there was no blocked task.
unsafe {
let mut out_of_bounds = None;
let mut result = false;
- do (**self.sem).with |state| {
+ (**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
result = state.blocked[condvar_id].signal();
} else {
out_of_bounds = Some(state.blocked.len());
}
- }
- do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
- result
- }
+ });
+ check_cvar_bounds(out_of_bounds,
+ condvar_id,
+ "cond.signal_on()",
+ || result)
}
}
let mut out_of_bounds = None;
let mut queue = None;
unsafe {
- do (**self.sem).with |state| {
+ (**self.sem).with(|state| {
if condvar_id < state.blocked.len() {
// To avoid :broadcast_heavy, we make a new waitqueue,
// swap it out with the old one, and broadcast on the
} else {
out_of_bounds = Some(state.blocked.len());
}
- }
- do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
+ });
+ check_cvar_bounds(out_of_bounds,
+ condvar_id,
+ "cond.signal_on()",
+ || {
let queue = queue.take_unwrap();
queue.broadcast()
- }
+ })
}
}
}
// The only other places that condvars get built are rwlock.write_cond()
// and rwlock_write_mode.
pub fn access_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
- do self.access {
- blk(&Condvar { sem: self, order: Nothing, token: NonCopyable })
- }
+ self.access(|| {
+ blk(&Condvar {
+ sem: self,
+ order: Nothing,
+ token: NonCopyable
+ })
+ })
}
}
*/
pub fn read<U>(&self, blk: || -> U) -> U {
unsafe {
- do (&self.order_lock).access {
+ (&self.order_lock).access(|| {
let state = &mut *self.state.get();
let old_count = state.read_count.fetch_add(1, atomics::Acquire);
if old_count == 0 {
(&self.access_lock).acquire();
state.read_mode = true;
}
- }
- do (|| {
+ });
+ (|| {
blk()
- }).finally {
+ }).finally(|| {
let state = &mut *self.state.get();
assert!(state.read_mode);
let old_count = state.read_count.fetch_sub(1, atomics::Release);
// this access MUST NOT go inside the exclusive access.
(&self.access_lock).release();
}
- }
+ })
}
}
*/
pub fn write<U>(&self, blk: || -> U) -> U {
(&self.order_lock).acquire();
- do (&self.access_lock).access {
+ (&self.access_lock).access(|| {
(&self.order_lock).release();
blk()
- }
+ })
}
/**
// The astute reader will also note that making waking writers use the
// order_lock is better for not starving readers.
(&self.order_lock).acquire();
- do (&self.access_lock).access_cond |cond| {
+ (&self.access_lock).access_cond(|cond| {
(&self.order_lock).release();
let opt_lock = Just(&self.order_lock);
blk(&Condvar { sem: cond.sem, order: opt_lock,
token: NonCopyable })
- }
+ })
}
/**
(&self.order_lock).acquire();
(&self.access_lock).acquire();
(&self.order_lock).release();
- do (|| {
+ (|| {
blk(RWLockWriteMode { lock: self, token: NonCopyable })
- }).finally {
+ }).finally(|| {
let writer_or_last_reader;
// Check if we're releasing from read mode or from write mode.
let state = unsafe { &mut *self.state.get() };
// Nobody left inside; release the "reader cloud" lock.
(&self.access_lock).release();
}
- }
+ })
}
/// To be called inside of the write_downgrade block.
-> TaskPool<T> {
assert!(n_tasks >= 1);
- let channels = do vec::from_fn(n_tasks) |i| {
+ let channels = vec::from_fn(n_tasks, |i| {
let (port, chan) = comm::stream::<Msg<T>>();
let init_fn = init_fn_factory();
}
chan
- };
+ });
return TaskPool { channels: channels, next_index: 0 };
}
cap = self.ti.strings.find_equiv(&("op"));
}
}
- let s = do cap.map_default(Err(~"can't find terminfo capability `sgr0`")) |op| {
+ let s = cap.map_default(Err(~"can't find terminfo capability `sgr0`"), |op| {
expand(*op, [], &mut Variables::new())
- };
+ });
if s.is_ok() {
self.out.write(s.unwrap());
} else if self.num_colors > 0 {
match op {
FormatDigit => {
let sign = if flags.sign { SignAll } else { SignNeg };
- do int_to_str_bytes_common(d, radix, sign) |c| {
+ int_to_str_bytes_common(d, radix, sign, |c| {
s.push(c);
- }
+ })
}
_ => {
- do int_to_str_bytes_common(d as uint, radix, SignNone) |c| {
+ int_to_str_bytes_common(d as uint, radix, SignNone, |c| {
s.push(c);
- }
+ })
}
};
if flags.precision > s.len() {
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
- let owned_tests = do tests.map |t| {
+ let owned_tests = tests.map(|t| {
match t.testfn {
StaticTestFn(f) =>
TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
fail!("non-static tests passed to test::test_main_static");
}
}
- };
+ });
test_main(args, owned_tests)
}
callback(TeFiltered(filtered_descs));
let (filtered_tests, filtered_benchs_and_metrics) =
- do filtered_tests.partition |e| {
- match e.testfn {
- StaticTestFn(_) | DynTestFn(_) => true,
- _ => false
- }
- };
+ filtered_tests.partition(|e| {
+ match e.testfn {
+ StaticTestFn(_) | DynTestFn(_) => true,
+ _ => false
+ }
+ });
// It's tempting to just spawn all the tests at once, but since we have
// many tests that run in other processes we would be making a big mess.
};
let diff : MetricDiff = self.compare_to_old(&old, pct);
- let ok = do diff.iter().all() |(_, v)| {
+ let ok = diff.iter().all(|(_, v)| {
match *v {
Regression(_) => false,
_ => true
}
- };
+ });
if ok {
debug!("rewriting file '{:?}' with updated metrics", p);
/// Advance the iterator to the next node (in order). If there are no more nodes, return `None`.
#[inline]
fn next(&mut self) -> Option<&'self T> {
- do self.iter.next().map |(value, _)| { value }
+ self.iter.next().map(|(value, _)| value)
}
}
/// Advance the iterator to the next node (in order). If there are no more nodes, return `None`.
#[inline]
fn next(&mut self) -> Option<&'self T> {
- do self.iter.next().map |(value, _)| { value }
+ self.iter.next().map(|(value, _)| value)
}
}
None => fail!("missing freshness-function for '{}'", kind),
Some(f) => (*f)(name, val)
};
- do self.ctxt.logger.write |lg| {
+ self.ctxt.logger.write(|lg| {
if fresh {
lg.info(format!("{} {}:{} is fresh",
cat, kind, name));
lg.info(format!("{} {}:{} is not fresh",
cat, kind, name))
}
- };
+ });
fresh
}
debug!("exec_work: looking up {} and {:?}", self.fn_name,
self.declared_inputs);
- let cached = do self.ctxt.db.read |db| {
+ let cached = self.ctxt.db.read(|db| {
db.prepare(self.fn_name, &self.declared_inputs)
- };
+ });
match cached {
Some((ref disc_in, ref disc_out, ref res))
let blk = bo.take_unwrap();
let chan = Cell::new(chan);
-// What happens if the task fails?
+ // XXX: What happens if the task fails?
do task::spawn {
let mut exe = Exec {
discovered_inputs: WorkMap::new(),
WorkFromTask(prep, port) => {
let (exe, v) = port.recv();
let s = json_encode(&v);
- do prep.ctxt.db.write |db| {
+ prep.ctxt.db.write(|db| {
db.cache(prep.fn_name,
&prep.declared_inputs,
&exe.discovered_inputs,
&exe.discovered_outputs,
- s);
- }
+ s)
+ });
v
}
}
RWArc::new(Logger::new()),
Arc::new(TreeMap::new()));
- let s = do cx.with_prep("test1") |prep| {
+ let s = cx.with_prep("test1", |prep| {
let subcx = cx.clone();
let pth = pth.clone();
// FIXME (#9639): This needs to handle non-utf8 paths
out.as_str().unwrap().to_owned()
}
- };
+ });
println(s);
}
let hint = hints.map(|hint| {
let mut flags = 0;
- do each_ai_flag |cval, aival| {
+ each_ai_flag(|cval, aival| {
if hint.flags & (aival as uint) != 0 {
flags |= cval as i32;
}
- }
+ });
let socktype = 0;
let protocol = 0;
req.defuse(); // uv callback now owns this request
let mut cx = Ctx { slot: None, status: 0, addrinfo: None };
- do wait_until_woken_after(&mut cx.slot) {
+ wait_until_woken_after(&mut cx.slot, || {
req.set_data(&cx);
- }
+ });
match cx.status {
0 => Ok(accum_addrinfo(cx.addrinfo.get_ref())),
let rustaddr = net::sockaddr_to_socket_addr((*addr).ai_addr);
let mut flags = 0;
- do each_ai_flag |cval, aival| {
+ each_ai_flag(|cval, aival| {
if (*addr).ai_flags & cval != 0 {
flags |= aival as uint;
}
- }
+ });
/* XXX: do we really want to support these
let protocol = match (*addr).ai_protocol {
impl Drop for AsyncWatcher {
fn drop(&mut self) {
unsafe {
- do self.exit_flag.with |should_exit| {
+ self.exit_flag.with(|should_exit| {
// NB: These two things need to happen atomically. Otherwise
// the event handler could wake up due to a *previous*
// signal and see the exit flag, destroying the handle
// before the final send.
*should_exit = true;
uvll::uv_async_send(self.handle)
- }
+ })
}
}
}
pub fn read(loop_: &Loop, fd: c_int, buf: &mut [u8], offset: i64)
-> Result<int, UvError>
{
- do execute(|req, cb| unsafe {
+ execute(|req, cb| unsafe {
uvll::uv_fs_read(loop_.handle, req,
fd, vec::raw::to_ptr(buf) as *c_void,
buf.len() as size_t, offset, cb)
- }).map |req| {
+ }).map(|req| {
req.get_result() as int
- }
+ })
}
pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int)
let mut paths = ~[];
let path = CString::new(path.with_ref(|p| p), false);
let parent = Path::new(path);
- do c_str::from_c_multistring(req.get_ptr() as *libc::c_char,
- Some(req.get_result() as uint)) |rel| {
+ c_str::from_c_multistring(req.get_ptr() as *libc::c_char,
+ Some(req.get_result() as uint),
+ |rel| {
let p = rel.as_bytes();
paths.push(parent.join(p.slice_to(rel.len())));
- };
+ });
paths
})
}
pub fn readlink(loop_: &Loop, path: &CString) -> Result<Path, UvError> {
- do execute(|req, cb| unsafe {
+ execute(|req, cb| unsafe {
uvll::uv_fs_readlink(loop_.handle, req,
path.with_ref(|p| p), cb)
- }).map |req| {
+ }).map(|req| {
Path::new(unsafe {
CString::new(req.get_ptr() as *libc::c_char, false)
})
- }
+ })
}
pub fn chown(loop_: &Loop, path: &CString, uid: int, gid: int)
0 => {
req.fired = true;
let mut slot = None;
- do wait_until_woken_after(&mut slot) {
+ wait_until_woken_after(&mut slot, || {
unsafe { uvll::set_data_for_req(req.req, &slot) }
- }
+ });
match req.get_result() {
n if n < 0 => Err(UvError(n)),
_ => Ok(req),
}
}
n => Err(UvError(n))
-
};
extern fn fs_cb(req: *uvll::uv_fs_t) {
uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null::<()>());
- do wait_until_woken_after(&mut slot) {
+ wait_until_woken_after(&mut slot, || {
uvll::set_data_for_uv_handle(self.uv_handle(), &slot);
- }
+ })
}
extern fn close_cb(handle: *uvll::uv_handle_t) {
unsafe {
assert!((*slot).is_none());
let sched: ~Scheduler = Local::take();
- do sched.deschedule_running_task_and_then |_, task| {
+ sched.deschedule_running_task_and_then(|_, task| {
f();
*slot = Some(task);
- }
+ })
}
}
#[cfg(test)]
fn local_loop() -> &'static mut Loop {
unsafe {
- cast::transmute(do Local::borrow |sched: &mut Scheduler| {
+ cast::transmute(Local::borrow(|sched: &mut Scheduler| {
let mut io = None;
- do sched.event_loop.io |i| {
+ sched.event_loop.io(|i| {
let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) =
io = Some(uvio);
- }
+ });
io.unwrap()
- }.uv_loop())
+ }).uv_loop())
}
}
// get a handle for the current scheduler
macro_rules! get_handle_to_current_scheduler(
- () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() })
+ () => (Local::borrow(|sched: &mut Scheduler| sched.make_handle()))
)
pub fn dumb_println(args: &fmt::Arguments) {
let ip = addr.ip.to_str();
let addr = ip.with_c_str(|p| unsafe { malloc(p, addr.port as c_int) });
- do (|| {
+ (|| {
f(addr)
- }).finally {
+ }).finally(|| {
unsafe { libc::free(addr) };
- }
+ })
}
pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr {
fn test_ip4_conversion() {
use std::rt;
let ip4 = rt::test::next_test_ip4();
- do socket_addr_as_sockaddr(ip4) |addr| {
+ socket_addr_as_sockaddr(ip4, |addr| {
assert_eq!(ip4, sockaddr_to_socket_addr(addr));
- }
+ })
}
#[cfg(test)]
fn test_ip6_conversion() {
use std::rt;
let ip6 = rt::test::next_test_ip6();
- do socket_addr_as_sockaddr(ip6) |addr| {
+ socket_addr_as_sockaddr(ip6, |addr| {
assert_eq!(ip6, sockaddr_to_socket_addr(addr));
- }
+ })
}
enum SocketNameKind {
struct Ctx { status: c_int, task: Option<BlockedTask> }
let tcp = TcpWatcher::new(loop_);
- let ret = do socket_addr_as_sockaddr(address) |addr| {
+ let ret = socket_addr_as_sockaddr(address, |addr| {
let mut req = Request::new(uvll::UV_CONNECT);
let result = unsafe {
uvll::uv_tcp_connect(req.handle, tcp.handle, addr,
}
n => Err(UvError(n))
}
- };
+ });
return match ret {
Ok(()) => Ok(tcp),
impl TcpListener {
pub fn bind(loop_: &mut Loop, address: SocketAddr)
- -> Result<~TcpListener, UvError>
- {
+ -> Result<~TcpListener, UvError> {
let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) };
assert_eq!(unsafe {
uvll::uv_tcp_init(loop_.handle, handle)
impl UdpWatcher {
pub fn bind(loop_: &Loop, address: SocketAddr)
- -> Result<UdpWatcher, UvError>
- {
+ -> Result<UdpWatcher, UvError> {
let udp = UdpWatcher {
handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) },
home: get_handle_to_current_scheduler!(),
buf: Some(slice_to_uv_buf(buf)),
result: None,
};
- do wait_until_woken_after(&mut cx.task) {
+ wait_until_woken_after(&mut cx.task, || {
unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) }
- }
+ });
match cx.result.take_unwrap() {
(n, _) if n < 0 =>
Err(uv_error_to_io_error(UvError(n as c_int))),
0 => {
req.defuse(); // uv callback now owns this request
let mut cx = Ctx { task: None, result: 0 };
- do wait_until_woken_after(&mut cx.task) {
+ wait_until_woken_after(&mut cx.task, || {
req.set_data(&cx);
- }
+ });
match cx.result {
0 => Ok(()),
n => Err(uv_error_to_io_error(UvError(n)))
fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> {
let _m = self.fire_homing_missile();
status_to_io_result(unsafe {
- do multi.to_str().with_c_str |m_addr| {
+ multi.to_str().with_c_str(|m_addr| {
uvll::uv_udp_set_membership(self.handle,
m_addr, ptr::null(),
uvll::UV_JOIN_GROUP)
- }
+ })
})
}
fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> {
let _m = self.fire_homing_missile();
status_to_io_result(unsafe {
- do multi.to_str().with_c_str |m_addr| {
+ multi.to_str().with_c_str(|m_addr| {
uvll::uv_udp_set_membership(self.handle,
m_addr, ptr::null(),
uvll::UV_LEAVE_GROUP)
- }
+ })
})
}
};
unsafe fn local_io() -> &'static mut IoFactory {
- do Local::borrow |sched: &mut Scheduler| {
+ Local::borrow(|sched: &mut Scheduler| {
let mut io = None;
sched.event_loop.io(|i| io = Some(i));
cast::transmute(io.unwrap())
- }
+ })
}
let test_function: proc() = || {
// block self on sched1
let scheduler: ~Scheduler = Local::take();
- do scheduler.deschedule_running_task_and_then |_, task| {
+ scheduler.deschedule_running_task_and_then(|_, task| {
// unblock task
- do task.wake().map |task| {
+ task.wake().map(|task| {
// send self to sched2
tasksFriendHandle.take().send(TaskFromFriend(task));
- };
+ });
// sched1 should now sleep since it has nothing else to do
- }
+ })
// sched2 will wake up and get the task as we do nothing else,
// the function ends and the socket goes out of scope sched2
// will start to run the destructor the destructor will first
let mut req = Request::new(uvll::UV_CONNECT);
let pipe = PipeWatcher::new(loop_, false);
- do wait_until_woken_after(&mut cx.task) {
+ wait_until_woken_after(&mut cx.task, || {
unsafe {
uvll::uv_pipe_connect(req.handle,
pipe.handle(),
}
req.set_data(&cx);
req.defuse(); // uv callback now owns this request
- }
+ });
return match cx.result {
0 => Ok(pipe),
n => Err(UvError(n))
}
}
- let ret = do with_argv(config.program, config.args) |argv| {
- do with_env(config.env) |envp| {
+ let ret = with_argv(config.program, config.args, |argv| {
+ with_env(config.env, |envp| {
let options = uvll::uv_process_options_t {
exit_cb: on_exit,
file: unsafe { *argv },
0 => Ok(process.install()),
err => Err(UvError(err)),
}
- }
- };
+ })
+ });
match ret {
Ok(p) => Ok((p, ret_io)),
let mut wcx = WriteContext { result: 0, task: None, };
req.defuse(); // uv callback now owns this request
- do wait_until_woken_after(&mut wcx.task) {
+ wait_until_woken_after(&mut wcx.task, || {
req.set_data(&wcx);
- }
+ });
self.last_write_req = Some(Request::wrap(req.handle));
match wcx.result {
0 => Ok(()),
let _f = ForbidUnwind::new("timer");
let sched: ~Scheduler = Local::take();
- do sched.deschedule_running_task_and_then |_sched, task| {
+ sched.deschedule_running_task_and_then(|_sched, task| {
self.action = Some(WakeTask(task));
self.start(msecs, 0);
- }
+ });
self.stop();
}
let _f = ForbidUnwind::new("going home");
- let current_sched_id = do Local::borrow |sched: &mut Scheduler| {
+ let current_sched_id = Local::borrow(|sched: &mut Scheduler| {
sched.sched_id()
- };
+ });
// Only need to invoke a context switch if we're not on the right
// scheduler.
if current_sched_id != self.home().sched_id {
let scheduler: ~Scheduler = Local::take();
- do scheduler.deschedule_running_task_and_then |_, task| {
- do task.wake().map |task| {
+ scheduler.deschedule_running_task_and_then(|_, task| {
+ task.wake().map(|task| {
self.home().send(RunOnce(task));
- };
- }
+ });
+ })
}
- let current_sched_id = do Local::borrow |sched: &mut Scheduler| {
+ let current_sched_id = Local::borrow(|sched: &mut Scheduler| {
sched.sched_id()
- };
+ });
assert!(current_sched_id == self.home().sched_id);
self.home().sched_id
// original scheduler. Otherwise, we can just return and keep running
if !Task::on_appropriate_sched() {
let scheduler: ~Scheduler = Local::take();
- do scheduler.deschedule_running_task_and_then |_, task| {
- do task.wake().map |task| {
+ scheduler.deschedule_running_task_and_then(|_, task| {
+ task.wake().map(|task| {
Scheduler::run_task(task);
- };
- }
+ });
+ })
}
util::ignore(f);