The basic example below illustrates this.
```{rust,ignore}
+# #![allow(deprecated)]
use std::sync::Future;
# fn main() {
computations. The workload will be distributed across the available cores.
```{rust,ignore}
+# #![allow(deprecated)]
# use std::num::Float;
# use std::sync::Future;
fn partial_sum(start: uint) -> f64 {
a useful thing to use:
```{rust}
+# #![allow(deprecated)]
use std::sync::Future;
let mut delayed_value = Future::spawn(move || {
```
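The guide hunks above are truncated; as a point of reference, here is a minimal self-contained sketch of the `Future` pattern they annotate, assuming the 2014-era API (the summation workload is illustrative):

```{rust,ignore}
# #![allow(deprecated)]
use std::sync::Future;

fn main() {
    // `spawn` returns immediately; the closure runs on another task.
    let mut delayed_value = Future::spawn(move || {
        // stand-in for an expensive computation
        range(1u, 1000).fold(0u, |sum, n| sum + n)
    });
    // Do other work here, then block until the result is ready.
    println!("value = {}", delayed_value.get());
}
```

`get` takes `&mut self` because the first call forces the computation and caches the result.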
use std::sync::atomic;
-// Note that INIT_ATOMIC_UINT is a *const*, but it may be used to initialize a
+// Note that ATOMIC_UINT_INIT is a *const*, but it may be used to initialize a
// static. This static can be modified, so it is not placed in read-only memory.
-static COUNTER: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static COUNTER: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
// This table is a candidate to be placed in read-only memory.
static TABLE: &'static [uint] = &[1, 2, 3, /* ... */];
}
}
const NUM_ELEMENTS: uint = 2;
- static DROP_COUNTER: AtomicUint = atomic::INIT_ATOMIC_UINT;
+ static DROP_COUNTER: AtomicUint = atomic::ATOMIC_UINT_INIT;
let v = Vec::from_elem(NUM_ELEMENTS, Nothing);
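For context, the drop-count test above relies on a `Drop` impl that bumps a global atomic; a minimal sketch of that pattern (`Nothing` is from the hunk, the `DROPS` name is illustrative):

```{rust,ignore}
use std::sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, SeqCst};

static DROPS: AtomicUint = ATOMIC_UINT_INIT;

struct Nothing;

impl Drop for Nothing {
    fn drop(&mut self) {
        // Each destroyed value bumps the global counter, so a test can
        // assert exactly how many values were dropped.
        DROPS.fetch_add(1, SeqCst);
    }
}

fn main() {
    { let _v = vec![Nothing, Nothing]; } // both elements dropped here
    assert_eq!(DROPS.load(SeqCst), 2);
}
```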
/// An `AtomicBool` initialized to `false`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
-pub const INIT_ATOMIC_BOOL: AtomicBool =
+pub const ATOMIC_BOOL_INIT: AtomicBool =
AtomicBool { v: UnsafeCell { value: 0 } };
/// An `AtomicInt` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
-pub const INIT_ATOMIC_INT: AtomicInt =
+pub const ATOMIC_INT_INIT: AtomicInt =
AtomicInt { v: UnsafeCell { value: 0 } };
/// An `AtomicUint` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
-pub const INIT_ATOMIC_UINT: AtomicUint =
+pub const ATOMIC_UINT_INIT: AtomicUint =
AtomicUint { v: UnsafeCell { value: 0, } };
+/// Deprecated
+#[deprecated = "renamed to ATOMIC_BOOL_INIT"]
+pub const INIT_ATOMIC_BOOL: AtomicBool = ATOMIC_BOOL_INIT;
+/// Deprecated
+#[deprecated = "renamed to ATOMIC_INT_INIT"]
+pub const INIT_ATOMIC_INT: AtomicInt = ATOMIC_INT_INIT;
+/// Deprecated
+#[deprecated = "renamed to ATOMIC_UINT_INIT"]
+pub const INIT_ATOMIC_UINT: AtomicUint = ATOMIC_UINT_INIT;
+
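The old names survive as deprecated aliases, so downstream code keeps compiling and merely gets a warning pointing at the new constant. A small sketch of the migration experience:

```{rust,ignore}
use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, SeqCst};

// warning: use of deprecated item: renamed to ATOMIC_UINT_INIT
static COUNTER: AtomicUint = INIT_ATOMIC_UINT;

fn main() {
    COUNTER.fetch_add(1, SeqCst);
    assert_eq!(COUNTER.load(SeqCst), 1);
}
```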
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: uint = -1;
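Why all ones rather than 1: `fetch_nand` operates bitwise on the underlying word, so `true` must be a bit pattern that NAND maps cleanly onto `false` and back. A quick check of the arithmetic with plain integers:

```{rust,ignore}
fn main() {
    let t: uint = -1; // UINT_TRUE: all bits set (equivalently, !0)
    let f: uint = 0;
    assert_eq!(!(t & t), f); // true  NAND true  == false
    assert_eq!(!(t & f), t); // true  NAND false == true
    assert_eq!(!(f & f), t); // false NAND false == true
    // Had `true` been stored as 1, !(1 & 1) would be neither 0 nor 1.
}
```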
// FIXME: Can't be shared between threads. Dynamic borrows
// FIXME: Relationship to Atomic types and RWLock
+#![stable]
+
use clone::Clone;
use cmp::PartialEq;
use default::Default;
assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}
-static S_BOOL : AtomicBool = INIT_ATOMIC_BOOL;
-static S_INT : AtomicInt = INIT_ATOMIC_INT;
-static S_UINT : AtomicUint = INIT_ATOMIC_UINT;
+static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT;
+static S_INT : AtomicInt = ATOMIC_INT_INIT;
+static S_UINT : AtomicUint = ATOMIC_UINT_INIT;
#[test]
fn static_init() {
#[doc(hidden)]
pub fn mod_enabled(level: u32, module: &str) -> bool {
static INIT: Once = ONCE_INIT;
- INIT.doit(init);
+ INIT.call_once(init);
// It's possible for many threads to be in this function; only one of them
// will perform the global initialization, but all of them will need to check
let output_path = {
let output_template = match requested_output {
Some(ref s) if s.as_slice() == "help" => {
- static PRINTED_YET : atomic::AtomicBool = atomic::INIT_ATOMIC_BOOL;
+ static PRINTED_YET : atomic::AtomicBool = atomic::ATOMIC_BOOL_INIT;
if !PRINTED_YET.load(atomic::SeqCst) {
print_help_message();
PRINTED_YET.store(true, atomic::SeqCst);
}
}
- INIT.doit(|| {
+ INIT.call_once(|| {
llvm::LLVMInitializePasses();
// Only initialize the platforms supported by Rust here, because
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
static mut POISONED: bool = false;
- INIT.doit(|| {
+ INIT.call_once(|| {
if llvm::LLVMStartMultithreaded() != 1 {
// use an extra bool to make sure that all future usage of LLVM
// cannot proceed despite the Once not running more than once.
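That hunk is cut off; a minimal sketch of the once-plus-poison idiom it describes, with a fallible C-style initializer standing in for the LLVM call:

```{rust,ignore}
use std::sync::{Once, ONCE_INIT};

static INIT: Once = ONCE_INIT;
static mut POISONED: bool = false;

// Stand-in for a fallible initializer such as LLVMStartMultithreaded.
unsafe fn fallible_startup() -> int { 1 }

unsafe fn init_subsystem() {
    INIT.call_once(|| {
        if fallible_startup() != 1 {
            // The Once has now "run", so the failure must be recorded in a
            // separate flag that every later caller checks.
            POISONED = true;
        }
    });
    if POISONED {
        panic!("initialization failed; refusing to continue");
    }
}
```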
//! Generic support for building blocking abstractions.
use thread::Thread;
-use sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, Ordering};
+use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sync::Arc;
use kinds::{Sync, Send};
use kinds::marker::{NoSend, NoSync};
pub fn tokens() -> (WaitToken, SignalToken) {
let inner = Arc::new(Inner {
thread: Thread::current(),
- woken: INIT_ATOMIC_BOOL,
+ woken: ATOMIC_BOOL_INIT,
});
let wait_token = WaitToken {
inner: inner.clone(),
use slice::{SliceExt};
use slice;
use vec::Vec;
-use kinds::{Send,Sync};
/// Wraps a Reader and buffers input from it
///
cap: uint,
}
-
-unsafe impl<R: Send> Send for BufferedReader<R> {}
-unsafe impl<R: Send+Sync> Sync for BufferedReader<R> {}
-
-
impl<R: Reader> BufferedReader<R> {
/// Creates a new `BufferedReader` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
static ONCE: Once = ONCE_INIT;
unsafe {
- ONCE.doit(|| {
+ ONCE.call_once(|| {
// The default buffer capacity is 64k, but apparently windows doesn't like
// 64k reads on stdin. See #13304 for details, but the idea is that on
// windows we use a slightly smaller buffer that's been seen to be
return TempDir::new_in(&abs_tmpdir, suffix);
}
- static CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+ static CNT: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
let mut attempts = 0u;
loop {
use os;
use prelude::*;
use std::io::net::ip::*;
-use sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
+use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Relaxed};
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
- static NEXT_OFFSET: AtomicUint = INIT_ATOMIC_UINT;
+ static NEXT_OFFSET: AtomicUint = ATOMIC_UINT_INIT;
base_port() + NEXT_OFFSET.fetch_add(1, Relaxed) as u16
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
- static COUNT: AtomicUint = INIT_ATOMIC_UINT;
+ static COUNT: AtomicUint = ATOMIC_UINT_INIT;
// base port and pid are an attempt to be unique between multiple
// test-runners of different configurations running on one
// buildbot; the count is to be unique within this executable.
use slice::CloneSliceExt;
use str::{Str, StrExt};
use string::{String, ToString};
-use sync::atomic::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
+use sync::atomic::{AtomicInt, ATOMIC_INT_INIT, SeqCst};
use vec::Vec;
#[cfg(unix)] use c_str::ToCStr;
error_string(errno() as uint)
}
-static EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
+static EXIT_STATUS: AtomicInt = ATOMIC_INT_INIT;
/// Sets the process exit code
///
#[cfg(all(target_os = "linux",
any(target_arch = "x86_64", target_arch = "x86", target_arch = "arm")))]
fn is_getrandom_available() -> bool {
- use sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, Relaxed};
+ use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Relaxed};
- static GETRANDOM_CHECKED: AtomicBool = INIT_ATOMIC_BOOL;
- static GETRANDOM_AVAILABLE: AtomicBool = INIT_ATOMIC_BOOL;
+ static GETRANDOM_CHECKED: AtomicBool = ATOMIC_BOOL_INIT;
+ static GETRANDOM_AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
if !GETRANDOM_CHECKED.load(Relaxed) {
let mut buf: [u8; 0] = [];
// For now logging is turned off by default, and this function checks whether
// the magical environment variable is present to see if it's turned on.
pub fn log_enabled() -> bool {
- static ENABLED: atomic::AtomicInt = atomic::INIT_ATOMIC_INT;
+ static ENABLED: atomic::AtomicInt = atomic::ATOMIC_INT_INIT;
match ENABLED.load(atomic::SeqCst) {
1 => return false,
2 => return true,
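The match is truncated above; a sketch of the complete tri-state caching idiom (0 = not yet checked, 1 = off, 2 = on), with the environment-variable name chosen for illustration:

```{rust,ignore}
use std::os;
use std::sync::atomic::{AtomicInt, ATOMIC_INT_INIT, SeqCst};

static ENABLED: AtomicInt = ATOMIC_INT_INIT;

pub fn log_enabled() -> bool {
    // Fast path: reuse a previously computed answer.
    match ENABLED.load(SeqCst) {
        1 => return false,
        2 => return true,
        _ => {}
    }
    // Slow path: consult the environment once, then cache the verdict.
    let on = os::getenv("RUST_LOG").is_some(); // variable name illustrative
    ENABLED.store(if on { 2 } else { 1 }, SeqCst);
    on
}
```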
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use core::prelude::*;
-
-use cell::UnsafeCell;
-use rt::mutex;
-
-/// An OS mutex over some data.
-///
-/// This is not a safe primitive to use, it is unaware of the libgreen
-/// scheduler, as well as being easily susceptible to misuse due to the usage of
-/// the inner NativeMutex.
-///
-/// > **Note**: This type is not recommended for general use. The mutex provided
-/// > as part of `libsync` should almost always be favored.
-pub struct Exclusive<T> {
- lock: mutex::NativeMutex,
- data: UnsafeCell<T>,
-}
-
-unsafe impl<T:Send> Send for Exclusive<T> { }
-
-unsafe impl<T:Send> Sync for Exclusive<T> { }
-
-/// An RAII guard returned via `lock`
-pub struct ExclusiveGuard<'a, T:'a> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- _guard: mutex::LockGuard<'a>,
-}
-
-impl<T: Send> Exclusive<T> {
- /// Creates a new `Exclusive` which will protect the data provided.
- pub fn new(user_data: T) -> Exclusive<T> {
- Exclusive {
- lock: unsafe { mutex::NativeMutex::new() },
- data: UnsafeCell::new(user_data),
- }
- }
-
- /// Acquires this lock, returning a guard which the data is accessed through
- /// and from which that lock will be unlocked.
- ///
- /// This method is unsafe due to many of the same reasons that the
- /// NativeMutex itself is unsafe.
- pub unsafe fn lock<'a>(&'a self) -> ExclusiveGuard<'a, T> {
- let guard = self.lock.lock();
- let data = &mut *self.data.get();
-
- ExclusiveGuard {
- _data: data,
- _guard: guard,
- }
- }
-}
-
-impl<'a, T: Send> ExclusiveGuard<'a, T> {
- // The unsafety here should be ok because our loan guarantees that the lock
- // itself is not moving
- pub fn signal(&self) {
- unsafe { self._guard.signal() }
- }
- pub fn wait(&self) {
- unsafe { self._guard.wait() }
- }
-}
-
-impl<'a, T: Send> Deref<T> for ExclusiveGuard<'a, T> {
- fn deref(&self) -> &T { &*self._data }
-}
-impl<'a, T: Send> DerefMut<T> for ExclusiveGuard<'a, T> {
- fn deref_mut(&mut self) -> &mut T { &mut *self._data }
-}
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
- use sync::Arc;
- use super::Exclusive;
- use task;
-
- #[test]
- fn exclusive_new_arc() {
- unsafe {
- let mut futures = Vec::new();
-
- let num_tasks = 10;
- let count = 10;
-
- let total = Arc::new(Exclusive::new(box 0));
-
- for _ in range(0u, num_tasks) {
- let total = total.clone();
- let (tx, rx) = channel();
- futures.push(rx);
-
- task::spawn(move || {
- for _ in range(0u, count) {
- **total.lock() += 1;
- }
- tx.send(());
- });
- };
-
- for f in futures.iter_mut() { f.recv() }
-
- assert_eq!(**total.lock(), num_tasks * count);
- }
- }
-}
// For more information, see below.
const MAX_CALLBACKS: uint = 16;
static CALLBACKS: [atomic::AtomicUint; MAX_CALLBACKS] =
- [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
- atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
-static CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+ [atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
+ atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT];
+static CALLBACK_CNT: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
// Make sure the default failure handler is registered before we look at the
// callbacks.
static INIT: Once = ONCE_INIT;
- INIT.doit(|| unsafe { register(failure::on_fail); });
+ INIT.call_once(|| unsafe { register(failure::on_fail); });
// First, invoke the user-defined callbacks triggered on thread panic.
//
}
pub fn min_stack() -> uint {
- static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+ static MIN: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
//! Keep a global count of live tasks:
//!
//! ```
-//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//! use std::sync::atomic::{AtomicUint, SeqCst, ATOMIC_UINT_INIT};
//!
-//! static GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
+//! static GLOBAL_TASK_COUNT: AtomicUint = ATOMIC_UINT_INIT;
//!
//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
//! println!("live tasks: {}", old_task_count + 1);
//! ```
-#![allow(deprecated)]
+#![stable]
use alloc::boxed::Box;
use core::mem;
pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomic::{ATOMIC_BOOL_INIT, ATOMIC_INT_INIT, ATOMIC_UINT_INIT};
pub use core::atomic::fence;
pub use core::atomic::Ordering::{mod, Relaxed, Release, Acquire, AcqRel, SeqCst};
p: AtomicUint,
}
+#[allow(deprecated)]
impl<T: Send> AtomicOption<T> {
/// Create a new `AtomicOption`
pub fn new(p: Box<T>) -> AtomicOption<T> {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use kinds::{Send, Sync};
use sync::{Mutex, Condvar};
/// A barrier enables multiple tasks to synchronize the beginning
/// }).detach();
/// }
/// ```
+#[stable]
pub struct Barrier {
lock: Mutex<BarrierState>,
cvar: Condvar,
num_threads: uint,
}
-unsafe impl Send for Barrier {}
-unsafe impl Sync for Barrier {}
-
// The inner state of a double barrier
struct BarrierState {
count: uint,
generation_id: uint,
}
-unsafe impl Send for BarrierState {}
-unsafe impl Sync for BarrierState {}
+/// A result returned from wait.
+///
+/// Currently this opaque structure only has one method, `.is_leader()`. Only
+/// one thread will receive a result for which that method returns `true`.
+#[allow(missing_copy_implementations)]
+pub struct BarrierWaitResult(bool);
impl Barrier {
/// Create a new barrier that can block a given number of threads.
///
/// A barrier will block `n`-1 threads which call `wait` and then wake up
/// all threads at once when the `n`th thread calls `wait`.
+ #[stable]
pub fn new(n: uint) -> Barrier {
Barrier {
lock: Mutex::new(BarrierState {
///
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
- pub fn wait(&self) {
+ ///
+ /// A single (arbitrary) thread will receive a `BarrierWaitResult` that
+ /// returns `true` from `is_leader` when returning from this function, and
+ /// all other threads will receive a result that will return `false` from
+ /// `is_leader`
+ #[stable]
+ pub fn wait(&self) -> BarrierWaitResult {
let mut lock = self.lock.lock().unwrap();
let local_gen = lock.generation_id;
lock.count += 1;
lock.count < self.num_threads {
lock = self.cvar.wait(lock).unwrap();
}
+ BarrierWaitResult(false)
} else {
lock.count = 0;
lock.generation_id += 1;
self.cvar.notify_all();
+ BarrierWaitResult(true)
}
}
}
+impl BarrierWaitResult {
+    /// Returns whether this thread, on returning from `wait`, is the
+    /// "leader thread".
+    ///
+    /// Only one thread will have `true` returned from its result; all other
+    /// threads will have `false` returned.
+ #[stable]
+ pub fn is_leader(&self) -> bool { self.0 }
+}
+
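A brief usage sketch of the new API: every thread rendezvouses at the barrier, and exactly one of them observes `is_leader() == true`:

```{rust,ignore}
use std::sync::{Arc, Barrier};
use std::thread::Thread;

fn main() {
    const N: uint = 4;
    let barrier = Arc::new(Barrier::new(N));
    let guards: Vec<_> = range(0u, N).map(|_| {
        let b = barrier.clone();
        Thread::spawn(move || {
            // ... per-thread work ...
            if b.wait().is_leader() {
                // Exactly one (arbitrary) thread is elected.
                println!("all threads rendezvoused");
            }
        })
    }).collect();
    drop(guards); // the join guards join their threads when dropped
}
```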
#[cfg(test)]
mod tests {
use prelude::*;
- use sync::{Arc, Barrier};
use comm::Empty;
+ use sync::{Arc, Barrier};
#[test]
fn test_barrier() {
- let barrier = Arc::new(Barrier::new(10));
+ const N: uint = 10;
+
+ let barrier = Arc::new(Barrier::new(N));
let (tx, rx) = channel();
- for _ in range(0u, 9) {
+ for _ in range(0u, N - 1) {
let c = barrier.clone();
let tx = tx.clone();
spawn(move|| {
- c.wait();
- tx.send(true);
+ tx.send(c.wait().is_leader());
});
}
_ => false,
});
- barrier.wait();
+ let mut leader_found = barrier.wait().is_leader();
+
// Now, the barrier is cleared and we should get data.
- for _ in range(0u, 9) {
- rx.recv();
+ for _ in range(0u, N - 1) {
+ if rx.recv() {
+ assert!(!leader_found);
+ leader_found = true;
+ }
}
+ assert!(leader_found);
}
}
#[unstable = "may be merged with Condvar in the future"]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
- mutex: atomic::INIT_ATOMIC_UINT,
+ mutex: atomic::ATOMIC_UINT_INIT,
};
impl Condvar {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! A type representing values that may be computed concurrently and operations for working with
-//! them.
+//! A type representing values that may be computed concurrently and operations
+//! for working with them.
//!
//! # Example
//!
//! ```
#![allow(missing_docs)]
+#![unstable = "futures as-is have yet to be deeply reevaluated with recent \
+ core changes to Rust's synchronization story, and will likely \
+ become stable in the future but are unstable until that time"]
use core::prelude::*;
use core::mem::replace;
pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
pub use self::once::{Once, ONCE_INIT};
pub use self::semaphore::{Semaphore, SemaphoreGuard};
-pub use self::barrier::Barrier;
+pub use self::barrier::{Barrier, BarrierWaitResult};
pub use self::poison::{PoisonError, TryLockError, TryLockResult, LockResult};
pub use self::future::Future;
///
/// static START: Once = ONCE_INIT;
///
-/// START.doit(|| {
+/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
+#[stable]
pub struct Once {
mutex: StaticMutex,
cnt: atomic::AtomicInt,
unsafe impl Sync for Once {}
/// Initialization value for static `Once` values.
+#[stable]
pub const ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
- cnt: atomic::INIT_ATOMIC_INT,
- lock_cnt: atomic::INIT_ATOMIC_INT,
+ cnt: atomic::ATOMIC_INT_INIT,
+ lock_cnt: atomic::ATOMIC_INT_INIT,
};
impl Once {
/// Perform an initialization routine once and only once. The given closure
- /// will be executed if this is the first time `doit` has been called, and
- /// otherwise the routine will *not* be invoked.
+ /// will be executed if this is the first time `call_once` has been called,
+ /// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
- pub fn doit<F>(&'static self, f: F) where F: FnOnce() {
+ #[stable]
+ pub fn call_once<F>(&'static self, f: F) where F: FnOnce() {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomic::SeqCst) < 0 {
return
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
- // calling `doit` will return immediately before the initialization has
- // completed.
+ // calling `call_once` will return immediately before the initialization
+ // has completed.
let prev = self.cnt.fetch_add(1, atomic::SeqCst);
if prev < 0 {
// Make sure we never overflow; we'll never have int::MIN
- // simultaneous calls to `doit` to make this value go back to 0
+ // simultaneous calls to `call_once` to make this value go back to 0
self.cnt.store(int::MIN, atomic::SeqCst);
return
}
unsafe { self.mutex.destroy() }
}
}
+
+ /// Deprecated
+ #[deprecated = "renamed to `call_once`"]
+ pub fn doit<F>(&'static self, f: F) where F: FnOnce() { self.call_once(f) }
}
#[cfg(test)]
fn smoke_once() {
static O: Once = ONCE_INIT;
let mut a = 0i;
- O.doit(|| a += 1);
+ O.call_once(|| a += 1);
assert_eq!(a, 1);
- O.doit(|| a += 1);
+ O.call_once(|| a += 1);
assert_eq!(a, 1);
}
spawn(move|| {
for _ in range(0u, 4) { Thread::yield_now() }
unsafe {
- O.doit(|| {
+ O.call_once(|| {
assert!(!run);
run = true;
});
}
unsafe {
- O.doit(|| {
+ O.call_once(|| {
assert!(!run);
run = true;
});
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![unstable = "the interaction between semaphores and the acquisition/release \
+ of resources is currently unclear"]
+
use ops::Drop;
use sync::{Mutex, Condvar};
//! Abstraction of a thread pool for basic parallelism.
+#![unstable = "the semantics of a failing task and whether a thread is \
+ re-attached to a thread pool are somewhat unclear, and the \
+ utility of this type in `std::sync` is questionable with \
+ respect to the jobs of other primitives"]
+
use core::prelude::*;
use thread::Thread;
///
/// This value allows specific configuration of the destructor for a TLS key.
pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
- key: atomic::INIT_ATOMIC_UINT,
+ key: atomic::ATOMIC_UINT_INIT,
};
static INIT_KEYS: Once = ONCE_INIT;
use libc::{mod, c_int, c_char, c_void};
use path::BytesContainer;
use ptr;
-use sync::atomic::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
+use sync::atomic::{AtomicInt, SeqCst};
use sys::fs::FileDesc;
use os;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use prelude::*;
+
use alloc::arc::Arc;
use libc;
use c_str::CString;
use mem;
use sync::{atomic, Mutex};
use io::{mod, IoResult, IoError};
-use prelude::*;
use sys::{mod, timer, retry, c, set_nonblocking, wouldblock};
use sys::fs::{fd_t, FileDesc};
write_deadline: u64,
}
-unsafe impl Send for UnixStream {}
-unsafe impl Sync for UnixStream {}
-
impl UnixStream {
pub fn connect(addr: &CString,
timeout: Option<u64>) -> IoResult<UnixStream> {
path: CString,
}
+// we currently own the CString, so these impls should be safe
unsafe impl Send for UnixListener {}
unsafe impl Sync for UnixListener {}
closed: atomic::AtomicBool,
}
-unsafe impl Send for AcceptorInner {}
-unsafe impl Sync for AcceptorInner {}
-
impl UnixAcceptor {
pub fn fd(&self) -> fd_t { self.inner.listener.fd() }
// instead of ()
HELPER.boot(|| {}, helper);
- static ID: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+ static ID: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
let id = ID.fetch_add(1, atomic::Relaxed);
Ok(Timer {
id: id,
unsafe {
static START: Once = ONCE_INIT;
- START.doit(|| {
+ START.call_once(|| {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
pub struct Mutex { inner: atomic::AtomicUint }
-pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT };
+pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::ATOMIC_UINT_INIT };
unsafe impl Sync for Mutex {}
denom: 0 };
static ONCE: sync::Once = sync::ONCE_INIT;
unsafe {
- ONCE.doit(|| {
+ ONCE.call_once(|| {
imp::mach_timebase_info(&mut TIMEBASE);
});
let time = imp::mach_absolute_time();
denom: 0 };
static ONCE: std::sync::Once = std::sync::ONCE_INIT;
unsafe {
- ONCE.doit(|| {
+ ONCE.call_once(|| {
imp::mach_timebase_info(&mut TIMEBASE);
});
let time = imp::mach_absolute_time();
use std::sync::atomic;
pub const C1: uint = 1;
-pub const C2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+pub const C2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
pub const C3: fn() = foo;
pub const C4: uint = C1 * C1 + C1 / C1;
pub const C5: &'static uint = &C4;
pub static S1: uint = 3;
-pub static S2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+pub static S2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
fn foo() {}
extern crate arena;
use std::iter::range_step;
-use std::sync::Future;
+use std::thread::Thread;
use arena::TypedArena;
enum Tree<'a> {
let mut messages = range_step(min_depth, max_depth + 1, 2).map(|depth| {
use std::num::Int;
let iterations = 2i.pow((max_depth - depth + min_depth) as uint);
- Future::spawn(move|| {
+ Thread::spawn(move|| {
let mut chk = 0;
for i in range(1, iterations + 1) {
let arena = TypedArena::new();
format!("{}\t trees of depth {}\t check: {}",
iterations * 2, depth, chk)
})
- }).collect::<Vec<Future<String>>>();
+ }).collect::<Vec<_>>();
- for message in messages.iter_mut() {
- println!("{}", *message.get_ref());
+ for message in messages.into_iter() {
+ println!("{}", message.join().ok().unwrap());
}
println!("long lived tree of depth {}\t check: {}",
#![feature(slicing_syntax)]
use std::{cmp, iter, mem};
-use std::sync::Future;
+use std::thread::Thread;
fn rotate(x: &mut [i32]) {
let mut prev = x[0];
for (i, j) in range(0, N).zip(iter::count(0, k)) {
let max = cmp::min(j+k, perm.max());
- futures.push(Future::spawn(move|| {
+ futures.push(Thread::spawn(move|| {
work(perm, j as uint, max as uint)
}))
}
let mut checksum = 0;
let mut maxflips = 0;
- for fut in futures.iter_mut() {
- let (cs, mf) = fut.get();
+ for fut in futures.into_iter() {
+ let (cs, mf) = fut.join().ok().unwrap();
checksum += cs;
maxflips = cmp::max(maxflips, mf);
}
use std::ptr;
fn main() {
- let x = INIT_ATOMIC_BOOL;
+ let x = ATOMIC_BOOL_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
- let x = INIT_ATOMIC_INT;
+ let x = ATOMIC_INT_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
- let x = INIT_ATOMIC_UINT;
+ let x = ATOMIC_UINT_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of dereference
use std::sync::atomic;
const C1: uint = 1;
-const C2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+const C2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
const C3: fn() = foo;
const C4: uint = C1 * C1 + C1 / C1;
const C5: &'static uint = &C4;
};
static S1: uint = 3;
-static S2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static S2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
mod test {
static A: uint = 4;
// except according to those terms.
use std::task;
-use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
+use std::sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Relaxed};
use std::rand::{thread_rng, Rng, Rand};
const REPEATS: uint = 5;
static drop_counts: [AtomicUint; MAX_LEN] =
// FIXME #5244: AtomicUint is not Copy.
[
-    INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
-    INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
-    INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
-    INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
+    ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
+    ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
+    ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
+    ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
];
-static creation_count: AtomicUint = INIT_ATOMIC_UINT;
+static creation_count: AtomicUint = ATOMIC_UINT_INIT;
#[deriving(Clone, PartialEq, PartialOrd, Eq, Ord)]
struct DropCounter { x: uint, creation_id: uint }